code (string, lengths 13 to 6.09M) | order_type (string, 2 classes) | original_example (dict) | step_ids (list, lengths 1 to 5)
---|---|---|---|
from app import create_app, db
import unittest
import json
class Test(unittest.TestCase):
def setUp(self):
"""Before each test, set up a blank database"""
self.app = create_app("configmodule.TestingConfig")
self.app.testing = True
self.client = self.app.test_client()
with self.app.app_context():
db.drop_all()
db.create_all()
# Called after every test
def tearDown(self):
with self.app.app_context():
db.session.remove()
db.drop_all()
def test_user(self):
# Create user
rv = self.client.post(
"/api/users/",
data=json.dumps({"email": "[email protected]", "password": "abc123"}),
)
rv_dict = json.loads(rv.data.decode())
assert rv.status_code == 200
assert rv_dict["id"] == 1
assert "password" not in rv_dict
assert rv_dict["email"] == "[email protected]"
# Try logging in with the wrong PASSWORD
rv = self.client.post("/api/users/login", data=json.dumps({"email": "[email protected]", "password": "abc1234"}))
assert rv.status_code == 401
# Try logging in with the wrong Email
rv = self.client.post("/api/users/login", data=json.dumps({"email": "[email protected]", "password": "abc1234"}))
assert rv.status_code == 401
# Try logging in with the right PASSWORD
rv = self.client.post("/api/users/login", data=json.dumps({"email": "[email protected]", "password": "abc123"}))
rv_dict = json.loads(rv.data.decode())
assert rv.status_code == 200
headers = {"Authorization": "Bearer " + rv_dict["access_token"]}
# Get the current user
rv = self.client.get("/api/users/", headers=headers)
rv_dict = json.loads(rv.data.decode())
assert rv.status_code == 200
assert rv_dict["email"] == "[email protected]"
rv = self.client.put("/api/users/", data=json.dumps({"name": "carl carlsson"}), headers=headers)
rv_dict = json.loads(rv.data.decode())
assert rv.status_code == 200
assert rv_dict["name"] == "Carl Carlsson"
def test_empty(self):
# Try logging in without any users
rv = self.client.post("/api/users/login", data=json.dumps({"email": "[email protected]", "password": "abc123"}))
assert rv.status_code == 401
if __name__ == "__main__":
unittest.main()
|
normal
|
{
"blob_id": "56b4262e88793be366d8ffe0fe4427fdb2a99bd7",
"index": 7447,
"step-1": "<mask token>\n\n\nclass Test(unittest.TestCase):\n\n def setUp(self):\n \"\"\"Before each test, set up a blank database\"\"\"\n self.app = create_app('configmodule.TestingConfig')\n self.app.testing = True\n self.client = self.app.test_client()\n with self.app.app_context():\n db.drop_all()\n db.create_all()\n <mask token>\n\n def test_user(self):\n rv = self.client.post('/api/users/', data=json.dumps({'email':\n '[email protected]', 'password': 'abc123'}))\n rv_dict = json.loads(rv.data.decode())\n assert rv.status_code == 200\n assert rv_dict['id'] == 1\n assert 'password' not in rv_dict\n assert rv_dict['email'] == '[email protected]'\n rv = self.client.post('/api/users/login', data=json.dumps({'email':\n '[email protected]', 'password': 'abc1234'}))\n assert rv.status_code == 401\n rv = self.client.post('/api/users/login', data=json.dumps({'email':\n '[email protected]', 'password': 'abc1234'}))\n assert rv.status_code == 401\n rv = self.client.post('/api/users/login', data=json.dumps({'email':\n '[email protected]', 'password': 'abc123'}))\n rv_dict = json.loads(rv.data.decode())\n assert rv.status_code == 200\n headers = {'Authorization': 'Bearer ' + rv_dict['access_token']}\n rv = self.client.get('/api/users/', headers=headers)\n rv_dict = json.loads(rv.data.decode())\n assert rv.status_code == 200\n assert rv_dict['email'] == '[email protected]'\n rv = self.client.put('/api/users/', data=json.dumps({'name':\n 'carl carlsson'}), headers=headers)\n rv_dict = json.loads(rv.data.decode())\n assert rv.status_code == 200\n assert rv_dict['name'] == 'Carl Carlsson'\n\n def test_empty(self):\n rv = self.client.post('/api/users/login', data=json.dumps({'email':\n '[email protected]', 'password': 'abc123'}))\n assert rv.status_code == 401\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass Test(unittest.TestCase):\n\n def setUp(self):\n \"\"\"Before each test, set up a blank database\"\"\"\n self.app = create_app('configmodule.TestingConfig')\n self.app.testing = True\n self.client = self.app.test_client()\n with self.app.app_context():\n db.drop_all()\n db.create_all()\n\n def tearDown(self):\n with self.app.app_context():\n db.session.remove()\n db.drop_all()\n\n def test_user(self):\n rv = self.client.post('/api/users/', data=json.dumps({'email':\n '[email protected]', 'password': 'abc123'}))\n rv_dict = json.loads(rv.data.decode())\n assert rv.status_code == 200\n assert rv_dict['id'] == 1\n assert 'password' not in rv_dict\n assert rv_dict['email'] == '[email protected]'\n rv = self.client.post('/api/users/login', data=json.dumps({'email':\n '[email protected]', 'password': 'abc1234'}))\n assert rv.status_code == 401\n rv = self.client.post('/api/users/login', data=json.dumps({'email':\n '[email protected]', 'password': 'abc1234'}))\n assert rv.status_code == 401\n rv = self.client.post('/api/users/login', data=json.dumps({'email':\n '[email protected]', 'password': 'abc123'}))\n rv_dict = json.loads(rv.data.decode())\n assert rv.status_code == 200\n headers = {'Authorization': 'Bearer ' + rv_dict['access_token']}\n rv = self.client.get('/api/users/', headers=headers)\n rv_dict = json.loads(rv.data.decode())\n assert rv.status_code == 200\n assert rv_dict['email'] == '[email protected]'\n rv = self.client.put('/api/users/', data=json.dumps({'name':\n 'carl carlsson'}), headers=headers)\n rv_dict = json.loads(rv.data.decode())\n assert rv.status_code == 200\n assert rv_dict['name'] == 'Carl Carlsson'\n\n def test_empty(self):\n rv = self.client.post('/api/users/login', data=json.dumps({'email':\n '[email protected]', 'password': 'abc123'}))\n assert rv.status_code == 401\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass Test(unittest.TestCase):\n\n def setUp(self):\n \"\"\"Before each test, set up a blank database\"\"\"\n self.app = create_app('configmodule.TestingConfig')\n self.app.testing = True\n self.client = self.app.test_client()\n with self.app.app_context():\n db.drop_all()\n db.create_all()\n\n def tearDown(self):\n with self.app.app_context():\n db.session.remove()\n db.drop_all()\n\n def test_user(self):\n rv = self.client.post('/api/users/', data=json.dumps({'email':\n '[email protected]', 'password': 'abc123'}))\n rv_dict = json.loads(rv.data.decode())\n assert rv.status_code == 200\n assert rv_dict['id'] == 1\n assert 'password' not in rv_dict\n assert rv_dict['email'] == '[email protected]'\n rv = self.client.post('/api/users/login', data=json.dumps({'email':\n '[email protected]', 'password': 'abc1234'}))\n assert rv.status_code == 401\n rv = self.client.post('/api/users/login', data=json.dumps({'email':\n '[email protected]', 'password': 'abc1234'}))\n assert rv.status_code == 401\n rv = self.client.post('/api/users/login', data=json.dumps({'email':\n '[email protected]', 'password': 'abc123'}))\n rv_dict = json.loads(rv.data.decode())\n assert rv.status_code == 200\n headers = {'Authorization': 'Bearer ' + rv_dict['access_token']}\n rv = self.client.get('/api/users/', headers=headers)\n rv_dict = json.loads(rv.data.decode())\n assert rv.status_code == 200\n assert rv_dict['email'] == '[email protected]'\n rv = self.client.put('/api/users/', data=json.dumps({'name':\n 'carl carlsson'}), headers=headers)\n rv_dict = json.loads(rv.data.decode())\n assert rv.status_code == 200\n assert rv_dict['name'] == 'Carl Carlsson'\n\n def test_empty(self):\n rv = self.client.post('/api/users/login', data=json.dumps({'email':\n '[email protected]', 'password': 'abc123'}))\n assert rv.status_code == 401\n\n\nif __name__ == '__main__':\n unittest.main()\n",
"step-4": "from app import create_app, db\nimport unittest\nimport json\n\n\nclass Test(unittest.TestCase):\n\n def setUp(self):\n \"\"\"Before each test, set up a blank database\"\"\"\n self.app = create_app('configmodule.TestingConfig')\n self.app.testing = True\n self.client = self.app.test_client()\n with self.app.app_context():\n db.drop_all()\n db.create_all()\n\n def tearDown(self):\n with self.app.app_context():\n db.session.remove()\n db.drop_all()\n\n def test_user(self):\n rv = self.client.post('/api/users/', data=json.dumps({'email':\n '[email protected]', 'password': 'abc123'}))\n rv_dict = json.loads(rv.data.decode())\n assert rv.status_code == 200\n assert rv_dict['id'] == 1\n assert 'password' not in rv_dict\n assert rv_dict['email'] == '[email protected]'\n rv = self.client.post('/api/users/login', data=json.dumps({'email':\n '[email protected]', 'password': 'abc1234'}))\n assert rv.status_code == 401\n rv = self.client.post('/api/users/login', data=json.dumps({'email':\n '[email protected]', 'password': 'abc1234'}))\n assert rv.status_code == 401\n rv = self.client.post('/api/users/login', data=json.dumps({'email':\n '[email protected]', 'password': 'abc123'}))\n rv_dict = json.loads(rv.data.decode())\n assert rv.status_code == 200\n headers = {'Authorization': 'Bearer ' + rv_dict['access_token']}\n rv = self.client.get('/api/users/', headers=headers)\n rv_dict = json.loads(rv.data.decode())\n assert rv.status_code == 200\n assert rv_dict['email'] == '[email protected]'\n rv = self.client.put('/api/users/', data=json.dumps({'name':\n 'carl carlsson'}), headers=headers)\n rv_dict = json.loads(rv.data.decode())\n assert rv.status_code == 200\n assert rv_dict['name'] == 'Carl Carlsson'\n\n def test_empty(self):\n rv = self.client.post('/api/users/login', data=json.dumps({'email':\n '[email protected]', 'password': 'abc123'}))\n assert rv.status_code == 401\n\n\nif __name__ == '__main__':\n unittest.main()\n",
"step-5": "from app import create_app, db\nimport unittest\nimport json\n\n\nclass Test(unittest.TestCase):\n def setUp(self):\n \"\"\"Before each test, set up a blank database\"\"\"\n self.app = create_app(\"configmodule.TestingConfig\")\n self.app.testing = True\n\n self.client = self.app.test_client()\n\n with self.app.app_context():\n db.drop_all()\n db.create_all()\n\n # Called after every test\n def tearDown(self):\n with self.app.app_context():\n db.session.remove()\n db.drop_all()\n\n def test_user(self):\n # Create user\n rv = self.client.post(\n \"/api/users/\",\n data=json.dumps({\"email\": \"[email protected]\", \"password\": \"abc123\"}),\n )\n rv_dict = json.loads(rv.data.decode())\n\n assert rv.status_code == 200\n assert rv_dict[\"id\"] == 1\n assert \"password\" not in rv_dict\n assert rv_dict[\"email\"] == \"[email protected]\"\n\n # Try loggin with wrong PASSWORD\n rv = self.client.post(\"/api/users/login\", data=json.dumps({\"email\": \"[email protected]\", \"password\": \"abc1234\"}))\n assert rv.status_code == 401\n\n # Try loggin with wrong Email\n rv = self.client.post(\"/api/users/login\", data=json.dumps({\"email\": \"[email protected]\", \"password\": \"abc1234\"}))\n assert rv.status_code == 401\n\n # Try loggin with right PASSWORD\n rv = self.client.post(\"/api/users/login\", data=json.dumps({\"email\": \"[email protected]\", \"password\": \"abc123\"}))\n rv_dict = json.loads(rv.data.decode())\n assert rv.status_code == 200\n headers = {\"Authorization\": \"Bearer \" + rv_dict[\"access_token\"]}\n\n # Get the current user\n rv = self.client.get(\"/api/users/\", headers=headers)\n rv_dict = json.loads(rv.data.decode())\n assert rv.status_code == 200\n assert rv_dict[\"email\"] == \"[email protected]\"\n\n rv = self.client.put(\"/api/users/\", data=json.dumps({\"name\": \"carl carlsson\"}), headers=headers)\n rv_dict = json.loads(rv.data.decode())\n assert rv.status_code == 200\n assert rv_dict[\"name\"] == \"Carl Carlsson\"\n\n def test_empty(self):\n # Try loggin withou any users\n rv = self.client.post(\"/api/users/login\", data=json.dumps({\"email\": \"[email protected]\", \"password\": \"abc123\"}))\n assert rv.status_code == 401\n\n\nif __name__ == \"__main__\":\n unittest.main()\n",
"step-ids": [
4,
5,
6,
7,
8
]
}
|
[
4,
5,
6,
7,
8
] |
# Generated by Django 2.2.4 on 2019-08-19 19:14
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('application', '0003_auto_20190818_1623'),
]
operations = [
migrations.AlterField(
model_name='user',
name='visited',
field=models.ManyToManyField(related_name='visitors', to='application.EscapeRoom'),
),
]
|
normal
|
{
"blob_id": "913e1f5a0af436ef081ab567c44b4149299d0ec6",
"index": 3154,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass Migration(migrations.Migration):\n <mask token>\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass Migration(migrations.Migration):\n dependencies = [('application', '0003_auto_20190818_1623')]\n operations = [migrations.AlterField(model_name='user', name='visited',\n field=models.ManyToManyField(related_name='visitors', to=\n 'application.EscapeRoom'))]\n",
"step-4": "from django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n dependencies = [('application', '0003_auto_20190818_1623')]\n operations = [migrations.AlterField(model_name='user', name='visited',\n field=models.ManyToManyField(related_name='visitors', to=\n 'application.EscapeRoom'))]\n",
"step-5": "# Generated by Django 2.2.4 on 2019-08-19 19:14\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('application', '0003_auto_20190818_1623'),\n ]\n\n operations = [\n migrations.AlterField(\n model_name='user',\n name='visited',\n field=models.ManyToManyField(related_name='visitors', to='application.EscapeRoom'),\n ),\n ]\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
from typing import Tuple, Union
from webdnn.graph.graph import Graph
from webdnn.graph.operators.zero_padding_2d import ZeroPadding2D
from webdnn.graph.operators.convolution2d import Convolution2D
from webdnn.graph.operators.max_pooling_2d import MaxPooling2D
from webdnn.graph.operators.average_pooling_2d import AveragePooling2D
from webdnn.graph.optimize_rule import OptimizeRule
from webdnn.graph.traverse import search_sub_structure
from webdnn.graph.variable import Variable
from webdnn.util import flags
class ConcatZeroPadding(OptimizeRule):
def optimize(self, graph: Graph) -> Tuple[Graph, bool]:
"""
Merges padding of ZeroPadding2D and Convolution2D | MaxPooling2D | AveragePooling2D layer
Args:
graph:
Returns:
"""
# this optimization is always applied (since backends do not implement padding)
flag_changed = False
for tail_layer in [Convolution2D, MaxPooling2D, AveragePooling2D]:
matches = search_sub_structure(graph, [ZeroPadding2D, Variable, tail_layer])
while len(matches) > 0:
match = matches[0]
a1: ZeroPadding2D = match[0]
a2: Union[Convolution2D, MaxPooling2D, AveragePooling2D] = match[2]
zero_pad = a1.parameters["padding"]
conv_pad = a2.parameters["padding"]
a2.parameters["padding"] = (zero_pad[0] + conv_pad[0], zero_pad[1] + conv_pad[1])
x1 = a1.inputs["x"]
x2 = a2.inputs["x"]
a1.remove_all()
# replace_input checks that x1 and x2 have the same shape, but that restriction does not hold here.
a2.remove_input(x2)
a2.append_input("x", x1)
flag_changed = True
matches = search_sub_structure(graph, [ZeroPadding2D, Variable, tail_layer])
return graph, flag_changed
|
normal
|
{
"blob_id": "687f7f4908e8a5448335f636edf74a627f03c306",
"index": 9110,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass ConcatZeroPadding(OptimizeRule):\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass ConcatZeroPadding(OptimizeRule):\n\n def optimize(self, graph: Graph) ->Tuple[Graph, bool]:\n \"\"\"\n Merges padding of ZeroPadding2D and Convolution2D | MaxPooling2D | AveragePooling2D layer\n Args:\n graph:\n\n Returns:\n\n \"\"\"\n flag_changed = False\n for tail_layer in [Convolution2D, MaxPooling2D, AveragePooling2D]:\n matches = search_sub_structure(graph, [ZeroPadding2D, Variable,\n tail_layer])\n while len(matches) > 0:\n match = matches[0]\n a1: ZeroPadding2D = match[0]\n a2: Union[Convolution2D, MaxPooling2D, AveragePooling2D\n ] = match[2]\n zero_pad = a1.parameters['padding']\n conv_pad = a2.parameters['padding']\n a2.parameters['padding'] = zero_pad[0] + conv_pad[0], zero_pad[\n 1] + conv_pad[1]\n x1 = a1.inputs['x']\n x2 = a2.inputs['x']\n a1.remove_all()\n a2.remove_input(x2)\n a2.append_input('x', x1)\n flag_changed = True\n matches = search_sub_structure(graph, [ZeroPadding2D,\n Variable, tail_layer])\n return graph, flag_changed\n",
"step-4": "from typing import Tuple, Union\nfrom webdnn.graph.graph import Graph\nfrom webdnn.graph.operators.zero_padding_2d import ZeroPadding2D\nfrom webdnn.graph.operators.convolution2d import Convolution2D\nfrom webdnn.graph.operators.max_pooling_2d import MaxPooling2D\nfrom webdnn.graph.operators.average_pooling_2d import AveragePooling2D\nfrom webdnn.graph.optimize_rule import OptimizeRule\nfrom webdnn.graph.traverse import search_sub_structure\nfrom webdnn.graph.variable import Variable\nfrom webdnn.util import flags\n\n\nclass ConcatZeroPadding(OptimizeRule):\n\n def optimize(self, graph: Graph) ->Tuple[Graph, bool]:\n \"\"\"\n Merges padding of ZeroPadding2D and Convolution2D | MaxPooling2D | AveragePooling2D layer\n Args:\n graph:\n\n Returns:\n\n \"\"\"\n flag_changed = False\n for tail_layer in [Convolution2D, MaxPooling2D, AveragePooling2D]:\n matches = search_sub_structure(graph, [ZeroPadding2D, Variable,\n tail_layer])\n while len(matches) > 0:\n match = matches[0]\n a1: ZeroPadding2D = match[0]\n a2: Union[Convolution2D, MaxPooling2D, AveragePooling2D\n ] = match[2]\n zero_pad = a1.parameters['padding']\n conv_pad = a2.parameters['padding']\n a2.parameters['padding'] = zero_pad[0] + conv_pad[0], zero_pad[\n 1] + conv_pad[1]\n x1 = a1.inputs['x']\n x2 = a2.inputs['x']\n a1.remove_all()\n a2.remove_input(x2)\n a2.append_input('x', x1)\n flag_changed = True\n matches = search_sub_structure(graph, [ZeroPadding2D,\n Variable, tail_layer])\n return graph, flag_changed\n",
"step-5": "from typing import Tuple, Union\n\nfrom webdnn.graph.graph import Graph\nfrom webdnn.graph.operators.zero_padding_2d import ZeroPadding2D\nfrom webdnn.graph.operators.convolution2d import Convolution2D\nfrom webdnn.graph.operators.max_pooling_2d import MaxPooling2D\nfrom webdnn.graph.operators.average_pooling_2d import AveragePooling2D\nfrom webdnn.graph.optimize_rule import OptimizeRule\nfrom webdnn.graph.traverse import search_sub_structure\nfrom webdnn.graph.variable import Variable\nfrom webdnn.util import flags\n\n\nclass ConcatZeroPadding(OptimizeRule):\n def optimize(self, graph: Graph) -> Tuple[Graph, bool]:\n \"\"\"\n Merges padding of ZeroPadding2D and Convolution2D | MaxPooling2D | AveragePooling2D layer\n Args:\n graph:\n\n Returns:\n\n \"\"\"\n # this optimization is always applied (since backends do not implement padding)\n flag_changed = False\n\n for tail_layer in [Convolution2D, MaxPooling2D, AveragePooling2D]:\n matches = search_sub_structure(graph, [ZeroPadding2D, Variable, tail_layer])\n while len(matches) > 0:\n match = matches[0]\n a1: ZeroPadding2D = match[0]\n a2: Union[Convolution2D, MaxPooling2D, AveragePooling2D] = match[2]\n\n zero_pad = a1.parameters[\"padding\"]\n conv_pad = a2.parameters[\"padding\"]\n a2.parameters[\"padding\"] = (zero_pad[0] + conv_pad[0], zero_pad[1] + conv_pad[1])\n\n x1 = a1.inputs[\"x\"]\n x2 = a2.inputs[\"x\"]\n\n a1.remove_all()\n # replace_input checks if the shape of x1 and x2 are same, but this restriction does not hold.\n a2.remove_input(x2)\n a2.append_input(\"x\", x1)\n\n flag_changed = True\n matches = search_sub_structure(graph, [ZeroPadding2D, Variable, tail_layer])\n\n return graph, flag_changed\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
"""Sherlock Tests
This package contains various submodules used to run tests.
"""
import sys
import os
import subprocess as sp
from time import sleep
# uncomment this if using nose
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '../sherlock')))
# import sherlock
|
normal
|
{
"blob_id": "8f7b1313ba31d761edcadac7b0d04b62f7af8dff",
"index": 4759,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nsys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__),\n '../sherlock')))\n",
"step-3": "<mask token>\nimport sys\nimport os\nimport subprocess as sp\nfrom time import sleep\nsys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__),\n '../sherlock')))\n",
"step-4": "\"\"\"Sherlock Tests\r\n\r\nThis package contains various submodules used to run tests.\r\n\"\"\"\r\nimport sys\r\nimport os\r\nimport subprocess as sp\r\nfrom time import sleep\r\n\r\n# uncomment this if using nose\r\nsys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '../sherlock')))\r\n\r\n# import sherlock",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
# We don't need no stinking models but django likes this file to be there if you are an app
|
normal
|
{
"blob_id": "a1304f290e0346e7aa2e22d9c2d3e7f735b1e8e7",
"index": 96,
"step-1": "\n# We don't need no stinking models but django likes this file to be there if you are an app\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
1
]
}
|
[
1
] |
import json
from django import template
from django.core.serializers.json import DjangoJSONEncoder
from django.utils.safestring import mark_safe
register = template.Library()
@register.filter
def jsonify(object):
return mark_safe(json.dumps(object, cls=DjangoJSONEncoder))
@register.simple_tag
def get_crop_url(crop, width=None, scale=1):
if width:
return crop.url_at_width(width * scale)
else:
return crop.url_at_width(crop.width * scale)
@register.assignment_tag
def get_available_crop_scales(crop, width):
return crop.available_scales(width=width)
|
normal
|
{
"blob_id": "987579da6b7ae208a66e375e0c9eca32b97199c5",
"index": 4704,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\[email protected]\ndef jsonify(object):\n return mark_safe(json.dumps(object, cls=DjangoJSONEncoder))\n\n\[email protected]_tag\ndef get_crop_url(crop, width=None, scale=1):\n if width:\n return crop.url_at_width(width * scale)\n else:\n return crop.url_at_width(crop.width * scale)\n\n\[email protected]_tag\ndef get_available_crop_scales(crop, width):\n return crop.available_scales(width=width)\n",
"step-3": "<mask token>\nregister = template.Library()\n\n\[email protected]\ndef jsonify(object):\n return mark_safe(json.dumps(object, cls=DjangoJSONEncoder))\n\n\[email protected]_tag\ndef get_crop_url(crop, width=None, scale=1):\n if width:\n return crop.url_at_width(width * scale)\n else:\n return crop.url_at_width(crop.width * scale)\n\n\[email protected]_tag\ndef get_available_crop_scales(crop, width):\n return crop.available_scales(width=width)\n",
"step-4": "import json\nfrom django import template\nfrom django.core.serializers.json import DjangoJSONEncoder\nfrom django.utils.safestring import mark_safe\nregister = template.Library()\n\n\[email protected]\ndef jsonify(object):\n return mark_safe(json.dumps(object, cls=DjangoJSONEncoder))\n\n\[email protected]_tag\ndef get_crop_url(crop, width=None, scale=1):\n if width:\n return crop.url_at_width(width * scale)\n else:\n return crop.url_at_width(crop.width * scale)\n\n\[email protected]_tag\ndef get_available_crop_scales(crop, width):\n return crop.available_scales(width=width)\n",
"step-5": null,
"step-ids": [
0,
3,
4,
5
]
}
|
[
0,
3,
4,
5
] |
"""
Constants to be used throughout this program
are stored here.
"""
ROOT_URL = "https://api.twitter.com"
UPLOAD_URL = "https://upload.twitter.com"
REQUEST_TOKEN_URL = f'{ROOT_URL}/oauth/request_token'
AUTHENTICATE_URL = f'{ROOT_URL}/oauth/authenticate'
ACCESS_TOKEN_URL = f'{ROOT_URL}/oauth/access_token'
VERSION = '1.1'
USER_SEARCH_URL = f'{ROOT_URL}/{VERSION}/users/search.json'
FRIENDSHIP_CREATE_URL = f'{ROOT_URL}/{VERSION}/friendships/create.json'
FRIENDSHIP_DESTROY_URL = f'{ROOT_URL}/{VERSION}/friendships/destroy.json'
FRIENDS_URL = f'{ROOT_URL}/{VERSION}/friends/list.json'
FOLLOWERS_URL = f'{ROOT_URL}/{VERSION}/followers/list.json'
TWEET_SEARCH_URL = f'{ROOT_URL}/{VERSION}/search/tweets.json'
TWEET_LIKE_URL = f'{ROOT_URL}/{VERSION}/favorites/create.json'
TWEET_UNLIKE_URL = f'{ROOT_URL}/{VERSION}/favorites/destroy.json'
RETWEET_URL = ROOT_URL + "/" + VERSION + "/retweet/create/{tweet_id}.json"
REMOVE_RETWEET_URL = ROOT_URL + "/" + \
VERSION + "/unretweet/create/{tweet_id}.json"
FAVOURITED_TWEETS_URL = ROOT_URL + "/" + VERSION + "/favorites/list.json"
STATUS_UPDATE_URL = f'{ROOT_URL}/{VERSION}/statuses/update.json'
MEDIA_UPLOAD_URL = f'{UPLOAD_URL}/{VERSION}/media/upload.json'
TRENDS_URL = f'{ROOT_URL}/{VERSION}/trends/place.json'
|
normal
|
{
"blob_id": "c907f6b954aa3eae21a54eba9d54c116576bd40a",
"index": 5848,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nROOT_URL = 'https://api.twitter.com'\nUPLOAD_URL = 'https://upload.twitter.com'\nREQUEST_TOKEN_URL = f'{ROOT_URL}/oauth/request_token'\nAUTHENTICATE_URL = f'{ROOT_URL}/oauth/authenticate'\nACCESS_TOKEN_URL = f'{ROOT_URL}/oauth/access_token'\nVERSION = '1.1'\nUSER_SEARCH_URL = f'{ROOT_URL}/{VERSION}/users/search.json'\nFRIENDSHIP_CREATE_URL = f'{ROOT_URL}/{VERSION}/friendships/create.json'\nFRIENDSHIP_DESTROY_URL = f'{ROOT_URL}/{VERSION}/friendships/destroy.json'\nFRIENDS_URL = f'{ROOT_URL}/{VERSION}/friends/list.json'\nFOLLOWERS_URL = f'{ROOT_URL}/{VERSION}/followers/list.json'\nTWEET_SEARCH_URL = f'{ROOT_URL}/{VERSION}/search/tweets.json'\nTWEET_LIKE_URL = f'{ROOT_URL}/{VERSION}/favorites/create.json'\nTWEET_UNLIKE_URL = f'{ROOT_URL}/{VERSION}/favorites/destroy.json'\nRETWEET_URL = ROOT_URL + '/' + VERSION + '/retweet/create/{tweet_id}.json'\nREMOVE_RETWEET_URL = (ROOT_URL + '/' + VERSION +\n '/unretweet/create/{tweet_id}.json')\nFAVOURITED_TWEETS_URL = ROOT_URL + '/' + VERSION + '/favorites/list.json'\nSTATUS_UPDATE_URL = f'{ROOT_URL}/{VERSION}/statuses/update.json'\nMEDIA_UPLOAD_URL = f'{UPLOAD_URL}/{VERSION}/media/upload.json'\nTRENDS_URL = f'{ROOT_URL}/{VERSION}/trends/place.json'\n",
"step-3": "\"\"\"\nConstants to be used throughout this program\nstored here.\n\"\"\"\nROOT_URL = \"https://api.twitter.com\"\nUPLOAD_URL = \"https://upload.twitter.com\"\n\nREQUEST_TOKEN_URL = f'{ROOT_URL}/oauth/request_token'\nAUTHENTICATE_URL = f'{ROOT_URL}/oauth/authenticate'\nACCESS_TOKEN_URL = f'{ROOT_URL}/oauth/access_token'\n\nVERSION = '1.1'\n\nUSER_SEARCH_URL = f'{ROOT_URL}/{VERSION}/users/search.json'\nFRIENDSHIP_CREATE_URL = f'{ROOT_URL}/{VERSION}/friendships/create.json'\nFRIENDSHIP_DESTROY_URL = f'{ROOT_URL}/{VERSION}/friendships/destroy.json'\nFRIENDS_URL = f'{ROOT_URL}/{VERSION}/friends/list.json'\nFOLLOWERS_URL = f'{ROOT_URL}/{VERSION}/followers/list.json'\n\nTWEET_SEARCH_URL = f'{ROOT_URL}/{VERSION}/search/tweets.json'\nTWEET_LIKE_URL = f'{ROOT_URL}/{VERSION}/favorites/create.json'\nTWEET_UNLIKE_URL = f'{ROOT_URL}/{VERSION}/favorites/destroy.json'\nRETWEET_URL = ROOT_URL + \"/\" + VERSION + \"/retweet/create/{tweet_id}.json\"\nREMOVE_RETWEET_URL = ROOT_URL + \"/\" + \\\n VERSION + \"/unretweet/create/{tweet_id}.json\"\nFAVOURITED_TWEETS_URL = ROOT_URL + \"/\" + VERSION + \"/favorites/list.json\"\n\nSTATUS_UPDATE_URL = f'{ROOT_URL}/{VERSION}/statuses/update.json'\nMEDIA_UPLOAD_URL = f'{UPLOAD_URL}/{VERSION}/media/upload.json'\n\nTRENDS_URL = f'{ROOT_URL}/{VERSION}/trends/place.json'\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def gen_diffusion_flux_pyst_mpi_kernel_2d(real_t, mpi_construct,
ghost_exchange_communicator):
diffusion_flux_pyst_kernel = gen_diffusion_flux_pyst_kernel_2d(real_t=
real_t, reset_ghost_zone=False)
kernel_support = 1
gen_diffusion_flux_pyst_mpi_kernel_2d.kernel_support = kernel_support
check_valid_ghost_size_and_kernel_support(ghost_size=
ghost_exchange_communicator.ghost_size, kernel_support=
gen_diffusion_flux_pyst_mpi_kernel_2d.kernel_support)
y_next, x_next = mpi_construct.next_grid_along
y_previous, x_previous = mpi_construct.previous_grid_along
set_fixed_val_kernel_2d = gen_set_fixed_val_pyst_kernel_2d(real_t=real_t)
def diffusion_flux_pyst_mpi_kernel_2d(diffusion_flux, field, prefactor):
diffusion_flux_pyst_mpi_kernel_2d.kernel_support = (
gen_diffusion_flux_pyst_mpi_kernel_2d.kernel_support)
ghost_size = ghost_exchange_communicator.ghost_size
ghost_exchange_communicator.exchange_scalar_field_init(field)
diffusion_flux_pyst_kernel(diffusion_flux=diffusion_flux[ghost_size
:-ghost_size, ghost_size:-ghost_size], field=field[ghost_size:-
ghost_size, ghost_size:-ghost_size], prefactor=prefactor)
ghost_exchange_communicator.exchange_finalise()
diffusion_flux_pyst_kernel(diffusion_flux=diffusion_flux[ghost_size -
kernel_support:ghost_size + 2 * kernel_support, ghost_size:-
ghost_size], field=field[ghost_size - kernel_support:ghost_size +
2 * kernel_support, ghost_size:-ghost_size], prefactor=prefactor)
diffusion_flux_pyst_kernel(diffusion_flux=diffusion_flux[-(
ghost_size + 2 * kernel_support):field.shape[0] - (ghost_size -
kernel_support), ghost_size:-ghost_size], field=field[-(
ghost_size + 2 * kernel_support):field.shape[0] - (ghost_size -
kernel_support), ghost_size:-ghost_size], prefactor=prefactor)
diffusion_flux_pyst_kernel(diffusion_flux=diffusion_flux[:,
ghost_size - kernel_support:ghost_size + 2 * kernel_support],
field=field[:, ghost_size - kernel_support:ghost_size + 2 *
kernel_support], prefactor=prefactor)
diffusion_flux_pyst_kernel(diffusion_flux=diffusion_flux[:, -(
ghost_size + 2 * kernel_support):field.shape[1] - (ghost_size -
kernel_support)], field=field[:, -(ghost_size + 2 *
kernel_support):field.shape[1] - (ghost_size - kernel_support)],
prefactor=prefactor)
boundary_width = 1
if x_previous == MPI.PROC_NULL:
set_fixed_val_kernel_2d(field=diffusion_flux[:, :ghost_size +
boundary_width], fixed_val=0.0)
if x_next == MPI.PROC_NULL:
set_fixed_val_kernel_2d(field=diffusion_flux[:, -ghost_size -
boundary_width:], fixed_val=0.0)
if y_previous == MPI.PROC_NULL:
set_fixed_val_kernel_2d(field=diffusion_flux[:ghost_size +
boundary_width, :], fixed_val=0.0)
if y_next == MPI.PROC_NULL:
set_fixed_val_kernel_2d(field=diffusion_flux[-ghost_size -
boundary_width:, :], fixed_val=0.0)
return diffusion_flux_pyst_mpi_kernel_2d
<|reserved_special_token_1|>
<|reserved_special_token_0|>
from sopht.numeric.eulerian_grid_ops.stencil_ops_2d import gen_diffusion_flux_pyst_kernel_2d, gen_set_fixed_val_pyst_kernel_2d
from sopht_mpi.utils.mpi_utils import check_valid_ghost_size_and_kernel_support
from mpi4py import MPI
def gen_diffusion_flux_pyst_mpi_kernel_2d(real_t, mpi_construct,
ghost_exchange_communicator):
diffusion_flux_pyst_kernel = gen_diffusion_flux_pyst_kernel_2d(real_t=
real_t, reset_ghost_zone=False)
kernel_support = 1
gen_diffusion_flux_pyst_mpi_kernel_2d.kernel_support = kernel_support
check_valid_ghost_size_and_kernel_support(ghost_size=
ghost_exchange_communicator.ghost_size, kernel_support=
gen_diffusion_flux_pyst_mpi_kernel_2d.kernel_support)
y_next, x_next = mpi_construct.next_grid_along
y_previous, x_previous = mpi_construct.previous_grid_along
set_fixed_val_kernel_2d = gen_set_fixed_val_pyst_kernel_2d(real_t=real_t)
def diffusion_flux_pyst_mpi_kernel_2d(diffusion_flux, field, prefactor):
diffusion_flux_pyst_mpi_kernel_2d.kernel_support = (
gen_diffusion_flux_pyst_mpi_kernel_2d.kernel_support)
ghost_size = ghost_exchange_communicator.ghost_size
ghost_exchange_communicator.exchange_scalar_field_init(field)
diffusion_flux_pyst_kernel(diffusion_flux=diffusion_flux[ghost_size
:-ghost_size, ghost_size:-ghost_size], field=field[ghost_size:-
ghost_size, ghost_size:-ghost_size], prefactor=prefactor)
ghost_exchange_communicator.exchange_finalise()
diffusion_flux_pyst_kernel(diffusion_flux=diffusion_flux[ghost_size -
kernel_support:ghost_size + 2 * kernel_support, ghost_size:-
ghost_size], field=field[ghost_size - kernel_support:ghost_size +
2 * kernel_support, ghost_size:-ghost_size], prefactor=prefactor)
diffusion_flux_pyst_kernel(diffusion_flux=diffusion_flux[-(
ghost_size + 2 * kernel_support):field.shape[0] - (ghost_size -
kernel_support), ghost_size:-ghost_size], field=field[-(
ghost_size + 2 * kernel_support):field.shape[0] - (ghost_size -
kernel_support), ghost_size:-ghost_size], prefactor=prefactor)
diffusion_flux_pyst_kernel(diffusion_flux=diffusion_flux[:,
ghost_size - kernel_support:ghost_size + 2 * kernel_support],
field=field[:, ghost_size - kernel_support:ghost_size + 2 *
kernel_support], prefactor=prefactor)
diffusion_flux_pyst_kernel(diffusion_flux=diffusion_flux[:, -(
ghost_size + 2 * kernel_support):field.shape[1] - (ghost_size -
kernel_support)], field=field[:, -(ghost_size + 2 *
kernel_support):field.shape[1] - (ghost_size - kernel_support)],
prefactor=prefactor)
boundary_width = 1
if x_previous == MPI.PROC_NULL:
set_fixed_val_kernel_2d(field=diffusion_flux[:, :ghost_size +
boundary_width], fixed_val=0.0)
if x_next == MPI.PROC_NULL:
set_fixed_val_kernel_2d(field=diffusion_flux[:, -ghost_size -
boundary_width:], fixed_val=0.0)
if y_previous == MPI.PROC_NULL:
set_fixed_val_kernel_2d(field=diffusion_flux[:ghost_size +
boundary_width, :], fixed_val=0.0)
if y_next == MPI.PROC_NULL:
set_fixed_val_kernel_2d(field=diffusion_flux[-ghost_size -
boundary_width:, :], fixed_val=0.0)
return diffusion_flux_pyst_mpi_kernel_2d
<|reserved_special_token_1|>
"""MPI-supported kernels for computing diffusion flux in 2D."""
from sopht.numeric.eulerian_grid_ops.stencil_ops_2d import (
gen_diffusion_flux_pyst_kernel_2d,
gen_set_fixed_val_pyst_kernel_2d,
)
from sopht_mpi.utils.mpi_utils import check_valid_ghost_size_and_kernel_support
from mpi4py import MPI
def gen_diffusion_flux_pyst_mpi_kernel_2d(
real_t, mpi_construct, ghost_exchange_communicator
):
# Note: currently I'm generating these for arbitrary size arrays; we can optimise this
# more by generating fixed size for the interior stencil and arbitrary size for
# boundary crunching
diffusion_flux_pyst_kernel = gen_diffusion_flux_pyst_kernel_2d(
real_t=real_t, reset_ghost_zone=False
)
kernel_support = 1
# define this here so that ghost size and kernel support is checked during
# generation phase itself
gen_diffusion_flux_pyst_mpi_kernel_2d.kernel_support = kernel_support
check_valid_ghost_size_and_kernel_support(
ghost_size=ghost_exchange_communicator.ghost_size,
kernel_support=gen_diffusion_flux_pyst_mpi_kernel_2d.kernel_support,
)
# for setting values at physical domain boundary
y_next, x_next = mpi_construct.next_grid_along
y_previous, x_previous = mpi_construct.previous_grid_along
set_fixed_val_kernel_2d = gen_set_fixed_val_pyst_kernel_2d(real_t=real_t)
def diffusion_flux_pyst_mpi_kernel_2d(
diffusion_flux,
field,
prefactor,
):
# define kernel support for kernel
diffusion_flux_pyst_mpi_kernel_2d.kernel_support = (
gen_diffusion_flux_pyst_mpi_kernel_2d.kernel_support
)
# define variable for use later
ghost_size = ghost_exchange_communicator.ghost_size
# begin ghost comm.
ghost_exchange_communicator.exchange_scalar_field_init(field)
# crunch interior stencil
diffusion_flux_pyst_kernel(
diffusion_flux=diffusion_flux[
ghost_size:-ghost_size, ghost_size:-ghost_size
],
field=field[ghost_size:-ghost_size, ghost_size:-ghost_size],
prefactor=prefactor,
)
# finalise ghost comm.
ghost_exchange_communicator.exchange_finalise()
# crunch boundary numbers
# NOTE: we pass in arrays of width 3 * kernel support size because the
# interior stencil computation leaves out a width of kernel_support.
# Since the support needed by the kernel is kernel_support on each side,
# we need to pass an array of width 3 * kernel_support, starting from
# index +/-(ghost_size - kernel_support) on the lower and upper end.
# Pystencils then automatically sets the kernel comp. bounds and
# crunches numbers in the kernel_support thickness zone at the boundary.
# Start of Y axis
diffusion_flux_pyst_kernel(
diffusion_flux=diffusion_flux[
ghost_size - kernel_support : ghost_size + 2 * kernel_support,
ghost_size:-ghost_size,
],
field=field[
ghost_size - kernel_support : ghost_size + 2 * kernel_support,
ghost_size:-ghost_size,
],
prefactor=prefactor,
)
# End of Y axis
diffusion_flux_pyst_kernel(
diffusion_flux=diffusion_flux[
-(ghost_size + 2 * kernel_support) : field.shape[0]
- (ghost_size - kernel_support),
ghost_size:-ghost_size,
],
field=field[
-(ghost_size + 2 * kernel_support) : field.shape[0]
- (ghost_size - kernel_support),
ghost_size:-ghost_size,
],
prefactor=prefactor,
)
# Start of X axis
diffusion_flux_pyst_kernel(
diffusion_flux=diffusion_flux[
:,
ghost_size - kernel_support : ghost_size + 2 * kernel_support,
],
field=field[
:,
ghost_size - kernel_support : ghost_size + 2 * kernel_support,
],
prefactor=prefactor,
)
# End of X axis
diffusion_flux_pyst_kernel(
diffusion_flux=diffusion_flux[
:,
-(ghost_size + 2 * kernel_support) : field.shape[1]
- (ghost_size - kernel_support),
],
field=field[
:,
-(ghost_size + 2 * kernel_support) : field.shape[1]
- (ghost_size - kernel_support),
],
prefactor=prefactor,
)
# Set physical domain boundary diffusion flux to zero based on the neighboring block
boundary_width = 1
if x_previous == MPI.PROC_NULL:
set_fixed_val_kernel_2d(
field=diffusion_flux[:, : ghost_size + boundary_width],
fixed_val=0.0,
)
if x_next == MPI.PROC_NULL:
set_fixed_val_kernel_2d(
field=diffusion_flux[:, -ghost_size - boundary_width :],
fixed_val=0.0,
)
if y_previous == MPI.PROC_NULL:
set_fixed_val_kernel_2d(
field=diffusion_flux[: ghost_size + boundary_width, :],
fixed_val=0.0,
)
if y_next == MPI.PROC_NULL:
set_fixed_val_kernel_2d(
field=diffusion_flux[-ghost_size - boundary_width :, :],
fixed_val=0.0,
)
return diffusion_flux_pyst_mpi_kernel_2d
|
flexible
|
{
"blob_id": "ba8cb18544e4ded8b229bfb9cc4b28599119414f",
"index": 854,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef gen_diffusion_flux_pyst_mpi_kernel_2d(real_t, mpi_construct,\n ghost_exchange_communicator):\n diffusion_flux_pyst_kernel = gen_diffusion_flux_pyst_kernel_2d(real_t=\n real_t, reset_ghost_zone=False)\n kernel_support = 1\n gen_diffusion_flux_pyst_mpi_kernel_2d.kernel_support = kernel_support\n check_valid_ghost_size_and_kernel_support(ghost_size=\n ghost_exchange_communicator.ghost_size, kernel_support=\n gen_diffusion_flux_pyst_mpi_kernel_2d.kernel_support)\n y_next, x_next = mpi_construct.next_grid_along\n y_previous, x_previous = mpi_construct.previous_grid_along\n set_fixed_val_kernel_2d = gen_set_fixed_val_pyst_kernel_2d(real_t=real_t)\n\n def diffusion_flux_pyst_mpi_kernel_2d(diffusion_flux, field, prefactor):\n diffusion_flux_pyst_mpi_kernel_2d.kernel_support = (\n gen_diffusion_flux_pyst_mpi_kernel_2d.kernel_support)\n ghost_size = ghost_exchange_communicator.ghost_size\n ghost_exchange_communicator.exchange_scalar_field_init(field)\n diffusion_flux_pyst_kernel(diffusion_flux=diffusion_flux[ghost_size\n :-ghost_size, ghost_size:-ghost_size], field=field[ghost_size:-\n ghost_size, ghost_size:-ghost_size], prefactor=prefactor)\n ghost_exchange_communicator.exchange_finalise()\n diffusion_flux_pyst_kernel(diffusion_flux=diffusion_flux[ghost_size -\n kernel_support:ghost_size + 2 * kernel_support, ghost_size:-\n ghost_size], field=field[ghost_size - kernel_support:ghost_size +\n 2 * kernel_support, ghost_size:-ghost_size], prefactor=prefactor)\n diffusion_flux_pyst_kernel(diffusion_flux=diffusion_flux[-(\n ghost_size + 2 * kernel_support):field.shape[0] - (ghost_size -\n kernel_support), ghost_size:-ghost_size], field=field[-(\n ghost_size + 2 * kernel_support):field.shape[0] - (ghost_size -\n kernel_support), ghost_size:-ghost_size], prefactor=prefactor)\n diffusion_flux_pyst_kernel(diffusion_flux=diffusion_flux[:, \n ghost_size - kernel_support:ghost_size + 2 * kernel_support],\n field=field[:, ghost_size - kernel_support:ghost_size + 2 *\n kernel_support], prefactor=prefactor)\n diffusion_flux_pyst_kernel(diffusion_flux=diffusion_flux[:, -(\n ghost_size + 2 * kernel_support):field.shape[1] - (ghost_size -\n kernel_support)], field=field[:, -(ghost_size + 2 *\n kernel_support):field.shape[1] - (ghost_size - kernel_support)],\n prefactor=prefactor)\n boundary_width = 1\n if x_previous == MPI.PROC_NULL:\n set_fixed_val_kernel_2d(field=diffusion_flux[:, :ghost_size +\n boundary_width], fixed_val=0.0)\n if x_next == MPI.PROC_NULL:\n set_fixed_val_kernel_2d(field=diffusion_flux[:, -ghost_size -\n boundary_width:], fixed_val=0.0)\n if y_previous == MPI.PROC_NULL:\n set_fixed_val_kernel_2d(field=diffusion_flux[:ghost_size +\n boundary_width, :], fixed_val=0.0)\n if y_next == MPI.PROC_NULL:\n set_fixed_val_kernel_2d(field=diffusion_flux[-ghost_size -\n boundary_width:, :], fixed_val=0.0)\n return diffusion_flux_pyst_mpi_kernel_2d\n",
"step-3": "<mask token>\nfrom sopht.numeric.eulerian_grid_ops.stencil_ops_2d import gen_diffusion_flux_pyst_kernel_2d, gen_set_fixed_val_pyst_kernel_2d\nfrom sopht_mpi.utils.mpi_utils import check_valid_ghost_size_and_kernel_support\nfrom mpi4py import MPI\n\n\ndef gen_diffusion_flux_pyst_mpi_kernel_2d(real_t, mpi_construct,\n ghost_exchange_communicator):\n diffusion_flux_pyst_kernel = gen_diffusion_flux_pyst_kernel_2d(real_t=\n real_t, reset_ghost_zone=False)\n kernel_support = 1\n gen_diffusion_flux_pyst_mpi_kernel_2d.kernel_support = kernel_support\n check_valid_ghost_size_and_kernel_support(ghost_size=\n ghost_exchange_communicator.ghost_size, kernel_support=\n gen_diffusion_flux_pyst_mpi_kernel_2d.kernel_support)\n y_next, x_next = mpi_construct.next_grid_along\n y_previous, x_previous = mpi_construct.previous_grid_along\n set_fixed_val_kernel_2d = gen_set_fixed_val_pyst_kernel_2d(real_t=real_t)\n\n def diffusion_flux_pyst_mpi_kernel_2d(diffusion_flux, field, prefactor):\n diffusion_flux_pyst_mpi_kernel_2d.kernel_support = (\n gen_diffusion_flux_pyst_mpi_kernel_2d.kernel_support)\n ghost_size = ghost_exchange_communicator.ghost_size\n ghost_exchange_communicator.exchange_scalar_field_init(field)\n diffusion_flux_pyst_kernel(diffusion_flux=diffusion_flux[ghost_size\n :-ghost_size, ghost_size:-ghost_size], field=field[ghost_size:-\n ghost_size, ghost_size:-ghost_size], prefactor=prefactor)\n ghost_exchange_communicator.exchange_finalise()\n diffusion_flux_pyst_kernel(diffusion_flux=diffusion_flux[ghost_size -\n kernel_support:ghost_size + 2 * kernel_support, ghost_size:-\n ghost_size], field=field[ghost_size - kernel_support:ghost_size +\n 2 * kernel_support, ghost_size:-ghost_size], prefactor=prefactor)\n diffusion_flux_pyst_kernel(diffusion_flux=diffusion_flux[-(\n ghost_size + 2 * kernel_support):field.shape[0] - (ghost_size -\n kernel_support), ghost_size:-ghost_size], field=field[-(\n ghost_size + 2 * kernel_support):field.shape[0] - (ghost_size -\n kernel_support), ghost_size:-ghost_size], prefactor=prefactor)\n diffusion_flux_pyst_kernel(diffusion_flux=diffusion_flux[:, \n ghost_size - kernel_support:ghost_size + 2 * kernel_support],\n field=field[:, ghost_size - kernel_support:ghost_size + 2 *\n kernel_support], prefactor=prefactor)\n diffusion_flux_pyst_kernel(diffusion_flux=diffusion_flux[:, -(\n ghost_size + 2 * kernel_support):field.shape[1] - (ghost_size -\n kernel_support)], field=field[:, -(ghost_size + 2 *\n kernel_support):field.shape[1] - (ghost_size - kernel_support)],\n prefactor=prefactor)\n boundary_width = 1\n if x_previous == MPI.PROC_NULL:\n set_fixed_val_kernel_2d(field=diffusion_flux[:, :ghost_size +\n boundary_width], fixed_val=0.0)\n if x_next == MPI.PROC_NULL:\n set_fixed_val_kernel_2d(field=diffusion_flux[:, -ghost_size -\n boundary_width:], fixed_val=0.0)\n if y_previous == MPI.PROC_NULL:\n set_fixed_val_kernel_2d(field=diffusion_flux[:ghost_size +\n boundary_width, :], fixed_val=0.0)\n if y_next == MPI.PROC_NULL:\n set_fixed_val_kernel_2d(field=diffusion_flux[-ghost_size -\n boundary_width:, :], fixed_val=0.0)\n return diffusion_flux_pyst_mpi_kernel_2d\n",
"step-4": "\"\"\"MPI-supported kernels for computing diffusion flux in 2D.\"\"\"\nfrom sopht.numeric.eulerian_grid_ops.stencil_ops_2d import (\n gen_diffusion_flux_pyst_kernel_2d,\n gen_set_fixed_val_pyst_kernel_2d,\n)\nfrom sopht_mpi.utils.mpi_utils import check_valid_ghost_size_and_kernel_support\nfrom mpi4py import MPI\n\n\ndef gen_diffusion_flux_pyst_mpi_kernel_2d(\n real_t, mpi_construct, ghost_exchange_communicator\n):\n # Note currently I'm generating these for arbit size arrays, we ca optimise this\n # more by generating fixed size for the interior stencil and arbit size for\n # boundary crunching\n diffusion_flux_pyst_kernel = gen_diffusion_flux_pyst_kernel_2d(\n real_t=real_t, reset_ghost_zone=False\n )\n kernel_support = 1\n # define this here so that ghost size and kernel support is checked during\n # generation phase itself\n gen_diffusion_flux_pyst_mpi_kernel_2d.kernel_support = kernel_support\n check_valid_ghost_size_and_kernel_support(\n ghost_size=ghost_exchange_communicator.ghost_size,\n kernel_support=gen_diffusion_flux_pyst_mpi_kernel_2d.kernel_support,\n )\n\n # for setting values at physical domain boundary\n y_next, x_next = mpi_construct.next_grid_along\n y_previous, x_previous = mpi_construct.previous_grid_along\n set_fixed_val_kernel_2d = gen_set_fixed_val_pyst_kernel_2d(real_t=real_t)\n\n def diffusion_flux_pyst_mpi_kernel_2d(\n diffusion_flux,\n field,\n prefactor,\n ):\n # define kernel support for kernel\n diffusion_flux_pyst_mpi_kernel_2d.kernel_support = (\n gen_diffusion_flux_pyst_mpi_kernel_2d.kernel_support\n )\n # define variable for use later\n ghost_size = ghost_exchange_communicator.ghost_size\n # begin ghost comm.\n ghost_exchange_communicator.exchange_scalar_field_init(field)\n\n # crunch interior stencil\n diffusion_flux_pyst_kernel(\n diffusion_flux=diffusion_flux[\n ghost_size:-ghost_size, ghost_size:-ghost_size\n ],\n field=field[ghost_size:-ghost_size, ghost_size:-ghost_size],\n prefactor=prefactor,\n )\n # finalise ghost comm.\n ghost_exchange_communicator.exchange_finalise()\n\n # crunch boundary numbers\n # NOTE: we pass in arrays of width 3 * kernel support size because the\n # interior stencil computation leaves out a width of kernel_support.\n # Since the support needed by the kernel is kernel_support on each side,\n # we need to pass an array of width 3 * kernel_support, starting from\n # index +/-(ghost_size - kernel_support) on the lower and upper end.\n # Pystencils then automatically sets the kernel comp. 
bounds and\n # crunches numbers in the kernel_support thickness zone at the boundary.\n # Start of Y axis\n diffusion_flux_pyst_kernel(\n diffusion_flux=diffusion_flux[\n ghost_size - kernel_support : ghost_size + 2 * kernel_support,\n ghost_size:-ghost_size,\n ],\n field=field[\n ghost_size - kernel_support : ghost_size + 2 * kernel_support,\n ghost_size:-ghost_size,\n ],\n prefactor=prefactor,\n )\n # End of Y axis\n diffusion_flux_pyst_kernel(\n diffusion_flux=diffusion_flux[\n -(ghost_size + 2 * kernel_support) : field.shape[0]\n - (ghost_size - kernel_support),\n ghost_size:-ghost_size,\n ],\n field=field[\n -(ghost_size + 2 * kernel_support) : field.shape[0]\n - (ghost_size - kernel_support),\n ghost_size:-ghost_size,\n ],\n prefactor=prefactor,\n )\n # Start of X axis\n diffusion_flux_pyst_kernel(\n diffusion_flux=diffusion_flux[\n :,\n ghost_size - kernel_support : ghost_size + 2 * kernel_support,\n ],\n field=field[\n :,\n ghost_size - kernel_support : ghost_size + 2 * kernel_support,\n ],\n prefactor=prefactor,\n )\n # End of X axis\n diffusion_flux_pyst_kernel(\n diffusion_flux=diffusion_flux[\n :,\n -(ghost_size + 2 * kernel_support) : field.shape[1]\n - (ghost_size - kernel_support),\n ],\n field=field[\n :,\n -(ghost_size + 2 * kernel_support) : field.shape[1]\n - (ghost_size - kernel_support),\n ],\n prefactor=prefactor,\n )\n\n # Set physical domain boundary diffusion flus to zero based on neighboring block\n boundary_width = 1\n if x_previous == MPI.PROC_NULL:\n set_fixed_val_kernel_2d(\n field=diffusion_flux[:, : ghost_size + boundary_width],\n fixed_val=0.0,\n )\n if x_next == MPI.PROC_NULL:\n set_fixed_val_kernel_2d(\n field=diffusion_flux[:, -ghost_size - boundary_width :],\n fixed_val=0.0,\n )\n if y_previous == MPI.PROC_NULL:\n set_fixed_val_kernel_2d(\n field=diffusion_flux[: ghost_size + boundary_width, :],\n fixed_val=0.0,\n )\n if y_next == MPI.PROC_NULL:\n set_fixed_val_kernel_2d(\n field=diffusion_flux[-ghost_size - boundary_width :, :],\n fixed_val=0.0,\n )\n\n return diffusion_flux_pyst_mpi_kernel_2d\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
TheBeatles = ['John', 'Paul', 'George', 'Ringo']
Wings = ['Paul']
for Beatle in TheBeatles:
if Beatle in Wings:
continue
print Beatle
|
normal
|
{
"blob_id": "9a54ff8e7e8d6d46860cb6173f03c52655b30f43",
"index": 6449,
"step-1": "TheBeatles = ['John', 'Paul', 'George', 'Ringo']\nWings = ['Paul']\n\nfor Beatle in TheBeatles:\n\t\tif Beatle in Wings:\n\t\t\t\tcontinue\n\t\tprint Beatle\n\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
# Write a Python program to print alphabet pattern 'G'.
result = ''
for row in range(0,7):
for col in range(0,7):
if ((col ==0) and (row !=0 and row !=6) or ((row ==0 or row == 6) and (col>0 and col<6))or ((row ==1 or row == 5 or row == 4)and (col ==6))or ((row ==3)and ((col!=2)and col!=1))):
result = result+'*'
else:
result = result+' '
result=result+'\n'
print(result)
|
normal
|
{
"blob_id": "e598091fc6c05b1d7f9f35f2ae58494fed53f9af",
"index": 5392,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nfor row in range(0, 7):\n for col in range(0, 7):\n if col == 0 and (row != 0 and row != 6) or (row == 0 or row == 6) and (\n col > 0 and col < 6) or (row == 1 or row == 5 or row == 4\n ) and col == 6 or row == 3 and (col != 2 and col != 1):\n result = result + '*'\n else:\n result = result + ' '\n result = result + '\\n'\nprint(result)\n",
"step-3": "result = ''\nfor row in range(0, 7):\n for col in range(0, 7):\n if col == 0 and (row != 0 and row != 6) or (row == 0 or row == 6) and (\n col > 0 and col < 6) or (row == 1 or row == 5 or row == 4\n ) and col == 6 or row == 3 and (col != 2 and col != 1):\n result = result + '*'\n else:\n result = result + ' '\n result = result + '\\n'\nprint(result)\n",
"step-4": "# Write a Python program to print alphabet pattern 'G'.\n\nresult = ''\nfor row in range(0,7):\n for col in range(0,7):\n if ((col ==0) and (row !=0 and row !=6) or ((row ==0 or row == 6) and (col>0 and col<6))or ((row ==1 or row == 5 or row == 4)and (col ==6))or ((row ==3)and ((col!=2)and col!=1))):\n result = result+'*'\n else:\n result = result+' '\n result=result+'\\n'\nprint(result)",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
class MysqlBaseModel(BaseModel):
def __init__(self, db_name=None, table_name=None, table_alias=None,
primary_key='id'):
super(MysqlBaseModel, self).__init__(db_name, table_name,
table_alias, primary_key)
<|reserved_special_token_0|>
def get_executor(self):
return mysqlExecutor
class MysqlBaseRepository(BaseRepository):
def __init__(self, model_class=None):
super(MysqlBaseRepository, self).__init__(model_class)
def get_dialect(self):
return mysqlDialect
def get_executor(self):
return mysqlExecutor
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class MySQLDialect(DefaultDialect):
def get_db_type(self):
return 'mysql'
def paginate_with(self, sql, page_number, page_size):
if page_number == 1 and page_size == 1:
if re.match(DefaultDialect.select_single_pattern, sql) is not None:
return sql
offset = page_size * (page_number - 1)
return '%s LIMIT %d OFFSET %d' % (sql, page_size, offset)
<|reserved_special_token_0|>
class MysqlBaseModel(BaseModel):
def __init__(self, db_name=None, table_name=None, table_alias=None,
primary_key='id'):
super(MysqlBaseModel, self).__init__(db_name, table_name,
table_alias, primary_key)
def get_dialect(self):
return mysqlDialect
def get_executor(self):
return mysqlExecutor
class MysqlBaseRepository(BaseRepository):
def __init__(self, model_class=None):
super(MysqlBaseRepository, self).__init__(model_class)
def get_dialect(self):
return mysqlDialect
def get_executor(self):
return mysqlExecutor
def transaction(rollback_exceptions=[]):
def wrap(func):
def handle(result, **kwargs):
func = kwargs['func']
args = kwargs['args']
kwargs = kwargs['kwargs']
return_value = func(*args, **kwargs)
logger.info('Transaction method: ' + func.__name__)
result.append(return_value)
def to_do(*args, **kwargs):
new_kwargs = {'func': func, 'args': args, 'kwargs': kwargs}
result = []
try:
mysqlExecutor.begin_transaction()
handle(result, **new_kwargs)
mysqlExecutor.commit_transaction()
except Exception as e:
if len(rollback_exceptions
) == 0 or e.__class__ in rollback_exceptions:
mysqlExecutor.rollback_transaction()
logger.error('Method execute error. method: ' + str(
func.__name__) + ', error:' + traceback.format_exc
() + ', transaction roll back.')
else:
mysqlExecutor.commit_transaction()
raise e
finally:
mysqlExecutor.close_transaction()
return to_do
return wrap
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class MySQLDialect(DefaultDialect):
def get_db_type(self):
return 'mysql'
def paginate_with(self, sql, page_number, page_size):
if page_number == 1 and page_size == 1:
if re.match(DefaultDialect.select_single_pattern, sql) is not None:
return sql
offset = page_size * (page_number - 1)
return '%s LIMIT %d OFFSET %d' % (sql, page_size, offset)
<|reserved_special_token_0|>
if db_type == 'mysql':
import mysql.connector as connector
db_config['target'] = connector
db_config['use_pure'] = True
from mysql.connector.conversion import MySQLConverter
class NumpyMySQLConverter(MySQLConverter):
""" A mysql.connector Converter that handles Numpy types """
def _float32_to_mysql(self, value):
return float(value)
def _float64_to_mysql(self, value):
return float(value)
def _int32_to_mysql(self, value):
return int(value)
def _int64_to_mysql(self, value):
return int(value)
db_config['converter_class'] = NumpyMySQLConverter
mysqlExecutor = ExecutorFactory.get_executor(db_config=db_config)
mysqlDialect = MySQLDialect()
class MysqlBaseModel(BaseModel):
def __init__(self, db_name=None, table_name=None, table_alias=None,
primary_key='id'):
super(MysqlBaseModel, self).__init__(db_name, table_name,
table_alias, primary_key)
def get_dialect(self):
return mysqlDialect
def get_executor(self):
return mysqlExecutor
class MysqlBaseRepository(BaseRepository):
def __init__(self, model_class=None):
super(MysqlBaseRepository, self).__init__(model_class)
def get_dialect(self):
return mysqlDialect
def get_executor(self):
return mysqlExecutor
def transaction(rollback_exceptions=[]):
def wrap(func):
def handle(result, **kwargs):
func = kwargs['func']
args = kwargs['args']
kwargs = kwargs['kwargs']
return_value = func(*args, **kwargs)
logger.info('Transaction method: ' + func.__name__)
result.append(return_value)
def to_do(*args, **kwargs):
new_kwargs = {'func': func, 'args': args, 'kwargs': kwargs}
result = []
try:
mysqlExecutor.begin_transaction()
handle(result, **new_kwargs)
mysqlExecutor.commit_transaction()
except Exception as e:
if len(rollback_exceptions
) == 0 or e.__class__ in rollback_exceptions:
mysqlExecutor.rollback_transaction()
logger.error('Method execute error. method: ' + str(
func.__name__) + ', error:' + traceback.format_exc
() + ', transaction roll back.')
else:
mysqlExecutor.commit_transaction()
raise e
finally:
mysqlExecutor.close_transaction()
return to_do
return wrap
<|reserved_special_token_1|>
import re
import traceback
from pesto_common.config.configer import Configer
from pesto_common.log.logger_factory import LoggerFactory
from pesto_orm.core.base import db_config
from pesto_orm.core.executor import ExecutorFactory
from pesto_orm.core.model import BaseModel
from pesto_orm.core.repository import BaseRepository
from pesto_orm.dialect.base import DefaultDialect
logger = LoggerFactory.get_logger('dialect.mysql.domain')
class MySQLDialect(DefaultDialect):
def get_db_type(self):
return 'mysql'
def paginate_with(self, sql, page_number, page_size):
if page_number == 1 and page_size == 1:
if re.match(DefaultDialect.select_single_pattern, sql) is not None:
return sql
offset = page_size * (page_number - 1)
return '%s LIMIT %d OFFSET %d' % (sql, page_size, offset)
db_type = Configer.get('db.type')
if db_type == 'mysql':
import mysql.connector as connector
db_config['target'] = connector
db_config['use_pure'] = True
from mysql.connector.conversion import MySQLConverter
class NumpyMySQLConverter(MySQLConverter):
""" A mysql.connector Converter that handles Numpy types """
def _float32_to_mysql(self, value):
return float(value)
def _float64_to_mysql(self, value):
return float(value)
def _int32_to_mysql(self, value):
return int(value)
def _int64_to_mysql(self, value):
return int(value)
db_config['converter_class'] = NumpyMySQLConverter
mysqlExecutor = ExecutorFactory.get_executor(db_config=db_config)
mysqlDialect = MySQLDialect()
class MysqlBaseModel(BaseModel):
def __init__(self, db_name=None, table_name=None, table_alias=None,
primary_key='id'):
super(MysqlBaseModel, self).__init__(db_name, table_name,
table_alias, primary_key)
def get_dialect(self):
return mysqlDialect
def get_executor(self):
return mysqlExecutor
class MysqlBaseRepository(BaseRepository):
def __init__(self, model_class=None):
super(MysqlBaseRepository, self).__init__(model_class)
def get_dialect(self):
return mysqlDialect
def get_executor(self):
return mysqlExecutor
def transaction(rollback_exceptions=[]):
def wrap(func):
def handle(result, **kwargs):
func = kwargs['func']
args = kwargs['args']
kwargs = kwargs['kwargs']
return_value = func(*args, **kwargs)
logger.info('Transaction method: ' + func.__name__)
result.append(return_value)
def to_do(*args, **kwargs):
new_kwargs = {'func': func, 'args': args, 'kwargs': kwargs}
result = []
try:
mysqlExecutor.begin_transaction()
handle(result, **new_kwargs)
mysqlExecutor.commit_transaction()
except Exception as e:
                if len(rollback_exceptions) == 0 or e.__class__ in rollback_exceptions:
                    mysqlExecutor.rollback_transaction()
                    logger.error('Method execute error. method: ' +
                                 str(func.__name__) + ', error:' +
                                 traceback.format_exc() + ', transaction roll back.')
else:
mysqlExecutor.commit_transaction()
raise e
finally:
mysqlExecutor.close_transaction()
return to_do
return wrap
<|reserved_special_token_1|>
# coding=utf-8
import re
import traceback
from pesto_common.config.configer import Configer
from pesto_common.log.logger_factory import LoggerFactory
from pesto_orm.core.base import db_config
from pesto_orm.core.executor import ExecutorFactory
from pesto_orm.core.model import BaseModel
from pesto_orm.core.repository import BaseRepository
from pesto_orm.dialect.base import DefaultDialect
logger = LoggerFactory.get_logger('dialect.mysql.domain')
class MySQLDialect(DefaultDialect):
def get_db_type(self):
return 'mysql'
def paginate_with(self, sql, page_number, page_size):
if page_number == 1 and page_size == 1:
if re.match(DefaultDialect.select_single_pattern, sql) is not None:
return sql
offset = page_size * (page_number - 1)
return '%s LIMIT %d OFFSET %d' % (sql, page_size, offset)
db_type = Configer.get('db.type')
if db_type == 'mysql':
import mysql.connector as connector
db_config['target'] = connector
db_config['use_pure'] = True
from mysql.connector.conversion import MySQLConverter
class NumpyMySQLConverter(MySQLConverter):
''' A mysql.connector Converter that handles Numpy types '''
def _float32_to_mysql(self, value):
return float(value)
def _float64_to_mysql(self, value):
return float(value)
def _int32_to_mysql(self, value):
return int(value)
def _int64_to_mysql(self, value):
return int(value)
db_config['converter_class'] = NumpyMySQLConverter
mysqlExecutor = ExecutorFactory.get_executor(db_config=db_config)
mysqlDialect = MySQLDialect()
class MysqlBaseModel(BaseModel):
def __init__(self, db_name=None, table_name=None, table_alias=None, primary_key='id'):
super(MysqlBaseModel, self).__init__(db_name, table_name, table_alias, primary_key)
def get_dialect(self):
return mysqlDialect
def get_executor(self):
return mysqlExecutor
class MysqlBaseRepository(BaseRepository):
def __init__(self, model_class=None):
super(MysqlBaseRepository, self).__init__(model_class)
def get_dialect(self):
return mysqlDialect
def get_executor(self):
return mysqlExecutor
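# Decorator: runs the wrapped function inside a MySQL transaction, committing on
# success and rolling back when the raised exception is listed in rollback_exceptions
# (or on any exception when the list is empty); the exception is re-raised either way.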
def transaction(rollback_exceptions=[]):
def wrap(func):
        def handle(result, **kwargs): # Actually executes the original method.
func = kwargs['func']
args = kwargs['args']
kwargs = kwargs['kwargs']
return_value = func(*args, **kwargs)
logger.info('Transaction method: ' + func.__name__)
result.append(return_value)
def to_do(*args, **kwargs):
new_kwargs = {'func': func, 'args': args, 'kwargs': kwargs}
result = []
try:
mysqlExecutor.begin_transaction()
handle(result, **new_kwargs)
mysqlExecutor.commit_transaction()
except Exception as e:
if len(rollback_exceptions) == 0 or e.__class__ in rollback_exceptions:
mysqlExecutor.rollback_transaction()
logger.error('Method execute error. method: ' + str(func.__name__) + ', error:' + traceback.format_exc() + ', transaction roll back.')
else:
mysqlExecutor.commit_transaction()
raise e
finally:
mysqlExecutor.close_transaction()
return to_do
return wrap
|
flexible
|
{
"blob_id": "a68de7555fdab06014fd562e7db29ca2da03f443",
"index": 8240,
"step-1": "<mask token>\n\n\nclass MysqlBaseModel(BaseModel):\n\n def __init__(self, db_name=None, table_name=None, table_alias=None,\n primary_key='id'):\n super(MysqlBaseModel, self).__init__(db_name, table_name,\n table_alias, primary_key)\n <mask token>\n\n def get_executor(self):\n return mysqlExecutor\n\n\nclass MysqlBaseRepository(BaseRepository):\n\n def __init__(self, model_class=None):\n super(MysqlBaseRepository, self).__init__(model_class)\n\n def get_dialect(self):\n return mysqlDialect\n\n def get_executor(self):\n return mysqlExecutor\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass MySQLDialect(DefaultDialect):\n\n def get_db_type(self):\n return 'mysql'\n\n def paginate_with(self, sql, page_number, page_size):\n if page_number == 1 and page_size == 1:\n if re.match(DefaultDialect.select_single_pattern, sql) is not None:\n return sql\n offset = page_size * (page_number - 1)\n return '%s LIMIT %d OFFSET %d' % (sql, page_size, offset)\n\n\n<mask token>\n\n\nclass MysqlBaseModel(BaseModel):\n\n def __init__(self, db_name=None, table_name=None, table_alias=None,\n primary_key='id'):\n super(MysqlBaseModel, self).__init__(db_name, table_name,\n table_alias, primary_key)\n\n def get_dialect(self):\n return mysqlDialect\n\n def get_executor(self):\n return mysqlExecutor\n\n\nclass MysqlBaseRepository(BaseRepository):\n\n def __init__(self, model_class=None):\n super(MysqlBaseRepository, self).__init__(model_class)\n\n def get_dialect(self):\n return mysqlDialect\n\n def get_executor(self):\n return mysqlExecutor\n\n\ndef transaction(rollback_exceptions=[]):\n\n def wrap(func):\n\n def handle(result, **kwargs):\n func = kwargs['func']\n args = kwargs['args']\n kwargs = kwargs['kwargs']\n return_value = func(*args, **kwargs)\n logger.info('Transaction method: ' + func.__name__)\n result.append(return_value)\n\n def to_do(*args, **kwargs):\n new_kwargs = {'func': func, 'args': args, 'kwargs': kwargs}\n result = []\n try:\n mysqlExecutor.begin_transaction()\n handle(result, **new_kwargs)\n mysqlExecutor.commit_transaction()\n except Exception as e:\n if len(rollback_exceptions\n ) == 0 or e.__class__ in rollback_exceptions:\n mysqlExecutor.rollback_transaction()\n logger.error('Method execute error. method: ' + str(\n func.__name__) + ', error:' + traceback.format_exc\n () + ', transaction roll back.')\n else:\n mysqlExecutor.commit_transaction()\n raise e\n finally:\n mysqlExecutor.close_transaction()\n return to_do\n return wrap\n",
"step-3": "<mask token>\n\n\nclass MySQLDialect(DefaultDialect):\n\n def get_db_type(self):\n return 'mysql'\n\n def paginate_with(self, sql, page_number, page_size):\n if page_number == 1 and page_size == 1:\n if re.match(DefaultDialect.select_single_pattern, sql) is not None:\n return sql\n offset = page_size * (page_number - 1)\n return '%s LIMIT %d OFFSET %d' % (sql, page_size, offset)\n\n\n<mask token>\nif db_type == 'mysql':\n import mysql.connector as connector\n db_config['target'] = connector\n db_config['use_pure'] = True\n from mysql.connector.conversion import MySQLConverter\n\n\n class NumpyMySQLConverter(MySQLConverter):\n \"\"\" A mysql.connector Converter that handles Numpy types \"\"\"\n\n def _float32_to_mysql(self, value):\n return float(value)\n\n def _float64_to_mysql(self, value):\n return float(value)\n\n def _int32_to_mysql(self, value):\n return int(value)\n\n def _int64_to_mysql(self, value):\n return int(value)\n db_config['converter_class'] = NumpyMySQLConverter\n mysqlExecutor = ExecutorFactory.get_executor(db_config=db_config)\n mysqlDialect = MySQLDialect()\n\n\nclass MysqlBaseModel(BaseModel):\n\n def __init__(self, db_name=None, table_name=None, table_alias=None,\n primary_key='id'):\n super(MysqlBaseModel, self).__init__(db_name, table_name,\n table_alias, primary_key)\n\n def get_dialect(self):\n return mysqlDialect\n\n def get_executor(self):\n return mysqlExecutor\n\n\nclass MysqlBaseRepository(BaseRepository):\n\n def __init__(self, model_class=None):\n super(MysqlBaseRepository, self).__init__(model_class)\n\n def get_dialect(self):\n return mysqlDialect\n\n def get_executor(self):\n return mysqlExecutor\n\n\ndef transaction(rollback_exceptions=[]):\n\n def wrap(func):\n\n def handle(result, **kwargs):\n func = kwargs['func']\n args = kwargs['args']\n kwargs = kwargs['kwargs']\n return_value = func(*args, **kwargs)\n logger.info('Transaction method: ' + func.__name__)\n result.append(return_value)\n\n def to_do(*args, **kwargs):\n new_kwargs = {'func': func, 'args': args, 'kwargs': kwargs}\n result = []\n try:\n mysqlExecutor.begin_transaction()\n handle(result, **new_kwargs)\n mysqlExecutor.commit_transaction()\n except Exception as e:\n if len(rollback_exceptions\n ) == 0 or e.__class__ in rollback_exceptions:\n mysqlExecutor.rollback_transaction()\n logger.error('Method execute error. method: ' + str(\n func.__name__) + ', error:' + traceback.format_exc\n () + ', transaction roll back.')\n else:\n mysqlExecutor.commit_transaction()\n raise e\n finally:\n mysqlExecutor.close_transaction()\n return to_do\n return wrap\n",
"step-4": "import re\nimport traceback\nfrom pesto_common.config.configer import Configer\nfrom pesto_common.log.logger_factory import LoggerFactory\nfrom pesto_orm.core.base import db_config\nfrom pesto_orm.core.executor import ExecutorFactory\nfrom pesto_orm.core.model import BaseModel\nfrom pesto_orm.core.repository import BaseRepository\nfrom pesto_orm.dialect.base import DefaultDialect\nlogger = LoggerFactory.get_logger('dialect.mysql.domain')\n\n\nclass MySQLDialect(DefaultDialect):\n\n def get_db_type(self):\n return 'mysql'\n\n def paginate_with(self, sql, page_number, page_size):\n if page_number == 1 and page_size == 1:\n if re.match(DefaultDialect.select_single_pattern, sql) is not None:\n return sql\n offset = page_size * (page_number - 1)\n return '%s LIMIT %d OFFSET %d' % (sql, page_size, offset)\n\n\ndb_type = Configer.get('db.type')\nif db_type == 'mysql':\n import mysql.connector as connector\n db_config['target'] = connector\n db_config['use_pure'] = True\n from mysql.connector.conversion import MySQLConverter\n\n\n class NumpyMySQLConverter(MySQLConverter):\n \"\"\" A mysql.connector Converter that handles Numpy types \"\"\"\n\n def _float32_to_mysql(self, value):\n return float(value)\n\n def _float64_to_mysql(self, value):\n return float(value)\n\n def _int32_to_mysql(self, value):\n return int(value)\n\n def _int64_to_mysql(self, value):\n return int(value)\n db_config['converter_class'] = NumpyMySQLConverter\n mysqlExecutor = ExecutorFactory.get_executor(db_config=db_config)\n mysqlDialect = MySQLDialect()\n\n\nclass MysqlBaseModel(BaseModel):\n\n def __init__(self, db_name=None, table_name=None, table_alias=None,\n primary_key='id'):\n super(MysqlBaseModel, self).__init__(db_name, table_name,\n table_alias, primary_key)\n\n def get_dialect(self):\n return mysqlDialect\n\n def get_executor(self):\n return mysqlExecutor\n\n\nclass MysqlBaseRepository(BaseRepository):\n\n def __init__(self, model_class=None):\n super(MysqlBaseRepository, self).__init__(model_class)\n\n def get_dialect(self):\n return mysqlDialect\n\n def get_executor(self):\n return mysqlExecutor\n\n\ndef transaction(rollback_exceptions=[]):\n\n def wrap(func):\n\n def handle(result, **kwargs):\n func = kwargs['func']\n args = kwargs['args']\n kwargs = kwargs['kwargs']\n return_value = func(*args, **kwargs)\n logger.info('Transaction method: ' + func.__name__)\n result.append(return_value)\n\n def to_do(*args, **kwargs):\n new_kwargs = {'func': func, 'args': args, 'kwargs': kwargs}\n result = []\n try:\n mysqlExecutor.begin_transaction()\n handle(result, **new_kwargs)\n mysqlExecutor.commit_transaction()\n except Exception as e:\n if len(rollback_exceptions\n ) == 0 or e.__class__ in rollback_exceptions:\n mysqlExecutor.rollback_transaction()\n logger.error('Method execute error. method: ' + str(\n func.__name__) + ', error:' + traceback.format_exc\n () + ', transaction roll back.')\n else:\n mysqlExecutor.commit_transaction()\n raise e\n finally:\n mysqlExecutor.close_transaction()\n return to_do\n return wrap\n",
"step-5": "# coding=utf-8\nimport re\nimport traceback\n\nfrom pesto_common.config.configer import Configer\nfrom pesto_common.log.logger_factory import LoggerFactory\nfrom pesto_orm.core.base import db_config\nfrom pesto_orm.core.executor import ExecutorFactory\nfrom pesto_orm.core.model import BaseModel\nfrom pesto_orm.core.repository import BaseRepository\nfrom pesto_orm.dialect.base import DefaultDialect\n\nlogger = LoggerFactory.get_logger('dialect.mysql.domain')\n\n\nclass MySQLDialect(DefaultDialect):\n\n def get_db_type(self):\n return 'mysql'\n\n def paginate_with(self, sql, page_number, page_size):\n if page_number == 1 and page_size == 1:\n if re.match(DefaultDialect.select_single_pattern, sql) is not None:\n return sql\n\n offset = page_size * (page_number - 1)\n return '%s LIMIT %d OFFSET %d' % (sql, page_size, offset)\n\n\ndb_type = Configer.get('db.type')\nif db_type == 'mysql':\n import mysql.connector as connector\n\n db_config['target'] = connector\n db_config['use_pure'] = True\n\n from mysql.connector.conversion import MySQLConverter\n\n\n class NumpyMySQLConverter(MySQLConverter):\n ''' A mysql.connector Converter that handles Numpy types '''\n\n def _float32_to_mysql(self, value):\n return float(value)\n\n def _float64_to_mysql(self, value):\n return float(value)\n\n def _int32_to_mysql(self, value):\n return int(value)\n\n def _int64_to_mysql(self, value):\n return int(value)\n\n\n db_config['converter_class'] = NumpyMySQLConverter\n\n mysqlExecutor = ExecutorFactory.get_executor(db_config=db_config)\n\n mysqlDialect = MySQLDialect()\n\n\nclass MysqlBaseModel(BaseModel):\n\n def __init__(self, db_name=None, table_name=None, table_alias=None, primary_key='id'):\n super(MysqlBaseModel, self).__init__(db_name, table_name, table_alias, primary_key)\n\n def get_dialect(self):\n return mysqlDialect\n\n def get_executor(self):\n return mysqlExecutor\n\n\nclass MysqlBaseRepository(BaseRepository):\n\n def __init__(self, model_class=None):\n super(MysqlBaseRepository, self).__init__(model_class)\n\n def get_dialect(self):\n return mysqlDialect\n\n def get_executor(self):\n return mysqlExecutor\n\n\ndef transaction(rollback_exceptions=[]):\n def wrap(func):\n def handle(result, **kwargs): # 真实执行原方法.\n func = kwargs['func']\n args = kwargs['args']\n kwargs = kwargs['kwargs']\n return_value = func(*args, **kwargs)\n logger.info('Transaction method: ' + func.__name__)\n result.append(return_value)\n\n def to_do(*args, **kwargs):\n new_kwargs = {'func': func, 'args': args, 'kwargs': kwargs}\n\n result = []\n try:\n mysqlExecutor.begin_transaction()\n handle(result, **new_kwargs)\n mysqlExecutor.commit_transaction()\n except Exception as e:\n\n if len(rollback_exceptions) == 0 or e.__class__ in rollback_exceptions:\n mysqlExecutor.rollback_transaction()\n logger.error('Method execute error. method: ' + str(func.__name__) + ', error:' + traceback.format_exc() + ', transaction roll back.')\n else:\n mysqlExecutor.commit_transaction()\n raise e\n finally:\n mysqlExecutor.close_transaction()\n\n return to_do\n\n return wrap\n",
"step-ids": [
7,
12,
13,
15,
16
]
}
|
[
7,
12,
13,
15,
16
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
if __name__ == '__main__':
targets = at.Table.read('targets_LCO2018A_002.txt', format='ascii')
headers = {'Authorization': 'Token {}'.format(sys.argv[1])}
for x in targets['targetname']:
obs = requests.get(
'https://observe.lco.global/api/userrequests/?proposal=LCO2018A-002&title={}'
.format(x.split('.')[0]), headers=headers).json()
for y in obs['results']:
print(y['group_id'])
<|reserved_special_token_1|>
import sys
import requests
import numpy as np
import astropy.table as at
if __name__ == '__main__':
targets = at.Table.read('targets_LCO2018A_002.txt', format='ascii')
headers = {'Authorization': 'Token {}'.format(sys.argv[1])}
for x in targets['targetname']:
obs = requests.get(
'https://observe.lco.global/api/userrequests/?proposal=LCO2018A-002&title={}'
.format(x.split('.')[0]), headers=headers).json()
for y in obs['results']:
print(y['group_id'])
<|reserved_special_token_1|>
#!/usr/bin/env python
import sys
import requests
import numpy as np
import astropy.table as at
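# Reads the target list and, using an LCO API token passed as the first command-line
# argument, queries the LCO observation portal for user requests under proposal
# LCO2018A-002 matching each target, printing every request's group_id.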
if __name__=='__main__':
targets = at.Table.read('targets_LCO2018A_002.txt', format='ascii')
headers={'Authorization': 'Token {}'.format(sys.argv[1])}
for x in targets['targetname']:
obs = requests.get('https://observe.lco.global/api/userrequests/?proposal=LCO2018A-002&title={}'.format(x.split('.')[0]),headers=headers).json()
for y in obs['results']:
print(y['group_id'])
|
flexible
|
{
"blob_id": "705bc651e7d12769bcf5994168fe6685a6bae05d",
"index": 5983,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nif __name__ == '__main__':\n targets = at.Table.read('targets_LCO2018A_002.txt', format='ascii')\n headers = {'Authorization': 'Token {}'.format(sys.argv[1])}\n for x in targets['targetname']:\n obs = requests.get(\n 'https://observe.lco.global/api/userrequests/?proposal=LCO2018A-002&title={}'\n .format(x.split('.')[0]), headers=headers).json()\n for y in obs['results']:\n print(y['group_id'])\n",
"step-3": "import sys\nimport requests\nimport numpy as np\nimport astropy.table as at\nif __name__ == '__main__':\n targets = at.Table.read('targets_LCO2018A_002.txt', format='ascii')\n headers = {'Authorization': 'Token {}'.format(sys.argv[1])}\n for x in targets['targetname']:\n obs = requests.get(\n 'https://observe.lco.global/api/userrequests/?proposal=LCO2018A-002&title={}'\n .format(x.split('.')[0]), headers=headers).json()\n for y in obs['results']:\n print(y['group_id'])\n",
"step-4": "#!/usr/bin/env python\nimport sys\nimport requests\nimport numpy as np\nimport astropy.table as at\n\nif __name__=='__main__':\n targets = at.Table.read('targets_LCO2018A_002.txt', format='ascii')\n headers={'Authorization': 'Token {}'.format(sys.argv[1])}\n for x in targets['targetname']:\n obs = requests.get('https://observe.lco.global/api/userrequests/?proposal=LCO2018A-002&title={}'.format(x.split('.')[0]),headers=headers).json()\n for y in obs['results']:\n print(y['group_id'])\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
with open('/Users/danluu/dev/dump/terra/filtered_events.json', 'r') as f:
parsed = json.load(f)
print(json.dumps(parsed['4pLeague_S1_D1L1_G4']['events']['faction'], indent=2))
<|reserved_special_token_1|>
<|reserved_special_token_0|>
parsed = {}
with open('/Users/danluu/dev/dump/terra/filtered_events.json', 'r') as f:
parsed = json.load(f)
print(json.dumps(parsed['4pLeague_S1_D1L1_G4']['events']['faction'], indent=2))
<|reserved_special_token_1|>
import json
parsed = {}
with open('/Users/danluu/dev/dump/terra/filtered_events.json', 'r') as f:
parsed = json.load(f)
print(json.dumps(parsed['4pLeague_S1_D1L1_G4']['events']['faction'], indent=2))
<|reserved_special_token_1|>
import json
parsed = {}
with open('/Users/danluu/dev/dump/terra/filtered_events.json','r') as f:
# with open('/Users/danluu/dev/dump/terra/game-data/2017-05.json','r') as f:
# with open('/Users/danluu/dev/dump/terra/ratings.json','r') as f:
parsed = json.load(f)
# print(json.dumps(parsed, indent=2))
print(json.dumps(parsed["4pLeague_S1_D1L1_G4"]["events"]["faction"], indent=2))
|
flexible
|
{
"blob_id": "886024a528112520948f1fb976aa7cb187a1da46",
"index": 6767,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nwith open('/Users/danluu/dev/dump/terra/filtered_events.json', 'r') as f:\n parsed = json.load(f)\nprint(json.dumps(parsed['4pLeague_S1_D1L1_G4']['events']['faction'], indent=2))\n",
"step-3": "<mask token>\nparsed = {}\nwith open('/Users/danluu/dev/dump/terra/filtered_events.json', 'r') as f:\n parsed = json.load(f)\nprint(json.dumps(parsed['4pLeague_S1_D1L1_G4']['events']['faction'], indent=2))\n",
"step-4": "import json\nparsed = {}\nwith open('/Users/danluu/dev/dump/terra/filtered_events.json', 'r') as f:\n parsed = json.load(f)\nprint(json.dumps(parsed['4pLeague_S1_D1L1_G4']['events']['faction'], indent=2))\n",
"step-5": "import json\n\nparsed = {}\n\nwith open('/Users/danluu/dev/dump/terra/filtered_events.json','r') as f:\n# with open('/Users/danluu/dev/dump/terra/game-data/2017-05.json','r') as f:\n# with open('/Users/danluu/dev/dump/terra/ratings.json','r') as f:\n parsed = json.load(f)\n # print(json.dumps(parsed, indent=2))\n\nprint(json.dumps(parsed[\"4pLeague_S1_D1L1_G4\"][\"events\"][\"faction\"], indent=2))\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
def rot(*symbols):
def _rot(n):
encoded = ''.join(sy[n:] + sy[:n] for sy in symbols)
lookup = str.maketrans(''.join(symbols), encoded)
return lambda s: s.translate(lookup)
return _rot
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def rot(*symbols):
def _rot(n):
encoded = ''.join(sy[n:] + sy[:n] for sy in symbols)
lookup = str.maketrans(''.join(symbols), encoded)
return lambda s: s.translate(lookup)
return _rot
def rot_alpha(n):
from string import ascii_lowercase as lc, ascii_uppercase as uc
lookup = str.maketrans(lc + uc, lc[n:] + lc[:n] + uc[n:] + uc[:n])
return lambda s: s.translate(lookup)
def rot_encode(n):
from string import ascii_lowercase as lc, ascii_uppercase as uc
lookup = str.maketrans(lc + uc, lc[n:] + lc[:n] + uc[n:] + uc[:n])
return lambda s: s.translate(lookup)
print(rot_encode(7)(text))
if __name__ == '__main__':
pass
<|reserved_special_token_1|>
<|reserved_special_token_0|>
img = Image.open('flag.png')
text = pytesseract.image_to_string(img)
def rot(*symbols):
def _rot(n):
encoded = ''.join(sy[n:] + sy[:n] for sy in symbols)
lookup = str.maketrans(''.join(symbols), encoded)
return lambda s: s.translate(lookup)
return _rot
def rot_alpha(n):
from string import ascii_lowercase as lc, ascii_uppercase as uc
lookup = str.maketrans(lc + uc, lc[n:] + lc[:n] + uc[n:] + uc[:n])
return lambda s: s.translate(lookup)
def rot_encode(n):
from string import ascii_lowercase as lc, ascii_uppercase as uc
lookup = str.maketrans(lc + uc, lc[n:] + lc[:n] + uc[n:] + uc[:n])
return lambda s: s.translate(lookup)
print(rot_encode(7)(text))
if __name__ == '__main__':
pass
<|reserved_special_token_1|>
import pytesseract
from PIL import Image
img = Image.open('flag.png')
text = pytesseract.image_to_string(img)
def rot(*symbols):
def _rot(n):
encoded = ''.join(sy[n:] + sy[:n] for sy in symbols)
lookup = str.maketrans(''.join(symbols), encoded)
return lambda s: s.translate(lookup)
return _rot
def rot_alpha(n):
from string import ascii_lowercase as lc, ascii_uppercase as uc
lookup = str.maketrans(lc + uc, lc[n:] + lc[:n] + uc[n:] + uc[:n])
return lambda s: s.translate(lookup)
def rot_encode(n):
from string import ascii_lowercase as lc, ascii_uppercase as uc
lookup = str.maketrans(lc + uc, lc[n:] + lc[:n] + uc[n:] + uc[:n])
return lambda s: s.translate(lookup)
print(rot_encode(7)(text))
if __name__ == '__main__':
pass
<|reserved_special_token_1|>
import pytesseract
from PIL import Image
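# OCR the flag image with Tesseract, then print the extracted text shifted by a
# ROT-style Caesar cipher of 7 over the ASCII letters.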
img = Image.open("flag.png")
text = pytesseract.image_to_string(img)
def rot(*symbols):
def _rot(n):
encoded = ''.join(sy[n:] + sy[:n] for sy in symbols)
lookup = str.maketrans(''.join(symbols), encoded)
return lambda s: s.translate(lookup)
return _rot
def rot_alpha(n):
from string import ascii_lowercase as lc, ascii_uppercase as uc
lookup = str.maketrans(lc + uc, lc[n:] + lc[:n] + uc[n:] + uc[:n])
return lambda s: s.translate(lookup)
def rot_encode(n):
from string import ascii_lowercase as lc, ascii_uppercase as uc
lookup = str.maketrans(lc + uc, lc[n:] + lc[:n] + uc[n:] + uc[:n])
return lambda s: s.translate(lookup)
print(rot_encode(7)(text))
if __name__ == '__main__':
pass
|
flexible
|
{
"blob_id": "b7a60322b4a0fcb6de16cd12be33db265a2b8746",
"index": 2735,
"step-1": "<mask token>\n\n\ndef rot(*symbols):\n\n def _rot(n):\n encoded = ''.join(sy[n:] + sy[:n] for sy in symbols)\n lookup = str.maketrans(''.join(symbols), encoded)\n return lambda s: s.translate(lookup)\n return _rot\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef rot(*symbols):\n\n def _rot(n):\n encoded = ''.join(sy[n:] + sy[:n] for sy in symbols)\n lookup = str.maketrans(''.join(symbols), encoded)\n return lambda s: s.translate(lookup)\n return _rot\n\n\ndef rot_alpha(n):\n from string import ascii_lowercase as lc, ascii_uppercase as uc\n lookup = str.maketrans(lc + uc, lc[n:] + lc[:n] + uc[n:] + uc[:n])\n return lambda s: s.translate(lookup)\n\n\ndef rot_encode(n):\n from string import ascii_lowercase as lc, ascii_uppercase as uc\n lookup = str.maketrans(lc + uc, lc[n:] + lc[:n] + uc[n:] + uc[:n])\n return lambda s: s.translate(lookup)\n\n\nprint(rot_encode(7)(text))\nif __name__ == '__main__':\n pass\n",
"step-3": "<mask token>\nimg = Image.open('flag.png')\ntext = pytesseract.image_to_string(img)\n\n\ndef rot(*symbols):\n\n def _rot(n):\n encoded = ''.join(sy[n:] + sy[:n] for sy in symbols)\n lookup = str.maketrans(''.join(symbols), encoded)\n return lambda s: s.translate(lookup)\n return _rot\n\n\ndef rot_alpha(n):\n from string import ascii_lowercase as lc, ascii_uppercase as uc\n lookup = str.maketrans(lc + uc, lc[n:] + lc[:n] + uc[n:] + uc[:n])\n return lambda s: s.translate(lookup)\n\n\ndef rot_encode(n):\n from string import ascii_lowercase as lc, ascii_uppercase as uc\n lookup = str.maketrans(lc + uc, lc[n:] + lc[:n] + uc[n:] + uc[:n])\n return lambda s: s.translate(lookup)\n\n\nprint(rot_encode(7)(text))\nif __name__ == '__main__':\n pass\n",
"step-4": "import pytesseract\nfrom PIL import Image\nimg = Image.open('flag.png')\ntext = pytesseract.image_to_string(img)\n\n\ndef rot(*symbols):\n\n def _rot(n):\n encoded = ''.join(sy[n:] + sy[:n] for sy in symbols)\n lookup = str.maketrans(''.join(symbols), encoded)\n return lambda s: s.translate(lookup)\n return _rot\n\n\ndef rot_alpha(n):\n from string import ascii_lowercase as lc, ascii_uppercase as uc\n lookup = str.maketrans(lc + uc, lc[n:] + lc[:n] + uc[n:] + uc[:n])\n return lambda s: s.translate(lookup)\n\n\ndef rot_encode(n):\n from string import ascii_lowercase as lc, ascii_uppercase as uc\n lookup = str.maketrans(lc + uc, lc[n:] + lc[:n] + uc[n:] + uc[:n])\n return lambda s: s.translate(lookup)\n\n\nprint(rot_encode(7)(text))\nif __name__ == '__main__':\n pass\n",
"step-5": "import pytesseract\nfrom PIL import Image\n\nimg = Image.open(\"flag.png\")\ntext = pytesseract.image_to_string(img)\n\n\ndef rot(*symbols):\n def _rot(n):\n encoded = ''.join(sy[n:] + sy[:n] for sy in symbols)\n lookup = str.maketrans(''.join(symbols), encoded)\n return lambda s: s.translate(lookup)\n\n return _rot\n\n\ndef rot_alpha(n):\n from string import ascii_lowercase as lc, ascii_uppercase as uc\n lookup = str.maketrans(lc + uc, lc[n:] + lc[:n] + uc[n:] + uc[:n])\n return lambda s: s.translate(lookup)\n\n\ndef rot_encode(n):\n from string import ascii_lowercase as lc, ascii_uppercase as uc\n lookup = str.maketrans(lc + uc, lc[n:] + lc[:n] + uc[n:] + uc[:n])\n return lambda s: s.translate(lookup)\n\n\nprint(rot_encode(7)(text))\n\nif __name__ == '__main__':\n pass\n",
"step-ids": [
1,
4,
5,
6,
7
]
}
|
[
1,
4,
5,
6,
7
] |
<|reserved_special_token_0|>
class NN(nn.Module):
def __init__(self, input_size, num_classes):
super(NN, self).__init__()
self.fc1 = nn.Linear(input_size, 50)
self.fc2 = nn.Linear(50, num_classes)
def forward(self, x):
x = F.relu(self.fc1(x))
x = self.fc2(x)
return x
<|reserved_special_token_0|>
def check_accuracy(loader, model):
if loader.dataset.train:
print('Checking accuracy on training data')
else:
print('Checking accuracy on test data')
num_correct = 0
num_samples = 0
model.eval()
with torch.no_grad():
for x, y in loader:
x = x.to(device=device)
y = y.to(device=device)
x = x.reshape(x.shape[0], -1)
scores = model(x)
_, predictions = scores.max(1)
num_correct += (predictions == y).sum()
num_samples += predictions.size(0)
print(
f'Got {num_correct} / {num_samples} with accuracy {float(num_correct) / float(num_samples) * 100:.2f}'
)
model.train()
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class NN(nn.Module):
def __init__(self, input_size, num_classes):
super(NN, self).__init__()
self.fc1 = nn.Linear(input_size, 50)
self.fc2 = nn.Linear(50, num_classes)
def forward(self, x):
x = F.relu(self.fc1(x))
x = self.fc2(x)
return x
<|reserved_special_token_0|>
for epoch in range(num_epochs):
print('Epoch: ' + str(epoch + 1))
for batch_idx, (data, targets) in enumerate(train_loader):
data = data.to(device=device)
targets = targets.to(device=device)
data = data.reshape(data.shape[0], -1)
scores = model(data)
loss = criterion(scores, targets)
optimizer.zero_grad()
loss.backward()
optimizer.step()
def check_accuracy(loader, model):
if loader.dataset.train:
print('Checking accuracy on training data')
else:
print('Checking accuracy on test data')
num_correct = 0
num_samples = 0
model.eval()
with torch.no_grad():
for x, y in loader:
x = x.to(device=device)
y = y.to(device=device)
x = x.reshape(x.shape[0], -1)
scores = model(x)
_, predictions = scores.max(1)
num_correct += (predictions == y).sum()
num_samples += predictions.size(0)
print(
f'Got {num_correct} / {num_samples} with accuracy {float(num_correct) / float(num_samples) * 100:.2f}'
)
model.train()
check_accuracy(train_loader, model)
check_accuracy(test_loader, model)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class NN(nn.Module):
def __init__(self, input_size, num_classes):
super(NN, self).__init__()
self.fc1 = nn.Linear(input_size, 50)
self.fc2 = nn.Linear(50, num_classes)
def forward(self, x):
x = F.relu(self.fc1(x))
x = self.fc2(x)
return x
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
input_size = 784
num_classes = 10
learning_rate = 0.001
batch_size = 64
num_epochs = 10
train_dataset = datasets.MNIST(root='dataset/', train=True, transform=
transforms.ToTensor(), download=True)
train_loader = DataLoader(dataset=train_dataset, batch_size=batch_size,
shuffle=True)
test_dataset = datasets.MNIST(root='dataset/', train=False, transform=
transforms.ToTensor(), download=True)
test_loader = DataLoader(dataset=train_dataset, batch_size=batch_size,
shuffle=True)
model = NN(input_size, num_classes).to(device)
criterion = nn.CrossEntropyLoss()
optimizer = optim.Adam(model.parameters(), lr=learning_rate)
for epoch in range(num_epochs):
print('Epoch: ' + str(epoch + 1))
for batch_idx, (data, targets) in enumerate(train_loader):
data = data.to(device=device)
targets = targets.to(device=device)
data = data.reshape(data.shape[0], -1)
scores = model(data)
loss = criterion(scores, targets)
optimizer.zero_grad()
loss.backward()
optimizer.step()
def check_accuracy(loader, model):
if loader.dataset.train:
print('Checking accuracy on training data')
else:
print('Checking accuracy on test data')
num_correct = 0
num_samples = 0
model.eval()
with torch.no_grad():
for x, y in loader:
x = x.to(device=device)
y = y.to(device=device)
x = x.reshape(x.shape[0], -1)
scores = model(x)
_, predictions = scores.max(1)
num_correct += (predictions == y).sum()
num_samples += predictions.size(0)
print(
f'Got {num_correct} / {num_samples} with accuracy {float(num_correct) / float(num_samples) * 100:.2f}'
)
model.train()
check_accuracy(train_loader, model)
check_accuracy(test_loader, model)
<|reserved_special_token_1|>
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
from torch.utils import data
from torch.utils.data import DataLoader
import torchvision.datasets as datasets
import torchvision.transforms as transforms
class NN(nn.Module):
def __init__(self, input_size, num_classes):
super(NN, self).__init__()
self.fc1 = nn.Linear(input_size, 50)
self.fc2 = nn.Linear(50, num_classes)
def forward(self, x):
x = F.relu(self.fc1(x))
x = self.fc2(x)
return x
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
input_size = 784
num_classes = 10
learning_rate = 0.001
batch_size = 64
num_epochs = 10
train_dataset = datasets.MNIST(root='dataset/', train=True, transform=
transforms.ToTensor(), download=True)
train_loader = DataLoader(dataset=train_dataset, batch_size=batch_size,
shuffle=True)
test_dataset = datasets.MNIST(root='dataset/', train=False, transform=
transforms.ToTensor(), download=True)
test_loader = DataLoader(dataset=train_dataset, batch_size=batch_size,
shuffle=True)
model = NN(input_size, num_classes).to(device)
criterion = nn.CrossEntropyLoss()
optimizer = optim.Adam(model.parameters(), lr=learning_rate)
for epoch in range(num_epochs):
print('Epoch: ' + str(epoch + 1))
for batch_idx, (data, targets) in enumerate(train_loader):
data = data.to(device=device)
targets = targets.to(device=device)
data = data.reshape(data.shape[0], -1)
scores = model(data)
loss = criterion(scores, targets)
optimizer.zero_grad()
loss.backward()
optimizer.step()
def check_accuracy(loader, model):
if loader.dataset.train:
print('Checking accuracy on training data')
else:
print('Checking accuracy on test data')
num_correct = 0
num_samples = 0
model.eval()
with torch.no_grad():
for x, y in loader:
x = x.to(device=device)
y = y.to(device=device)
x = x.reshape(x.shape[0], -1)
scores = model(x)
_, predictions = scores.max(1)
num_correct += (predictions == y).sum()
num_samples += predictions.size(0)
print(
f'Got {num_correct} / {num_samples} with accuracy {float(num_correct) / float(num_samples) * 100:.2f}'
)
model.train()
check_accuracy(train_loader, model)
check_accuracy(test_loader, model)
<|reserved_special_token_1|>
# Imports
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
from torch.utils import data
from torch.utils.data import DataLoader
import torchvision.datasets as datasets
import torchvision.transforms as transforms
# Create Fully Connected Network
class NN(nn.Module):
def __init__(self, input_size,num_classes):
super(NN,self).__init__()
self.fc1 = nn.Linear(input_size,50)
self.fc2 = nn.Linear(50,num_classes)
def forward(self,x):
x = F.relu(self.fc1(x))
x = self.fc2(x)
return x
# Set device
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
# Hyperparameters
input_size =784
num_classes = 10
learning_rate = 0.001
batch_size = 64
num_epochs = 10
# Load Data
train_dataset = datasets.MNIST(
root='dataset/',
train=True,
transform=transforms.ToTensor(),
download=True,
)
train_loader = DataLoader(
dataset=train_dataset,
batch_size=batch_size,
shuffle=True,
)
test_dataset = datasets.MNIST(
root='dataset/',
train=False,
transform=transforms.ToTensor(),
download=True,
)
test_loader = DataLoader(
dataset=train_dataset,
batch_size=batch_size,
shuffle=True,
)
# Initialize network
model = NN(input_size,num_classes).to(device)
# Loss and optimizer
criterion = nn.CrossEntropyLoss()
optimizer = optim.Adam(model.parameters(),lr=learning_rate)
# Train network
for epoch in range(num_epochs):
print("Epoch: "+str(epoch+1))
for batch_idx, (data, targets) in enumerate(train_loader):
data = data.to(device=device)
targets = targets.to(device=device)
# Get to correct shape
data = data.reshape(data.shape[0],-1)
scores = model(data)
loss = criterion(scores,targets)
# backward
optimizer.zero_grad()
loss.backward()
# gradient descent or adam step
optimizer.step()
# Check accuracy on training and test data to see how good our model is
def check_accuracy(loader, model):
if loader.dataset.train:
print("Checking accuracy on training data")
else:
print("Checking accuracy on test data")
num_correct = 0
num_samples = 0
model.eval()
with torch.no_grad():
for x,y in loader:
x = x.to(device=device)
y = y.to(device=device)
x = x.reshape(x.shape[0],-1)
scores = model(x)
_, predictions = scores.max(1)
num_correct += (predictions == y).sum()
num_samples += predictions.size(0)
print(f'Got {num_correct} / {num_samples} with accuracy {float(num_correct)/float(num_samples)*100:.2f}')
model.train()
check_accuracy(train_loader,model)
check_accuracy(test_loader,model)
|
flexible
|
{
"blob_id": "1edb92a4905048f3961e3067c67ef892d7b8a034",
"index": 9154,
"step-1": "<mask token>\n\n\nclass NN(nn.Module):\n\n def __init__(self, input_size, num_classes):\n super(NN, self).__init__()\n self.fc1 = nn.Linear(input_size, 50)\n self.fc2 = nn.Linear(50, num_classes)\n\n def forward(self, x):\n x = F.relu(self.fc1(x))\n x = self.fc2(x)\n return x\n\n\n<mask token>\n\n\ndef check_accuracy(loader, model):\n if loader.dataset.train:\n print('Checking accuracy on training data')\n else:\n print('Checking accuracy on test data')\n num_correct = 0\n num_samples = 0\n model.eval()\n with torch.no_grad():\n for x, y in loader:\n x = x.to(device=device)\n y = y.to(device=device)\n x = x.reshape(x.shape[0], -1)\n scores = model(x)\n _, predictions = scores.max(1)\n num_correct += (predictions == y).sum()\n num_samples += predictions.size(0)\n print(\n f'Got {num_correct} / {num_samples} with accuracy {float(num_correct) / float(num_samples) * 100:.2f}'\n )\n model.train()\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass NN(nn.Module):\n\n def __init__(self, input_size, num_classes):\n super(NN, self).__init__()\n self.fc1 = nn.Linear(input_size, 50)\n self.fc2 = nn.Linear(50, num_classes)\n\n def forward(self, x):\n x = F.relu(self.fc1(x))\n x = self.fc2(x)\n return x\n\n\n<mask token>\nfor epoch in range(num_epochs):\n print('Epoch: ' + str(epoch + 1))\n for batch_idx, (data, targets) in enumerate(train_loader):\n data = data.to(device=device)\n targets = targets.to(device=device)\n data = data.reshape(data.shape[0], -1)\n scores = model(data)\n loss = criterion(scores, targets)\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n\n\ndef check_accuracy(loader, model):\n if loader.dataset.train:\n print('Checking accuracy on training data')\n else:\n print('Checking accuracy on test data')\n num_correct = 0\n num_samples = 0\n model.eval()\n with torch.no_grad():\n for x, y in loader:\n x = x.to(device=device)\n y = y.to(device=device)\n x = x.reshape(x.shape[0], -1)\n scores = model(x)\n _, predictions = scores.max(1)\n num_correct += (predictions == y).sum()\n num_samples += predictions.size(0)\n print(\n f'Got {num_correct} / {num_samples} with accuracy {float(num_correct) / float(num_samples) * 100:.2f}'\n )\n model.train()\n\n\ncheck_accuracy(train_loader, model)\ncheck_accuracy(test_loader, model)\n",
"step-3": "<mask token>\n\n\nclass NN(nn.Module):\n\n def __init__(self, input_size, num_classes):\n super(NN, self).__init__()\n self.fc1 = nn.Linear(input_size, 50)\n self.fc2 = nn.Linear(50, num_classes)\n\n def forward(self, x):\n x = F.relu(self.fc1(x))\n x = self.fc2(x)\n return x\n\n\ndevice = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\ninput_size = 784\nnum_classes = 10\nlearning_rate = 0.001\nbatch_size = 64\nnum_epochs = 10\ntrain_dataset = datasets.MNIST(root='dataset/', train=True, transform=\n transforms.ToTensor(), download=True)\ntrain_loader = DataLoader(dataset=train_dataset, batch_size=batch_size,\n shuffle=True)\ntest_dataset = datasets.MNIST(root='dataset/', train=False, transform=\n transforms.ToTensor(), download=True)\ntest_loader = DataLoader(dataset=train_dataset, batch_size=batch_size,\n shuffle=True)\nmodel = NN(input_size, num_classes).to(device)\ncriterion = nn.CrossEntropyLoss()\noptimizer = optim.Adam(model.parameters(), lr=learning_rate)\nfor epoch in range(num_epochs):\n print('Epoch: ' + str(epoch + 1))\n for batch_idx, (data, targets) in enumerate(train_loader):\n data = data.to(device=device)\n targets = targets.to(device=device)\n data = data.reshape(data.shape[0], -1)\n scores = model(data)\n loss = criterion(scores, targets)\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n\n\ndef check_accuracy(loader, model):\n if loader.dataset.train:\n print('Checking accuracy on training data')\n else:\n print('Checking accuracy on test data')\n num_correct = 0\n num_samples = 0\n model.eval()\n with torch.no_grad():\n for x, y in loader:\n x = x.to(device=device)\n y = y.to(device=device)\n x = x.reshape(x.shape[0], -1)\n scores = model(x)\n _, predictions = scores.max(1)\n num_correct += (predictions == y).sum()\n num_samples += predictions.size(0)\n print(\n f'Got {num_correct} / {num_samples} with accuracy {float(num_correct) / float(num_samples) * 100:.2f}'\n )\n model.train()\n\n\ncheck_accuracy(train_loader, model)\ncheck_accuracy(test_loader, model)\n",
"step-4": "import torch\nimport torch.nn as nn\nimport torch.optim as optim\nimport torch.nn.functional as F\nfrom torch.utils import data\nfrom torch.utils.data import DataLoader\nimport torchvision.datasets as datasets\nimport torchvision.transforms as transforms\n\n\nclass NN(nn.Module):\n\n def __init__(self, input_size, num_classes):\n super(NN, self).__init__()\n self.fc1 = nn.Linear(input_size, 50)\n self.fc2 = nn.Linear(50, num_classes)\n\n def forward(self, x):\n x = F.relu(self.fc1(x))\n x = self.fc2(x)\n return x\n\n\ndevice = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\ninput_size = 784\nnum_classes = 10\nlearning_rate = 0.001\nbatch_size = 64\nnum_epochs = 10\ntrain_dataset = datasets.MNIST(root='dataset/', train=True, transform=\n transforms.ToTensor(), download=True)\ntrain_loader = DataLoader(dataset=train_dataset, batch_size=batch_size,\n shuffle=True)\ntest_dataset = datasets.MNIST(root='dataset/', train=False, transform=\n transforms.ToTensor(), download=True)\ntest_loader = DataLoader(dataset=train_dataset, batch_size=batch_size,\n shuffle=True)\nmodel = NN(input_size, num_classes).to(device)\ncriterion = nn.CrossEntropyLoss()\noptimizer = optim.Adam(model.parameters(), lr=learning_rate)\nfor epoch in range(num_epochs):\n print('Epoch: ' + str(epoch + 1))\n for batch_idx, (data, targets) in enumerate(train_loader):\n data = data.to(device=device)\n targets = targets.to(device=device)\n data = data.reshape(data.shape[0], -1)\n scores = model(data)\n loss = criterion(scores, targets)\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n\n\ndef check_accuracy(loader, model):\n if loader.dataset.train:\n print('Checking accuracy on training data')\n else:\n print('Checking accuracy on test data')\n num_correct = 0\n num_samples = 0\n model.eval()\n with torch.no_grad():\n for x, y in loader:\n x = x.to(device=device)\n y = y.to(device=device)\n x = x.reshape(x.shape[0], -1)\n scores = model(x)\n _, predictions = scores.max(1)\n num_correct += (predictions == y).sum()\n num_samples += predictions.size(0)\n print(\n f'Got {num_correct} / {num_samples} with accuracy {float(num_correct) / float(num_samples) * 100:.2f}'\n )\n model.train()\n\n\ncheck_accuracy(train_loader, model)\ncheck_accuracy(test_loader, model)\n",
"step-5": "# Imports\nimport torch\nimport torch.nn as nn\nimport torch.optim as optim\nimport torch.nn.functional as F\nfrom torch.utils import data\nfrom torch.utils.data import DataLoader\nimport torchvision.datasets as datasets\nimport torchvision.transforms as transforms\n\n# Create Fully Connected Network\nclass NN(nn.Module):\n def __init__(self, input_size,num_classes):\n super(NN,self).__init__()\n self.fc1 = nn.Linear(input_size,50)\n self.fc2 = nn.Linear(50,num_classes)\n\n def forward(self,x):\n x = F.relu(self.fc1(x))\n x = self.fc2(x)\n return x\n\n# Set device\ndevice = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n\n\n# Hyperparameters\ninput_size =784\nnum_classes = 10\nlearning_rate = 0.001\nbatch_size = 64\nnum_epochs = 10\n\n# Load Data\ntrain_dataset = datasets.MNIST(\n root='dataset/',\n train=True,\n transform=transforms.ToTensor(),\n download=True,\n)\ntrain_loader = DataLoader(\n dataset=train_dataset,\n batch_size=batch_size,\n shuffle=True,\n)\ntest_dataset = datasets.MNIST(\n root='dataset/',\n train=False,\n transform=transforms.ToTensor(),\n download=True,\n)\ntest_loader = DataLoader(\n dataset=train_dataset,\n batch_size=batch_size,\n shuffle=True,\n)\n# Initialize network\nmodel = NN(input_size,num_classes).to(device)\n\n# Loss and optimizer\ncriterion = nn.CrossEntropyLoss()\noptimizer = optim.Adam(model.parameters(),lr=learning_rate)\n\n# Train network\nfor epoch in range(num_epochs):\n print(\"Epoch: \"+str(epoch+1))\n for batch_idx, (data, targets) in enumerate(train_loader):\n data = data.to(device=device)\n targets = targets.to(device=device)\n\n # Get to correct shape\n data = data.reshape(data.shape[0],-1)\n scores = model(data)\n loss = criterion(scores,targets)\n\n # backward\n optimizer.zero_grad()\n loss.backward()\n\n # gradient descent or adam step\n optimizer.step()\n\n# Check accuracy on training and test to see how good our model\n\ndef check_accuracy(loader, model):\n\n if loader.dataset.train:\n print(\"Checking accuracy on training data\")\n else:\n print(\"Checking accuracy on test data\")\n\n num_correct = 0\n num_samples = 0\n model.eval()\n with torch.no_grad():\n for x,y in loader:\n x = x.to(device=device)\n y = y.to(device=device)\n x = x.reshape(x.shape[0],-1)\n\n scores = model(x)\n\n _, predictions = scores.max(1)\n num_correct += (predictions == y).sum()\n num_samples += predictions.size(0)\n\n print(f'Got {num_correct} / {num_samples} with accuracy {float(num_correct)/float(num_samples)*100:.2f}')\n model.train()\n\n\ncheck_accuracy(train_loader,model)\ncheck_accuracy(test_loader,model)",
"step-ids": [
4,
5,
6,
7,
8
]
}
|
[
4,
5,
6,
7,
8
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
@pytest.fixture
def cvrp_problem():
max_num_vehicles = 1
    coords = [[-15.6570138544452, -47.802664728268745],
              [-15.65879313293694, -47.7496622016347],
              [-15.651440380492554, -47.75887552060412],
              [-15.651207309372888, -47.755018806591394],
              [-15.648706444367969, -47.758785390289965],
              [-15.66047286919706, -47.75284167302011]]
    return CVRPProblem(problem_identifier='bla',
                       location_idx=np.array([0, 1, 2, 3, 4, 5]),
                       coords=np.array(coords),
                       vehicle_capacity=100,
                       num_vehicles=max_num_vehicles,
                       max_deliveries=5,
                       demands=np.array([0, 10, 10, 7, 3, 10]),
                       depot_idx=0)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
@pytest.fixture
def cvrp_problem():
max_num_vehicles = 1
    coords = [[-15.6570138544452, -47.802664728268745],
              [-15.65879313293694, -47.7496622016347],
              [-15.651440380492554, -47.75887552060412],
              [-15.651207309372888, -47.755018806591394],
              [-15.648706444367969, -47.758785390289965],
              [-15.66047286919706, -47.75284167302011]]
    return CVRPProblem(problem_identifier='bla',
                       location_idx=np.array([0, 1, 2, 3, 4, 5]),
                       coords=np.array(coords),
                       vehicle_capacity=100,
                       num_vehicles=max_num_vehicles,
                       max_deliveries=5,
                       demands=np.array([0, 10, 10, 7, 3, 10]),
                       depot_idx=0)
def test_vrp_partition_full_qubo_solver(cvrp_problem):
backend_solver = QBSolv()
params = partitionqubo.KmeansPartitionFullQuboParams(fixed_num_clusters=1)
qubo_problem_fn = wrap_vrp_qubo_problem(params=params)
solver = partitionqubo.solver_fn(params=params, backend_solver=
backend_solver, qubo_problem_fn=qubo_problem_fn)
result = solver(problem=cvrp_problem)
assert result.problem_identifier == 'bla'
assert (result.routes == np.array([[0, 5, 1, 3, 2, 4, 0]])).all()
assert result.total_demands == 40
<|reserved_special_token_1|>
import pytest
import numpy as np
from dwave_qbsolv import QBSolv
from src.quantumrouting.solvers import partitionqubo
from src.quantumrouting.types import CVRPProblem
from src.quantumrouting.wrappers.qubo import wrap_vrp_qubo_problem
@pytest.fixture
def cvrp_problem():
max_num_vehicles = 1
    coords = [[-15.6570138544452, -47.802664728268745],
              [-15.65879313293694, -47.7496622016347],
              [-15.651440380492554, -47.75887552060412],
              [-15.651207309372888, -47.755018806591394],
              [-15.648706444367969, -47.758785390289965],
              [-15.66047286919706, -47.75284167302011]]
    return CVRPProblem(problem_identifier='bla',
                       location_idx=np.array([0, 1, 2, 3, 4, 5]),
                       coords=np.array(coords),
                       vehicle_capacity=100,
                       num_vehicles=max_num_vehicles,
                       max_deliveries=5,
                       demands=np.array([0, 10, 10, 7, 3, 10]),
                       depot_idx=0)
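# Solves the single-vehicle instance (one cluster) with QBSolv through the k-means
# partition + full-QUBO solver and checks the returned route and total served demand.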
def test_vrp_partition_full_qubo_solver(cvrp_problem):
backend_solver = QBSolv()
params = partitionqubo.KmeansPartitionFullQuboParams(fixed_num_clusters=1)
qubo_problem_fn = wrap_vrp_qubo_problem(params=params)
solver = partitionqubo.solver_fn(params=params, backend_solver=
backend_solver, qubo_problem_fn=qubo_problem_fn)
result = solver(problem=cvrp_problem)
assert result.problem_identifier == 'bla'
assert (result.routes == np.array([[0, 5, 1, 3, 2, 4, 0]])).all()
assert result.total_demands == 40
|
flexible
|
{
"blob_id": "f61e9e8069a0e90506c2f03a0cc4a25a16d71b85",
"index": 3732,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\[email protected]\ndef cvrp_problem():\n max_num_vehicles = 1\n coords = [[-15.6570138544452, -47.802664728268745], [-15.65879313293694,\n -47.7496622016347], [-15.651440380492554, -47.75887552060412], [-\n 15.651207309372888, -47.755018806591394], [-15.648706444367969, -\n 47.758785390289965], [-15.66047286919706, -47.75284167302011]]\n return CVRPProblem(problem_identifier='bla', location_idx=np.array([0, \n 1, 2, 3, 4, 5]), coords=np.array(coords), vehicle_capacity=100,\n num_vehicles=max_num_vehicles, max_deliveries=5, demands=np.array([\n 0, 10, 10, 7, 3, 10]), depot_idx=0)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\[email protected]\ndef cvrp_problem():\n max_num_vehicles = 1\n coords = [[-15.6570138544452, -47.802664728268745], [-15.65879313293694,\n -47.7496622016347], [-15.651440380492554, -47.75887552060412], [-\n 15.651207309372888, -47.755018806591394], [-15.648706444367969, -\n 47.758785390289965], [-15.66047286919706, -47.75284167302011]]\n return CVRPProblem(problem_identifier='bla', location_idx=np.array([0, \n 1, 2, 3, 4, 5]), coords=np.array(coords), vehicle_capacity=100,\n num_vehicles=max_num_vehicles, max_deliveries=5, demands=np.array([\n 0, 10, 10, 7, 3, 10]), depot_idx=0)\n\n\ndef test_vrp_partition_full_qubo_solver(cvrp_problem):\n backend_solver = QBSolv()\n params = partitionqubo.KmeansPartitionFullQuboParams(fixed_num_clusters=1)\n qubo_problem_fn = wrap_vrp_qubo_problem(params=params)\n solver = partitionqubo.solver_fn(params=params, backend_solver=\n backend_solver, qubo_problem_fn=qubo_problem_fn)\n result = solver(problem=cvrp_problem)\n assert result.problem_identifier == 'bla'\n assert (result.routes == np.array([[0, 5, 1, 3, 2, 4, 0]])).all()\n assert result.total_demands == 40\n",
"step-4": "import pytest\nimport numpy as np\nfrom dwave_qbsolv import QBSolv\nfrom src.quantumrouting.solvers import partitionqubo\nfrom src.quantumrouting.types import CVRPProblem\nfrom src.quantumrouting.wrappers.qubo import wrap_vrp_qubo_problem\n\n\[email protected]\ndef cvrp_problem():\n max_num_vehicles = 1\n coords = [[-15.6570138544452, -47.802664728268745], [-15.65879313293694,\n -47.7496622016347], [-15.651440380492554, -47.75887552060412], [-\n 15.651207309372888, -47.755018806591394], [-15.648706444367969, -\n 47.758785390289965], [-15.66047286919706, -47.75284167302011]]\n return CVRPProblem(problem_identifier='bla', location_idx=np.array([0, \n 1, 2, 3, 4, 5]), coords=np.array(coords), vehicle_capacity=100,\n num_vehicles=max_num_vehicles, max_deliveries=5, demands=np.array([\n 0, 10, 10, 7, 3, 10]), depot_idx=0)\n\n\ndef test_vrp_partition_full_qubo_solver(cvrp_problem):\n backend_solver = QBSolv()\n params = partitionqubo.KmeansPartitionFullQuboParams(fixed_num_clusters=1)\n qubo_problem_fn = wrap_vrp_qubo_problem(params=params)\n solver = partitionqubo.solver_fn(params=params, backend_solver=\n backend_solver, qubo_problem_fn=qubo_problem_fn)\n result = solver(problem=cvrp_problem)\n assert result.problem_identifier == 'bla'\n assert (result.routes == np.array([[0, 5, 1, 3, 2, 4, 0]])).all()\n assert result.total_demands == 40\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
class Location(models.Model):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
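# The models below override save() to generate a prefixed, zero-padded primary key
# (e.g. 'BN001', 'CMP001', 'INS001', 'CON001') from the current maximum id.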
class Banner(models.Model):
id = models.CharField(primary_key=True, editable=False, max_length=5)
name = models.CharField(max_length=100)
caption = models.TextField()
description = models.TextField(blank=True, null=True)
image = models.ImageField(upload_to='images/', verbose_name='Banner',
blank=True)
height = models.IntegerField()
width = models.IntegerField()
is_archived = models.BooleanField(default=False)
def __str__(self):
return self.name
def delete(self, *args, **kwargs):
self.image.delete(save=False)
super(Banner, self).delete(*args, **kwargs)
def save(self, **kwargs):
if not self.id:
max = Banner.objects.aggregate(id_max=Max('id'))['id_max']
if max is not None:
max = max[-3:]
max = int(max)
max += 1
else:
max = 1
self.id = 'BN' + '{0:03d}'.format(max)
super().save(*kwargs)
class Campaign(models.Model):
id = models.CharField(primary_key=True, editable=False, max_length=6)
location = models.ForeignKey(Location, on_delete=models.CASCADE,
related_name='locations')
campaign_code = models.CharField(max_length=30, null=True, blank=True)
priority = models.IntegerField(null=True, blank=True)
date_created = models.DateField(null=True, blank=True)
date_updated = models.DateField(null=True, blank=True)
valid_date_start = models.DateField(null=True, blank=True)
valid_date_end = models.DateField(null=True, blank=True)
def save(self, **kwargs):
if not self.id:
max = Campaign.objects.aggregate(id_max=Max('id'))['id_max']
if max is not None:
max = max[-3:]
max = int(max)
max += 1
else:
max = 1
self.id = 'CMP' + '{0:03d}'.format(max)
super().save(*kwargs)
class Installation(models.Model):
id = models.CharField(primary_key=True, editable=False, max_length=6)
banner = models.ForeignKey(Banner, on_delete=models.CASCADE,
related_name='banners', blank=True, null=True)
campaign = models.ForeignKey(Campaign, on_delete=models.CASCADE,
related_name='campaigns')
redirect = models.URLField(null=True, blank=True)
def save(self, **kwargs):
if not self.id:
max = Installation.objects.aggregate(id_max=Max('id'))['id_max']
if max is not None:
max = max[-3:]
max = int(max)
max += 1
else:
max = 1
self.id = 'INS' + '{0:03d}'.format(max)
super().save(*kwargs)
<|reserved_special_token_0|>
class ContactSource(models.Model):
id = models.CharField(primary_key=True, editable=False, max_length=9)
source = models.CharField(max_length=30, choices=source_choices)
def __str__(self):
return self.source
def save(self, **kwargs):
if not self.id:
max = ContactSource.objects.aggregate(id_max=Max('id'))['id_max']
if max is not None:
max = max[-3:]
max = int(max)
max += 1
else:
max = 1
self.id = 'CONSRC' + '{0:03d}'.format(max)
super().save(*kwargs)
class Contact(models.Model):
id = models.CharField(primary_key=True, editable=False, max_length=6)
source = models.ForeignKey(ContactSource, on_delete=models.CASCADE,
related_name='contactsources')
name = models.CharField(max_length=100)
numbers = models.FileField(upload_to='pickles/contact/')
is_deleted = models.BooleanField(default=False)
deleted_datetime = models.DateTimeField(blank=True, null=True)
def __str__(self):
return self.name
def save(self, **kwargs):
if not self.id:
max = Contact.objects.aggregate(id_max=Max('id'))['id_max']
if max is not None:
max = max[-3:]
max = int(max)
max += 1
else:
max = 1
self.id = 'CON' + '{0:03d}'.format(max)
super().save(*kwargs)
class GenerateContact(models.Model):
id = models.CharField(primary_key=True, editable=False, max_length=9)
contact = models.ForeignKey(Contact, on_delete=models.CASCADE,
related_name='contact')
first_code = models.CharField(max_length=4, validators=[RegexValidator(
'^\\d{0,10}$')])
digits = models.CharField(max_length=30, validators=[RegexValidator(
'^\\d{0,10}$')])
generate_numbers = models.CharField(max_length=30, validators=[
RegexValidator('^\\d{0,10}$')])
def save(self, **kwargs):
if not self.id:
max = GenerateContact.objects.aggregate(id_max=Max('id'))['id_max']
if max is not None:
max = max[-3:]
max = int(max)
max += 1
else:
max = 1
self.id = 'GENCON' + '{0:03d}'.format(max)
super().save(*kwargs)
<|reserved_special_token_0|>
class SMSBlast(models.Model):
id = models.CharField(primary_key=True, editable=False, max_length=6)
message_title = models.CharField(max_length=100)
message_text = models.CharField(max_length=160)
send_date = models.DateField(null=True, blank=True)
send_time = models.TimeField(null=True, blank=True)
is_now = models.BooleanField(default=False)
def __str__(self):
return self.message_title
def save(self, **kwargs):
if not self.id:
max = SMSBlast.objects.aggregate(id_max=Max('id'))['id_max']
if max is not None:
max = max[-3:]
max = int(max)
max += 1
else:
max = 1
self.id = 'SMS' + '{0:03d}'.format(max)
super().save(*kwargs)
class ContactAndSMS(models.Model):
id = models.CharField(primary_key=True, editable=False, max_length=12)
contact = models.ForeignKey(Contact, on_delete=models.CASCADE,
related_name='smsncon_contact')
smsblast = models.ForeignKey(SMSBlast, on_delete=models.CASCADE,
related_name='smsncon_smsblast')
def save(self, **kwargs):
if not self.id:
max = ContactAndSMS.objects.aggregate(id_max=Max('id'))['id_max']
if max is not None:
max = max[-3:]
max = int(max)
max += 1
else:
max = 1
self.id = 'CONANDSMS' + '{0:03d}'.format(max)
super().save(*kwargs)
class SMSBlastJob(models.Model):
id = models.CharField(primary_key=True, editable=False, max_length=9)
job_id = models.CharField(max_length=100, blank=True, null=True)
contact = models.ForeignKey(Contact, on_delete=models.CASCADE,
related_name='contact_job')
smsblast = models.ForeignKey(SMSBlast, on_delete=models.CASCADE,
related_name='smsblast_job')
def __str__(self):
return self.job_id
def save(self, **kwargs):
if not self.id:
max = SMSBlastJob.objects.aggregate(id_max=Max('id'))['id_max']
if max is not None:
max = max[-3:]
max = int(max)
max += 1
else:
max = 1
self.id = 'SMSJOB' + '{0:03d}'.format(max)
super().save(*kwargs)
class SMSStatus(models.Model):
id = models.CharField(primary_key=True, editable=False, max_length=10)
job = models.ForeignKey(SMSBlastJob, on_delete=models.CASCADE,
related_name='job_status')
contact = models.ForeignKey(Contact, on_delete=models.CASCADE,
related_name='contact_status')
status = models.FileField(upload_to='pickles/status/')
def __str__(self):
return self.job_id
def save(self, **kwargs):
if not self.id:
max = SMSStatus.objects.aggregate(id_max=Max('id'))['id_max']
if max is not None:
max = max[-3:]
max = int(max)
max += 1
else:
max = 1
self.id = 'SMSSTAT' + '{0:03d}'.format(max)
super().save(*kwargs)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Location(models.Model):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def __str__(self):
return self.name
def save(self, **kwargs):
if not self.id:
max = Location.objects.aggregate(id_max=Max('id'))['id_max']
if max is not None:
max = max[-3:]
max = int(max)
max += 1
else:
max = 1
self.id = 'LOC' + '{0:03d}'.format(max)
super().save(*kwargs)
class Banner(models.Model):
id = models.CharField(primary_key=True, editable=False, max_length=5)
name = models.CharField(max_length=100)
caption = models.TextField()
description = models.TextField(blank=True, null=True)
image = models.ImageField(upload_to='images/', verbose_name='Banner',
blank=True)
height = models.IntegerField()
width = models.IntegerField()
is_archived = models.BooleanField(default=False)
def __str__(self):
return self.name
def delete(self, *args, **kwargs):
self.image.delete(save=False)
super(Banner, self).delete(*args, **kwargs)
def save(self, **kwargs):
if not self.id:
max = Banner.objects.aggregate(id_max=Max('id'))['id_max']
if max is not None:
max = max[-3:]
max = int(max)
max += 1
else:
max = 1
self.id = 'BN' + '{0:03d}'.format(max)
super().save(*kwargs)
class Campaign(models.Model):
id = models.CharField(primary_key=True, editable=False, max_length=6)
location = models.ForeignKey(Location, on_delete=models.CASCADE,
related_name='locations')
campaign_code = models.CharField(max_length=30, null=True, blank=True)
priority = models.IntegerField(null=True, blank=True)
date_created = models.DateField(null=True, blank=True)
date_updated = models.DateField(null=True, blank=True)
valid_date_start = models.DateField(null=True, blank=True)
valid_date_end = models.DateField(null=True, blank=True)
def save(self, **kwargs):
if not self.id:
max = Campaign.objects.aggregate(id_max=Max('id'))['id_max']
if max is not None:
max = max[-3:]
max = int(max)
max += 1
else:
max = 1
self.id = 'CMP' + '{0:03d}'.format(max)
super().save(*kwargs)
class Installation(models.Model):
id = models.CharField(primary_key=True, editable=False, max_length=6)
banner = models.ForeignKey(Banner, on_delete=models.CASCADE,
related_name='banners', blank=True, null=True)
campaign = models.ForeignKey(Campaign, on_delete=models.CASCADE,
related_name='campaigns')
redirect = models.URLField(null=True, blank=True)
def save(self, **kwargs):
if not self.id:
max = Installation.objects.aggregate(id_max=Max('id'))['id_max']
if max is not None:
max = max[-3:]
max = int(max)
max += 1
else:
max = 1
self.id = 'INS' + '{0:03d}'.format(max)
super().save(*kwargs)
<|reserved_special_token_0|>
class ContactSource(models.Model):
id = models.CharField(primary_key=True, editable=False, max_length=9)
source = models.CharField(max_length=30, choices=source_choices)
def __str__(self):
return self.source
def save(self, **kwargs):
if not self.id:
max = ContactSource.objects.aggregate(id_max=Max('id'))['id_max']
if max is not None:
max = max[-3:]
max = int(max)
max += 1
else:
max = 1
self.id = 'CONSRC' + '{0:03d}'.format(max)
super().save(*kwargs)
class Contact(models.Model):
id = models.CharField(primary_key=True, editable=False, max_length=6)
source = models.ForeignKey(ContactSource, on_delete=models.CASCADE,
related_name='contactsources')
name = models.CharField(max_length=100)
numbers = models.FileField(upload_to='pickles/contact/')
is_deleted = models.BooleanField(default=False)
deleted_datetime = models.DateTimeField(blank=True, null=True)
def __str__(self):
return self.name
def save(self, **kwargs):
if not self.id:
max = Contact.objects.aggregate(id_max=Max('id'))['id_max']
if max is not None:
max = max[-3:]
max = int(max)
max += 1
else:
max = 1
self.id = 'CON' + '{0:03d}'.format(max)
super().save(*kwargs)
class GenerateContact(models.Model):
id = models.CharField(primary_key=True, editable=False, max_length=9)
contact = models.ForeignKey(Contact, on_delete=models.CASCADE,
related_name='contact')
first_code = models.CharField(max_length=4, validators=[RegexValidator(
'^\\d{0,10}$')])
digits = models.CharField(max_length=30, validators=[RegexValidator(
'^\\d{0,10}$')])
generate_numbers = models.CharField(max_length=30, validators=[
RegexValidator('^\\d{0,10}$')])
def save(self, **kwargs):
if not self.id:
max = GenerateContact.objects.aggregate(id_max=Max('id'))['id_max']
if max is not None:
max = max[-3:]
max = int(max)
max += 1
else:
max = 1
self.id = 'GENCON' + '{0:03d}'.format(max)
super().save(*kwargs)
<|reserved_special_token_0|>
class SMSBlast(models.Model):
id = models.CharField(primary_key=True, editable=False, max_length=6)
message_title = models.CharField(max_length=100)
message_text = models.CharField(max_length=160)
send_date = models.DateField(null=True, blank=True)
send_time = models.TimeField(null=True, blank=True)
is_now = models.BooleanField(default=False)
def __str__(self):
return self.message_title
def save(self, **kwargs):
if not self.id:
max = SMSBlast.objects.aggregate(id_max=Max('id'))['id_max']
if max is not None:
max = max[-3:]
max = int(max)
max += 1
else:
max = 1
self.id = 'SMS' + '{0:03d}'.format(max)
super().save(*kwargs)
class ContactAndSMS(models.Model):
id = models.CharField(primary_key=True, editable=False, max_length=12)
contact = models.ForeignKey(Contact, on_delete=models.CASCADE,
related_name='smsncon_contact')
smsblast = models.ForeignKey(SMSBlast, on_delete=models.CASCADE,
related_name='smsncon_smsblast')
def save(self, **kwargs):
if not self.id:
max = ContactAndSMS.objects.aggregate(id_max=Max('id'))['id_max']
if max is not None:
max = max[-3:]
max = int(max)
max += 1
else:
max = 1
self.id = 'CONANDSMS' + '{0:03d}'.format(max)
super().save(*kwargs)
class SMSBlastJob(models.Model):
id = models.CharField(primary_key=True, editable=False, max_length=9)
job_id = models.CharField(max_length=100, blank=True, null=True)
contact = models.ForeignKey(Contact, on_delete=models.CASCADE,
related_name='contact_job')
smsblast = models.ForeignKey(SMSBlast, on_delete=models.CASCADE,
related_name='smsblast_job')
def __str__(self):
return self.job_id
def save(self, **kwargs):
if not self.id:
max = SMSBlastJob.objects.aggregate(id_max=Max('id'))['id_max']
if max is not None:
max = max[-3:]
max = int(max)
max += 1
else:
max = 1
self.id = 'SMSJOB' + '{0:03d}'.format(max)
super().save(*kwargs)
class SMSStatus(models.Model):
id = models.CharField(primary_key=True, editable=False, max_length=10)
job = models.ForeignKey(SMSBlastJob, on_delete=models.CASCADE,
related_name='job_status')
contact = models.ForeignKey(Contact, on_delete=models.CASCADE,
related_name='contact_status')
status = models.FileField(upload_to='pickles/status/')
def __str__(self):
return self.job_id
def save(self, **kwargs):
if not self.id:
max = SMSStatus.objects.aggregate(id_max=Max('id'))['id_max']
if max is not None:
max = max[-3:]
max = int(max)
max += 1
else:
max = 1
self.id = 'SMSSTAT' + '{0:03d}'.format(max)
super().save(*kwargs)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Application(models.Model):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
class Page(models.Model):
id = models.CharField(primary_key=True, editable=False, max_length=5)
application = models.ForeignKey(Application, on_delete=models.CASCADE,
related_name='applications')
name = models.CharField(max_length=100)
is_archived = models.BooleanField(default=False)
def __str__(self):
return self.name
def save(self, **kwargs):
if not self.id:
max = Page.objects.aggregate(id_max=Max('id'))['id_max']
if max is not None:
max = max[-3:]
max = int(max)
max += 1
else:
max = 1
self.id = 'PG' + '{0:03d}'.format(max)
super().save(*kwargs)
class Location(models.Model):
id = models.CharField(primary_key=True, editable=False, max_length=6)
loc_code = models.CharField(max_length=30, null=True, blank=True,
unique=True)
page = models.ForeignKey(Page, on_delete=models.CASCADE, related_name=
'pages')
is_slider = models.BooleanField(default=False)
is_active = models.BooleanField(default=False)
name = models.CharField(max_length=100)
width = models.IntegerField()
height = models.IntegerField()
def __str__(self):
return self.name
def save(self, **kwargs):
if not self.id:
max = Location.objects.aggregate(id_max=Max('id'))['id_max']
if max is not None:
max = max[-3:]
max = int(max)
max += 1
else:
max = 1
self.id = 'LOC' + '{0:03d}'.format(max)
super().save(*kwargs)
class Banner(models.Model):
id = models.CharField(primary_key=True, editable=False, max_length=5)
name = models.CharField(max_length=100)
caption = models.TextField()
description = models.TextField(blank=True, null=True)
image = models.ImageField(upload_to='images/', verbose_name='Banner',
blank=True)
height = models.IntegerField()
width = models.IntegerField()
is_archived = models.BooleanField(default=False)
def __str__(self):
return self.name
def delete(self, *args, **kwargs):
self.image.delete(save=False)
super(Banner, self).delete(*args, **kwargs)
def save(self, **kwargs):
if not self.id:
max = Banner.objects.aggregate(id_max=Max('id'))['id_max']
if max is not None:
max = max[-3:]
max = int(max)
max += 1
else:
max = 1
self.id = 'BN' + '{0:03d}'.format(max)
super().save(*kwargs)
class Campaign(models.Model):
id = models.CharField(primary_key=True, editable=False, max_length=6)
location = models.ForeignKey(Location, on_delete=models.CASCADE,
related_name='locations')
campaign_code = models.CharField(max_length=30, null=True, blank=True)
priority = models.IntegerField(null=True, blank=True)
date_created = models.DateField(null=True, blank=True)
date_updated = models.DateField(null=True, blank=True)
valid_date_start = models.DateField(null=True, blank=True)
valid_date_end = models.DateField(null=True, blank=True)
def save(self, **kwargs):
if not self.id:
max = Campaign.objects.aggregate(id_max=Max('id'))['id_max']
if max is not None:
max = max[-3:]
max = int(max)
max += 1
else:
max = 1
self.id = 'CMP' + '{0:03d}'.format(max)
super().save(*kwargs)
class Installation(models.Model):
id = models.CharField(primary_key=True, editable=False, max_length=6)
banner = models.ForeignKey(Banner, on_delete=models.CASCADE,
related_name='banners', blank=True, null=True)
campaign = models.ForeignKey(Campaign, on_delete=models.CASCADE,
related_name='campaigns')
redirect = models.URLField(null=True, blank=True)
def save(self, **kwargs):
if not self.id:
max = Installation.objects.aggregate(id_max=Max('id'))['id_max']
if max is not None:
max = max[-3:]
max = int(max)
max += 1
else:
max = 1
self.id = 'INS' + '{0:03d}'.format(max)
super().save(*kwargs)
<|reserved_special_token_0|>
class ContactSource(models.Model):
id = models.CharField(primary_key=True, editable=False, max_length=9)
source = models.CharField(max_length=30, choices=source_choices)
def __str__(self):
return self.source
def save(self, **kwargs):
if not self.id:
max = ContactSource.objects.aggregate(id_max=Max('id'))['id_max']
if max is not None:
max = max[-3:]
max = int(max)
max += 1
else:
max = 1
self.id = 'CONSRC' + '{0:03d}'.format(max)
super().save(*kwargs)
class Contact(models.Model):
id = models.CharField(primary_key=True, editable=False, max_length=6)
source = models.ForeignKey(ContactSource, on_delete=models.CASCADE,
related_name='contactsources')
name = models.CharField(max_length=100)
numbers = models.FileField(upload_to='pickles/contact/')
is_deleted = models.BooleanField(default=False)
deleted_datetime = models.DateTimeField(blank=True, null=True)
def __str__(self):
return self.name
def save(self, **kwargs):
if not self.id:
max = Contact.objects.aggregate(id_max=Max('id'))['id_max']
if max is not None:
max = max[-3:]
max = int(max)
max += 1
else:
max = 1
self.id = 'CON' + '{0:03d}'.format(max)
super().save(*kwargs)
class GenerateContact(models.Model):
id = models.CharField(primary_key=True, editable=False, max_length=9)
contact = models.ForeignKey(Contact, on_delete=models.CASCADE,
related_name='contact')
first_code = models.CharField(max_length=4, validators=[RegexValidator(
'^\\d{0,10}$')])
digits = models.CharField(max_length=30, validators=[RegexValidator(
'^\\d{0,10}$')])
generate_numbers = models.CharField(max_length=30, validators=[
RegexValidator('^\\d{0,10}$')])
def save(self, **kwargs):
if not self.id:
max = GenerateContact.objects.aggregate(id_max=Max('id'))['id_max']
if max is not None:
max = max[-3:]
max = int(max)
max += 1
else:
max = 1
self.id = 'GENCON' + '{0:03d}'.format(max)
super().save(*kwargs)
<|reserved_special_token_0|>
class SMSBlast(models.Model):
id = models.CharField(primary_key=True, editable=False, max_length=6)
message_title = models.CharField(max_length=100)
message_text = models.CharField(max_length=160)
send_date = models.DateField(null=True, blank=True)
send_time = models.TimeField(null=True, blank=True)
is_now = models.BooleanField(default=False)
def __str__(self):
return self.message_title
def save(self, **kwargs):
if not self.id:
max = SMSBlast.objects.aggregate(id_max=Max('id'))['id_max']
if max is not None:
max = max[-3:]
max = int(max)
max += 1
else:
max = 1
self.id = 'SMS' + '{0:03d}'.format(max)
super().save(*kwargs)
class ContactAndSMS(models.Model):
id = models.CharField(primary_key=True, editable=False, max_length=12)
contact = models.ForeignKey(Contact, on_delete=models.CASCADE,
related_name='smsncon_contact')
smsblast = models.ForeignKey(SMSBlast, on_delete=models.CASCADE,
related_name='smsncon_smsblast')
def save(self, **kwargs):
if not self.id:
max = ContactAndSMS.objects.aggregate(id_max=Max('id'))['id_max']
if max is not None:
max = max[-3:]
max = int(max)
max += 1
else:
max = 1
self.id = 'CONANDSMS' + '{0:03d}'.format(max)
super().save(*kwargs)
class SMSBlastJob(models.Model):
id = models.CharField(primary_key=True, editable=False, max_length=9)
job_id = models.CharField(max_length=100, blank=True, null=True)
contact = models.ForeignKey(Contact, on_delete=models.CASCADE,
related_name='contact_job')
smsblast = models.ForeignKey(SMSBlast, on_delete=models.CASCADE,
related_name='smsblast_job')
def __str__(self):
return self.job_id
def save(self, **kwargs):
if not self.id:
max = SMSBlastJob.objects.aggregate(id_max=Max('id'))['id_max']
if max is not None:
max = max[-3:]
max = int(max)
max += 1
else:
max = 1
self.id = 'SMSJOB' + '{0:03d}'.format(max)
super().save(*kwargs)
class SMSStatus(models.Model):
id = models.CharField(primary_key=True, editable=False, max_length=10)
job = models.ForeignKey(SMSBlastJob, on_delete=models.CASCADE,
related_name='job_status')
contact = models.ForeignKey(Contact, on_delete=models.CASCADE,
related_name='contact_status')
status = models.FileField(upload_to='pickles/status/')
def __str__(self):
return self.job_id
def save(self, **kwargs):
if not self.id:
max = SMSStatus.objects.aggregate(id_max=Max('id'))['id_max']
if max is not None:
max = max[-3:]
max = int(max)
max += 1
else:
max = 1
self.id = 'SMSSTAT' + '{0:03d}'.format(max)
super().save(*kwargs)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class User(AbstractUser):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
class Application(models.Model):
id = models.CharField(primary_key=True, editable=False, max_length=6)
app_code = models.CharField(max_length=30, blank=True, null=True)
name = models.CharField(max_length=100, blank=True, null=True)
is_archived = models.BooleanField(default=False)
def __str__(self):
return self.name
def save(self, **kwargs):
if not self.id:
max = Application.objects.aggregate(id_max=Max('id'))['id_max']
if max is not None:
max = max[-3:]
max = int(max)
max += 1
else:
max = 1
self.id = 'APP' + '{0:03d}'.format(max)
super().save(*kwargs)
class Page(models.Model):
id = models.CharField(primary_key=True, editable=False, max_length=5)
application = models.ForeignKey(Application, on_delete=models.CASCADE,
related_name='applications')
name = models.CharField(max_length=100)
is_archived = models.BooleanField(default=False)
def __str__(self):
return self.name
def save(self, **kwargs):
if not self.id:
max = Page.objects.aggregate(id_max=Max('id'))['id_max']
if max is not None:
max = max[-3:]
max = int(max)
max += 1
else:
max = 1
self.id = 'PG' + '{0:03d}'.format(max)
super().save(*kwargs)
class Location(models.Model):
id = models.CharField(primary_key=True, editable=False, max_length=6)
loc_code = models.CharField(max_length=30, null=True, blank=True,
unique=True)
page = models.ForeignKey(Page, on_delete=models.CASCADE, related_name=
'pages')
is_slider = models.BooleanField(default=False)
is_active = models.BooleanField(default=False)
name = models.CharField(max_length=100)
width = models.IntegerField()
height = models.IntegerField()
def __str__(self):
return self.name
def save(self, **kwargs):
if not self.id:
max = Location.objects.aggregate(id_max=Max('id'))['id_max']
if max is not None:
max = max[-3:]
max = int(max)
max += 1
else:
max = 1
self.id = 'LOC' + '{0:03d}'.format(max)
super().save(*kwargs)
class Banner(models.Model):
id = models.CharField(primary_key=True, editable=False, max_length=5)
name = models.CharField(max_length=100)
caption = models.TextField()
description = models.TextField(blank=True, null=True)
image = models.ImageField(upload_to='images/', verbose_name='Banner',
blank=True)
height = models.IntegerField()
width = models.IntegerField()
is_archived = models.BooleanField(default=False)
def __str__(self):
return self.name
def delete(self, *args, **kwargs):
self.image.delete(save=False)
super(Banner, self).delete(*args, **kwargs)
def save(self, **kwargs):
if not self.id:
max = Banner.objects.aggregate(id_max=Max('id'))['id_max']
if max is not None:
max = max[-3:]
max = int(max)
max += 1
else:
max = 1
self.id = 'BN' + '{0:03d}'.format(max)
super().save(*kwargs)
class Campaign(models.Model):
id = models.CharField(primary_key=True, editable=False, max_length=6)
location = models.ForeignKey(Location, on_delete=models.CASCADE,
related_name='locations')
campaign_code = models.CharField(max_length=30, null=True, blank=True)
priority = models.IntegerField(null=True, blank=True)
date_created = models.DateField(null=True, blank=True)
date_updated = models.DateField(null=True, blank=True)
valid_date_start = models.DateField(null=True, blank=True)
valid_date_end = models.DateField(null=True, blank=True)
def save(self, **kwargs):
if not self.id:
max = Campaign.objects.aggregate(id_max=Max('id'))['id_max']
if max is not None:
max = max[-3:]
max = int(max)
max += 1
else:
max = 1
self.id = 'CMP' + '{0:03d}'.format(max)
super().save(*kwargs)
class Installation(models.Model):
id = models.CharField(primary_key=True, editable=False, max_length=6)
banner = models.ForeignKey(Banner, on_delete=models.CASCADE,
related_name='banners', blank=True, null=True)
campaign = models.ForeignKey(Campaign, on_delete=models.CASCADE,
related_name='campaigns')
redirect = models.URLField(null=True, blank=True)
def save(self, **kwargs):
if not self.id:
max = Installation.objects.aggregate(id_max=Max('id'))['id_max']
if max is not None:
max = max[-3:]
max = int(max)
max += 1
else:
max = 1
self.id = 'INS' + '{0:03d}'.format(max)
super().save(*kwargs)
<|reserved_special_token_0|>
class ContactSource(models.Model):
id = models.CharField(primary_key=True, editable=False, max_length=9)
source = models.CharField(max_length=30, choices=source_choices)
def __str__(self):
return self.source
def save(self, **kwargs):
if not self.id:
max = ContactSource.objects.aggregate(id_max=Max('id'))['id_max']
if max is not None:
max = max[-3:]
max = int(max)
max += 1
else:
max = 1
self.id = 'CONSRC' + '{0:03d}'.format(max)
super().save(*kwargs)
class Contact(models.Model):
id = models.CharField(primary_key=True, editable=False, max_length=6)
source = models.ForeignKey(ContactSource, on_delete=models.CASCADE,
related_name='contactsources')
name = models.CharField(max_length=100)
numbers = models.FileField(upload_to='pickles/contact/')
is_deleted = models.BooleanField(default=False)
deleted_datetime = models.DateTimeField(blank=True, null=True)
def __str__(self):
return self.name
def save(self, **kwargs):
if not self.id:
max = Contact.objects.aggregate(id_max=Max('id'))['id_max']
if max is not None:
max = max[-3:]
max = int(max)
max += 1
else:
max = 1
self.id = 'CON' + '{0:03d}'.format(max)
super().save(*kwargs)
class GenerateContact(models.Model):
id = models.CharField(primary_key=True, editable=False, max_length=9)
contact = models.ForeignKey(Contact, on_delete=models.CASCADE,
related_name='contact')
first_code = models.CharField(max_length=4, validators=[RegexValidator(
'^\\d{0,10}$')])
digits = models.CharField(max_length=30, validators=[RegexValidator(
'^\\d{0,10}$')])
generate_numbers = models.CharField(max_length=30, validators=[
RegexValidator('^\\d{0,10}$')])
def save(self, **kwargs):
if not self.id:
max = GenerateContact.objects.aggregate(id_max=Max('id'))['id_max']
if max is not None:
max = max[-3:]
max = int(max)
max += 1
else:
max = 1
self.id = 'GENCON' + '{0:03d}'.format(max)
super().save(*kwargs)
<|reserved_special_token_0|>
class SMSBlast(models.Model):
id = models.CharField(primary_key=True, editable=False, max_length=6)
message_title = models.CharField(max_length=100)
message_text = models.CharField(max_length=160)
send_date = models.DateField(null=True, blank=True)
send_time = models.TimeField(null=True, blank=True)
is_now = models.BooleanField(default=False)
def __str__(self):
return self.message_title
def save(self, **kwargs):
if not self.id:
max = SMSBlast.objects.aggregate(id_max=Max('id'))['id_max']
if max is not None:
max = max[-3:]
max = int(max)
max += 1
else:
max = 1
self.id = 'SMS' + '{0:03d}'.format(max)
super().save(*kwargs)
class ContactAndSMS(models.Model):
id = models.CharField(primary_key=True, editable=False, max_length=12)
contact = models.ForeignKey(Contact, on_delete=models.CASCADE,
related_name='smsncon_contact')
smsblast = models.ForeignKey(SMSBlast, on_delete=models.CASCADE,
related_name='smsncon_smsblast')
def save(self, **kwargs):
if not self.id:
max = ContactAndSMS.objects.aggregate(id_max=Max('id'))['id_max']
if max is not None:
max = max[-3:]
max = int(max)
max += 1
else:
max = 1
self.id = 'CONANDSMS' + '{0:03d}'.format(max)
super().save(*kwargs)
class SMSBlastJob(models.Model):
id = models.CharField(primary_key=True, editable=False, max_length=9)
job_id = models.CharField(max_length=100, blank=True, null=True)
contact = models.ForeignKey(Contact, on_delete=models.CASCADE,
related_name='contact_job')
smsblast = models.ForeignKey(SMSBlast, on_delete=models.CASCADE,
related_name='smsblast_job')
def __str__(self):
return self.job_id
def save(self, **kwargs):
if not self.id:
max = SMSBlastJob.objects.aggregate(id_max=Max('id'))['id_max']
if max is not None:
max = max[-3:]
max = int(max)
max += 1
else:
max = 1
self.id = 'SMSJOB' + '{0:03d}'.format(max)
super().save(*kwargs)
class SMSStatus(models.Model):
id = models.CharField(primary_key=True, editable=False, max_length=10)
job = models.ForeignKey(SMSBlastJob, on_delete=models.CASCADE,
related_name='job_status')
contact = models.ForeignKey(Contact, on_delete=models.CASCADE,
related_name='contact_status')
status = models.FileField(upload_to='pickles/status/')
def __str__(self):
return self.job_id
def save(self, **kwargs):
if not self.id:
max = SMSStatus.objects.aggregate(id_max=Max('id'))['id_max']
if max is not None:
max = max[-3:]
max = int(max)
max += 1
else:
max = 1
self.id = 'SMSSTAT' + '{0:03d}'.format(max)
super().save(*kwargs)
<|reserved_special_token_1|>
from django.db import models
from django.contrib.auth.models import AbstractUser
from django.db.models import Max
from django.core.validators import RegexValidator
from django.utils import timezone
class User(AbstractUser):
is_developer = models.BooleanField('developer status', default=False)
is_marketing = models.BooleanField('marketing status', default=False)
email = models.EmailField(unique=True, null=True, blank=True)
def __str__(self):
return self.username
class Application(models.Model):
id = models.CharField(primary_key=True, editable=False, max_length=6)
app_code = models.CharField(max_length=30, blank=True, null=True)
name = models.CharField(max_length=100, blank=True, null=True)
is_archived = models.BooleanField(default=False)
def __str__(self):
return self.name
def save(self, **kwargs):
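        # Sequential primary keys: read the numeric tail of the current
        # highest id (e.g. 'APP007' -> 7), increment it and zero-pad it to
        # three digits. Every model below repeats this pattern with its own prefix.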
if not self.id:
max = Application.objects.aggregate(id_max=Max('id'))['id_max']
if max is not None:
max = max[-3:]
max = int(max)
max += 1
else:
max = 1
self.id = "APP" + "{0:03d}".format(max)
        super().save(**kwargs)
class Page(models.Model):
id = models.CharField(primary_key=True, editable=False, max_length=5)
application = models.ForeignKey(Application, on_delete=models.CASCADE, related_name='applications')
name = models.CharField(max_length=100)
is_archived = models.BooleanField(default=False)
def __str__(self):
return self.name
def save(self, **kwargs):
if not self.id:
max = Page.objects.aggregate(id_max=Max('id'))['id_max']
if max is not None:
max = max[-3:]
max = int(max)
max += 1
else:
max = 1
self.id = "PG" + "{0:03d}".format(max)
        super().save(**kwargs)
class Location(models.Model):
id = models.CharField(primary_key=True, editable=False, max_length=6)
loc_code = models.CharField(max_length=30, null=True, blank=True, unique=True)
page = models.ForeignKey(Page, on_delete=models.CASCADE, related_name='pages')
is_slider = models.BooleanField(default=False)
is_active = models.BooleanField(default=False)
name = models.CharField(max_length=100)
width = models.IntegerField()
height = models.IntegerField()
def __str__(self):
return self.name
def save(self, **kwargs):
if not self.id:
max = Location.objects.aggregate(id_max=Max('id'))['id_max']
if max is not None:
max = max[-3:]
max = int(max)
max += 1
else:
max = 1
self.id = "LOC" + "{0:03d}".format(max)
        super().save(**kwargs)
class Banner(models.Model):
id = models.CharField(primary_key=True, editable=False, max_length=5)
name = models.CharField(max_length=100)
caption = models.TextField()
description = models.TextField(blank=True, null=True)
image = models.ImageField(upload_to='images/', verbose_name='Banner', blank=True)
height = models.IntegerField()
width = models.IntegerField()
is_archived = models.BooleanField(default=False)
def __str__(self):
return self.name
def delete(self, *args, **kwargs):
self.image.delete(save=False)
super(Banner, self).delete(*args, **kwargs)
def save(self, **kwargs):
if not self.id:
max = Banner.objects.aggregate(id_max=Max('id'))['id_max']
if max is not None:
max = max[-3:]
max = int(max)
max += 1
else:
max = 1
self.id = "BN" + "{0:03d}".format(max)
        super().save(**kwargs)
class Campaign(models.Model):
id = models.CharField(primary_key=True, editable=False, max_length=6)
location = models.ForeignKey(Location, on_delete=models.CASCADE, related_name='locations')
campaign_code = models.CharField(max_length=30, null=True, blank=True)
priority = models.IntegerField(null=True, blank=True)
date_created = models.DateField(null=True, blank=True)
date_updated = models.DateField(null=True, blank=True)
valid_date_start = models.DateField(null=True, blank=True)
valid_date_end = models.DateField(null=True, blank=True)
def save(self, **kwargs):
if not self.id:
max = Campaign.objects.aggregate(id_max=Max('id'))['id_max']
if max is not None:
max = max[-3:]
max = int(max)
max += 1
else:
max = 1
self.id = "CMP" + "{0:03d}".format(max)
        super().save(**kwargs)
class Installation(models.Model):
id = models.CharField(primary_key=True, editable=False, max_length=6)
banner = models.ForeignKey(Banner, on_delete=models.CASCADE, related_name='banners', blank=True, null=True)
campaign = models.ForeignKey(Campaign, on_delete=models.CASCADE, related_name='campaigns')
redirect = models.URLField(null=True, blank=True)
def save(self, **kwargs):
if not self.id:
max = Installation.objects.aggregate(id_max=Max('id'))['id_max']
if max is not None:
max = max[-3:]
max = int(max)
max += 1
else:
max = 1
self.id = "INS" + "{0:03d}".format(max)
        super().save(**kwargs)
source_choices = (
('random', 'Generate nomor secara acak'),
('csv', 'Upload file .csv'),
)
class ContactSource(models.Model):
id = models.CharField(primary_key=True, editable=False, max_length=9)
source = models.CharField(max_length=30, choices=source_choices)
def __str__(self):
return self.source
def save(self, **kwargs):
if not self.id:
max = ContactSource.objects.aggregate(id_max=Max('id'))['id_max']
if max is not None:
max = max[-3:]
max = int(max)
max += 1
else:
max = 1
self.id = "CONSRC" + "{0:03d}".format(max)
        super().save(**kwargs)
class Contact(models.Model):
id = models.CharField(primary_key=True, editable=False, max_length=6)
source = models.ForeignKey(ContactSource, on_delete=models.CASCADE, related_name='contactsources')
name = models.CharField(max_length=100)
numbers = models.FileField(upload_to='pickles/contact/')
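    # Uploaded file containing this contact list's numbers (saved under pickles/contact/).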
is_deleted = models.BooleanField(default=False)
deleted_datetime = models.DateTimeField(blank=True, null=True)
def __str__(self):
return self.name
def save(self, **kwargs):
if not self.id:
max = Contact.objects.aggregate(id_max=Max('id'))['id_max']
if max is not None:
max = max[-3:]
max = int(max)
max += 1
else:
max = 1
self.id = "CON" + "{0:03d}".format(max)
        super().save(**kwargs)
class GenerateContact(models.Model):
id = models.CharField(primary_key=True, editable=False, max_length=9)
contact = models.ForeignKey(Contact, on_delete=models.CASCADE, related_name='contact')
first_code = models.CharField(max_length=4, validators=[RegexValidator(r'^\d{0,10}$')])
digits = models.CharField(max_length=30, validators=[RegexValidator(r'^\d{0,10}$')])
generate_numbers = models.CharField(max_length=30, validators=[RegexValidator(r'^\d{0,10}$')])
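    # The three fields above are validated as digit-only strings of at most ten characters.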
def save(self, **kwargs):
if not self.id:
max = GenerateContact.objects.aggregate(id_max=Max('id'))['id_max']
if max is not None:
max = max[-3:]
max = int(max)
max += 1
else:
max = 1
self.id = "GENCON" + "{0:03d}".format(max)
        super().save(**kwargs)
status_choices = (
('complete', 'Sudah Dikirim'),
('uncomplete', 'Belum Dikirim'),
)
class SMSBlast(models.Model):
id = models.CharField(primary_key=True, editable=False, max_length=6)
message_title = models.CharField(max_length=100)
message_text = models.CharField(max_length=160)
send_date = models.DateField(null=True, blank=True)
send_time = models.TimeField(null=True, blank=True)
is_now = models.BooleanField(default=False)
def __str__(self):
return self.message_title
def save(self, **kwargs):
if not self.id:
max = SMSBlast.objects.aggregate(id_max=Max('id'))['id_max']
if max is not None:
max = max[-3:]
max = int(max)
max += 1
else:
max = 1
self.id = "SMS" + "{0:03d}".format(max)
        super().save(**kwargs)
class ContactAndSMS(models.Model):
id = models.CharField(primary_key=True, editable=False, max_length=12)
contact = models.ForeignKey(Contact, on_delete=models.CASCADE, related_name='smsncon_contact')
smsblast = models.ForeignKey(SMSBlast, on_delete=models.CASCADE, related_name='smsncon_smsblast')
def save(self, **kwargs):
if not self.id:
max = ContactAndSMS.objects.aggregate(id_max=Max('id'))['id_max']
if max is not None:
max = max[-3:]
max = int(max)
max += 1
else:
max = 1
self.id = "CONANDSMS" + "{0:03d}".format(max)
        super().save(**kwargs)
class SMSBlastJob(models.Model):
id = models.CharField(primary_key=True, editable=False, max_length=9)
job_id = models.CharField(max_length=100, blank=True, null=True)
contact = models.ForeignKey(Contact, on_delete=models.CASCADE, related_name='contact_job')
smsblast = models.ForeignKey(SMSBlast, on_delete=models.CASCADE, related_name='smsblast_job')
def __str__(self):
        return self.job_id or self.id  # fall back to the pk while job_id is still unset
def save(self, **kwargs):
if not self.id:
max = SMSBlastJob.objects.aggregate(id_max=Max('id'))['id_max']
if max is not None:
max = max[-3:]
max = int(max)
max += 1
else:
max = 1
self.id = "SMSJOB" + "{0:03d}".format(max)
        super().save(**kwargs)
class SMSStatus(models.Model):
id = models.CharField(primary_key=True, editable=False, max_length=10)
job = models.ForeignKey(SMSBlastJob, on_delete=models.CASCADE, related_name='job_status')
contact = models.ForeignKey(Contact, on_delete=models.CASCADE, related_name='contact_status')
status = models.FileField(upload_to='pickles/status/')
def __str__(self):
return self.job_id
def save(self, **kwargs):
if not self.id:
max = SMSStatus.objects.aggregate(id_max=Max('id'))['id_max']
if max is not None:
max = max[-3:]
max = int(max)
max += 1
else:
max = 1
self.id = "SMSSTAT" + "{0:03d}".format(max)
        super().save(**kwargs)
|
flexible
|
{
"blob_id": "94e9e7c4c09c8c4de4c8f2649707a949d5f5f856",
"index": 7836,
"step-1": "<mask token>\n\n\nclass Location(models.Model):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n\nclass Banner(models.Model):\n id = models.CharField(primary_key=True, editable=False, max_length=5)\n name = models.CharField(max_length=100)\n caption = models.TextField()\n description = models.TextField(blank=True, null=True)\n image = models.ImageField(upload_to='images/', verbose_name='Banner',\n blank=True)\n height = models.IntegerField()\n width = models.IntegerField()\n is_archived = models.BooleanField(default=False)\n\n def __str__(self):\n return self.name\n\n def delete(self, *args, **kwargs):\n self.image.delete(save=False)\n super(Banner, self).delete(*args, **kwargs)\n\n def save(self, **kwargs):\n if not self.id:\n max = Banner.objects.aggregate(id_max=Max('id'))['id_max']\n if max is not None:\n max = max[-3:]\n max = int(max)\n max += 1\n else:\n max = 1\n self.id = 'BN' + '{0:03d}'.format(max)\n super().save(*kwargs)\n\n\nclass Campaign(models.Model):\n id = models.CharField(primary_key=True, editable=False, max_length=6)\n location = models.ForeignKey(Location, on_delete=models.CASCADE,\n related_name='locations')\n campaign_code = models.CharField(max_length=30, null=True, blank=True)\n priority = models.IntegerField(null=True, blank=True)\n date_created = models.DateField(null=True, blank=True)\n date_updated = models.DateField(null=True, blank=True)\n valid_date_start = models.DateField(null=True, blank=True)\n valid_date_end = models.DateField(null=True, blank=True)\n\n def save(self, **kwargs):\n if not self.id:\n max = Campaign.objects.aggregate(id_max=Max('id'))['id_max']\n if max is not None:\n max = max[-3:]\n max = int(max)\n max += 1\n else:\n max = 1\n self.id = 'CMP' + '{0:03d}'.format(max)\n super().save(*kwargs)\n\n\nclass Installation(models.Model):\n id = models.CharField(primary_key=True, editable=False, max_length=6)\n banner = models.ForeignKey(Banner, on_delete=models.CASCADE,\n related_name='banners', blank=True, null=True)\n campaign = models.ForeignKey(Campaign, on_delete=models.CASCADE,\n related_name='campaigns')\n redirect = models.URLField(null=True, blank=True)\n\n def save(self, **kwargs):\n if not self.id:\n max = Installation.objects.aggregate(id_max=Max('id'))['id_max']\n if max is not None:\n max = max[-3:]\n max = int(max)\n max += 1\n else:\n max = 1\n self.id = 'INS' + '{0:03d}'.format(max)\n super().save(*kwargs)\n\n\n<mask token>\n\n\nclass ContactSource(models.Model):\n id = models.CharField(primary_key=True, editable=False, max_length=9)\n source = models.CharField(max_length=30, choices=source_choices)\n\n def __str__(self):\n return self.source\n\n def save(self, **kwargs):\n if not self.id:\n max = ContactSource.objects.aggregate(id_max=Max('id'))['id_max']\n if max is not None:\n max = max[-3:]\n max = int(max)\n max += 1\n else:\n max = 1\n self.id = 'CONSRC' + '{0:03d}'.format(max)\n super().save(*kwargs)\n\n\nclass Contact(models.Model):\n id = models.CharField(primary_key=True, editable=False, max_length=6)\n source = models.ForeignKey(ContactSource, on_delete=models.CASCADE,\n related_name='contactsources')\n name = models.CharField(max_length=100)\n numbers = models.FileField(upload_to='pickles/contact/')\n is_deleted = models.BooleanField(default=False)\n deleted_datetime = models.DateTimeField(blank=True, null=True)\n\n def __str__(self):\n return self.name\n\n def save(self, **kwargs):\n if not self.id:\n 
max = Contact.objects.aggregate(id_max=Max('id'))['id_max']\n if max is not None:\n max = max[-3:]\n max = int(max)\n max += 1\n else:\n max = 1\n self.id = 'CON' + '{0:03d}'.format(max)\n super().save(*kwargs)\n\n\nclass GenerateContact(models.Model):\n id = models.CharField(primary_key=True, editable=False, max_length=9)\n contact = models.ForeignKey(Contact, on_delete=models.CASCADE,\n related_name='contact')\n first_code = models.CharField(max_length=4, validators=[RegexValidator(\n '^\\\\d{0,10}$')])\n digits = models.CharField(max_length=30, validators=[RegexValidator(\n '^\\\\d{0,10}$')])\n generate_numbers = models.CharField(max_length=30, validators=[\n RegexValidator('^\\\\d{0,10}$')])\n\n def save(self, **kwargs):\n if not self.id:\n max = GenerateContact.objects.aggregate(id_max=Max('id'))['id_max']\n if max is not None:\n max = max[-3:]\n max = int(max)\n max += 1\n else:\n max = 1\n self.id = 'GENCON' + '{0:03d}'.format(max)\n super().save(*kwargs)\n\n\n<mask token>\n\n\nclass SMSBlast(models.Model):\n id = models.CharField(primary_key=True, editable=False, max_length=6)\n message_title = models.CharField(max_length=100)\n message_text = models.CharField(max_length=160)\n send_date = models.DateField(null=True, blank=True)\n send_time = models.TimeField(null=True, blank=True)\n is_now = models.BooleanField(default=False)\n\n def __str__(self):\n return self.message_title\n\n def save(self, **kwargs):\n if not self.id:\n max = SMSBlast.objects.aggregate(id_max=Max('id'))['id_max']\n if max is not None:\n max = max[-3:]\n max = int(max)\n max += 1\n else:\n max = 1\n self.id = 'SMS' + '{0:03d}'.format(max)\n super().save(*kwargs)\n\n\nclass ContactAndSMS(models.Model):\n id = models.CharField(primary_key=True, editable=False, max_length=12)\n contact = models.ForeignKey(Contact, on_delete=models.CASCADE,\n related_name='smsncon_contact')\n smsblast = models.ForeignKey(SMSBlast, on_delete=models.CASCADE,\n related_name='smsncon_smsblast')\n\n def save(self, **kwargs):\n if not self.id:\n max = ContactAndSMS.objects.aggregate(id_max=Max('id'))['id_max']\n if max is not None:\n max = max[-3:]\n max = int(max)\n max += 1\n else:\n max = 1\n self.id = 'CONANDSMS' + '{0:03d}'.format(max)\n super().save(*kwargs)\n\n\nclass SMSBlastJob(models.Model):\n id = models.CharField(primary_key=True, editable=False, max_length=9)\n job_id = models.CharField(max_length=100, blank=True, null=True)\n contact = models.ForeignKey(Contact, on_delete=models.CASCADE,\n related_name='contact_job')\n smsblast = models.ForeignKey(SMSBlast, on_delete=models.CASCADE,\n related_name='smsblast_job')\n\n def __str__(self):\n return self.job_id\n\n def save(self, **kwargs):\n if not self.id:\n max = SMSBlastJob.objects.aggregate(id_max=Max('id'))['id_max']\n if max is not None:\n max = max[-3:]\n max = int(max)\n max += 1\n else:\n max = 1\n self.id = 'SMSJOB' + '{0:03d}'.format(max)\n super().save(*kwargs)\n\n\nclass SMSStatus(models.Model):\n id = models.CharField(primary_key=True, editable=False, max_length=10)\n job = models.ForeignKey(SMSBlastJob, on_delete=models.CASCADE,\n related_name='job_status')\n contact = models.ForeignKey(Contact, on_delete=models.CASCADE,\n related_name='contact_status')\n status = models.FileField(upload_to='pickles/status/')\n\n def __str__(self):\n return self.job_id\n\n def save(self, **kwargs):\n if not self.id:\n max = SMSStatus.objects.aggregate(id_max=Max('id'))['id_max']\n if max is not None:\n max = max[-3:]\n max = int(max)\n max += 1\n else:\n max = 1\n self.id = 
'SMSSTAT' + '{0:03d}'.format(max)\n super().save(*kwargs)\n",
"step-2": "<mask token>\n\n\nclass Location(models.Model):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def __str__(self):\n return self.name\n\n def save(self, **kwargs):\n if not self.id:\n max = Location.objects.aggregate(id_max=Max('id'))['id_max']\n if max is not None:\n max = max[-3:]\n max = int(max)\n max += 1\n else:\n max = 1\n self.id = 'LOC' + '{0:03d}'.format(max)\n super().save(*kwargs)\n\n\nclass Banner(models.Model):\n id = models.CharField(primary_key=True, editable=False, max_length=5)\n name = models.CharField(max_length=100)\n caption = models.TextField()\n description = models.TextField(blank=True, null=True)\n image = models.ImageField(upload_to='images/', verbose_name='Banner',\n blank=True)\n height = models.IntegerField()\n width = models.IntegerField()\n is_archived = models.BooleanField(default=False)\n\n def __str__(self):\n return self.name\n\n def delete(self, *args, **kwargs):\n self.image.delete(save=False)\n super(Banner, self).delete(*args, **kwargs)\n\n def save(self, **kwargs):\n if not self.id:\n max = Banner.objects.aggregate(id_max=Max('id'))['id_max']\n if max is not None:\n max = max[-3:]\n max = int(max)\n max += 1\n else:\n max = 1\n self.id = 'BN' + '{0:03d}'.format(max)\n super().save(*kwargs)\n\n\nclass Campaign(models.Model):\n id = models.CharField(primary_key=True, editable=False, max_length=6)\n location = models.ForeignKey(Location, on_delete=models.CASCADE,\n related_name='locations')\n campaign_code = models.CharField(max_length=30, null=True, blank=True)\n priority = models.IntegerField(null=True, blank=True)\n date_created = models.DateField(null=True, blank=True)\n date_updated = models.DateField(null=True, blank=True)\n valid_date_start = models.DateField(null=True, blank=True)\n valid_date_end = models.DateField(null=True, blank=True)\n\n def save(self, **kwargs):\n if not self.id:\n max = Campaign.objects.aggregate(id_max=Max('id'))['id_max']\n if max is not None:\n max = max[-3:]\n max = int(max)\n max += 1\n else:\n max = 1\n self.id = 'CMP' + '{0:03d}'.format(max)\n super().save(*kwargs)\n\n\nclass Installation(models.Model):\n id = models.CharField(primary_key=True, editable=False, max_length=6)\n banner = models.ForeignKey(Banner, on_delete=models.CASCADE,\n related_name='banners', blank=True, null=True)\n campaign = models.ForeignKey(Campaign, on_delete=models.CASCADE,\n related_name='campaigns')\n redirect = models.URLField(null=True, blank=True)\n\n def save(self, **kwargs):\n if not self.id:\n max = Installation.objects.aggregate(id_max=Max('id'))['id_max']\n if max is not None:\n max = max[-3:]\n max = int(max)\n max += 1\n else:\n max = 1\n self.id = 'INS' + '{0:03d}'.format(max)\n super().save(*kwargs)\n\n\n<mask token>\n\n\nclass ContactSource(models.Model):\n id = models.CharField(primary_key=True, editable=False, max_length=9)\n source = models.CharField(max_length=30, choices=source_choices)\n\n def __str__(self):\n return self.source\n\n def save(self, **kwargs):\n if not self.id:\n max = ContactSource.objects.aggregate(id_max=Max('id'))['id_max']\n if max is not None:\n max = max[-3:]\n max = int(max)\n max += 1\n else:\n max = 1\n self.id = 'CONSRC' + '{0:03d}'.format(max)\n super().save(*kwargs)\n\n\nclass Contact(models.Model):\n id = models.CharField(primary_key=True, editable=False, max_length=6)\n source = models.ForeignKey(ContactSource, on_delete=models.CASCADE,\n related_name='contactsources')\n name = 
models.CharField(max_length=100)\n numbers = models.FileField(upload_to='pickles/contact/')\n is_deleted = models.BooleanField(default=False)\n deleted_datetime = models.DateTimeField(blank=True, null=True)\n\n def __str__(self):\n return self.name\n\n def save(self, **kwargs):\n if not self.id:\n max = Contact.objects.aggregate(id_max=Max('id'))['id_max']\n if max is not None:\n max = max[-3:]\n max = int(max)\n max += 1\n else:\n max = 1\n self.id = 'CON' + '{0:03d}'.format(max)\n super().save(*kwargs)\n\n\nclass GenerateContact(models.Model):\n id = models.CharField(primary_key=True, editable=False, max_length=9)\n contact = models.ForeignKey(Contact, on_delete=models.CASCADE,\n related_name='contact')\n first_code = models.CharField(max_length=4, validators=[RegexValidator(\n '^\\\\d{0,10}$')])\n digits = models.CharField(max_length=30, validators=[RegexValidator(\n '^\\\\d{0,10}$')])\n generate_numbers = models.CharField(max_length=30, validators=[\n RegexValidator('^\\\\d{0,10}$')])\n\n def save(self, **kwargs):\n if not self.id:\n max = GenerateContact.objects.aggregate(id_max=Max('id'))['id_max']\n if max is not None:\n max = max[-3:]\n max = int(max)\n max += 1\n else:\n max = 1\n self.id = 'GENCON' + '{0:03d}'.format(max)\n super().save(*kwargs)\n\n\n<mask token>\n\n\nclass SMSBlast(models.Model):\n id = models.CharField(primary_key=True, editable=False, max_length=6)\n message_title = models.CharField(max_length=100)\n message_text = models.CharField(max_length=160)\n send_date = models.DateField(null=True, blank=True)\n send_time = models.TimeField(null=True, blank=True)\n is_now = models.BooleanField(default=False)\n\n def __str__(self):\n return self.message_title\n\n def save(self, **kwargs):\n if not self.id:\n max = SMSBlast.objects.aggregate(id_max=Max('id'))['id_max']\n if max is not None:\n max = max[-3:]\n max = int(max)\n max += 1\n else:\n max = 1\n self.id = 'SMS' + '{0:03d}'.format(max)\n super().save(*kwargs)\n\n\nclass ContactAndSMS(models.Model):\n id = models.CharField(primary_key=True, editable=False, max_length=12)\n contact = models.ForeignKey(Contact, on_delete=models.CASCADE,\n related_name='smsncon_contact')\n smsblast = models.ForeignKey(SMSBlast, on_delete=models.CASCADE,\n related_name='smsncon_smsblast')\n\n def save(self, **kwargs):\n if not self.id:\n max = ContactAndSMS.objects.aggregate(id_max=Max('id'))['id_max']\n if max is not None:\n max = max[-3:]\n max = int(max)\n max += 1\n else:\n max = 1\n self.id = 'CONANDSMS' + '{0:03d}'.format(max)\n super().save(*kwargs)\n\n\nclass SMSBlastJob(models.Model):\n id = models.CharField(primary_key=True, editable=False, max_length=9)\n job_id = models.CharField(max_length=100, blank=True, null=True)\n contact = models.ForeignKey(Contact, on_delete=models.CASCADE,\n related_name='contact_job')\n smsblast = models.ForeignKey(SMSBlast, on_delete=models.CASCADE,\n related_name='smsblast_job')\n\n def __str__(self):\n return self.job_id\n\n def save(self, **kwargs):\n if not self.id:\n max = SMSBlastJob.objects.aggregate(id_max=Max('id'))['id_max']\n if max is not None:\n max = max[-3:]\n max = int(max)\n max += 1\n else:\n max = 1\n self.id = 'SMSJOB' + '{0:03d}'.format(max)\n super().save(*kwargs)\n\n\nclass SMSStatus(models.Model):\n id = models.CharField(primary_key=True, editable=False, max_length=10)\n job = models.ForeignKey(SMSBlastJob, on_delete=models.CASCADE,\n related_name='job_status')\n contact = models.ForeignKey(Contact, on_delete=models.CASCADE,\n related_name='contact_status')\n status = 
models.FileField(upload_to='pickles/status/')\n\n def __str__(self):\n return self.job_id\n\n def save(self, **kwargs):\n if not self.id:\n max = SMSStatus.objects.aggregate(id_max=Max('id'))['id_max']\n if max is not None:\n max = max[-3:]\n max = int(max)\n max += 1\n else:\n max = 1\n self.id = 'SMSSTAT' + '{0:03d}'.format(max)\n super().save(*kwargs)\n",
"step-3": "<mask token>\n\n\nclass Application(models.Model):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n\nclass Page(models.Model):\n id = models.CharField(primary_key=True, editable=False, max_length=5)\n application = models.ForeignKey(Application, on_delete=models.CASCADE,\n related_name='applications')\n name = models.CharField(max_length=100)\n is_archived = models.BooleanField(default=False)\n\n def __str__(self):\n return self.name\n\n def save(self, **kwargs):\n if not self.id:\n max = Page.objects.aggregate(id_max=Max('id'))['id_max']\n if max is not None:\n max = max[-3:]\n max = int(max)\n max += 1\n else:\n max = 1\n self.id = 'PG' + '{0:03d}'.format(max)\n super().save(*kwargs)\n\n\nclass Location(models.Model):\n id = models.CharField(primary_key=True, editable=False, max_length=6)\n loc_code = models.CharField(max_length=30, null=True, blank=True,\n unique=True)\n page = models.ForeignKey(Page, on_delete=models.CASCADE, related_name=\n 'pages')\n is_slider = models.BooleanField(default=False)\n is_active = models.BooleanField(default=False)\n name = models.CharField(max_length=100)\n width = models.IntegerField()\n height = models.IntegerField()\n\n def __str__(self):\n return self.name\n\n def save(self, **kwargs):\n if not self.id:\n max = Location.objects.aggregate(id_max=Max('id'))['id_max']\n if max is not None:\n max = max[-3:]\n max = int(max)\n max += 1\n else:\n max = 1\n self.id = 'LOC' + '{0:03d}'.format(max)\n super().save(*kwargs)\n\n\nclass Banner(models.Model):\n id = models.CharField(primary_key=True, editable=False, max_length=5)\n name = models.CharField(max_length=100)\n caption = models.TextField()\n description = models.TextField(blank=True, null=True)\n image = models.ImageField(upload_to='images/', verbose_name='Banner',\n blank=True)\n height = models.IntegerField()\n width = models.IntegerField()\n is_archived = models.BooleanField(default=False)\n\n def __str__(self):\n return self.name\n\n def delete(self, *args, **kwargs):\n self.image.delete(save=False)\n super(Banner, self).delete(*args, **kwargs)\n\n def save(self, **kwargs):\n if not self.id:\n max = Banner.objects.aggregate(id_max=Max('id'))['id_max']\n if max is not None:\n max = max[-3:]\n max = int(max)\n max += 1\n else:\n max = 1\n self.id = 'BN' + '{0:03d}'.format(max)\n super().save(*kwargs)\n\n\nclass Campaign(models.Model):\n id = models.CharField(primary_key=True, editable=False, max_length=6)\n location = models.ForeignKey(Location, on_delete=models.CASCADE,\n related_name='locations')\n campaign_code = models.CharField(max_length=30, null=True, blank=True)\n priority = models.IntegerField(null=True, blank=True)\n date_created = models.DateField(null=True, blank=True)\n date_updated = models.DateField(null=True, blank=True)\n valid_date_start = models.DateField(null=True, blank=True)\n valid_date_end = models.DateField(null=True, blank=True)\n\n def save(self, **kwargs):\n if not self.id:\n max = Campaign.objects.aggregate(id_max=Max('id'))['id_max']\n if max is not None:\n max = max[-3:]\n max = int(max)\n max += 1\n else:\n max = 1\n self.id = 'CMP' + '{0:03d}'.format(max)\n super().save(*kwargs)\n\n\nclass Installation(models.Model):\n id = models.CharField(primary_key=True, editable=False, max_length=6)\n banner = models.ForeignKey(Banner, on_delete=models.CASCADE,\n related_name='banners', blank=True, null=True)\n campaign = models.ForeignKey(Campaign, on_delete=models.CASCADE,\n related_name='campaigns')\n redirect = 
models.URLField(null=True, blank=True)\n\n def save(self, **kwargs):\n if not self.id:\n max = Installation.objects.aggregate(id_max=Max('id'))['id_max']\n if max is not None:\n max = max[-3:]\n max = int(max)\n max += 1\n else:\n max = 1\n self.id = 'INS' + '{0:03d}'.format(max)\n super().save(*kwargs)\n\n\n<mask token>\n\n\nclass ContactSource(models.Model):\n id = models.CharField(primary_key=True, editable=False, max_length=9)\n source = models.CharField(max_length=30, choices=source_choices)\n\n def __str__(self):\n return self.source\n\n def save(self, **kwargs):\n if not self.id:\n max = ContactSource.objects.aggregate(id_max=Max('id'))['id_max']\n if max is not None:\n max = max[-3:]\n max = int(max)\n max += 1\n else:\n max = 1\n self.id = 'CONSRC' + '{0:03d}'.format(max)\n super().save(*kwargs)\n\n\nclass Contact(models.Model):\n id = models.CharField(primary_key=True, editable=False, max_length=6)\n source = models.ForeignKey(ContactSource, on_delete=models.CASCADE,\n related_name='contactsources')\n name = models.CharField(max_length=100)\n numbers = models.FileField(upload_to='pickles/contact/')\n is_deleted = models.BooleanField(default=False)\n deleted_datetime = models.DateTimeField(blank=True, null=True)\n\n def __str__(self):\n return self.name\n\n def save(self, **kwargs):\n if not self.id:\n max = Contact.objects.aggregate(id_max=Max('id'))['id_max']\n if max is not None:\n max = max[-3:]\n max = int(max)\n max += 1\n else:\n max = 1\n self.id = 'CON' + '{0:03d}'.format(max)\n super().save(*kwargs)\n\n\nclass GenerateContact(models.Model):\n id = models.CharField(primary_key=True, editable=False, max_length=9)\n contact = models.ForeignKey(Contact, on_delete=models.CASCADE,\n related_name='contact')\n first_code = models.CharField(max_length=4, validators=[RegexValidator(\n '^\\\\d{0,10}$')])\n digits = models.CharField(max_length=30, validators=[RegexValidator(\n '^\\\\d{0,10}$')])\n generate_numbers = models.CharField(max_length=30, validators=[\n RegexValidator('^\\\\d{0,10}$')])\n\n def save(self, **kwargs):\n if not self.id:\n max = GenerateContact.objects.aggregate(id_max=Max('id'))['id_max']\n if max is not None:\n max = max[-3:]\n max = int(max)\n max += 1\n else:\n max = 1\n self.id = 'GENCON' + '{0:03d}'.format(max)\n super().save(*kwargs)\n\n\n<mask token>\n\n\nclass SMSBlast(models.Model):\n id = models.CharField(primary_key=True, editable=False, max_length=6)\n message_title = models.CharField(max_length=100)\n message_text = models.CharField(max_length=160)\n send_date = models.DateField(null=True, blank=True)\n send_time = models.TimeField(null=True, blank=True)\n is_now = models.BooleanField(default=False)\n\n def __str__(self):\n return self.message_title\n\n def save(self, **kwargs):\n if not self.id:\n max = SMSBlast.objects.aggregate(id_max=Max('id'))['id_max']\n if max is not None:\n max = max[-3:]\n max = int(max)\n max += 1\n else:\n max = 1\n self.id = 'SMS' + '{0:03d}'.format(max)\n super().save(*kwargs)\n\n\nclass ContactAndSMS(models.Model):\n id = models.CharField(primary_key=True, editable=False, max_length=12)\n contact = models.ForeignKey(Contact, on_delete=models.CASCADE,\n related_name='smsncon_contact')\n smsblast = models.ForeignKey(SMSBlast, on_delete=models.CASCADE,\n related_name='smsncon_smsblast')\n\n def save(self, **kwargs):\n if not self.id:\n max = ContactAndSMS.objects.aggregate(id_max=Max('id'))['id_max']\n if max is not None:\n max = max[-3:]\n max = int(max)\n max += 1\n else:\n max = 1\n self.id = 'CONANDSMS' + 
'{0:03d}'.format(max)\n super().save(*kwargs)\n\n\nclass SMSBlastJob(models.Model):\n id = models.CharField(primary_key=True, editable=False, max_length=9)\n job_id = models.CharField(max_length=100, blank=True, null=True)\n contact = models.ForeignKey(Contact, on_delete=models.CASCADE,\n related_name='contact_job')\n smsblast = models.ForeignKey(SMSBlast, on_delete=models.CASCADE,\n related_name='smsblast_job')\n\n def __str__(self):\n return self.job_id\n\n def save(self, **kwargs):\n if not self.id:\n max = SMSBlastJob.objects.aggregate(id_max=Max('id'))['id_max']\n if max is not None:\n max = max[-3:]\n max = int(max)\n max += 1\n else:\n max = 1\n self.id = 'SMSJOB' + '{0:03d}'.format(max)\n super().save(*kwargs)\n\n\nclass SMSStatus(models.Model):\n id = models.CharField(primary_key=True, editable=False, max_length=10)\n job = models.ForeignKey(SMSBlastJob, on_delete=models.CASCADE,\n related_name='job_status')\n contact = models.ForeignKey(Contact, on_delete=models.CASCADE,\n related_name='contact_status')\n status = models.FileField(upload_to='pickles/status/')\n\n def __str__(self):\n return self.job_id\n\n def save(self, **kwargs):\n if not self.id:\n max = SMSStatus.objects.aggregate(id_max=Max('id'))['id_max']\n if max is not None:\n max = max[-3:]\n max = int(max)\n max += 1\n else:\n max = 1\n self.id = 'SMSSTAT' + '{0:03d}'.format(max)\n super().save(*kwargs)\n",
"step-4": "<mask token>\n\n\nclass User(AbstractUser):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n\nclass Application(models.Model):\n id = models.CharField(primary_key=True, editable=False, max_length=6)\n app_code = models.CharField(max_length=30, blank=True, null=True)\n name = models.CharField(max_length=100, blank=True, null=True)\n is_archived = models.BooleanField(default=False)\n\n def __str__(self):\n return self.name\n\n def save(self, **kwargs):\n if not self.id:\n max = Application.objects.aggregate(id_max=Max('id'))['id_max']\n if max is not None:\n max = max[-3:]\n max = int(max)\n max += 1\n else:\n max = 1\n self.id = 'APP' + '{0:03d}'.format(max)\n super().save(*kwargs)\n\n\nclass Page(models.Model):\n id = models.CharField(primary_key=True, editable=False, max_length=5)\n application = models.ForeignKey(Application, on_delete=models.CASCADE,\n related_name='applications')\n name = models.CharField(max_length=100)\n is_archived = models.BooleanField(default=False)\n\n def __str__(self):\n return self.name\n\n def save(self, **kwargs):\n if not self.id:\n max = Page.objects.aggregate(id_max=Max('id'))['id_max']\n if max is not None:\n max = max[-3:]\n max = int(max)\n max += 1\n else:\n max = 1\n self.id = 'PG' + '{0:03d}'.format(max)\n super().save(*kwargs)\n\n\nclass Location(models.Model):\n id = models.CharField(primary_key=True, editable=False, max_length=6)\n loc_code = models.CharField(max_length=30, null=True, blank=True,\n unique=True)\n page = models.ForeignKey(Page, on_delete=models.CASCADE, related_name=\n 'pages')\n is_slider = models.BooleanField(default=False)\n is_active = models.BooleanField(default=False)\n name = models.CharField(max_length=100)\n width = models.IntegerField()\n height = models.IntegerField()\n\n def __str__(self):\n return self.name\n\n def save(self, **kwargs):\n if not self.id:\n max = Location.objects.aggregate(id_max=Max('id'))['id_max']\n if max is not None:\n max = max[-3:]\n max = int(max)\n max += 1\n else:\n max = 1\n self.id = 'LOC' + '{0:03d}'.format(max)\n super().save(*kwargs)\n\n\nclass Banner(models.Model):\n id = models.CharField(primary_key=True, editable=False, max_length=5)\n name = models.CharField(max_length=100)\n caption = models.TextField()\n description = models.TextField(blank=True, null=True)\n image = models.ImageField(upload_to='images/', verbose_name='Banner',\n blank=True)\n height = models.IntegerField()\n width = models.IntegerField()\n is_archived = models.BooleanField(default=False)\n\n def __str__(self):\n return self.name\n\n def delete(self, *args, **kwargs):\n self.image.delete(save=False)\n super(Banner, self).delete(*args, **kwargs)\n\n def save(self, **kwargs):\n if not self.id:\n max = Banner.objects.aggregate(id_max=Max('id'))['id_max']\n if max is not None:\n max = max[-3:]\n max = int(max)\n max += 1\n else:\n max = 1\n self.id = 'BN' + '{0:03d}'.format(max)\n super().save(*kwargs)\n\n\nclass Campaign(models.Model):\n id = models.CharField(primary_key=True, editable=False, max_length=6)\n location = models.ForeignKey(Location, on_delete=models.CASCADE,\n related_name='locations')\n campaign_code = models.CharField(max_length=30, null=True, blank=True)\n priority = models.IntegerField(null=True, blank=True)\n date_created = models.DateField(null=True, blank=True)\n date_updated = models.DateField(null=True, blank=True)\n valid_date_start = models.DateField(null=True, blank=True)\n valid_date_end = models.DateField(null=True, blank=True)\n\n def save(self, **kwargs):\n if 
not self.id:\n max = Campaign.objects.aggregate(id_max=Max('id'))['id_max']\n if max is not None:\n max = max[-3:]\n max = int(max)\n max += 1\n else:\n max = 1\n self.id = 'CMP' + '{0:03d}'.format(max)\n super().save(*kwargs)\n\n\nclass Installation(models.Model):\n id = models.CharField(primary_key=True, editable=False, max_length=6)\n banner = models.ForeignKey(Banner, on_delete=models.CASCADE,\n related_name='banners', blank=True, null=True)\n campaign = models.ForeignKey(Campaign, on_delete=models.CASCADE,\n related_name='campaigns')\n redirect = models.URLField(null=True, blank=True)\n\n def save(self, **kwargs):\n if not self.id:\n max = Installation.objects.aggregate(id_max=Max('id'))['id_max']\n if max is not None:\n max = max[-3:]\n max = int(max)\n max += 1\n else:\n max = 1\n self.id = 'INS' + '{0:03d}'.format(max)\n super().save(*kwargs)\n\n\n<mask token>\n\n\nclass ContactSource(models.Model):\n id = models.CharField(primary_key=True, editable=False, max_length=9)\n source = models.CharField(max_length=30, choices=source_choices)\n\n def __str__(self):\n return self.source\n\n def save(self, **kwargs):\n if not self.id:\n max = ContactSource.objects.aggregate(id_max=Max('id'))['id_max']\n if max is not None:\n max = max[-3:]\n max = int(max)\n max += 1\n else:\n max = 1\n self.id = 'CONSRC' + '{0:03d}'.format(max)\n super().save(*kwargs)\n\n\nclass Contact(models.Model):\n id = models.CharField(primary_key=True, editable=False, max_length=6)\n source = models.ForeignKey(ContactSource, on_delete=models.CASCADE,\n related_name='contactsources')\n name = models.CharField(max_length=100)\n numbers = models.FileField(upload_to='pickles/contact/')\n is_deleted = models.BooleanField(default=False)\n deleted_datetime = models.DateTimeField(blank=True, null=True)\n\n def __str__(self):\n return self.name\n\n def save(self, **kwargs):\n if not self.id:\n max = Contact.objects.aggregate(id_max=Max('id'))['id_max']\n if max is not None:\n max = max[-3:]\n max = int(max)\n max += 1\n else:\n max = 1\n self.id = 'CON' + '{0:03d}'.format(max)\n super().save(*kwargs)\n\n\nclass GenerateContact(models.Model):\n id = models.CharField(primary_key=True, editable=False, max_length=9)\n contact = models.ForeignKey(Contact, on_delete=models.CASCADE,\n related_name='contact')\n first_code = models.CharField(max_length=4, validators=[RegexValidator(\n '^\\\\d{0,10}$')])\n digits = models.CharField(max_length=30, validators=[RegexValidator(\n '^\\\\d{0,10}$')])\n generate_numbers = models.CharField(max_length=30, validators=[\n RegexValidator('^\\\\d{0,10}$')])\n\n def save(self, **kwargs):\n if not self.id:\n max = GenerateContact.objects.aggregate(id_max=Max('id'))['id_max']\n if max is not None:\n max = max[-3:]\n max = int(max)\n max += 1\n else:\n max = 1\n self.id = 'GENCON' + '{0:03d}'.format(max)\n super().save(*kwargs)\n\n\n<mask token>\n\n\nclass SMSBlast(models.Model):\n id = models.CharField(primary_key=True, editable=False, max_length=6)\n message_title = models.CharField(max_length=100)\n message_text = models.CharField(max_length=160)\n send_date = models.DateField(null=True, blank=True)\n send_time = models.TimeField(null=True, blank=True)\n is_now = models.BooleanField(default=False)\n\n def __str__(self):\n return self.message_title\n\n def save(self, **kwargs):\n if not self.id:\n max = SMSBlast.objects.aggregate(id_max=Max('id'))['id_max']\n if max is not None:\n max = max[-3:]\n max = int(max)\n max += 1\n else:\n max = 1\n self.id = 'SMS' + '{0:03d}'.format(max)\n 
super().save(*kwargs)\n\n\nclass ContactAndSMS(models.Model):\n id = models.CharField(primary_key=True, editable=False, max_length=12)\n contact = models.ForeignKey(Contact, on_delete=models.CASCADE,\n related_name='smsncon_contact')\n smsblast = models.ForeignKey(SMSBlast, on_delete=models.CASCADE,\n related_name='smsncon_smsblast')\n\n def save(self, **kwargs):\n if not self.id:\n max = ContactAndSMS.objects.aggregate(id_max=Max('id'))['id_max']\n if max is not None:\n max = max[-3:]\n max = int(max)\n max += 1\n else:\n max = 1\n self.id = 'CONANDSMS' + '{0:03d}'.format(max)\n super().save(*kwargs)\n\n\nclass SMSBlastJob(models.Model):\n id = models.CharField(primary_key=True, editable=False, max_length=9)\n job_id = models.CharField(max_length=100, blank=True, null=True)\n contact = models.ForeignKey(Contact, on_delete=models.CASCADE,\n related_name='contact_job')\n smsblast = models.ForeignKey(SMSBlast, on_delete=models.CASCADE,\n related_name='smsblast_job')\n\n def __str__(self):\n return self.job_id\n\n def save(self, **kwargs):\n if not self.id:\n max = SMSBlastJob.objects.aggregate(id_max=Max('id'))['id_max']\n if max is not None:\n max = max[-3:]\n max = int(max)\n max += 1\n else:\n max = 1\n self.id = 'SMSJOB' + '{0:03d}'.format(max)\n super().save(*kwargs)\n\n\nclass SMSStatus(models.Model):\n id = models.CharField(primary_key=True, editable=False, max_length=10)\n job = models.ForeignKey(SMSBlastJob, on_delete=models.CASCADE,\n related_name='job_status')\n contact = models.ForeignKey(Contact, on_delete=models.CASCADE,\n related_name='contact_status')\n status = models.FileField(upload_to='pickles/status/')\n\n def __str__(self):\n return self.job_id\n\n def save(self, **kwargs):\n if not self.id:\n max = SMSStatus.objects.aggregate(id_max=Max('id'))['id_max']\n if max is not None:\n max = max[-3:]\n max = int(max)\n max += 1\n else:\n max = 1\n self.id = 'SMSSTAT' + '{0:03d}'.format(max)\n super().save(*kwargs)\n",
"step-5": "from django.db import models\nfrom django.contrib.auth.models import AbstractUser\nfrom django.db.models import Max\nfrom django.core.validators import RegexValidator\nfrom django.utils import timezone\n\nclass User(AbstractUser):\n is_developer = models.BooleanField('developer status', default=False)\n is_marketing = models.BooleanField('marketing status', default=False)\n email = models.EmailField(unique=True, null=True, blank=True)\n\n def __str__(self):\n return self.username\n\nclass Application(models.Model):\n id = models.CharField(primary_key=True, editable=False, max_length=6)\n app_code = models.CharField(max_length=30, blank=True, null=True)\n name = models.CharField(max_length=100, blank=True, null=True)\n is_archived = models.BooleanField(default=False)\n\n def __str__(self):\n return self.name\n\n def save(self, **kwargs):\n if not self.id:\n max = Application.objects.aggregate(id_max=Max('id'))['id_max']\n if max is not None:\n max = max[-3:]\n max = int(max)\n max += 1\n else:\n max = 1\n self.id = \"APP\" + \"{0:03d}\".format(max)\n super().save(*kwargs)\n\nclass Page(models.Model):\n id = models.CharField(primary_key=True, editable=False, max_length=5)\n application = models.ForeignKey(Application, on_delete=models.CASCADE, related_name='applications')\n name = models.CharField(max_length=100)\n is_archived = models.BooleanField(default=False)\n\n def __str__(self):\n return self.name\n\n def save(self, **kwargs):\n if not self.id:\n max = Page.objects.aggregate(id_max=Max('id'))['id_max']\n if max is not None:\n max = max[-3:]\n max = int(max)\n max += 1\n else:\n max = 1\n self.id = \"PG\" + \"{0:03d}\".format(max)\n super().save(*kwargs)\n\nclass Location(models.Model):\n id = models.CharField(primary_key=True, editable=False, max_length=6)\n loc_code = models.CharField(max_length=30, null=True, blank=True, unique=True)\n page = models.ForeignKey(Page, on_delete=models.CASCADE, related_name='pages')\n is_slider = models.BooleanField(default=False)\n is_active = models.BooleanField(default=False)\n name = models.CharField(max_length=100)\n width = models.IntegerField()\n height = models.IntegerField()\n\n def __str__(self):\n return self.name\n\n def save(self, **kwargs):\n if not self.id:\n max = Location.objects.aggregate(id_max=Max('id'))['id_max']\n if max is not None:\n max = max[-3:]\n max = int(max)\n max += 1\n else:\n max = 1\n self.id = \"LOC\" + \"{0:03d}\".format(max)\n super().save(*kwargs)\n\nclass Banner(models.Model):\n id = models.CharField(primary_key=True, editable=False, max_length=5)\n name = models.CharField(max_length=100)\n caption = models.TextField()\n description = models.TextField(blank=True, null=True)\n image = models.ImageField(upload_to='images/', verbose_name='Banner', blank=True)\n height = models.IntegerField()\n width = models.IntegerField()\n is_archived = models.BooleanField(default=False)\n \n def __str__(self):\n return self.name\n\n def delete(self, *args, **kwargs):\n self.image.delete(save=False)\n\n super(Banner, self).delete(*args, **kwargs)\n\n def save(self, **kwargs):\n if not self.id:\n max = Banner.objects.aggregate(id_max=Max('id'))['id_max']\n if max is not None:\n max = max[-3:]\n max = int(max)\n max += 1\n else:\n max = 1\n self.id = \"BN\" + \"{0:03d}\".format(max)\n super().save(*kwargs)\n\nclass Campaign(models.Model):\n id = models.CharField(primary_key=True, editable=False, max_length=6)\n location = models.ForeignKey(Location, on_delete=models.CASCADE, related_name='locations')\n campaign_code = 
models.CharField(max_length=30, null=True, blank=True)\n priority = models.IntegerField(null=True, blank=True)\n date_created = models.DateField(null=True, blank=True)\n date_updated = models.DateField(null=True, blank=True)\n valid_date_start = models.DateField(null=True, blank=True)\n valid_date_end = models.DateField(null=True, blank=True)\n\n def save(self, **kwargs):\n if not self.id:\n max = Campaign.objects.aggregate(id_max=Max('id'))['id_max']\n if max is not None:\n max = max[-3:]\n max = int(max)\n max += 1\n else:\n max = 1\n self.id = \"CMP\" + \"{0:03d}\".format(max)\n super().save(*kwargs)\n\nclass Installation(models.Model):\n id = models.CharField(primary_key=True, editable=False, max_length=6)\n banner = models.ForeignKey(Banner, on_delete=models.CASCADE, related_name='banners', blank=True, null=True)\n campaign = models.ForeignKey(Campaign, on_delete=models.CASCADE, related_name='campaigns')\n redirect = models.URLField(null=True, blank=True)\n\n def save(self, **kwargs):\n if not self.id:\n max = Installation.objects.aggregate(id_max=Max('id'))['id_max']\n if max is not None:\n max = max[-3:]\n max = int(max)\n max += 1\n else:\n max = 1\n self.id = \"INS\" + \"{0:03d}\".format(max)\n super().save(*kwargs)\n\nsource_choices = (\n ('random', 'Generate nomor secara acak'),\n ('csv', 'Upload file .csv'),\n)\n\nclass ContactSource(models.Model):\n id = models.CharField(primary_key=True, editable=False, max_length=9)\n source = models.CharField(max_length=30, choices=source_choices)\n\n def __str__(self):\n return self.source\n\n def save(self, **kwargs):\n if not self.id:\n max = ContactSource.objects.aggregate(id_max=Max('id'))['id_max']\n if max is not None:\n max = max[-3:]\n max = int(max)\n max += 1\n else:\n max = 1\n self.id = \"CONSRC\" + \"{0:03d}\".format(max)\n super().save(*kwargs)\n\nclass Contact(models.Model):\n id = models.CharField(primary_key=True, editable=False, max_length=6)\n source = models.ForeignKey(ContactSource, on_delete=models.CASCADE, related_name='contactsources')\n name = models.CharField(max_length=100)\n numbers = models.FileField(upload_to='pickles/contact/')\n is_deleted = models.BooleanField(default=False)\n deleted_datetime = models.DateTimeField(blank=True, null=True)\n\n def __str__(self):\n return self.name\n\n def save(self, **kwargs):\n if not self.id:\n max = Contact.objects.aggregate(id_max=Max('id'))['id_max']\n if max is not None:\n max = max[-3:]\n max = int(max)\n max += 1\n else:\n max = 1\n self.id = \"CON\" + \"{0:03d}\".format(max)\n super().save(*kwargs)\n\nclass GenerateContact(models.Model):\n id = models.CharField(primary_key=True, editable=False, max_length=9)\n contact = models.ForeignKey(Contact, on_delete=models.CASCADE, related_name='contact')\n first_code = models.CharField(max_length=4, validators=[RegexValidator(r'^\\d{0,10}$')])\n digits = models.CharField(max_length=30, validators=[RegexValidator(r'^\\d{0,10}$')])\n generate_numbers = models.CharField(max_length=30, validators=[RegexValidator(r'^\\d{0,10}$')])\n\n def save(self, **kwargs):\n if not self.id:\n max = GenerateContact.objects.aggregate(id_max=Max('id'))['id_max']\n if max is not None:\n max = max[-3:]\n max = int(max)\n max += 1\n else:\n max = 1\n self.id = \"GENCON\" + \"{0:03d}\".format(max)\n super().save(*kwargs)\n\nstatus_choices = (\n ('complete', 'Sudah Dikirim'),\n ('uncomplete', 'Belum Dikirim'),\n)\n\nclass SMSBlast(models.Model):\n id = models.CharField(primary_key=True, editable=False, max_length=6)\n message_title = 
models.CharField(max_length=100)\n message_text = models.CharField(max_length=160)\n send_date = models.DateField(null=True, blank=True)\n send_time = models.TimeField(null=True, blank=True)\n is_now = models.BooleanField(default=False)\n\n def __str__(self):\n return self.message_title\n\n def save(self, **kwargs):\n if not self.id:\n max = SMSBlast.objects.aggregate(id_max=Max('id'))['id_max']\n if max is not None:\n max = max[-3:]\n max = int(max)\n max += 1\n else:\n max = 1\n self.id = \"SMS\" + \"{0:03d}\".format(max)\n super().save(*kwargs)\n\nclass ContactAndSMS(models.Model):\n id = models.CharField(primary_key=True, editable=False, max_length=12)\n contact = models.ForeignKey(Contact, on_delete=models.CASCADE, related_name='smsncon_contact')\n smsblast = models.ForeignKey(SMSBlast, on_delete=models.CASCADE, related_name='smsncon_smsblast')\n\n def save(self, **kwargs):\n if not self.id:\n max = ContactAndSMS.objects.aggregate(id_max=Max('id'))['id_max']\n if max is not None:\n max = max[-3:]\n max = int(max)\n max += 1\n else:\n max = 1\n self.id = \"CONANDSMS\" + \"{0:03d}\".format(max)\n super().save(*kwargs)\n\nclass SMSBlastJob(models.Model):\n id = models.CharField(primary_key=True, editable=False, max_length=9)\n job_id = models.CharField(max_length=100, blank=True, null=True)\n contact = models.ForeignKey(Contact, on_delete=models.CASCADE, related_name='contact_job')\n smsblast = models.ForeignKey(SMSBlast, on_delete=models.CASCADE, related_name='smsblast_job')\n\n def __str__(self):\n return self.job_id\n\n def save(self, **kwargs):\n if not self.id:\n max = SMSBlastJob.objects.aggregate(id_max=Max('id'))['id_max']\n if max is not None:\n max = max[-3:]\n max = int(max)\n max += 1\n else:\n max = 1\n self.id = \"SMSJOB\" + \"{0:03d}\".format(max)\n super().save(*kwargs)\n\nclass SMSStatus(models.Model):\n id = models.CharField(primary_key=True, editable=False, max_length=10)\n job = models.ForeignKey(SMSBlastJob, on_delete=models.CASCADE, related_name='job_status')\n contact = models.ForeignKey(Contact, on_delete=models.CASCADE, related_name='contact_status')\n status = models.FileField(upload_to='pickles/status/')\n\n def __str__(self):\n return self.job_id\n\n def save(self, **kwargs):\n if not self.id:\n max = SMSStatus.objects.aggregate(id_max=Max('id'))['id_max']\n if max is not None:\n max = max[-3:]\n max = int(max)\n max += 1\n else:\n max = 1\n self.id = \"SMSSTAT\" + \"{0:03d}\".format(max)\n super().save(*kwargs)",
"step-ids": [
38,
40,
46,
50,
55
]
}
|
[
38,
40,
46,
50,
55
] |
# -*- coding: utf-8 -*-
"""
helpers
~~~~~~~
Implements various helper functions.
:copyright: (c) 2016 by Patrick Spencer.
:license: Apache 2.0, see LICENSE for more details.
"""
from datetime import datetime, timedelta
import calendar
def month_bounds(year, month):
"""
Returns a tuple of datetime objects (month_start,month_end) given a year and month.
Both params are strings because we want month to be a two digit month representation
and python doesn't handle leading zeros in integers as we want.
:param year: four digit year as a string e.g. "2016"
:param month: 2 digit month as a string e.g. 2 for February, 11 for November
"""
year = int(year)
month = int(month)
month_start = datetime.strptime('%s,%s,1' % (year, month),'%Y,%m,%d')
# days_in_month returns a tuple(weekday, days) where
    # weekday is the weekday the month starts on and days is the number of days in the month
days_in_month = calendar.monthrange(year,month)
month_end = month_start + timedelta(days=days_in_month[1]-1)
return (month_start, month_end)
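# A quick sanity check of the helper above (an illustrative addition, not part
# of the original module; the sample year/month are arbitrary). February 2016
# is a leap year, so the month should end on the 29th.
if __name__ == '__main__':
    start, end = month_bounds('2016', '02')
    print(start)  # 2016-02-01 00:00:00
    print(end)    # 2016-02-29 00:00:00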
|
normal
|
{
"blob_id": "4c5416582afb3cfeb56259954cda2701ea26f8cd",
"index": 7780,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef month_bounds(year, month):\n \"\"\"\n Returns a tuple of datetime objects (month_start,month_end) given a year and month.\n Both params are strings because we want month to be a two digit month representation\n and python doesn't handle leading zeros in integers as we want.\n\n :param year: four digit year as a string e.g. \"2016\"\n :param month: 2 digit month as a string e.g. 2 for February, 11 for November\n \"\"\"\n year = int(year)\n month = int(month)\n month_start = datetime.strptime('%s,%s,1' % (year, month), '%Y,%m,%d')\n days_in_month = calendar.monthrange(year, month)\n month_end = month_start + timedelta(days=days_in_month[1] - 1)\n return month_start, month_end\n",
"step-3": "<mask token>\nfrom datetime import datetime, timedelta\nimport calendar\n\n\ndef month_bounds(year, month):\n \"\"\"\n Returns a tuple of datetime objects (month_start,month_end) given a year and month.\n Both params are strings because we want month to be a two digit month representation\n and python doesn't handle leading zeros in integers as we want.\n\n :param year: four digit year as a string e.g. \"2016\"\n :param month: 2 digit month as a string e.g. 2 for February, 11 for November\n \"\"\"\n year = int(year)\n month = int(month)\n month_start = datetime.strptime('%s,%s,1' % (year, month), '%Y,%m,%d')\n days_in_month = calendar.monthrange(year, month)\n month_end = month_start + timedelta(days=days_in_month[1] - 1)\n return month_start, month_end\n",
"step-4": "# -*- coding: utf-8 -*-\n\"\"\"\n helpers\n ~~~~~~~\n Implements various helper functions.\n\n :copyright: (c) 2016 by Patrick Spencer.\n :license: Apache 2.0, see LICENSE for more details.\n\"\"\"\n\nfrom datetime import datetime, timedelta\nimport calendar\n\ndef month_bounds(year, month):\n \"\"\"\n Returns a tuple of datetime objects (month_start,month_end) given a year and month.\n Both params are strings because we want month to be a two digit month representation\n and python doesn't handle leading zeros in integers as we want.\n\n :param year: four digit year as a string e.g. \"2016\"\n :param month: 2 digit month as a string e.g. 2 for February, 11 for November\n \"\"\"\n year = int(year)\n month = int(month)\n month_start = datetime.strptime('%s,%s,1' % (year, month),'%Y,%m,%d')\n # days_in_month returns a tuple(weekday, days) where\n # weekday is the eekday the month starts on and days is the number of days in the month\n days_in_month = calendar.monthrange(year,month)\n month_end = month_start + timedelta(days=days_in_month[1]-1)\n return (month_start, month_end)\n\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
class EventSerializer(ModelSerializer):
class Meta:
model = Event
fields = '__all__'
class HolidaySerializerRead(ModelSerializer):
country = CountrySerializer()
class Meta:
model = Holiday
fields = '__all__'
class HolidaySerializerWrite(ModelSerializer):
class Meta:
model = Holiday
fields = '__all__'
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class UserSerializer(ModelSerializer):
class Meta:
model = User
fields = '__all__'
class EventSerializer(ModelSerializer):
class Meta:
model = Event
fields = '__all__'
class HolidaySerializerRead(ModelSerializer):
country = CountrySerializer()
class Meta:
model = Holiday
fields = '__all__'
class HolidaySerializerWrite(ModelSerializer):
class Meta:
model = Holiday
fields = '__all__'
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class CountrySerializer(ModelSerializer):
class Meta:
model = Country
fields = '__all__'
class UserSerializer(ModelSerializer):
class Meta:
model = User
fields = '__all__'
class EventSerializer(ModelSerializer):
class Meta:
model = Event
fields = '__all__'
class HolidaySerializerRead(ModelSerializer):
country = CountrySerializer()
class Meta:
model = Holiday
fields = '__all__'
class HolidaySerializerWrite(ModelSerializer):
class Meta:
model = Holiday
fields = '__all__'
<|reserved_special_token_1|>
from django.contrib.auth.models import User
from rest_framework.serializers import ModelSerializer
from app_calendar.models import Holiday, Country, Event, User
class CountrySerializer(ModelSerializer):
class Meta:
model = Country
fields = '__all__'
class UserSerializer(ModelSerializer):
class Meta:
model = User
fields = '__all__'
class EventSerializer(ModelSerializer):
class Meta:
model = Event
fields = '__all__'
class HolidaySerializerRead(ModelSerializer):
country = CountrySerializer()
class Meta:
model = Holiday
fields = '__all__'
class HolidaySerializerWrite(ModelSerializer):
class Meta:
model = Holiday
fields = '__all__'
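# Example usage (an illustrative sketch, not part of the original module; the
# payload contents are placeholders). HolidaySerializerRead nests the full
# Country object on output, while HolidaySerializerWrite accepts a plain
# country primary key on input:
#
#     holiday = Holiday.objects.first()
#     HolidaySerializerRead(holiday).data          # 'country' rendered as a nested dict
#
#     serializer = HolidaySerializerWrite(data=payload)
#     if serializer.is_valid():
#         serializer.save()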
|
flexible
|
{
"blob_id": "5b366b0f6813f686600df9da4a17f190f034a10c",
"index": 2046,
"step-1": "<mask token>\n\n\nclass EventSerializer(ModelSerializer):\n\n\n class Meta:\n model = Event\n fields = '__all__'\n\n\nclass HolidaySerializerRead(ModelSerializer):\n country = CountrySerializer()\n\n\n class Meta:\n model = Holiday\n fields = '__all__'\n\n\nclass HolidaySerializerWrite(ModelSerializer):\n\n\n class Meta:\n model = Holiday\n fields = '__all__'\n",
"step-2": "<mask token>\n\n\nclass UserSerializer(ModelSerializer):\n\n\n class Meta:\n model = User\n fields = '__all__'\n\n\nclass EventSerializer(ModelSerializer):\n\n\n class Meta:\n model = Event\n fields = '__all__'\n\n\nclass HolidaySerializerRead(ModelSerializer):\n country = CountrySerializer()\n\n\n class Meta:\n model = Holiday\n fields = '__all__'\n\n\nclass HolidaySerializerWrite(ModelSerializer):\n\n\n class Meta:\n model = Holiday\n fields = '__all__'\n",
"step-3": "<mask token>\n\n\nclass CountrySerializer(ModelSerializer):\n\n\n class Meta:\n model = Country\n fields = '__all__'\n\n\nclass UserSerializer(ModelSerializer):\n\n\n class Meta:\n model = User\n fields = '__all__'\n\n\nclass EventSerializer(ModelSerializer):\n\n\n class Meta:\n model = Event\n fields = '__all__'\n\n\nclass HolidaySerializerRead(ModelSerializer):\n country = CountrySerializer()\n\n\n class Meta:\n model = Holiday\n fields = '__all__'\n\n\nclass HolidaySerializerWrite(ModelSerializer):\n\n\n class Meta:\n model = Holiday\n fields = '__all__'\n",
"step-4": "from django.contrib.auth.models import User\nfrom rest_framework.serializers import ModelSerializer\nfrom app_calendar.models import Holiday, Country, Event, User\n\n\nclass CountrySerializer(ModelSerializer):\n\n\n class Meta:\n model = Country\n fields = '__all__'\n\n\nclass UserSerializer(ModelSerializer):\n\n\n class Meta:\n model = User\n fields = '__all__'\n\n\nclass EventSerializer(ModelSerializer):\n\n\n class Meta:\n model = Event\n fields = '__all__'\n\n\nclass HolidaySerializerRead(ModelSerializer):\n country = CountrySerializer()\n\n\n class Meta:\n model = Holiday\n fields = '__all__'\n\n\nclass HolidaySerializerWrite(ModelSerializer):\n\n\n class Meta:\n model = Holiday\n fields = '__all__'\n",
"step-5": null,
"step-ids": [
4,
5,
6,
7
]
}
|
[
4,
5,
6,
7
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
print('2 + 3 * 4 =')
print(2 + 3 * 4)
print('2 + (3 * 4) = ')
print(2 + 3 * 4)
<|reserved_special_token_1|>
print("2 + 3 * 4 =")
print(2 + 3 * 4)
print("2 + (3 * 4) = ")
print(2 + (3 * 4))
|
flexible
|
{
"blob_id": "58d137d614a0d5c11bf4325c1ade13f4f4f89f52",
"index": 3184,
"step-1": "<mask token>\n",
"step-2": "print('2 + 3 * 4 =')\nprint(2 + 3 * 4)\nprint('2 + (3 * 4) = ')\nprint(2 + 3 * 4)\n",
"step-3": "print(\"2 + 3 * 4 =\")\nprint(2 + 3 * 4)\n\nprint(\"2 + (3 * 4) = \")\nprint(2 + (3 * 4))\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def get_api_router():
api_router = APIRouter()
api_router.include_router(submissions.router, prefix='/submissions',
tags=['submissions'])
return api_router
<|reserved_special_token_1|>
from fastapi import APIRouter
from .endpoints import submissions
def get_api_router():
api_router = APIRouter()
api_router.include_router(submissions.router, prefix='/submissions',
tags=['submissions'])
return api_router
<|reserved_special_token_1|>
from fastapi import APIRouter
from .endpoints import submissions
def get_api_router():
api_router = APIRouter()
api_router.include_router(submissions.router,
prefix="/submissions",
tags=["submissions"])
# api_router.include_router(users.router, prefix="/users", tags=["users"])
return api_router
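# Example wiring (an illustrative sketch, not part of the original module; the
# app object and the "/api" prefix are assumptions):
#
#     from fastapi import FastAPI
#     app = FastAPI()
#     app.include_router(get_api_router(), prefix="/api")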
|
flexible
|
{
"blob_id": "844c9af4f0d4ca33e7c69b72f9886f58ceebefdb",
"index": 2719,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef get_api_router():\n api_router = APIRouter()\n api_router.include_router(submissions.router, prefix='/submissions',\n tags=['submissions'])\n return api_router\n",
"step-3": "from fastapi import APIRouter\nfrom .endpoints import submissions\n\n\ndef get_api_router():\n api_router = APIRouter()\n api_router.include_router(submissions.router, prefix='/submissions',\n tags=['submissions'])\n return api_router\n",
"step-4": "from fastapi import APIRouter\n\nfrom .endpoints import submissions\n\n\ndef get_api_router():\n api_router = APIRouter()\n api_router.include_router(submissions.router,\n prefix=\"/submissions\",\n tags=[\"submissions\"])\n # api_router.include_router(users.router, prefix=\"/users\", tags=[\"users\"])\n return api_router\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
__title__ = 'FUCKTHEINTRUDERS'
__description__ = 'Checking for Intruders in my locality'
__version__ = '0.0.1'
__author__ = 'Shivam Jalotra'
__email__ = '[email protected]'
__license__ = 'MIT 1.0'
|
flexible
|
{
"blob_id": "ba94a69ac356969ab593afc922a2517f4713771f",
"index": 5536,
"step-1": "<mask token>\n",
"step-2": "__title__ = 'FUCKTHEINTRUDERS'\n__description__ = 'Checking for Intruders in my locality'\n__version__ = '0.0.1'\n__author__ = 'Shivam Jalotra'\n__email__ = '[email protected]'\n__license__ = 'MIT 1.0'\n",
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0,
1
]
}
|
[
0,
1
] |
##
# hunt_and_kill.py
# 05 Oct 2021
# Generates a maze using the hunt and kill algorithm
# S
from sys import argv
from enum import Enum
import random
# Cardinal directions, can be OR'd and AND'd
DIRS = {
'N': 1 << 0,
'E': 1 << 1,
'S': 1 << 2,
'W': 1 << 3
}
O_DIRS = {
'N': 'S',
'E': 'W',
'S': 'N',
'W': 'E'
}
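# For example, a cell carved open to the north and east stores
# DIRS['N'] | DIRS['E'] == 0b0011 == 3, and `cell & DIRS['E']` is non-zero
# exactly when the east passage is open; O_DIRS names the opposite wall to
# knock out in the neighbouring cell.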
def init_maze(width: int, height: int) -> list[int]:
"""
Set up a 2D list with 0 as starting value. Basically an empty maze
"""
return [0] * width * height
def walk_maze(maze: list[int], width: int, height: int, start: tuple[int, int]) -> None:
"""
    Does a random walk, setting the cells as it goes, until it can't find a
path.
"""
# Shortcut for accessing maze
maze_idx = lambda p: p[1] * width + p[0]
# Shortcut funcs for surrounding points
north = lambda p: (p[0] , p[1] -1)
east = lambda p: (p[0] +1, p[1] )
south = lambda p: (p[0] , p[1] +1)
west = lambda p: (p[0] -1, p[1] )
def check_neighbours(pt, visited=False) -> list[tuple[int, int]]:
"""
Returns a list of possible neighbours.
Can pass arg to only count visited neighbours
"""
# Points will be added to this list if they havent been traversed yet
possible_points = dict()
# -- NORTH
p_pt = north(pt)
        # This mess of a condition evaluates to true if the cell is visited and the caller asked for visited cells, and vice versa.
if pt[1] > 0 and (bool(maze[maze_idx(p_pt)]) == (False or visited)):
possible_points[p_pt] = "N"
# -- EAST
p_pt = east(pt)
if pt[0] < width - 1 and (bool(maze[maze_idx(p_pt)]) == (False or visited)):
possible_points[p_pt] = "E"
# -- SOUTH
p_pt = south(pt)
if pt[1] < height - 1 and (bool(maze[maze_idx(p_pt)]) == (False or visited)):
possible_points[p_pt] = "S"
# -- WEST
p_pt = west(pt)
if pt[0] > 0 and (bool(maze[maze_idx(p_pt)]) == (False or visited)):
possible_points[p_pt] = "W"
return possible_points
# First, connect to a random neighbour that has been visited.
starting_n = check_neighbours(start, True)
if starting_n:
neigh, dire = random.choice(tuple(starting_n.items()))
maze[maze_idx(neigh)] |= DIRS[O_DIRS[dire]]
maze[maze_idx(start)] |= DIRS[dire]
step = start
# Walk randomly until out of options
while possible_n := check_neighbours(step):
next_step, direction = random.choice(tuple(possible_n.items()))
# Connect the two cells
maze[maze_idx(step)] |= DIRS[direction]
maze[maze_idx(next_step)] |= DIRS[O_DIRS[direction]]
# Go to next
step = next_step
def gen_maze(width: int, height: int) -> list[int]:
maze = init_maze(width, height)
maze_idx = lambda p: p[1] * width + p[0]
for y in range(height):
for x in range(width):
if not maze[maze_idx((x, y))]:
walk_maze(maze, width, height, (x, y))
return maze
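# gen_maze is the "hunt" phase of hunt-and-kill: it scans the grid row by row
# and starts a new random walk (the "kill" phase in walk_maze) from every cell
# that is still unvisited; each new walk first links itself to an
# already-visited neighbour when one exists (check_neighbours(start, True)).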
def print_maze(maze: list[int], width: int, height: int) -> None:
"""
Print an ASCII maze!!!! Maybe works??
"""
maze_idx = lambda p: p[1] * width + p[0]
# top row
print(' ' + '_' * (2 * width - 1))
for y in range(height):
for x in range(width):
# left wall
if maze[maze_idx((x, y))] & DIRS["W"]:
# leave wall open if you can also go down
if maze[maze_idx((x, y))] & DIRS["S"]:
print(' ', end='')
else:
print('_', end='')
else:
print('|', end='')
if maze[maze_idx((x, y))] & DIRS["S"]:
print(' ', end='')
else:
print('_', end='')
# right wall
print('|')
def main():
width = height = 10
if len(argv) > 2:
width = int(argv[1])
height = int(argv[2])
print(f"Generating maze size {width}x{height}")
maze = gen_maze(width, height)
print_maze(maze, width, height)
return maze
if __name__ == "__main__":
main()
|
normal
|
{
"blob_id": "54002bc7e2a1991d2405acbe1d399e8803ac5582",
"index": 7210,
"step-1": "<mask token>\n\n\ndef walk_maze(maze: list[int], width: int, height: int, start: tuple[int, int]\n ) ->None:\n \"\"\"\n Does a random walk, setting the cells as it goes, until it cant find a\n path.\n \"\"\"\n maze_idx = lambda p: p[1] * width + p[0]\n north = lambda p: (p[0], p[1] - 1)\n east = lambda p: (p[0] + 1, p[1])\n south = lambda p: (p[0], p[1] + 1)\n west = lambda p: (p[0] - 1, p[1])\n\n def check_neighbours(pt, visited=False) ->list[tuple[int, int]]:\n \"\"\"\n Returns a list of possible neighbours.\n Can pass arg to only count visited neighbours\n \"\"\"\n possible_points = dict()\n p_pt = north(pt)\n if pt[1] > 0 and bool(maze[maze_idx(p_pt)]) == (False or visited):\n possible_points[p_pt] = 'N'\n p_pt = east(pt)\n if pt[0] < width - 1 and bool(maze[maze_idx(p_pt)]) == (False or\n visited):\n possible_points[p_pt] = 'E'\n p_pt = south(pt)\n if pt[1] < height - 1 and bool(maze[maze_idx(p_pt)]) == (False or\n visited):\n possible_points[p_pt] = 'S'\n p_pt = west(pt)\n if pt[0] > 0 and bool(maze[maze_idx(p_pt)]) == (False or visited):\n possible_points[p_pt] = 'W'\n return possible_points\n starting_n = check_neighbours(start, True)\n if starting_n:\n neigh, dire = random.choice(tuple(starting_n.items()))\n maze[maze_idx(neigh)] |= DIRS[O_DIRS[dire]]\n maze[maze_idx(start)] |= DIRS[dire]\n step = start\n while (possible_n := check_neighbours(step)):\n next_step, direction = random.choice(tuple(possible_n.items()))\n maze[maze_idx(step)] |= DIRS[direction]\n maze[maze_idx(next_step)] |= DIRS[O_DIRS[direction]]\n step = next_step\n\n\ndef gen_maze(width: int, height: int) ->list[int]:\n maze = init_maze(width, height)\n maze_idx = lambda p: p[1] * width + p[0]\n for y in range(height):\n for x in range(width):\n if not maze[maze_idx((x, y))]:\n walk_maze(maze, width, height, (x, y))\n return maze\n\n\ndef print_maze(maze: list[int], width: int, height: int) ->None:\n \"\"\"\n Print an ASCII maze!!!! Maybe works??\n \"\"\"\n maze_idx = lambda p: p[1] * width + p[0]\n print(' ' + '_' * (2 * width - 1))\n for y in range(height):\n for x in range(width):\n if maze[maze_idx((x, y))] & DIRS['W']:\n if maze[maze_idx((x, y))] & DIRS['S']:\n print(' ', end='')\n else:\n print('_', end='')\n else:\n print('|', end='')\n if maze[maze_idx((x, y))] & DIRS['S']:\n print(' ', end='')\n else:\n print('_', end='')\n print('|')\n\n\ndef main():\n width = height = 10\n if len(argv) > 2:\n width = int(argv[1])\n height = int(argv[2])\n print(f'Generating maze size {width}x{height}')\n maze = gen_maze(width, height)\n print_maze(maze, width, height)\n return maze\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef init_maze(width: int, height: int) ->list[int]:\n \"\"\"\n Set up a 2D list with 0 as starting value. Basically an empty maze\n \"\"\"\n return [0] * width * height\n\n\ndef walk_maze(maze: list[int], width: int, height: int, start: tuple[int, int]\n ) ->None:\n \"\"\"\n Does a random walk, setting the cells as it goes, until it cant find a\n path.\n \"\"\"\n maze_idx = lambda p: p[1] * width + p[0]\n north = lambda p: (p[0], p[1] - 1)\n east = lambda p: (p[0] + 1, p[1])\n south = lambda p: (p[0], p[1] + 1)\n west = lambda p: (p[0] - 1, p[1])\n\n def check_neighbours(pt, visited=False) ->list[tuple[int, int]]:\n \"\"\"\n Returns a list of possible neighbours.\n Can pass arg to only count visited neighbours\n \"\"\"\n possible_points = dict()\n p_pt = north(pt)\n if pt[1] > 0 and bool(maze[maze_idx(p_pt)]) == (False or visited):\n possible_points[p_pt] = 'N'\n p_pt = east(pt)\n if pt[0] < width - 1 and bool(maze[maze_idx(p_pt)]) == (False or\n visited):\n possible_points[p_pt] = 'E'\n p_pt = south(pt)\n if pt[1] < height - 1 and bool(maze[maze_idx(p_pt)]) == (False or\n visited):\n possible_points[p_pt] = 'S'\n p_pt = west(pt)\n if pt[0] > 0 and bool(maze[maze_idx(p_pt)]) == (False or visited):\n possible_points[p_pt] = 'W'\n return possible_points\n starting_n = check_neighbours(start, True)\n if starting_n:\n neigh, dire = random.choice(tuple(starting_n.items()))\n maze[maze_idx(neigh)] |= DIRS[O_DIRS[dire]]\n maze[maze_idx(start)] |= DIRS[dire]\n step = start\n while (possible_n := check_neighbours(step)):\n next_step, direction = random.choice(tuple(possible_n.items()))\n maze[maze_idx(step)] |= DIRS[direction]\n maze[maze_idx(next_step)] |= DIRS[O_DIRS[direction]]\n step = next_step\n\n\ndef gen_maze(width: int, height: int) ->list[int]:\n maze = init_maze(width, height)\n maze_idx = lambda p: p[1] * width + p[0]\n for y in range(height):\n for x in range(width):\n if not maze[maze_idx((x, y))]:\n walk_maze(maze, width, height, (x, y))\n return maze\n\n\ndef print_maze(maze: list[int], width: int, height: int) ->None:\n \"\"\"\n Print an ASCII maze!!!! Maybe works??\n \"\"\"\n maze_idx = lambda p: p[1] * width + p[0]\n print(' ' + '_' * (2 * width - 1))\n for y in range(height):\n for x in range(width):\n if maze[maze_idx((x, y))] & DIRS['W']:\n if maze[maze_idx((x, y))] & DIRS['S']:\n print(' ', end='')\n else:\n print('_', end='')\n else:\n print('|', end='')\n if maze[maze_idx((x, y))] & DIRS['S']:\n print(' ', end='')\n else:\n print('_', end='')\n print('|')\n\n\ndef main():\n width = height = 10\n if len(argv) > 2:\n width = int(argv[1])\n height = int(argv[2])\n print(f'Generating maze size {width}x{height}')\n maze = gen_maze(width, height)\n print_maze(maze, width, height)\n return maze\n\n\nif __name__ == '__main__':\n main()\n",
"step-3": "<mask token>\nDIRS = {'N': 1 << 0, 'E': 1 << 1, 'S': 1 << 2, 'W': 1 << 3}\nO_DIRS = {'N': 'S', 'E': 'W', 'S': 'N', 'W': 'E'}\n\n\ndef init_maze(width: int, height: int) ->list[int]:\n \"\"\"\n Set up a 2D list with 0 as starting value. Basically an empty maze\n \"\"\"\n return [0] * width * height\n\n\ndef walk_maze(maze: list[int], width: int, height: int, start: tuple[int, int]\n ) ->None:\n \"\"\"\n Does a random walk, setting the cells as it goes, until it cant find a\n path.\n \"\"\"\n maze_idx = lambda p: p[1] * width + p[0]\n north = lambda p: (p[0], p[1] - 1)\n east = lambda p: (p[0] + 1, p[1])\n south = lambda p: (p[0], p[1] + 1)\n west = lambda p: (p[0] - 1, p[1])\n\n def check_neighbours(pt, visited=False) ->list[tuple[int, int]]:\n \"\"\"\n Returns a list of possible neighbours.\n Can pass arg to only count visited neighbours\n \"\"\"\n possible_points = dict()\n p_pt = north(pt)\n if pt[1] > 0 and bool(maze[maze_idx(p_pt)]) == (False or visited):\n possible_points[p_pt] = 'N'\n p_pt = east(pt)\n if pt[0] < width - 1 and bool(maze[maze_idx(p_pt)]) == (False or\n visited):\n possible_points[p_pt] = 'E'\n p_pt = south(pt)\n if pt[1] < height - 1 and bool(maze[maze_idx(p_pt)]) == (False or\n visited):\n possible_points[p_pt] = 'S'\n p_pt = west(pt)\n if pt[0] > 0 and bool(maze[maze_idx(p_pt)]) == (False or visited):\n possible_points[p_pt] = 'W'\n return possible_points\n starting_n = check_neighbours(start, True)\n if starting_n:\n neigh, dire = random.choice(tuple(starting_n.items()))\n maze[maze_idx(neigh)] |= DIRS[O_DIRS[dire]]\n maze[maze_idx(start)] |= DIRS[dire]\n step = start\n while (possible_n := check_neighbours(step)):\n next_step, direction = random.choice(tuple(possible_n.items()))\n maze[maze_idx(step)] |= DIRS[direction]\n maze[maze_idx(next_step)] |= DIRS[O_DIRS[direction]]\n step = next_step\n\n\ndef gen_maze(width: int, height: int) ->list[int]:\n maze = init_maze(width, height)\n maze_idx = lambda p: p[1] * width + p[0]\n for y in range(height):\n for x in range(width):\n if not maze[maze_idx((x, y))]:\n walk_maze(maze, width, height, (x, y))\n return maze\n\n\ndef print_maze(maze: list[int], width: int, height: int) ->None:\n \"\"\"\n Print an ASCII maze!!!! Maybe works??\n \"\"\"\n maze_idx = lambda p: p[1] * width + p[0]\n print(' ' + '_' * (2 * width - 1))\n for y in range(height):\n for x in range(width):\n if maze[maze_idx((x, y))] & DIRS['W']:\n if maze[maze_idx((x, y))] & DIRS['S']:\n print(' ', end='')\n else:\n print('_', end='')\n else:\n print('|', end='')\n if maze[maze_idx((x, y))] & DIRS['S']:\n print(' ', end='')\n else:\n print('_', end='')\n print('|')\n\n\ndef main():\n width = height = 10\n if len(argv) > 2:\n width = int(argv[1])\n height = int(argv[2])\n print(f'Generating maze size {width}x{height}')\n maze = gen_maze(width, height)\n print_maze(maze, width, height)\n return maze\n\n\nif __name__ == '__main__':\n main()\n",
"step-4": "from sys import argv\nfrom enum import Enum\nimport random\nDIRS = {'N': 1 << 0, 'E': 1 << 1, 'S': 1 << 2, 'W': 1 << 3}\nO_DIRS = {'N': 'S', 'E': 'W', 'S': 'N', 'W': 'E'}\n\n\ndef init_maze(width: int, height: int) ->list[int]:\n \"\"\"\n Set up a 2D list with 0 as starting value. Basically an empty maze\n \"\"\"\n return [0] * width * height\n\n\ndef walk_maze(maze: list[int], width: int, height: int, start: tuple[int, int]\n ) ->None:\n \"\"\"\n Does a random walk, setting the cells as it goes, until it cant find a\n path.\n \"\"\"\n maze_idx = lambda p: p[1] * width + p[0]\n north = lambda p: (p[0], p[1] - 1)\n east = lambda p: (p[0] + 1, p[1])\n south = lambda p: (p[0], p[1] + 1)\n west = lambda p: (p[0] - 1, p[1])\n\n def check_neighbours(pt, visited=False) ->list[tuple[int, int]]:\n \"\"\"\n Returns a list of possible neighbours.\n Can pass arg to only count visited neighbours\n \"\"\"\n possible_points = dict()\n p_pt = north(pt)\n if pt[1] > 0 and bool(maze[maze_idx(p_pt)]) == (False or visited):\n possible_points[p_pt] = 'N'\n p_pt = east(pt)\n if pt[0] < width - 1 and bool(maze[maze_idx(p_pt)]) == (False or\n visited):\n possible_points[p_pt] = 'E'\n p_pt = south(pt)\n if pt[1] < height - 1 and bool(maze[maze_idx(p_pt)]) == (False or\n visited):\n possible_points[p_pt] = 'S'\n p_pt = west(pt)\n if pt[0] > 0 and bool(maze[maze_idx(p_pt)]) == (False or visited):\n possible_points[p_pt] = 'W'\n return possible_points\n starting_n = check_neighbours(start, True)\n if starting_n:\n neigh, dire = random.choice(tuple(starting_n.items()))\n maze[maze_idx(neigh)] |= DIRS[O_DIRS[dire]]\n maze[maze_idx(start)] |= DIRS[dire]\n step = start\n while (possible_n := check_neighbours(step)):\n next_step, direction = random.choice(tuple(possible_n.items()))\n maze[maze_idx(step)] |= DIRS[direction]\n maze[maze_idx(next_step)] |= DIRS[O_DIRS[direction]]\n step = next_step\n\n\ndef gen_maze(width: int, height: int) ->list[int]:\n maze = init_maze(width, height)\n maze_idx = lambda p: p[1] * width + p[0]\n for y in range(height):\n for x in range(width):\n if not maze[maze_idx((x, y))]:\n walk_maze(maze, width, height, (x, y))\n return maze\n\n\ndef print_maze(maze: list[int], width: int, height: int) ->None:\n \"\"\"\n Print an ASCII maze!!!! Maybe works??\n \"\"\"\n maze_idx = lambda p: p[1] * width + p[0]\n print(' ' + '_' * (2 * width - 1))\n for y in range(height):\n for x in range(width):\n if maze[maze_idx((x, y))] & DIRS['W']:\n if maze[maze_idx((x, y))] & DIRS['S']:\n print(' ', end='')\n else:\n print('_', end='')\n else:\n print('|', end='')\n if maze[maze_idx((x, y))] & DIRS['S']:\n print(' ', end='')\n else:\n print('_', end='')\n print('|')\n\n\ndef main():\n width = height = 10\n if len(argv) > 2:\n width = int(argv[1])\n height = int(argv[2])\n print(f'Generating maze size {width}x{height}')\n maze = gen_maze(width, height)\n print_maze(maze, width, height)\n return maze\n\n\nif __name__ == '__main__':\n main()\n",
"step-5": "##\n# hunt_and_kill.py\n# 05 Oct 2021\n# Generates a maze using the hunt and kill algorithm\n# S\nfrom sys import argv\nfrom enum import Enum\nimport random\n\n# Cardinal directions, can be OR'd and AND'd\nDIRS = {\n 'N': 1 << 0,\n 'E': 1 << 1,\n 'S': 1 << 2,\n 'W': 1 << 3\n}\n\nO_DIRS = {\n 'N': 'S',\n 'E': 'W',\n 'S': 'N',\n 'W': 'E'\n}\n\ndef init_maze(width: int, height: int) -> list[int]:\n \"\"\"\n Set up a 2D list with 0 as starting value. Basically an empty maze\n \"\"\"\n return [0] * width * height\n\n\ndef walk_maze(maze: list[int], width: int, height: int, start: tuple[int, int]) -> None:\n \"\"\"\n Does a random walk, setting the cells as it goes, until it cant find a\n path.\n \"\"\"\n # Shortcut for accessing maze\n maze_idx = lambda p: p[1] * width + p[0]\n\n # Shortcut funcs for surrounding points\n north = lambda p: (p[0] , p[1] -1)\n east = lambda p: (p[0] +1, p[1] )\n south = lambda p: (p[0] , p[1] +1)\n west = lambda p: (p[0] -1, p[1] )\n\n def check_neighbours(pt, visited=False) -> list[tuple[int, int]]:\n \"\"\"\n Returns a list of possible neighbours.\n Can pass arg to only count visited neighbours\n \"\"\"\n # Points will be added to this list if they havent been traversed yet\n possible_points = dict()\n\n # -- NORTH\n p_pt = north(pt)\n # This mess of a condition will evaluate to true if the cell is visited and the user is asking for a visited cell. Viceversa.\n if pt[1] > 0 and (bool(maze[maze_idx(p_pt)]) == (False or visited)):\n possible_points[p_pt] = \"N\"\n\n # -- EAST\n p_pt = east(pt)\n if pt[0] < width - 1 and (bool(maze[maze_idx(p_pt)]) == (False or visited)):\n possible_points[p_pt] = \"E\"\n\n # -- SOUTH\n p_pt = south(pt)\n if pt[1] < height - 1 and (bool(maze[maze_idx(p_pt)]) == (False or visited)):\n possible_points[p_pt] = \"S\"\n\n # -- WEST\n p_pt = west(pt)\n if pt[0] > 0 and (bool(maze[maze_idx(p_pt)]) == (False or visited)):\n possible_points[p_pt] = \"W\"\n\n return possible_points\n\n # First, connect to a random neighbour that has been visited.\n starting_n = check_neighbours(start, True)\n if starting_n:\n neigh, dire = random.choice(tuple(starting_n.items()))\n\n maze[maze_idx(neigh)] |= DIRS[O_DIRS[dire]]\n maze[maze_idx(start)] |= DIRS[dire]\n\n step = start\n\n # Walk randomly until out of options\n while possible_n := check_neighbours(step):\n next_step, direction = random.choice(tuple(possible_n.items()))\n\n # Connect the two cells\n maze[maze_idx(step)] |= DIRS[direction]\n maze[maze_idx(next_step)] |= DIRS[O_DIRS[direction]]\n\n # Go to next\n step = next_step\n\n\n\ndef gen_maze(width: int, height: int) -> list[int]:\n maze = init_maze(width, height)\n\n maze_idx = lambda p: p[1] * width + p[0]\n for y in range(height):\n for x in range(width):\n if not maze[maze_idx((x, y))]:\n walk_maze(maze, width, height, (x, y))\n\n return maze\n\ndef print_maze(maze: list[int], width: int, height: int) -> None:\n \"\"\"\n Print an ASCII maze!!!! 
Maybe works??\n \"\"\"\n maze_idx = lambda p: p[1] * width + p[0]\n\n # top row\n print(' ' + '_' * (2 * width - 1))\n\n for y in range(height):\n for x in range(width):\n # left wall\n if maze[maze_idx((x, y))] & DIRS[\"W\"]:\n # leave wall open if you can also go down\n if maze[maze_idx((x, y))] & DIRS[\"S\"]:\n print(' ', end='')\n else:\n print('_', end='')\n\n else:\n print('|', end='')\n\n if maze[maze_idx((x, y))] & DIRS[\"S\"]:\n print(' ', end='')\n else:\n print('_', end='')\n # right wall\n print('|')\n\ndef main():\n width = height = 10\n if len(argv) > 2:\n width = int(argv[1])\n height = int(argv[2])\n\n print(f\"Generating maze size {width}x{height}\")\n maze = gen_maze(width, height)\n print_maze(maze, width, height)\n return maze\n\n\nif __name__ == \"__main__\":\n main()\n",
"step-ids": [
4,
6,
7,
8,
9
]
}
|
[
4,
6,
7,
8,
9
] |
class Player:
def __init__(self, hp=100, atk=100):
self.hp = hp
self.atk = atk
<|reserved_special_token_0|>
<|reserved_special_token_0|>
class Enemy:
def __init__(self, hp=100, atk=99):
self.hp = hp
self.atk = atk
def damage(self, value):
print('敌人:啊')
self.hp -= value
if self.hp <= 0:
print('电脑:敌人死亡,播放动画')
def attack(self, player):
print('电脑:敌人攻击玩家')
player.damage(self.atk)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
class Player:
def __init__(self, hp=100, atk=100):
self.hp = hp
self.atk = atk
def attack(self, enemy):
print('电脑:玩家攻击敌人')
enemy.damage(self.atk)
<|reserved_special_token_0|>
class Enemy:
def __init__(self, hp=100, atk=99):
self.hp = hp
self.atk = atk
def damage(self, value):
print('敌人:啊')
self.hp -= value
if self.hp <= 0:
print('电脑:敌人死亡,播放动画')
def attack(self, player):
print('电脑:敌人攻击玩家')
player.damage(self.atk)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
class Player:
def __init__(self, hp=100, atk=100):
self.hp = hp
self.atk = atk
def attack(self, enemy):
print('电脑:玩家攻击敌人')
enemy.damage(self.atk)
def damage(self, value):
print('玩家:我去')
self.hp -= value
if self.hp <= 0:
print('敌人:你真菜')
class Enemy:
def __init__(self, hp=100, atk=99):
self.hp = hp
self.atk = atk
def damage(self, value):
print('敌人:啊')
self.hp -= value
if self.hp <= 0:
print('电脑:敌人死亡,播放动画')
def attack(self, player):
print('电脑:敌人攻击玩家')
player.damage(self.atk)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
class Player:
def __init__(self, hp=100, atk=100):
self.hp = hp
self.atk = atk
def attack(self, enemy):
print('电脑:玩家攻击敌人')
enemy.damage(self.atk)
def damage(self, value):
print('玩家:我去')
self.hp -= value
if self.hp <= 0:
print('敌人:你真菜')
class Enemy:
def __init__(self, hp=100, atk=99):
self.hp = hp
self.atk = atk
def damage(self, value):
print('敌人:啊')
self.hp -= value
if self.hp <= 0:
print('电脑:敌人死亡,播放动画')
def attack(self, player):
print('电脑:敌人攻击玩家')
player.damage(self.atk)
<|reserved_special_token_0|>
p01.attack(e01)
e01.attack(p01)
e01.attack(p01)
<|reserved_special_token_1|>
# The player (attack power) attacks the enemy (HP); the enemy takes damage (loses HP) and may die (play a death animation)
# The enemy attacks the player; the player takes damage (loses HP, screen cracks) and may die (game over)
# class Player:
# def __init__(self,name,hp,atk):
# self.name = name
# self.hp = hp
# self.atk = atk
#
# @property
# def hp(self):
# return self.__hp
# @hp.setter
# def hp(self,value):
# if 0<=value<=100:
# self.__hp = value
# else:
# raise ValueError('血量不在区间内')
#
# @property
# def atk(self):
# return self.__atk
#
# @atk.setter
# def atk(self, value):
# if 0 <= value <= 50:
# self.__atk = value
# else:
# raise ValueError('攻击力不在区间内')
#
#
# class Enemy:
# def __init__(self, e_name, e_hp, e_atk):
# self.e_name = e_name
# self.e_hp = e_hp
# self.e_atk = e_atk
#
# @property
# def e_hp(self):
# return self.__e_hp
#
# @e_hp.setter
# def e_hp(self, value):
# if 0 <= value <= 100:
# self.__e_hp = value
# else:
# raise ValueError('血量不在区间内')
#
# @property
# def e_atk(self):
# return self.__e_atk
#
# @e_atk.setter
# def e_atk(self, value):
# if 0 <= value <= 20:
# self.__e_atk = value
# else:
# raise ValueError('攻击力不在区间内')
#
#
#
# p1 = Player('悟空',100,20)
# e1 = Enemy('妖怪',40,10)
#
# #1.玩家(攻击力)攻击敌人(血量)敌人受伤(减血)可能死亡(播放动画)
# print('1.玩家攻击敌人:')
# def p_atk_e():
# count = 0
# while True:
# e1.e_hp -= p1.atk
# count += 1
# if e1.e_hp >0:
# print('玩家攻击%d次,敌人血量减少到%d' %
# (count,e1.e_hp))
# elif e1.e_hp == 0:
# print('玩家攻击%d次,敌人死亡,播放动画' % count)
# break
#
# p_atk_e()
#
# # 2.敌人攻击玩家 玩家受伤(减血 碎屏) 可能死亡(游戏结束)
# print('2.敌人攻击玩家:')
# def e_atk_p():
# count = 0
# while True:
# p1.hp -= e1.e_atk
# count += 1
# if p1.hp >0:
# print('敌人攻击%d次,玩家血量减少到%d' %
# (count,p1.hp))
# elif p1.hp == 0:
# print('敌人攻击%d次,玩家死亡,游戏结束' % count)
# break
# e_atk_p()
# Player class
class Player:
def __init__(self,hp = 100,atk = 100):
self.hp = hp
self.atk = atk
def attack(self,enemy):
print('电脑:玩家攻击敌人')
enemy.damage(self.atk)
def damage(self,value):
print('玩家:我去')
        # the player loses HP
self.hp -= value
        # the player may die
if self.hp <= 0:
print('敌人:你真菜')
# Enemy class
class Enemy:
def __init__(self,hp = 100,atk = 99):
self.hp = hp
self.atk = atk
def damage(self,value):
print('敌人:啊')
        # the enemy loses HP
self.hp -= value
        # the enemy may die
if self.hp <= 0:
print('电脑:敌人死亡,播放动画')
def attack(self,player):
print('电脑:敌人攻击玩家')
player.damage(self.atk)
p01 = Player()
e01 = Enemy()
p01.attack(e01)
e01.attack(p01)
e01.attack(p01)
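# Expected flow of the demo above: the player's single attack (atk=100) drops
# the enemy from 100 HP to 0, so the enemy-death message prints; the enemy's
# first counter-attack (atk=99) leaves the player at 1 HP, and the second one
# pushes the player below zero, triggering the game-over taunt.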
|
flexible
|
{
"blob_id": "3065c87f79433e9fbbd2ff45c2915dfd5b1fa7cc",
"index": 8427,
"step-1": "class Player:\n\n def __init__(self, hp=100, atk=100):\n self.hp = hp\n self.atk = atk\n <mask token>\n <mask token>\n\n\nclass Enemy:\n\n def __init__(self, hp=100, atk=99):\n self.hp = hp\n self.atk = atk\n\n def damage(self, value):\n print('敌人:啊')\n self.hp -= value\n if self.hp <= 0:\n print('电脑:敌人死亡,播放动画')\n\n def attack(self, player):\n print('电脑:敌人攻击玩家')\n player.damage(self.atk)\n\n\n<mask token>\n",
"step-2": "class Player:\n\n def __init__(self, hp=100, atk=100):\n self.hp = hp\n self.atk = atk\n\n def attack(self, enemy):\n print('电脑:玩家攻击敌人')\n enemy.damage(self.atk)\n <mask token>\n\n\nclass Enemy:\n\n def __init__(self, hp=100, atk=99):\n self.hp = hp\n self.atk = atk\n\n def damage(self, value):\n print('敌人:啊')\n self.hp -= value\n if self.hp <= 0:\n print('电脑:敌人死亡,播放动画')\n\n def attack(self, player):\n print('电脑:敌人攻击玩家')\n player.damage(self.atk)\n\n\n<mask token>\n",
"step-3": "class Player:\n\n def __init__(self, hp=100, atk=100):\n self.hp = hp\n self.atk = atk\n\n def attack(self, enemy):\n print('电脑:玩家攻击敌人')\n enemy.damage(self.atk)\n\n def damage(self, value):\n print('玩家:我去')\n self.hp -= value\n if self.hp <= 0:\n print('敌人:你真菜')\n\n\nclass Enemy:\n\n def __init__(self, hp=100, atk=99):\n self.hp = hp\n self.atk = atk\n\n def damage(self, value):\n print('敌人:啊')\n self.hp -= value\n if self.hp <= 0:\n print('电脑:敌人死亡,播放动画')\n\n def attack(self, player):\n print('电脑:敌人攻击玩家')\n player.damage(self.atk)\n\n\n<mask token>\n",
"step-4": "class Player:\n\n def __init__(self, hp=100, atk=100):\n self.hp = hp\n self.atk = atk\n\n def attack(self, enemy):\n print('电脑:玩家攻击敌人')\n enemy.damage(self.atk)\n\n def damage(self, value):\n print('玩家:我去')\n self.hp -= value\n if self.hp <= 0:\n print('敌人:你真菜')\n\n\nclass Enemy:\n\n def __init__(self, hp=100, atk=99):\n self.hp = hp\n self.atk = atk\n\n def damage(self, value):\n print('敌人:啊')\n self.hp -= value\n if self.hp <= 0:\n print('电脑:敌人死亡,播放动画')\n\n def attack(self, player):\n print('电脑:敌人攻击玩家')\n player.damage(self.atk)\n\n\n<mask token>\np01.attack(e01)\ne01.attack(p01)\ne01.attack(p01)\n",
"step-5": "# 玩家(攻击力)攻击敌人(血量)敌人受伤(减血)可能死亡(播放动画)\n# 敌人攻击玩家 玩家受伤(减血 碎屏) 可能死亡(游戏结束)\n\n# class Player:\n# def __init__(self,name,hp,atk):\n# self.name = name\n# self.hp = hp\n# self.atk = atk\n#\n# @property\n# def hp(self):\n# return self.__hp\n# @hp.setter\n# def hp(self,value):\n# if 0<=value<=100:\n# self.__hp = value\n# else:\n# raise ValueError('血量不在区间内')\n#\n# @property\n# def atk(self):\n# return self.__atk\n#\n# @atk.setter\n# def atk(self, value):\n# if 0 <= value <= 50:\n# self.__atk = value\n# else:\n# raise ValueError('攻击力不在区间内')\n#\n#\n# class Enemy:\n# def __init__(self, e_name, e_hp, e_atk):\n# self.e_name = e_name\n# self.e_hp = e_hp\n# self.e_atk = e_atk\n#\n# @property\n# def e_hp(self):\n# return self.__e_hp\n#\n# @e_hp.setter\n# def e_hp(self, value):\n# if 0 <= value <= 100:\n# self.__e_hp = value\n# else:\n# raise ValueError('血量不在区间内')\n#\n# @property\n# def e_atk(self):\n# return self.__e_atk\n#\n# @e_atk.setter\n# def e_atk(self, value):\n# if 0 <= value <= 20:\n# self.__e_atk = value\n# else:\n# raise ValueError('攻击力不在区间内')\n#\n#\n#\n# p1 = Player('悟空',100,20)\n# e1 = Enemy('妖怪',40,10)\n#\n# #1.玩家(攻击力)攻击敌人(血量)敌人受伤(减血)可能死亡(播放动画)\n# print('1.玩家攻击敌人:')\n# def p_atk_e():\n# count = 0\n# while True:\n# e1.e_hp -= p1.atk\n# count += 1\n# if e1.e_hp >0:\n# print('玩家攻击%d次,敌人血量减少到%d' %\n# (count,e1.e_hp))\n# elif e1.e_hp == 0:\n# print('玩家攻击%d次,敌人死亡,播放动画' % count)\n# break\n#\n# p_atk_e()\n#\n# # 2.敌人攻击玩家 玩家受伤(减血 碎屏) 可能死亡(游戏结束)\n# print('2.敌人攻击玩家:')\n# def e_atk_p():\n# count = 0\n# while True:\n# p1.hp -= e1.e_atk\n# count += 1\n# if p1.hp >0:\n# print('敌人攻击%d次,玩家血量减少到%d' %\n# (count,p1.hp))\n# elif p1.hp == 0:\n# print('敌人攻击%d次,玩家死亡,游戏结束' % count)\n# break\n# e_atk_p()\n\n\n#玩家类\nclass Player:\n def __init__(self,hp = 100,atk = 100):\n self.hp = hp\n self.atk = atk\n def attack(self,enemy):\n print('电脑:玩家攻击敌人')\n enemy.damage(self.atk)\n def damage(self,value):\n print('玩家:我去')\n #敌人减血\n self.hp -= value\n #可能死亡\n if self.hp <= 0:\n print('敌人:你真菜')\n\n#敌人类\nclass Enemy:\n def __init__(self,hp = 100,atk = 99):\n self.hp = hp\n self.atk = atk\n def damage(self,value):\n print('敌人:啊')\n #玩家减血\n self.hp -= value\n #可能死亡\n if self.hp <= 0:\n print('电脑:敌人死亡,播放动画')\n def attack(self,player):\n print('电脑:敌人攻击玩家')\n player.damage(self.atk)\n\np01 = Player()\ne01 = Enemy()\np01.attack(e01)\ne01.attack(p01)\ne01.attack(p01)\n",
"step-ids": [
6,
7,
8,
9,
11
]
}
|
[
6,
7,
8,
9,
11
] |
#coding=utf-8
#########################################
# dbscan:
# Usage: reads a raw GPS log file,
# writes path and cluster files, and reports the classification accuracy
#########################################
from matplotlib.pyplot import *
import matplotlib.pyplot as plt
from collections import defaultdict
import random
from math import *
import numpy as np
import datetime
from dateutil.parser import parse
import time
def dataset(filename):
    # read the raw GPS log file
lines = open(filename,'r').readlines()
l = len(lines)
all_points = []
for i in range(l):
if lines[i].strip():
line = lines[i].split()
            time = parse(line[0] + ' ' + line[1])  # parse the timestamp so the later time arithmetic works
lat = float(line[4])
lon = float(line[6])
all_points.append([lat,lon,time])
return all_points
def datarevise(all_points):
    # smooth the track with a 5-point moving average
point_new = []
all_points1 = np.array(all_points)
l = len(all_points)
for i in range(2,l-3):
lat_lon = np.array(all_points1[i-2:i+3,:-1],dtype = float).mean(0)
point_new.append([lat_lon[0],lat_lon[1],all_points1[i][-1]])
return point_new
def dist(p1, p2):
    # compute the distance between two points (km)
a = cos(p1[0])*cos(p2[0])
b = sin(p1[0])*sin(p2[0])*cos(p2[1]-p1[1])
if a+b >=1:
return 0
return acos(float(a+b))*6371*pi/180
def find_core(all_points,E,minPts):
    # find the core points
    # returns: core points, points to plot, non-core points
other_points =[]
core_points=[]
plotted_points=[]
for point in all_points:
        point.append(0)  # initial cluster label is 0
        total = 0  # number of points within distance E of this point
for otherPoint in all_points:
distance = dist(otherPoint,point)
if distance <= E:
total += 1
if total > minPts:
core_points.append(point)
plotted_points.append(point)
else:
other_points.append(point)
return core_points,plotted_points,other_points
def find_border(core_points,plotted_points,other_points,E):
    # among the non-core points, find the border points
    # returns: border points, points to plot
border_points=[]
for core in core_points:
for other in other_points:
            if dist(core,other) <= E:  # a border point lies within E of some core point
border_points.append(other)
plotted_points.append(other)
return border_points,plotted_points
def algorithm(all_points,core_points,border_points,plotted_points,E):
    # returns: clusters (as a dict), noise points
    # assign every core point to a cluster
cluster_label = 0
for point in core_points:
if point[-1] == 0:
cluster_label += 1
point[-1] = cluster_label
for point2 in plotted_points:
distance = dist(point2,point)
if point2[-1] ==0 and distance <= E:
point2[-1] =point[-1]
    # group the labelled points into a dict keyed by cluster label
cluster_dict = {}
for point in plotted_points:
if cluster_dict.get(point[-1]) is None:
cluster_dict[point[-1]] = [point[0:-1]]
else:
cluster_dict[point[-1]].append(point[0:-1])
    # sort the points inside each cluster by time
cluster_dict_sort = {}
for lable in cluster_dict:
cluster_dict_sort.setdefault(lable,[])
cl = np.array(cluster_dict[lable])
cl_sort = cl[cl[:,-1].argsort()]
cluster_dict_sort[lable] = cl_sort
    # noise points: neither core points nor border points
noise_points=[]
for point in all_points:
if point not in core_points and point not in border_points:
noise_points.append(point[0:-1])
return cluster_dict_sort,noise_points
def durtime(noise_points,difftime):
    # input: noise points, time-gap threshold (seconds)
    # splits the noise points into separate paths
    # returns: a list of paths, each a list of points
no = np.array(noise_points)
no_sort = no[no[:,-1].argsort()]
l = len(no_sort)
k = [0]
for i in range(l-1):
diff_time = (no_sort[i+1][-1] - no_sort[i][-1]).seconds
if diff_time > difftime:
k.append(i+1)
k.append(l)
no_split = []
for i in range(len(k)-1):
no_split.append(no_sort[k[i]:k[i+1]])
return no_split
def matplotshow(cluster_dict,no_split,name):
    # plot each cluster
markers = ['or', 'ob', 'og', 'ok', '^r', '+r', 'sr', 'dr', '<r', 'pr']
i=0
for lable in cluster_dict:
for j in cluster_dict[lable]:
plot(j[0], j[1],markers[i])
i += 1
i = i%10
        print(i)
    # plot the paths
markers = ['r', 'b', 'g', 'k', 'c', 'y', 'm',]
l =len(no_split)
for i in range(l):
path = np.array(no_split[i])
plt.plot(path[:,0],path[:,1],markers[i%7])
        print(i)
title(" clusters created with E ="+str(E)+" Min Points="+str(minPts)+" total points="+str(len(all_points))+" noise Points = "+ str(len(noise_points)))
savefig(name)
show()
def datewrite(no_split,filename,mark):
f = open(filename,'w+')
for path in no_split:
f.write( str(mark) +'\n')
for no_path in path:
f.write(str(list(no_path))+'\n')
f.close()
def datewrite1(no_split,filename,mark):
f = open(filename,'w+')
for path in no_split:
for no_path in path:
f.write( str(mark) +'\n')
for j in no_path:
f.write(str(list(j))+'\n')
f.close()
if __name__ == '__main__':
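    # full pipeline: load GPS log -> smooth -> find core/border points -> cluster -> split noise into paths -> plot and save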
filename = 'D:/sensor_data/sensor/gps/location_zh0710.txt'
all_points_old = dataset(filename)
all_points = datarevise(all_points_old)
E,minPts = 0.1,10
core_points,plotted_points,other_points = find_core(all_points,E,minPts)
border_points,plotted_points = find_border(core_points,plotted_points,other_points,E)
    cluster_dict,noise_points = algorithm(all_points,core_points,border_points,plotted_points,E)
difftime = 1200
no_split = durtime(noise_points,difftime)
matplotshow(cluster_dict,no_split,"location_zh0710.png")
filename = 'D:/sensor_data/sensor/gps/location_zh0710_no_split.txt'
datewrite(no_split,filename,'path')
filename = 'D:/sensor_data/sensor/gps/location_zh0710_cluster.txt'
datewrite(cluster_dict.values(),filename,'lable')
|
normal
|
{
"blob_id": "99c839eddcbe985c81e709878d03c59e3be3c909",
"index": 293,
"step-1": "#coding=utf-8\n######################################### \n# dbscan: \n# 用法说明:读取文件\n# 生成路径文件及簇文件,输出分类准确率 \n######################################### \n\n\nfrom matplotlib.pyplot import *\nimport matplotlib.pyplot as plt\nfrom collections import defaultdict \nimport random\nfrom math import *\nimport numpy\nimport datetime\nfrom dateutil.parser import parse\nimport datetime\nimport time\n\n\n\ndef dataset(filename):\n #读取原始文件\n lines = open(filename,'r').readlines()\n l = len(lines)\n all_points = [] \n for i in range(l):\n if lines[i].strip():\n line = lines[i].split()\n time = line[0] +' '+ line[1]\n lat = float(line[4])\n lon = float(line[6])\n all_points.append([lat,lon,time])\n return all_points\n\ndef datarevise(all_points):\n #数据平滑处理\n point_new = []\n all_points1 = np.array(all_points)\n l = len(all_points)\n for i in range(2,l-3):\n lat_lon = np.array(all_points1[i-2:i+3,:-1],dtype = float).mean(0)\n point_new.append([lat_lon[0],lat_lon[1],all_points1[i][-1]])\n return point_new\n\n \ndef dist(p1, p2):\n #计算亮点之间的距离\n a = cos(p1[0])*cos(p2[0])\n b = sin(p1[0])*sin(p2[0])*cos(p2[1]-p1[1])\n if a+b >=1:\n return 0\n return acos(float(a+b))*6371*pi/180\n\ndef find_core(all_points,E,minPts):\n #查找核心点\n #输出:核心点,要绘制的点,非核心点\n other_points =[] \n core_points=[] \n plotted_points=[]\n for point in all_points:\n point.append(0) # 初始点标号为0\n total = 0 #计数:对每个点周围大于给定距离的点的个数\n for otherPoint in all_points:\n distance = dist(otherPoint,point)\n if distance <= E:\n total += 1\n if total > minPts:\n core_points.append(point)\n plotted_points.append(point)\n else:\n other_points.append(point)\n return core_points,plotted_points,other_points\n\ndef find_border(core_points,plotted_points,other_points,E):\n #在非核心点查找边界点\n #输出:边界点,要绘制的点\n border_points=[]\n for core in core_points:\n for other in other_points:\n if dist(core,other) <= E:#边界点的与核心点的距离小于E\n border_points.append(other)\n plotted_points.append(other)\n return border_points,plotted_points\n\n\ndef algorithm(all_points,core_points,border_points,plotted_points,E):\n # 返回簇,噪声点\n \n #将所有的核心点分成不同的簇\n cluster_label = 0\n for point in core_points:\n if point[-1] == 0:\n cluster_label += 1\n point[-1] = cluster_label\n for point2 in plotted_points:\n distance = dist(point2,point)\n if point2[-1] ==0 and distance <= E:\n point2[-1] =point[-1]\n #将点集标号类型写成字典格式 \n cluster_dict = {}\n for point in plotted_points:\n if cluster_dict.get(point[-1]) is None:\n cluster_dict[point[-1]] = [point[0:-1]]\n else:\n cluster_dict[point[-1]].append(point[0:-1])\n\n #将簇中各个点按时间排序\n cluster_dict_sort = {}\n for lable in cluster_dict:\n cluster_dict_sort.setdefault(lable,[])\n cl = np.array(cluster_dict[lable])\n cl_sort = cl[cl[:,-1].argsort()]\n cluster_dict_sort[lable] = cl_sort\n \n #噪声点,既不在边界点也不在核心点中 \n noise_points=[]\n for point in all_points:\n if point not in core_points and point not in border_points:\n noise_points.append(point[0:-1])\n return cluster_dict_sort,noise_points\n\n\n\ndef durtime(noise_points,difftime):\n # 输入:噪声点,时间间隔\n # 功能:分成不同的路径\n # 输出:路径点[[],[]]\n no = np.array(noise_points)\n no_sort = no[no[:,-1].argsort()]\n l = len(no_sort)\n k = [0]\n for i in range(l-1):\n diff_time = (no_sort[i+1][-1] - no_sort[i][-1]).seconds\n if diff_time > difftime:\n k.append(i+1)\n k.append(l)\n no_split = []\n for i in range(len(k)-1):\n no_split.append(no_sort[k[i]:k[i+1]])\n return no_split\n\ndef matplotshow(cluster_dict,no_split,name):\n #画出各个簇\n markers = ['or', 'ob', 'og', 'ok', '^r', '+r', 'sr', 'dr', '<r', 'pr']\n i=0\n for lable in 
cluster_dict:\n for j in cluster_dict[lable]:\n plot(j[0], j[1],markers[i])\n i += 1\n i = i%10\n print i \n #画出路径\n markers = ['r', 'b', 'g', 'k', 'c', 'y', 'm',]\n l =len(no_split)\n for i in range(l):\n path = np.array(no_split[i])\n plt.plot(path[:,0],path[:,1],markers[i%7])\n print i\n title(\" clusters created with E =\"+str(E)+\" Min Points=\"+str(minPts)+\" total points=\"+str(len(all_points))+\" noise Points = \"+ str(len(noise_points)))\n savefig(name)\n show()\n\n \ndef datewrite(no_split,filename,mark): \n f = open(filename,'w+')\n for path in no_split:\n f.write( str(mark) +'\\n')\n for no_path in path:\n f.write(str(list(no_path))+'\\n') \n f.close()\n\ndef datewrite1(no_split,filename,mark): \n f = open(filename,'w+')\n for path in no_split:\n for no_path in path:\n f.write( str(mark) +'\\n')\n for j in no_path:\n f.write(str(list(j))+'\\n') \n f.close()\n \nif __name__ == '__main__':\n filename = 'D:/sensor_data/sensor/gps/location_zh0710.txt'\n all_points_old = dataset(filename)\n all_points = datarevise(all_points_old)\n E,minPts = 0.1,10\n core_points,plotted_points,other_points = find_core(all_points,E,minPts)\n border_points,plotted_points = find_border(core_points,plotted_points,other_points,E)\n cluster_dict,noise_points = algorithm(all_points,border_points,core_points,plotted_points,E)\n difftime = 1200\n no_split = durtime(noise_points,difftime)\n matplotshow(cluster_dict,no_split,\"location_zh0710.png\")\n filename = 'D:/sensor_data/sensor/gps/location_zh0710_no_split.txt'\n datewrite(no_split,filename,'path')\n filename = 'D:/sensor_data/sensor/gps/location_zh0710_cluster.txt'\n datewrite(cluster_dict.values(),filename,'lable')\n\n\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
"""
Simple python script to help learn basic socket API
"""
import sys, socket
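# usage: pass the target HOSTNAME and PORT as the last two command-line arguments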
HOSTNAME = sys.argv[-2]
PORT = sys.argv[-1]
options = ( HOSTNAME, int(PORT) )
print(options)
print('creating socket...')
sock = socket.socket()
print('socket created')
print('connecting...')
sock.connect(options)
print('connected')
print('sending message...')
sock.send(b'hello')
print('sent message')
print('closing...')
sock.close()
print('closed')
|
normal
|
{
"blob_id": "e41b5ee0dff30cca51593e737420889bce8f419f",
"index": 8563,
"step-1": "\"\"\"\nSimple python script to help learn basic socket API\n\"\"\"\n\nimport sys, socket\n\nHOSTNAME = sys.argv[-2]\nPORT = sys.argv[-1]\n\noptions = ( HOSTNAME, int(PORT) )\nprint options\n\nprint 'creating socket...'\nsock = socket.socket()\nprint 'socket created'\n\nprint 'connecting...'\nsock.connect(options)\nprint 'connected'\n\nprint 'sending message...'\nsock.send('hello')\nprint 'sent message'\n\nprint 'closing...'\nsock.close()\nprint 'closed'",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
from rest_framework.generics import ListCreateAPIView, RetrieveUpdateDestroyAPIView
from .serializers import ConcertSerializer
from .models import Concert
from .permissions import IsOwnerOrReadOnly
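# ListCreateAPIView serves GET (list) and POST (create); RetrieveUpdateDestroyAPIView serves GET, PUT/PATCH and DELETE for a single concert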
class ConcertList(ListCreateAPIView):
queryset = Concert.objects.all()
serializer_class = ConcertSerializer
class ConcertDetail(RetrieveUpdateDestroyAPIView):
permission_classes = (IsOwnerOrReadOnly,)
queryset = Concert.objects.all()
serializer_class = ConcertSerializer
|
normal
|
{
"blob_id": "74ad2ec2cd7cd683a773b0affde4ab0b150d74c5",
"index": 4780,
"step-1": "<mask token>\n\n\nclass ConcertDetail(RetrieveUpdateDestroyAPIView):\n permission_classes = IsOwnerOrReadOnly,\n queryset = Concert.objects.all()\n serializer_class = ConcertSerializer\n",
"step-2": "<mask token>\n\n\nclass ConcertList(ListCreateAPIView):\n <mask token>\n <mask token>\n\n\nclass ConcertDetail(RetrieveUpdateDestroyAPIView):\n permission_classes = IsOwnerOrReadOnly,\n queryset = Concert.objects.all()\n serializer_class = ConcertSerializer\n",
"step-3": "<mask token>\n\n\nclass ConcertList(ListCreateAPIView):\n queryset = Concert.objects.all()\n serializer_class = ConcertSerializer\n\n\nclass ConcertDetail(RetrieveUpdateDestroyAPIView):\n permission_classes = IsOwnerOrReadOnly,\n queryset = Concert.objects.all()\n serializer_class = ConcertSerializer\n",
"step-4": "from rest_framework.generics import ListCreateAPIView, RetrieveUpdateDestroyAPIView\nfrom .serializers import ConcertSerializer\nfrom .models import Concert\nfrom .permissions import IsOwnerOrReadOnly\n\n\nclass ConcertList(ListCreateAPIView):\n queryset = Concert.objects.all()\n serializer_class = ConcertSerializer\n\n\nclass ConcertDetail(RetrieveUpdateDestroyAPIView):\n permission_classes = IsOwnerOrReadOnly,\n queryset = Concert.objects.all()\n serializer_class = ConcertSerializer\n",
"step-5": "from rest_framework.generics import ListCreateAPIView, RetrieveUpdateDestroyAPIView\nfrom .serializers import ConcertSerializer\nfrom .models import Concert\nfrom .permissions import IsOwnerOrReadOnly\n\nclass ConcertList(ListCreateAPIView):\n queryset = Concert.objects.all()\n serializer_class = ConcertSerializer\n\n\nclass ConcertDetail(RetrieveUpdateDestroyAPIView):\n permission_classes = (IsOwnerOrReadOnly,)\n queryset = Concert.objects.all()\n serializer_class = ConcertSerializer\n",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
<|reserved_special_token_0|>
class Plotter:
def __init__(self):
self.red_hex_code = '#ff0000'
def AlkDMIonStatsSplitPlot(self, df):
PV1_DataSets_lst = df[df['inst'] == 'PV1']['DataSet'].unique()
PV2_DataSets_lst = df[df['inst'] == 'PV2']['DataSet'].unique()
inst_sets = [PV1_DataSets_lst, PV2_DataSets_lst]
ax_title = ['Peg-BT PV1', 'Peg-BT PV2']
fig = plt.figure(figsize=(25, 9))
ax1 = fig.add_subplot(1, 2, 1)
ax2 = fig.add_subplot(1, 2, 2)
ax1.set_prop_cycle('color', plt.cm.spectral(np.linspace(0.1, 0.9, 4)))
ax2.set_prop_cycle('color', plt.cm.spectral(np.linspace(0.1, 0.9, 4)))
ax = [ax1, ax2]
for a in range(2):
ax[a].spines['right'].set_visible(False)
ax[a].spines['top'].set_visible(False)
ax[a].set_ylabel('Area Per Ion via Detector Measurement')
ax[a].set_xlabel('Alkane Standard\nSample Injection Count')
ax[a].set_title(ax_title[a])
for dset in inst_sets[a]:
df_sliced = df[df['DataSet'] == dset].copy()
offset = df_sliced['offset_volts'].iloc[2]
dv = df_sliced['Det_Volts'].iloc[2]
curve_label = 'Offset: +{v} v = {d} v'.format(v=offset, d=dv)
ax[a].plot(df_sliced['Cumulative_Inj'], df_sliced['ave_api'
], label=curve_label)
ax[a].legend(loc='center', bbox_to_anchor=(0.17, -0.1))
plt.savefig('DM_API_Analysis', bbox_inches='tight')
plt.show()
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def Manual_OFN20fg_IDL(self):
fig = plt.figure(figsize=(25, 9))
ax = fig.add_subplot(1, 1, 1)
ax.set_prop_cycle('color', plt.cm.spectral(np.linspace(0.25, 0.84, 2)))
xdata = [0, 150, 250, 350]
ydata = [[0.036614, 0.009674, 0.0056418, 0.004696], [0.0083151,
0.0044855, 0.0046082, 0.0033099]]
legendlbl_lst = ['Peg BT - PV1', 'Peg BT - PV2']
for s in range(len(ydata)):
ax.plot(xdata, ydata[s], label=legendlbl_lst[s])
ax.spines['right'].set_visible(False)
ax.spines['top'].set_visible(False)
ax.set_ylabel('IDL pg')
ax.set_xlabel('Optimized Detector Voltage Offset (volts)')
plt.legend()
plt.suptitle(
'IDL vs Detector Voltage Offset\nOFN 0.02 pg On Column\nQuant Mass = 271.99'
, fontsize=20)
plt.savefig('OFN_20fg_IDL_Plot', bbox_inches='tight')
def Manual_GO_Plot(self):
fig = plt.figure(figsize=(25, 9))
ax = fig.add_subplot(1, 1, 1)
ax.set_prop_cycle('color', plt.cm.spectral(np.linspace(0.25, 0.84, 2)))
xdata = [0, 150, 250, 350]
ydata = [[-7.7, 26.5, 42.8, 66.1], [-8, 4.1, 13.5, 48.4]]
legendlbl_lst = ['Peg BT - PV1', 'Peg BT - PV2']
for s in range(len(ydata)):
ax.plot(xdata, ydata[s], label=legendlbl_lst[s])
ax.spines['right'].set_visible(False)
ax.spines['top'].set_visible(False)
ax.set_ylabel('Change in Optimized Detector Voltage')
ax.set_xlabel('Optimized Detector Voltage Offset (volts)')
plt.legend()
plt.savefig('GO_Delta_Plot', bbox_inches='tight')
plt.show()
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Plotter:
def __init__(self):
self.red_hex_code = '#ff0000'
def AlkDMIonStatsSplitPlot(self, df):
PV1_DataSets_lst = df[df['inst'] == 'PV1']['DataSet'].unique()
PV2_DataSets_lst = df[df['inst'] == 'PV2']['DataSet'].unique()
inst_sets = [PV1_DataSets_lst, PV2_DataSets_lst]
ax_title = ['Peg-BT PV1', 'Peg-BT PV2']
fig = plt.figure(figsize=(25, 9))
ax1 = fig.add_subplot(1, 2, 1)
ax2 = fig.add_subplot(1, 2, 2)
ax1.set_prop_cycle('color', plt.cm.spectral(np.linspace(0.1, 0.9, 4)))
ax2.set_prop_cycle('color', plt.cm.spectral(np.linspace(0.1, 0.9, 4)))
ax = [ax1, ax2]
for a in range(2):
ax[a].spines['right'].set_visible(False)
ax[a].spines['top'].set_visible(False)
ax[a].set_ylabel('Area Per Ion via Detector Measurement')
ax[a].set_xlabel('Alkane Standard\nSample Injection Count')
ax[a].set_title(ax_title[a])
for dset in inst_sets[a]:
df_sliced = df[df['DataSet'] == dset].copy()
offset = df_sliced['offset_volts'].iloc[2]
dv = df_sliced['Det_Volts'].iloc[2]
curve_label = 'Offset: +{v} v = {d} v'.format(v=offset, d=dv)
ax[a].plot(df_sliced['Cumulative_Inj'], df_sliced['ave_api'
], label=curve_label)
ax[a].legend(loc='center', bbox_to_anchor=(0.17, -0.1))
plt.savefig('DM_API_Analysis', bbox_inches='tight')
plt.show()
<|reserved_special_token_0|>
def GenericIndividualPlotMaker(self, xdata_lst, ydata_lst,
legendlbl_lst, xlbl, ylbl, plot_title, png_filename,
legend_h_offset=1.25, legend_v_offset=0.75, legend_location='center'):
fig = plt.figure(figsize=(15.5, 9))
ax = fig.add_subplot(1, 1, 1)
for i in range(len(xdata_lst)):
ax.plot(xdata_lst[i], ydata_lst[i], color=self.color_codes[i],
label=legendlbl_lst[i])
ax.spines['right'].set_visible(False)
ax.spines['top'].set_visible(False)
plt.ylabel(ylbl)
plt.xlabel(xlbl)
plt.title(plot_title)
plt.legend(loc=legend_location, bbox_to_anchor=(legend_h_offset,
legend_v_offset))
plt.savefig(png_filename, bbox_inches='tight')
<|reserved_special_token_0|>
def Manual_OFN20fg_IDL(self):
fig = plt.figure(figsize=(25, 9))
ax = fig.add_subplot(1, 1, 1)
ax.set_prop_cycle('color', plt.cm.spectral(np.linspace(0.25, 0.84, 2)))
xdata = [0, 150, 250, 350]
ydata = [[0.036614, 0.009674, 0.0056418, 0.004696], [0.0083151,
0.0044855, 0.0046082, 0.0033099]]
legendlbl_lst = ['Peg BT - PV1', 'Peg BT - PV2']
for s in range(len(ydata)):
ax.plot(xdata, ydata[s], label=legendlbl_lst[s])
ax.spines['right'].set_visible(False)
ax.spines['top'].set_visible(False)
ax.set_ylabel('IDL pg')
ax.set_xlabel('Optimized Detector Voltage Offset (volts)')
plt.legend()
plt.suptitle(
'IDL vs Detector Voltage Offset\nOFN 0.02 pg On Column\nQuant Mass = 271.99'
, fontsize=20)
plt.savefig('OFN_20fg_IDL_Plot', bbox_inches='tight')
def Manual_GO_Plot(self):
fig = plt.figure(figsize=(25, 9))
ax = fig.add_subplot(1, 1, 1)
ax.set_prop_cycle('color', plt.cm.spectral(np.linspace(0.25, 0.84, 2)))
xdata = [0, 150, 250, 350]
ydata = [[-7.7, 26.5, 42.8, 66.1], [-8, 4.1, 13.5, 48.4]]
legendlbl_lst = ['Peg BT - PV1', 'Peg BT - PV2']
for s in range(len(ydata)):
ax.plot(xdata, ydata[s], label=legendlbl_lst[s])
ax.spines['right'].set_visible(False)
ax.spines['top'].set_visible(False)
ax.set_ylabel('Change in Optimized Detector Voltage')
ax.set_xlabel('Optimized Detector Voltage Offset (volts)')
plt.legend()
plt.savefig('GO_Delta_Plot', bbox_inches='tight')
plt.show()
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Plotter:
def __init__(self):
self.red_hex_code = '#ff0000'
def AlkDMIonStatsSplitPlot(self, df):
PV1_DataSets_lst = df[df['inst'] == 'PV1']['DataSet'].unique()
PV2_DataSets_lst = df[df['inst'] == 'PV2']['DataSet'].unique()
inst_sets = [PV1_DataSets_lst, PV2_DataSets_lst]
ax_title = ['Peg-BT PV1', 'Peg-BT PV2']
fig = plt.figure(figsize=(25, 9))
ax1 = fig.add_subplot(1, 2, 1)
ax2 = fig.add_subplot(1, 2, 2)
ax1.set_prop_cycle('color', plt.cm.spectral(np.linspace(0.1, 0.9, 4)))
ax2.set_prop_cycle('color', plt.cm.spectral(np.linspace(0.1, 0.9, 4)))
ax = [ax1, ax2]
for a in range(2):
ax[a].spines['right'].set_visible(False)
ax[a].spines['top'].set_visible(False)
ax[a].set_ylabel('Area Per Ion via Detector Measurement')
ax[a].set_xlabel('Alkane Standard\nSample Injection Count')
ax[a].set_title(ax_title[a])
for dset in inst_sets[a]:
df_sliced = df[df['DataSet'] == dset].copy()
offset = df_sliced['offset_volts'].iloc[2]
dv = df_sliced['Det_Volts'].iloc[2]
curve_label = 'Offset: +{v} v = {d} v'.format(v=offset, d=dv)
ax[a].plot(df_sliced['Cumulative_Inj'], df_sliced['ave_api'
], label=curve_label)
ax[a].legend(loc='center', bbox_to_anchor=(0.17, -0.1))
plt.savefig('DM_API_Analysis', bbox_inches='tight')
plt.show()
def AlkDMIonStatsPlot(self, df):
DataSets_lst = df['DataSet'].unique()
fig = plt.figure(figsize=(15.5, 9))
ax = fig.add_subplot(1, 1, 1)
ax.set_prop_cycle('color', plt.cm.spectral(np.linspace(0.1, 1.0, 8)))
for dset in DataSets_lst:
df_sliced = df[df['DataSet'] == dset].copy()
instrument = df_sliced['inst'].iloc[2]
offset = df_sliced['offset_volts'].iloc[2]
dv = df_sliced['Det_Volts'].iloc[2]
curve_label = 'Inst: {i} - Offset: +{v} v = {d} v'.format(i=
instrument, v=offset, d=dv)
ax.plot(df_sliced['Cumulative_Inj'], df_sliced['ave_api'],
label=curve_label)
ax.spines['right'].set_visible(False)
ax.spines['top'].set_visible(False)
        plt.ylabel('Ave. Area Per Ion')
plt.xlabel('Sample Injections')
plt.title(
"""Tracking Area Per Ion via Detector Measurement
Over ~48 Hours of Continuous Sample Acquisition"""
)
legend_h_offset, legend_v_offset = 1.25, 0.75
plt.legend(loc='center right', bbox_to_anchor=(legend_h_offset,
legend_v_offset))
plt.savefig('DM_API_Analysis', bbox_inches='tight')
plt.show()
def GenericIndividualPlotMaker(self, xdata_lst, ydata_lst,
legendlbl_lst, xlbl, ylbl, plot_title, png_filename,
legend_h_offset=1.25, legend_v_offset=0.75, legend_location='center'):
fig = plt.figure(figsize=(15.5, 9))
ax = fig.add_subplot(1, 1, 1)
for i in range(len(xdata_lst)):
ax.plot(xdata_lst[i], ydata_lst[i], color=self.color_codes[i],
label=legendlbl_lst[i])
ax.spines['right'].set_visible(False)
ax.spines['top'].set_visible(False)
plt.ylabel(ylbl)
plt.xlabel(xlbl)
plt.title(plot_title)
plt.legend(loc=legend_location, bbox_to_anchor=(legend_h_offset,
legend_v_offset))
plt.savefig(png_filename, bbox_inches='tight')
def GenericCombinedPlotMaker(self, xdata_lst, ydata_lst, legendlbl_lst,
xlbl, ylbl_lst, fig_title, png_filename, legend_h_offset=0.9,
legend_v_offset=2.4, legend_location='center'):
fig = plt.figure(figsize=(25, 9))
ax = []
for a in range(4):
ax.append(fig.add_subplot(2, 2, 1 + a))
ax[a].set_prop_cycle('color', plt.cm.spectral(np.linspace(0.25,
0.84, 2)))
for s in range(len(xdata_lst)):
ax[a].plot(xdata_lst[s], ydata_lst[a][s], label=
legendlbl_lst[s])
ax[a].spines['right'].set_visible(False)
ax[a].spines['top'].set_visible(False)
ax[a].set_ylabel(ylbl_lst[a])
if (a == 2 or a == 3) and s == 1:
plt.xlabel(xlbl)
elif (a == 0 or a == 1) and s == 1:
ax[a].set_xticklabels([])
ax[a].spines['bottom'].set_visible(False)
ax[a].xaxis.set_ticks_position('none')
plt.suptitle(fig_title, fontsize=20)
plt.legend(loc=legend_location, bbox_to_anchor=(legend_h_offset,
legend_v_offset))
plt.savefig(png_filename, bbox_inches='tight')
def Manual_OFN20fg_IDL(self):
fig = plt.figure(figsize=(25, 9))
ax = fig.add_subplot(1, 1, 1)
ax.set_prop_cycle('color', plt.cm.spectral(np.linspace(0.25, 0.84, 2)))
xdata = [0, 150, 250, 350]
ydata = [[0.036614, 0.009674, 0.0056418, 0.004696], [0.0083151,
0.0044855, 0.0046082, 0.0033099]]
legendlbl_lst = ['Peg BT - PV1', 'Peg BT - PV2']
for s in range(len(ydata)):
ax.plot(xdata, ydata[s], label=legendlbl_lst[s])
ax.spines['right'].set_visible(False)
ax.spines['top'].set_visible(False)
ax.set_ylabel('IDL pg')
ax.set_xlabel('Optimized Detector Voltage Offset (volts)')
plt.legend()
plt.suptitle(
'IDL vs Detector Voltage Offset\nOFN 0.02 pg On Column\nQuant Mass = 271.99'
, fontsize=20)
plt.savefig('OFN_20fg_IDL_Plot', bbox_inches='tight')
def Manual_GO_Plot(self):
fig = plt.figure(figsize=(25, 9))
ax = fig.add_subplot(1, 1, 1)
ax.set_prop_cycle('color', plt.cm.spectral(np.linspace(0.25, 0.84, 2)))
xdata = [0, 150, 250, 350]
ydata = [[-7.7, 26.5, 42.8, 66.1], [-8, 4.1, 13.5, 48.4]]
legendlbl_lst = ['Peg BT - PV1', 'Peg BT - PV2']
for s in range(len(ydata)):
ax.plot(xdata, ydata[s], label=legendlbl_lst[s])
ax.spines['right'].set_visible(False)
ax.spines['top'].set_visible(False)
ax.set_ylabel('Change in Optimized Detector Voltage')
ax.set_xlabel('Optimized Detector Voltage Offset (volts)')
plt.legend()
plt.savefig('GO_Delta_Plot', bbox_inches='tight')
plt.show()
<|reserved_special_token_1|>
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib
import numpy as np
class Plotter:
def __init__(self):
self.red_hex_code = '#ff0000'
def AlkDMIonStatsSplitPlot(self, df):
PV1_DataSets_lst = df[df['inst'] == 'PV1']['DataSet'].unique()
PV2_DataSets_lst = df[df['inst'] == 'PV2']['DataSet'].unique()
inst_sets = [PV1_DataSets_lst, PV2_DataSets_lst]
ax_title = ['Peg-BT PV1', 'Peg-BT PV2']
fig = plt.figure(figsize=(25, 9))
ax1 = fig.add_subplot(1, 2, 1)
ax2 = fig.add_subplot(1, 2, 2)
ax1.set_prop_cycle('color', plt.cm.spectral(np.linspace(0.1, 0.9, 4)))
ax2.set_prop_cycle('color', plt.cm.spectral(np.linspace(0.1, 0.9, 4)))
ax = [ax1, ax2]
for a in range(2):
ax[a].spines['right'].set_visible(False)
ax[a].spines['top'].set_visible(False)
ax[a].set_ylabel('Area Per Ion via Detector Measurement')
ax[a].set_xlabel('Alkane Standard\nSample Injection Count')
ax[a].set_title(ax_title[a])
for dset in inst_sets[a]:
df_sliced = df[df['DataSet'] == dset].copy()
offset = df_sliced['offset_volts'].iloc[2]
dv = df_sliced['Det_Volts'].iloc[2]
curve_label = 'Offset: +{v} v = {d} v'.format(v=offset, d=dv)
ax[a].plot(df_sliced['Cumulative_Inj'], df_sliced['ave_api'
], label=curve_label)
ax[a].legend(loc='center', bbox_to_anchor=(0.17, -0.1))
plt.savefig('DM_API_Analysis', bbox_inches='tight')
plt.show()
def AlkDMIonStatsPlot(self, df):
DataSets_lst = df['DataSet'].unique()
fig = plt.figure(figsize=(15.5, 9))
ax = fig.add_subplot(1, 1, 1)
ax.set_prop_cycle('color', plt.cm.spectral(np.linspace(0.1, 1.0, 8)))
for dset in DataSets_lst:
df_sliced = df[df['DataSet'] == dset].copy()
instrument = df_sliced['inst'].iloc[2]
offset = df_sliced['offset_volts'].iloc[2]
dv = df_sliced['Det_Volts'].iloc[2]
curve_label = 'Inst: {i} - Offset: +{v} v = {d} v'.format(i=
instrument, v=offset, d=dv)
ax.plot(df_sliced['Cumulative_Inj'], df_sliced['ave_api'],
label=curve_label)
ax.spines['right'].set_visible(False)
ax.spines['top'].set_visible(False)
        plt.ylabel('Ave. Area Per Ion')
plt.xlabel('Sample Injections')
plt.title(
"""Tracking Area Per Ion via Detector Measurement
Over ~48 Hours of Continuous Sample Acquisition"""
)
legend_h_offset, legend_v_offset = 1.25, 0.75
plt.legend(loc='center right', bbox_to_anchor=(legend_h_offset,
legend_v_offset))
plt.savefig('DM_API_Analysis', bbox_inches='tight')
plt.show()
def GenericIndividualPlotMaker(self, xdata_lst, ydata_lst,
legendlbl_lst, xlbl, ylbl, plot_title, png_filename,
legend_h_offset=1.25, legend_v_offset=0.75, legend_location='center'):
fig = plt.figure(figsize=(15.5, 9))
ax = fig.add_subplot(1, 1, 1)
for i in range(len(xdata_lst)):
ax.plot(xdata_lst[i], ydata_lst[i], color=self.color_codes[i],
label=legendlbl_lst[i])
ax.spines['right'].set_visible(False)
ax.spines['top'].set_visible(False)
plt.ylabel(ylbl)
plt.xlabel(xlbl)
plt.title(plot_title)
plt.legend(loc=legend_location, bbox_to_anchor=(legend_h_offset,
legend_v_offset))
plt.savefig(png_filename, bbox_inches='tight')
def GenericCombinedPlotMaker(self, xdata_lst, ydata_lst, legendlbl_lst,
xlbl, ylbl_lst, fig_title, png_filename, legend_h_offset=0.9,
legend_v_offset=2.4, legend_location='center'):
fig = plt.figure(figsize=(25, 9))
ax = []
for a in range(4):
ax.append(fig.add_subplot(2, 2, 1 + a))
ax[a].set_prop_cycle('color', plt.cm.spectral(np.linspace(0.25,
0.84, 2)))
for s in range(len(xdata_lst)):
ax[a].plot(xdata_lst[s], ydata_lst[a][s], label=
legendlbl_lst[s])
ax[a].spines['right'].set_visible(False)
ax[a].spines['top'].set_visible(False)
ax[a].set_ylabel(ylbl_lst[a])
if (a == 2 or a == 3) and s == 1:
plt.xlabel(xlbl)
elif (a == 0 or a == 1) and s == 1:
ax[a].set_xticklabels([])
ax[a].spines['bottom'].set_visible(False)
ax[a].xaxis.set_ticks_position('none')
plt.suptitle(fig_title, fontsize=20)
plt.legend(loc=legend_location, bbox_to_anchor=(legend_h_offset,
legend_v_offset))
plt.savefig(png_filename, bbox_inches='tight')
def Manual_OFN20fg_IDL(self):
fig = plt.figure(figsize=(25, 9))
ax = fig.add_subplot(1, 1, 1)
ax.set_prop_cycle('color', plt.cm.spectral(np.linspace(0.25, 0.84, 2)))
xdata = [0, 150, 250, 350]
ydata = [[0.036614, 0.009674, 0.0056418, 0.004696], [0.0083151,
0.0044855, 0.0046082, 0.0033099]]
legendlbl_lst = ['Peg BT - PV1', 'Peg BT - PV2']
for s in range(len(ydata)):
ax.plot(xdata, ydata[s], label=legendlbl_lst[s])
ax.spines['right'].set_visible(False)
ax.spines['top'].set_visible(False)
ax.set_ylabel('IDL pg')
ax.set_xlabel('Optimized Detector Voltage Offset (volts)')
plt.legend()
plt.suptitle(
'IDL vs Detector Voltage Offset\nOFN 0.02 pg On Column\nQuant Mass = 271.99'
, fontsize=20)
plt.savefig('OFN_20fg_IDL_Plot', bbox_inches='tight')
def Manual_GO_Plot(self):
fig = plt.figure(figsize=(25, 9))
ax = fig.add_subplot(1, 1, 1)
ax.set_prop_cycle('color', plt.cm.spectral(np.linspace(0.25, 0.84, 2)))
xdata = [0, 150, 250, 350]
ydata = [[-7.7, 26.5, 42.8, 66.1], [-8, 4.1, 13.5, 48.4]]
legendlbl_lst = ['Peg BT - PV1', 'Peg BT - PV2']
for s in range(len(ydata)):
ax.plot(xdata, ydata[s], label=legendlbl_lst[s])
ax.spines['right'].set_visible(False)
ax.spines['top'].set_visible(False)
ax.set_ylabel('Change in Optimized Detector Voltage')
ax.set_xlabel('Optimized Detector Voltage Offset (volts)')
plt.legend()
plt.savefig('GO_Delta_Plot', bbox_inches='tight')
plt.show()
<|reserved_special_token_1|>
import pandas as pd #@UnusedImport
import matplotlib.pyplot as plt
import matplotlib #@UnusedImport
import numpy as np #@UnusedImport
class Plotter():
def __init__(self):
self.red_hex_code = '#ff0000'
def AlkDMIonStatsSplitPlot(self, df):
PV1_DataSets_lst = df[df['inst'] == 'PV1']['DataSet'].unique()
PV2_DataSets_lst = df[df['inst'] == 'PV2']['DataSet'].unique()
inst_sets = [PV1_DataSets_lst,PV2_DataSets_lst]
ax_title = ['Peg-BT PV1', 'Peg-BT PV2']
fig = plt.figure(figsize=(25,9))
ax1 = fig.add_subplot(1,2,1)
ax2 = fig.add_subplot(1,2,2)
ax1.set_prop_cycle('color',plt.cm.spectral(np.linspace(0.1,0.9,4))) #@UndefinedVariable
ax2.set_prop_cycle('color',plt.cm.spectral(np.linspace(0.1,0.9,4))) #@UndefinedVariable
ax = [ax1,ax2]
for a in range(2):
ax[a].spines['right'].set_visible(False)
ax[a].spines['top'].set_visible(False)
ax[a].set_ylabel('Area Per Ion via Detector Measurement')
ax[a].set_xlabel('Alkane Standard\nSample Injection Count')
ax[a].set_title(ax_title[a])
for dset in inst_sets[a]:
df_sliced = df[df['DataSet'] == dset].copy()
offset = df_sliced['offset_volts'].iloc[2]
dv = df_sliced['Det_Volts'].iloc[2]
curve_label = 'Offset: +{v} v = {d} v'.format(v=offset, d=dv)
ax[a].plot(df_sliced['Cumulative_Inj'], df_sliced['ave_api'], label=curve_label)
ax[a].legend(loc='center', bbox_to_anchor=(0.17,-0.1))
# plt.suptitle('Tracking Area Per Ion via Detector Measurement\nOver ~48 Hours of Continuous Sample Acquisition', fontsize=14)
plt.savefig('DM_API_Analysis', bbox_inches='tight')
plt.show()
def AlkDMIonStatsPlot(self, df):
DataSets_lst = df['DataSet'].unique()
fig = plt.figure(figsize=(15.5,9))
ax = fig.add_subplot(1,1,1)
ax.set_prop_cycle('color',plt.cm.spectral(np.linspace(0.1,1.00,8))) #@UndefinedVariable
for dset in DataSets_lst:
df_sliced = df[df['DataSet'] == dset].copy()
instrument = df_sliced['inst'].iloc[2]
offset = df_sliced['offset_volts'].iloc[2]
dv = df_sliced['Det_Volts'].iloc[2]
curve_label = 'Inst: {i} - Offset: +{v} v = {d} v'.format(i=instrument, v=offset, d=dv)
ax.plot(df_sliced['Cumulative_Inj'], df_sliced['ave_api'], label=curve_label)
ax.spines['right'].set_visible(False)
ax.spines['top'].set_visible(False)
		plt.ylabel('Ave. Area Per Ion')
plt.xlabel('Sample Injections')
plt.title('Tracking Area Per Ion via Detector Measurement\nOver ~48 Hours of Continuous Sample Acquisition')
legend_h_offset, legend_v_offset = 1.25, 0.75
plt.legend(loc='center right', bbox_to_anchor=(legend_h_offset, legend_v_offset))
plt.savefig('DM_API_Analysis', bbox_inches='tight')
plt.show()
def GenericIndividualPlotMaker(self, xdata_lst, ydata_lst, legendlbl_lst, xlbl, ylbl, plot_title, png_filename, legend_h_offset=1.25, legend_v_offset=0.75, legend_location='center'):
# xdata & ydata: both are a list of lists each containing the corresponding axis data. These are the requirement of these two
# data set to prevent an error:
# Sublists with the same index are a matching x vs y set that will be plotted. They MUST be the same length to prevent an error.
# There must be the same number of sub lists to prevent an error.
# legendlbl_lst: a list of legend labels for each x vs y plot. Again there must be the same number of items in this list as x/y pairs.
		# The rest are self-explanatory
fig = plt.figure(figsize=(15.5,9))
ax = fig.add_subplot(1,1,1)
for i in range(len(xdata_lst)):
ax.plot(xdata_lst[i], ydata_lst[i], color=self.color_codes[i], label=legendlbl_lst[i])
ax.spines['right'].set_visible(False)
ax.spines['top'].set_visible(False)
plt.ylabel(ylbl)
plt.xlabel(xlbl)
plt.title(plot_title)
plt.legend(loc=legend_location, bbox_to_anchor=(legend_h_offset, legend_v_offset))
plt.savefig(png_filename, bbox_inches='tight')
# (x_data, all_y_data, legendlbl_lst, xlbl, plot_titles, figure_title, all_png_filenames)
def GenericCombinedPlotMaker(self, xdata_lst, ydata_lst, legendlbl_lst, xlbl, ylbl_lst, fig_title, png_filename, legend_h_offset=0.9, legend_v_offset=2.4, legend_location='center'):
# xdata_lst: is a list of lists each containing the corresponding x-axis data. The x-axis data is the same for all ax_n objects
# Generic example: [Series_1_x-axis_data_lst, Series_n_x-axis_data_lst...]
# ydata_lst: is a list of lists of lists containing all the y-axis data.
# Generic example: [ax_1[Series_1_y-axis_data_lst, Series_n_y-axis_data_lst...], ax_n[ax_1[Series_1_y-axis_data_lst, Series_n_y-axis_data_lst...]...]
# data set to prevent an error:
# Sublists with the same index are a matching x vs y set that will be plotted. They MUST be the same length to prevent an error.
# There must be the same number of sub lists to prevent an error.
# legendlbl_lst: a list of legend labels for each x vs y plot. Again there must be the same number of items in this list as x/y pairs.
		# The rest are self-explanatory
fig = plt.figure(figsize=(25,9))
ax = []
for a in range(4):
ax.append(fig.add_subplot(2,2,1+a))
ax[a].set_prop_cycle('color',plt.cm.spectral(np.linspace(0.25,0.84,2))) #@UndefinedVariable
for s in range(len(xdata_lst)):
ax[a].plot(xdata_lst[s], ydata_lst[a][s], label=legendlbl_lst[s])
ax[a].spines['right'].set_visible(False)
ax[a].spines['top'].set_visible(False)
ax[a].set_ylabel(ylbl_lst[a])
if (a == 2 or a == 3) and s == 1:
plt.xlabel(xlbl)
elif (a == 0 or a == 1) and s == 1:
ax[a].set_xticklabels([])
ax[a].spines['bottom'].set_visible(False)
ax[a].xaxis.set_ticks_position('none')
plt.suptitle(fig_title, fontsize=20)
plt.legend(loc=legend_location, bbox_to_anchor=(legend_h_offset, legend_v_offset))
plt.savefig(png_filename, bbox_inches='tight')
def Manual_OFN20fg_IDL(self):
fig = plt.figure(figsize=(25,9))
ax = fig.add_subplot(1,1,1)
ax.set_prop_cycle('color',plt.cm.spectral(np.linspace(0.25,0.84,2))) #@UndefinedVariable
xdata = [0,150,250,350]
ydata = [[0.036614, 0.009674, 0.0056418, 0.004696],[0.0083151, 0.0044855, 0.0046082, 0.0033099]]
legendlbl_lst = ['Peg BT - PV1', 'Peg BT - PV2']
for s in range(len(ydata)):
ax.plot(xdata, ydata[s], label=legendlbl_lst[s])
ax.spines['right'].set_visible(False)
ax.spines['top'].set_visible(False)
ax.set_ylabel('IDL pg')
ax.set_xlabel('Optimized Detector Voltage Offset (volts)')
plt.legend()
plt.suptitle('IDL vs Detector Voltage Offset\nOFN 0.02 pg On Column\nQuant Mass = 271.99', fontsize=20)
plt.savefig('OFN_20fg_IDL_Plot', bbox_inches='tight')
def Manual_GO_Plot(self):
fig = plt.figure(figsize=(25,9))
ax = fig.add_subplot(1,1,1)
ax.set_prop_cycle('color',plt.cm.spectral(np.linspace(0.25,0.84,2))) #@UndefinedVariable
xdata = [0,150,250,350]
ydata = [[-7.7, 26.5, 42.8, 66.1],[-8, 4.1, 13.5, 48.4]]
legendlbl_lst = ['Peg BT - PV1', 'Peg BT - PV2']
for s in range(len(ydata)):
ax.plot(xdata, ydata[s], label=legendlbl_lst[s])
ax.spines['right'].set_visible(False)
ax.spines['top'].set_visible(False)
ax.set_ylabel('Change in Optimized Detector Voltage')
ax.set_xlabel('Optimized Detector Voltage Offset (volts)')
plt.legend()
# plt.suptitle('Change in Optimized Detector Voltage\nFrom the Beginning to the End of a Data Set', fontsize=20)
plt.savefig('GO_Delta_Plot', bbox_inches='tight')
plt.show()
|
flexible
|
{
"blob_id": "81b920ab5417937dc0fc1c9675d393efc6a4d58d",
"index": 5453,
"step-1": "<mask token>\n\n\nclass Plotter:\n\n def __init__(self):\n self.red_hex_code = '#ff0000'\n\n def AlkDMIonStatsSplitPlot(self, df):\n PV1_DataSets_lst = df[df['inst'] == 'PV1']['DataSet'].unique()\n PV2_DataSets_lst = df[df['inst'] == 'PV2']['DataSet'].unique()\n inst_sets = [PV1_DataSets_lst, PV2_DataSets_lst]\n ax_title = ['Peg-BT PV1', 'Peg-BT PV2']\n fig = plt.figure(figsize=(25, 9))\n ax1 = fig.add_subplot(1, 2, 1)\n ax2 = fig.add_subplot(1, 2, 2)\n ax1.set_prop_cycle('color', plt.cm.spectral(np.linspace(0.1, 0.9, 4)))\n ax2.set_prop_cycle('color', plt.cm.spectral(np.linspace(0.1, 0.9, 4)))\n ax = [ax1, ax2]\n for a in range(2):\n ax[a].spines['right'].set_visible(False)\n ax[a].spines['top'].set_visible(False)\n ax[a].set_ylabel('Area Per Ion via Detector Measurement')\n ax[a].set_xlabel('Alkane Standard\\nSample Injection Count')\n ax[a].set_title(ax_title[a])\n for dset in inst_sets[a]:\n df_sliced = df[df['DataSet'] == dset].copy()\n offset = df_sliced['offset_volts'].iloc[2]\n dv = df_sliced['Det_Volts'].iloc[2]\n curve_label = 'Offset: +{v} v = {d} v'.format(v=offset, d=dv)\n ax[a].plot(df_sliced['Cumulative_Inj'], df_sliced['ave_api'\n ], label=curve_label)\n ax[a].legend(loc='center', bbox_to_anchor=(0.17, -0.1))\n plt.savefig('DM_API_Analysis', bbox_inches='tight')\n plt.show()\n <mask token>\n <mask token>\n <mask token>\n\n def Manual_OFN20fg_IDL(self):\n fig = plt.figure(figsize=(25, 9))\n ax = fig.add_subplot(1, 1, 1)\n ax.set_prop_cycle('color', plt.cm.spectral(np.linspace(0.25, 0.84, 2)))\n xdata = [0, 150, 250, 350]\n ydata = [[0.036614, 0.009674, 0.0056418, 0.004696], [0.0083151, \n 0.0044855, 0.0046082, 0.0033099]]\n legendlbl_lst = ['Peg BT - PV1', 'Peg BT - PV2']\n for s in range(len(ydata)):\n ax.plot(xdata, ydata[s], label=legendlbl_lst[s])\n ax.spines['right'].set_visible(False)\n ax.spines['top'].set_visible(False)\n ax.set_ylabel('IDL pg')\n ax.set_xlabel('Optimized Detector Voltage Offset (volts)')\n plt.legend()\n plt.suptitle(\n 'IDL vs Detector Voltage Offset\\nOFN 0.02 pg On Column\\nQuant Mass = 271.99'\n , fontsize=20)\n plt.savefig('OFN_20fg_IDL_Plot', bbox_inches='tight')\n\n def Manual_GO_Plot(self):\n fig = plt.figure(figsize=(25, 9))\n ax = fig.add_subplot(1, 1, 1)\n ax.set_prop_cycle('color', plt.cm.spectral(np.linspace(0.25, 0.84, 2)))\n xdata = [0, 150, 250, 350]\n ydata = [[-7.7, 26.5, 42.8, 66.1], [-8, 4.1, 13.5, 48.4]]\n legendlbl_lst = ['Peg BT - PV1', 'Peg BT - PV2']\n for s in range(len(ydata)):\n ax.plot(xdata, ydata[s], label=legendlbl_lst[s])\n ax.spines['right'].set_visible(False)\n ax.spines['top'].set_visible(False)\n ax.set_ylabel('Change in Optimized Detector Voltage')\n ax.set_xlabel('Optimized Detector Voltage Offset (volts)')\n plt.legend()\n plt.savefig('GO_Delta_Plot', bbox_inches='tight')\n plt.show()\n",
"step-2": "<mask token>\n\n\nclass Plotter:\n\n def __init__(self):\n self.red_hex_code = '#ff0000'\n\n def AlkDMIonStatsSplitPlot(self, df):\n PV1_DataSets_lst = df[df['inst'] == 'PV1']['DataSet'].unique()\n PV2_DataSets_lst = df[df['inst'] == 'PV2']['DataSet'].unique()\n inst_sets = [PV1_DataSets_lst, PV2_DataSets_lst]\n ax_title = ['Peg-BT PV1', 'Peg-BT PV2']\n fig = plt.figure(figsize=(25, 9))\n ax1 = fig.add_subplot(1, 2, 1)\n ax2 = fig.add_subplot(1, 2, 2)\n ax1.set_prop_cycle('color', plt.cm.spectral(np.linspace(0.1, 0.9, 4)))\n ax2.set_prop_cycle('color', plt.cm.spectral(np.linspace(0.1, 0.9, 4)))\n ax = [ax1, ax2]\n for a in range(2):\n ax[a].spines['right'].set_visible(False)\n ax[a].spines['top'].set_visible(False)\n ax[a].set_ylabel('Area Per Ion via Detector Measurement')\n ax[a].set_xlabel('Alkane Standard\\nSample Injection Count')\n ax[a].set_title(ax_title[a])\n for dset in inst_sets[a]:\n df_sliced = df[df['DataSet'] == dset].copy()\n offset = df_sliced['offset_volts'].iloc[2]\n dv = df_sliced['Det_Volts'].iloc[2]\n curve_label = 'Offset: +{v} v = {d} v'.format(v=offset, d=dv)\n ax[a].plot(df_sliced['Cumulative_Inj'], df_sliced['ave_api'\n ], label=curve_label)\n ax[a].legend(loc='center', bbox_to_anchor=(0.17, -0.1))\n plt.savefig('DM_API_Analysis', bbox_inches='tight')\n plt.show()\n <mask token>\n\n def GenericIndividualPlotMaker(self, xdata_lst, ydata_lst,\n legendlbl_lst, xlbl, ylbl, plot_title, png_filename,\n legend_h_offset=1.25, legend_v_offset=0.75, legend_location='center'):\n fig = plt.figure(figsize=(15.5, 9))\n ax = fig.add_subplot(1, 1, 1)\n for i in range(len(xdata_lst)):\n ax.plot(xdata_lst[i], ydata_lst[i], color=self.color_codes[i],\n label=legendlbl_lst[i])\n ax.spines['right'].set_visible(False)\n ax.spines['top'].set_visible(False)\n plt.ylabel(ylbl)\n plt.xlabel(xlbl)\n plt.title(plot_title)\n plt.legend(loc=legend_location, bbox_to_anchor=(legend_h_offset,\n legend_v_offset))\n plt.savefig(png_filename, bbox_inches='tight')\n <mask token>\n\n def Manual_OFN20fg_IDL(self):\n fig = plt.figure(figsize=(25, 9))\n ax = fig.add_subplot(1, 1, 1)\n ax.set_prop_cycle('color', plt.cm.spectral(np.linspace(0.25, 0.84, 2)))\n xdata = [0, 150, 250, 350]\n ydata = [[0.036614, 0.009674, 0.0056418, 0.004696], [0.0083151, \n 0.0044855, 0.0046082, 0.0033099]]\n legendlbl_lst = ['Peg BT - PV1', 'Peg BT - PV2']\n for s in range(len(ydata)):\n ax.plot(xdata, ydata[s], label=legendlbl_lst[s])\n ax.spines['right'].set_visible(False)\n ax.spines['top'].set_visible(False)\n ax.set_ylabel('IDL pg')\n ax.set_xlabel('Optimized Detector Voltage Offset (volts)')\n plt.legend()\n plt.suptitle(\n 'IDL vs Detector Voltage Offset\\nOFN 0.02 pg On Column\\nQuant Mass = 271.99'\n , fontsize=20)\n plt.savefig('OFN_20fg_IDL_Plot', bbox_inches='tight')\n\n def Manual_GO_Plot(self):\n fig = plt.figure(figsize=(25, 9))\n ax = fig.add_subplot(1, 1, 1)\n ax.set_prop_cycle('color', plt.cm.spectral(np.linspace(0.25, 0.84, 2)))\n xdata = [0, 150, 250, 350]\n ydata = [[-7.7, 26.5, 42.8, 66.1], [-8, 4.1, 13.5, 48.4]]\n legendlbl_lst = ['Peg BT - PV1', 'Peg BT - PV2']\n for s in range(len(ydata)):\n ax.plot(xdata, ydata[s], label=legendlbl_lst[s])\n ax.spines['right'].set_visible(False)\n ax.spines['top'].set_visible(False)\n ax.set_ylabel('Change in Optimized Detector Voltage')\n ax.set_xlabel('Optimized Detector Voltage Offset (volts)')\n plt.legend()\n plt.savefig('GO_Delta_Plot', bbox_inches='tight')\n plt.show()\n",
"step-3": "<mask token>\n\n\nclass Plotter:\n\n def __init__(self):\n self.red_hex_code = '#ff0000'\n\n def AlkDMIonStatsSplitPlot(self, df):\n PV1_DataSets_lst = df[df['inst'] == 'PV1']['DataSet'].unique()\n PV2_DataSets_lst = df[df['inst'] == 'PV2']['DataSet'].unique()\n inst_sets = [PV1_DataSets_lst, PV2_DataSets_lst]\n ax_title = ['Peg-BT PV1', 'Peg-BT PV2']\n fig = plt.figure(figsize=(25, 9))\n ax1 = fig.add_subplot(1, 2, 1)\n ax2 = fig.add_subplot(1, 2, 2)\n ax1.set_prop_cycle('color', plt.cm.spectral(np.linspace(0.1, 0.9, 4)))\n ax2.set_prop_cycle('color', plt.cm.spectral(np.linspace(0.1, 0.9, 4)))\n ax = [ax1, ax2]\n for a in range(2):\n ax[a].spines['right'].set_visible(False)\n ax[a].spines['top'].set_visible(False)\n ax[a].set_ylabel('Area Per Ion via Detector Measurement')\n ax[a].set_xlabel('Alkane Standard\\nSample Injection Count')\n ax[a].set_title(ax_title[a])\n for dset in inst_sets[a]:\n df_sliced = df[df['DataSet'] == dset].copy()\n offset = df_sliced['offset_volts'].iloc[2]\n dv = df_sliced['Det_Volts'].iloc[2]\n curve_label = 'Offset: +{v} v = {d} v'.format(v=offset, d=dv)\n ax[a].plot(df_sliced['Cumulative_Inj'], df_sliced['ave_api'\n ], label=curve_label)\n ax[a].legend(loc='center', bbox_to_anchor=(0.17, -0.1))\n plt.savefig('DM_API_Analysis', bbox_inches='tight')\n plt.show()\n\n def AlkDMIonStatsPlot(self, df):\n DataSets_lst = df['DataSet'].unique()\n fig = plt.figure(figsize=(15.5, 9))\n ax = fig.add_subplot(1, 1, 1)\n ax.set_prop_cycle('color', plt.cm.spectral(np.linspace(0.1, 1.0, 8)))\n for dset in DataSets_lst:\n df_sliced = df[df['DataSet'] == dset].copy()\n instrument = df_sliced['inst'].iloc[2]\n offset = df_sliced['offset_volts'].iloc[2]\n dv = df_sliced['Det_Volts'].iloc[2]\n curve_label = 'Inst: {i} - Offset: +{v} v = {d} v'.format(i=\n instrument, v=offset, d=dv)\n ax.plot(df_sliced['Cumulative_Inj'], df_sliced['ave_api'],\n label=curve_label)\n ax.spines['right'].set_visible(False)\n ax.spines['top'].set_visible(False)\n plt.ylabel('Ave. 
Aera Per Ion')\n plt.xlabel('Sample Injections')\n plt.title(\n \"\"\"Tracking Area Per Ion via Detector Measurement\nOver ~48 Hours of Continuous Sample Acquisition\"\"\"\n )\n legend_h_offset, legend_v_offset = 1.25, 0.75\n plt.legend(loc='center right', bbox_to_anchor=(legend_h_offset,\n legend_v_offset))\n plt.savefig('DM_API_Analysis', bbox_inches='tight')\n plt.show()\n\n def GenericIndividualPlotMaker(self, xdata_lst, ydata_lst,\n legendlbl_lst, xlbl, ylbl, plot_title, png_filename,\n legend_h_offset=1.25, legend_v_offset=0.75, legend_location='center'):\n fig = plt.figure(figsize=(15.5, 9))\n ax = fig.add_subplot(1, 1, 1)\n for i in range(len(xdata_lst)):\n ax.plot(xdata_lst[i], ydata_lst[i], color=self.color_codes[i],\n label=legendlbl_lst[i])\n ax.spines['right'].set_visible(False)\n ax.spines['top'].set_visible(False)\n plt.ylabel(ylbl)\n plt.xlabel(xlbl)\n plt.title(plot_title)\n plt.legend(loc=legend_location, bbox_to_anchor=(legend_h_offset,\n legend_v_offset))\n plt.savefig(png_filename, bbox_inches='tight')\n\n def GenericCombinedPlotMaker(self, xdata_lst, ydata_lst, legendlbl_lst,\n xlbl, ylbl_lst, fig_title, png_filename, legend_h_offset=0.9,\n legend_v_offset=2.4, legend_location='center'):\n fig = plt.figure(figsize=(25, 9))\n ax = []\n for a in range(4):\n ax.append(fig.add_subplot(2, 2, 1 + a))\n ax[a].set_prop_cycle('color', plt.cm.spectral(np.linspace(0.25,\n 0.84, 2)))\n for s in range(len(xdata_lst)):\n ax[a].plot(xdata_lst[s], ydata_lst[a][s], label=\n legendlbl_lst[s])\n ax[a].spines['right'].set_visible(False)\n ax[a].spines['top'].set_visible(False)\n ax[a].set_ylabel(ylbl_lst[a])\n if (a == 2 or a == 3) and s == 1:\n plt.xlabel(xlbl)\n elif (a == 0 or a == 1) and s == 1:\n ax[a].set_xticklabels([])\n ax[a].spines['bottom'].set_visible(False)\n ax[a].xaxis.set_ticks_position('none')\n plt.suptitle(fig_title, fontsize=20)\n plt.legend(loc=legend_location, bbox_to_anchor=(legend_h_offset,\n legend_v_offset))\n plt.savefig(png_filename, bbox_inches='tight')\n\n def Manual_OFN20fg_IDL(self):\n fig = plt.figure(figsize=(25, 9))\n ax = fig.add_subplot(1, 1, 1)\n ax.set_prop_cycle('color', plt.cm.spectral(np.linspace(0.25, 0.84, 2)))\n xdata = [0, 150, 250, 350]\n ydata = [[0.036614, 0.009674, 0.0056418, 0.004696], [0.0083151, \n 0.0044855, 0.0046082, 0.0033099]]\n legendlbl_lst = ['Peg BT - PV1', 'Peg BT - PV2']\n for s in range(len(ydata)):\n ax.plot(xdata, ydata[s], label=legendlbl_lst[s])\n ax.spines['right'].set_visible(False)\n ax.spines['top'].set_visible(False)\n ax.set_ylabel('IDL pg')\n ax.set_xlabel('Optimized Detector Voltage Offset (volts)')\n plt.legend()\n plt.suptitle(\n 'IDL vs Detector Voltage Offset\\nOFN 0.02 pg On Column\\nQuant Mass = 271.99'\n , fontsize=20)\n plt.savefig('OFN_20fg_IDL_Plot', bbox_inches='tight')\n\n def Manual_GO_Plot(self):\n fig = plt.figure(figsize=(25, 9))\n ax = fig.add_subplot(1, 1, 1)\n ax.set_prop_cycle('color', plt.cm.spectral(np.linspace(0.25, 0.84, 2)))\n xdata = [0, 150, 250, 350]\n ydata = [[-7.7, 26.5, 42.8, 66.1], [-8, 4.1, 13.5, 48.4]]\n legendlbl_lst = ['Peg BT - PV1', 'Peg BT - PV2']\n for s in range(len(ydata)):\n ax.plot(xdata, ydata[s], label=legendlbl_lst[s])\n ax.spines['right'].set_visible(False)\n ax.spines['top'].set_visible(False)\n ax.set_ylabel('Change in Optimized Detector Voltage')\n ax.set_xlabel('Optimized Detector Voltage Offset (volts)')\n plt.legend()\n plt.savefig('GO_Delta_Plot', bbox_inches='tight')\n plt.show()\n",
"step-4": "import pandas as pd\nimport matplotlib.pyplot as plt\nimport matplotlib\nimport numpy as np\n\n\nclass Plotter:\n\n def __init__(self):\n self.red_hex_code = '#ff0000'\n\n def AlkDMIonStatsSplitPlot(self, df):\n PV1_DataSets_lst = df[df['inst'] == 'PV1']['DataSet'].unique()\n PV2_DataSets_lst = df[df['inst'] == 'PV2']['DataSet'].unique()\n inst_sets = [PV1_DataSets_lst, PV2_DataSets_lst]\n ax_title = ['Peg-BT PV1', 'Peg-BT PV2']\n fig = plt.figure(figsize=(25, 9))\n ax1 = fig.add_subplot(1, 2, 1)\n ax2 = fig.add_subplot(1, 2, 2)\n ax1.set_prop_cycle('color', plt.cm.spectral(np.linspace(0.1, 0.9, 4)))\n ax2.set_prop_cycle('color', plt.cm.spectral(np.linspace(0.1, 0.9, 4)))\n ax = [ax1, ax2]\n for a in range(2):\n ax[a].spines['right'].set_visible(False)\n ax[a].spines['top'].set_visible(False)\n ax[a].set_ylabel('Area Per Ion via Detector Measurement')\n ax[a].set_xlabel('Alkane Standard\\nSample Injection Count')\n ax[a].set_title(ax_title[a])\n for dset in inst_sets[a]:\n df_sliced = df[df['DataSet'] == dset].copy()\n offset = df_sliced['offset_volts'].iloc[2]\n dv = df_sliced['Det_Volts'].iloc[2]\n curve_label = 'Offset: +{v} v = {d} v'.format(v=offset, d=dv)\n ax[a].plot(df_sliced['Cumulative_Inj'], df_sliced['ave_api'\n ], label=curve_label)\n ax[a].legend(loc='center', bbox_to_anchor=(0.17, -0.1))\n plt.savefig('DM_API_Analysis', bbox_inches='tight')\n plt.show()\n\n def AlkDMIonStatsPlot(self, df):\n DataSets_lst = df['DataSet'].unique()\n fig = plt.figure(figsize=(15.5, 9))\n ax = fig.add_subplot(1, 1, 1)\n ax.set_prop_cycle('color', plt.cm.spectral(np.linspace(0.1, 1.0, 8)))\n for dset in DataSets_lst:\n df_sliced = df[df['DataSet'] == dset].copy()\n instrument = df_sliced['inst'].iloc[2]\n offset = df_sliced['offset_volts'].iloc[2]\n dv = df_sliced['Det_Volts'].iloc[2]\n curve_label = 'Inst: {i} - Offset: +{v} v = {d} v'.format(i=\n instrument, v=offset, d=dv)\n ax.plot(df_sliced['Cumulative_Inj'], df_sliced['ave_api'],\n label=curve_label)\n ax.spines['right'].set_visible(False)\n ax.spines['top'].set_visible(False)\n plt.ylabel('Ave. 
Aera Per Ion')\n plt.xlabel('Sample Injections')\n plt.title(\n \"\"\"Tracking Area Per Ion via Detector Measurement\nOver ~48 Hours of Continuous Sample Acquisition\"\"\"\n )\n legend_h_offset, legend_v_offset = 1.25, 0.75\n plt.legend(loc='center right', bbox_to_anchor=(legend_h_offset,\n legend_v_offset))\n plt.savefig('DM_API_Analysis', bbox_inches='tight')\n plt.show()\n\n def GenericIndividualPlotMaker(self, xdata_lst, ydata_lst,\n legendlbl_lst, xlbl, ylbl, plot_title, png_filename,\n legend_h_offset=1.25, legend_v_offset=0.75, legend_location='center'):\n fig = plt.figure(figsize=(15.5, 9))\n ax = fig.add_subplot(1, 1, 1)\n for i in range(len(xdata_lst)):\n ax.plot(xdata_lst[i], ydata_lst[i], color=self.color_codes[i],\n label=legendlbl_lst[i])\n ax.spines['right'].set_visible(False)\n ax.spines['top'].set_visible(False)\n plt.ylabel(ylbl)\n plt.xlabel(xlbl)\n plt.title(plot_title)\n plt.legend(loc=legend_location, bbox_to_anchor=(legend_h_offset,\n legend_v_offset))\n plt.savefig(png_filename, bbox_inches='tight')\n\n def GenericCombinedPlotMaker(self, xdata_lst, ydata_lst, legendlbl_lst,\n xlbl, ylbl_lst, fig_title, png_filename, legend_h_offset=0.9,\n legend_v_offset=2.4, legend_location='center'):\n fig = plt.figure(figsize=(25, 9))\n ax = []\n for a in range(4):\n ax.append(fig.add_subplot(2, 2, 1 + a))\n ax[a].set_prop_cycle('color', plt.cm.spectral(np.linspace(0.25,\n 0.84, 2)))\n for s in range(len(xdata_lst)):\n ax[a].plot(xdata_lst[s], ydata_lst[a][s], label=\n legendlbl_lst[s])\n ax[a].spines['right'].set_visible(False)\n ax[a].spines['top'].set_visible(False)\n ax[a].set_ylabel(ylbl_lst[a])\n if (a == 2 or a == 3) and s == 1:\n plt.xlabel(xlbl)\n elif (a == 0 or a == 1) and s == 1:\n ax[a].set_xticklabels([])\n ax[a].spines['bottom'].set_visible(False)\n ax[a].xaxis.set_ticks_position('none')\n plt.suptitle(fig_title, fontsize=20)\n plt.legend(loc=legend_location, bbox_to_anchor=(legend_h_offset,\n legend_v_offset))\n plt.savefig(png_filename, bbox_inches='tight')\n\n def Manual_OFN20fg_IDL(self):\n fig = plt.figure(figsize=(25, 9))\n ax = fig.add_subplot(1, 1, 1)\n ax.set_prop_cycle('color', plt.cm.spectral(np.linspace(0.25, 0.84, 2)))\n xdata = [0, 150, 250, 350]\n ydata = [[0.036614, 0.009674, 0.0056418, 0.004696], [0.0083151, \n 0.0044855, 0.0046082, 0.0033099]]\n legendlbl_lst = ['Peg BT - PV1', 'Peg BT - PV2']\n for s in range(len(ydata)):\n ax.plot(xdata, ydata[s], label=legendlbl_lst[s])\n ax.spines['right'].set_visible(False)\n ax.spines['top'].set_visible(False)\n ax.set_ylabel('IDL pg')\n ax.set_xlabel('Optimized Detector Voltage Offset (volts)')\n plt.legend()\n plt.suptitle(\n 'IDL vs Detector Voltage Offset\\nOFN 0.02 pg On Column\\nQuant Mass = 271.99'\n , fontsize=20)\n plt.savefig('OFN_20fg_IDL_Plot', bbox_inches='tight')\n\n def Manual_GO_Plot(self):\n fig = plt.figure(figsize=(25, 9))\n ax = fig.add_subplot(1, 1, 1)\n ax.set_prop_cycle('color', plt.cm.spectral(np.linspace(0.25, 0.84, 2)))\n xdata = [0, 150, 250, 350]\n ydata = [[-7.7, 26.5, 42.8, 66.1], [-8, 4.1, 13.5, 48.4]]\n legendlbl_lst = ['Peg BT - PV1', 'Peg BT - PV2']\n for s in range(len(ydata)):\n ax.plot(xdata, ydata[s], label=legendlbl_lst[s])\n ax.spines['right'].set_visible(False)\n ax.spines['top'].set_visible(False)\n ax.set_ylabel('Change in Optimized Detector Voltage')\n ax.set_xlabel('Optimized Detector Voltage Offset (volts)')\n plt.legend()\n plt.savefig('GO_Delta_Plot', bbox_inches='tight')\n plt.show()\n",
"step-5": "import pandas as pd #@UnusedImport\r\nimport matplotlib.pyplot as plt\r\nimport matplotlib #@UnusedImport\r\nimport numpy as np #@UnusedImport\r\n\r\nclass Plotter():\r\n\tdef __init__(self):\r\n\t\tself.red_hex_code = '#ff0000'\r\n\r\n\tdef AlkDMIonStatsSplitPlot(self, df):\r\n\t\tPV1_DataSets_lst = df[df['inst'] == 'PV1']['DataSet'].unique()\r\n\t\tPV2_DataSets_lst = df[df['inst'] == 'PV2']['DataSet'].unique()\r\n\t\tinst_sets = [PV1_DataSets_lst,PV2_DataSets_lst]\r\n\t\tax_title = ['Peg-BT PV1', 'Peg-BT PV2']\r\n\t\t\r\n\t\t\r\n\t\tfig = plt.figure(figsize=(25,9))\r\n\t\tax1 = fig.add_subplot(1,2,1)\r\n\t\tax2 = fig.add_subplot(1,2,2)\t\t\r\n\t\tax1.set_prop_cycle('color',plt.cm.spectral(np.linspace(0.1,0.9,4))) #@UndefinedVariable\r\n\t\tax2.set_prop_cycle('color',plt.cm.spectral(np.linspace(0.1,0.9,4))) #@UndefinedVariable\r\n\t\tax = [ax1,ax2]\r\n\t\t\r\n\t\tfor a in range(2):\r\n\t\t\t\r\n\t\t\tax[a].spines['right'].set_visible(False)\r\n\t\t\tax[a].spines['top'].set_visible(False)\r\n\t\t\tax[a].set_ylabel('Area Per Ion via Detector Measurement')\r\n\t\t\tax[a].set_xlabel('Alkane Standard\\nSample Injection Count')\r\n\t\t\tax[a].set_title(ax_title[a])\r\n\t\t\t\r\n\t\t\tfor dset in inst_sets[a]:\r\n\t\t\t\tdf_sliced = df[df['DataSet'] == dset].copy()\r\n\t\t\t\toffset = df_sliced['offset_volts'].iloc[2]\r\n\t\t\t\tdv = df_sliced['Det_Volts'].iloc[2]\r\n\t\t\t\tcurve_label = 'Offset: +{v} v = {d} v'.format(v=offset, d=dv)\r\n\t\t\t\tax[a].plot(df_sliced['Cumulative_Inj'], df_sliced['ave_api'], label=curve_label)\r\n\t\t\t\t\r\n\t\t\tax[a].legend(loc='center', bbox_to_anchor=(0.17,-0.1))\r\n\t\t\r\n# \t\tplt.suptitle('Tracking Area Per Ion via Detector Measurement\\nOver ~48 Hours of Continuous Sample Acquisition', fontsize=14)\r\n\t\tplt.savefig('DM_API_Analysis', bbox_inches='tight')\r\n\t\tplt.show()\r\n\r\n\r\n\t\r\n\tdef AlkDMIonStatsPlot(self, df):\r\n\t\tDataSets_lst = df['DataSet'].unique()\r\n\t\tfig = plt.figure(figsize=(15.5,9))\r\n\t\tax = fig.add_subplot(1,1,1)\r\n\t\tax.set_prop_cycle('color',plt.cm.spectral(np.linspace(0.1,1.00,8))) #@UndefinedVariable\r\n\t\t\r\n\t\tfor dset in DataSets_lst:\r\n\t\t\tdf_sliced = df[df['DataSet'] == dset].copy()\r\n\t\t\tinstrument = df_sliced['inst'].iloc[2]\r\n\t\t\toffset = df_sliced['offset_volts'].iloc[2]\r\n\t\t\tdv = df_sliced['Det_Volts'].iloc[2]\r\n\t\t\tcurve_label = 'Inst: {i} - Offset: +{v} v = {d} v'.format(i=instrument, v=offset, d=dv)\r\n\t\t\t\r\n\t\t\tax.plot(df_sliced['Cumulative_Inj'], df_sliced['ave_api'], label=curve_label)\r\n\t\t\r\n\t\tax.spines['right'].set_visible(False)\r\n\t\tax.spines['top'].set_visible(False)\r\n\t\t\r\n\t\tplt.ylabel('Ave. Aera Per Ion')\r\n\t\tplt.xlabel('Sample Injections')\r\n\t\tplt.title('Tracking Area Per Ion via Detector Measurement\\nOver ~48 Hours of Continuous Sample Acquisition')\r\n\r\n\t\tlegend_h_offset, legend_v_offset = 1.25, 0.75\r\n\t\tplt.legend(loc='center right', bbox_to_anchor=(legend_h_offset, legend_v_offset))\r\n\t\tplt.savefig('DM_API_Analysis', bbox_inches='tight')\r\n\t\tplt.show()\r\n\t\t\r\n\tdef GenericIndividualPlotMaker(self, xdata_lst, ydata_lst, legendlbl_lst, xlbl, ylbl, plot_title, png_filename, legend_h_offset=1.25, legend_v_offset=0.75, legend_location='center'):\r\n\t\t# xdata & ydata: both are a list of lists each containing the corresponding axis data. These are the requirement of these two\r\n\t\t\t# data set to prevent an error:\r\n\t\t\t\t# Sublists with the same index are a matching x vs y set that will be plotted. 
They MUST be the same length to prevent an error.\r\n\t\t\t\t# There must be the same number of sub lists to prevent an error.\r\n\t\t# legendlbl_lst: a list of legend labels for each x vs y plot. Again there must be the same number of items in this list as x/y pairs.\r\n\t\t# The rest are self explainatory\r\n\t\tfig = plt.figure(figsize=(15.5,9))\r\n\t\tax = fig.add_subplot(1,1,1)\r\n\t\t\r\n\t\tfor i in range(len(xdata_lst)):\r\n\t\t\tax.plot(xdata_lst[i], ydata_lst[i], color=self.color_codes[i], label=legendlbl_lst[i])\r\n\t\t\t\r\n\t\tax.spines['right'].set_visible(False)\r\n\t\tax.spines['top'].set_visible(False)\r\n\t\t\r\n\t\tplt.ylabel(ylbl)\r\n\t\tplt.xlabel(xlbl)\r\n\t\tplt.title(plot_title)\r\n\r\n\t\tplt.legend(loc=legend_location, bbox_to_anchor=(legend_h_offset, legend_v_offset))\r\n\t\tplt.savefig(png_filename, bbox_inches='tight')\r\n\t\t\r\n\t\t# (x_data, all_y_data, legendlbl_lst, xlbl, plot_titles, figure_title, all_png_filenames)\r\n\tdef GenericCombinedPlotMaker(self, xdata_lst, ydata_lst, legendlbl_lst, xlbl, ylbl_lst, fig_title, png_filename, legend_h_offset=0.9, legend_v_offset=2.4, legend_location='center'):\r\n\t\t# xdata_lst: is a list of lists each containing the corresponding x-axis data. The x-axis data is the same for all ax_n objects\r\n\t\t\t# Generic example: [Series_1_x-axis_data_lst, Series_n_x-axis_data_lst...]\r\n\t\t# ydata_lst: is a list of lists of lists containing all the y-axis data.\r\n\t\t\t# Generic example: [ax_1[Series_1_y-axis_data_lst, Series_n_y-axis_data_lst...], ax_n[ax_1[Series_1_y-axis_data_lst, Series_n_y-axis_data_lst...]...]\t\r\n\t\t\t# data set to prevent an error:\r\n\t\t\t\t# Sublists with the same index are a matching x vs y set that will be plotted. They MUST be the same length to prevent an error.\r\n\t\t\t\t# There must be the same number of sub lists to prevent an error.\r\n\t\t# legendlbl_lst: a list of legend labels for each x vs y plot. 
Again there must be the same number of items in this list as x/y pairs.\r\n\t\t# The rest are self explainatory\r\n\t\tfig = plt.figure(figsize=(25,9))\r\n\t\tax = []\r\n\t\t\r\n\t\tfor a in range(4):\r\n\t\t\tax.append(fig.add_subplot(2,2,1+a))\r\n\t\t\tax[a].set_prop_cycle('color',plt.cm.spectral(np.linspace(0.25,0.84,2))) #@UndefinedVariable\r\n\t\t\t\r\n\t\t\tfor s in range(len(xdata_lst)):\r\n\t\t\t\tax[a].plot(xdata_lst[s], ydata_lst[a][s], label=legendlbl_lst[s])\r\n\t\t\t\tax[a].spines['right'].set_visible(False)\r\n\t\t\t\tax[a].spines['top'].set_visible(False)\r\n\t\t\t\tax[a].set_ylabel(ylbl_lst[a])\r\n\t\t\t\t\r\n\t\t\t\t\r\n\t\t\t\tif (a == 2 or a == 3) and s == 1:\r\n\t\t\t\t\tplt.xlabel(xlbl)\r\n\t\t\t\telif (a == 0 or a == 1) and s == 1:\r\n\t\t\t\t\tax[a].set_xticklabels([])\r\n\t\t\t\t\tax[a].spines['bottom'].set_visible(False)\r\n\t\t\t\t\tax[a].xaxis.set_ticks_position('none')\r\n\t\t\t\t\t\r\n\t\tplt.suptitle(fig_title, fontsize=20)\r\n\t\tplt.legend(loc=legend_location, bbox_to_anchor=(legend_h_offset, legend_v_offset))\r\n\t\tplt.savefig(png_filename, bbox_inches='tight')\r\n\t\t\r\n\tdef Manual_OFN20fg_IDL(self):\r\n\t\tfig = plt.figure(figsize=(25,9))\r\n\t\tax = fig.add_subplot(1,1,1)\r\n\t\tax.set_prop_cycle('color',plt.cm.spectral(np.linspace(0.25,0.84,2))) #@UndefinedVariable\r\n\t\t\r\n\t\txdata = [0,150,250,350]\r\n\t\tydata = [[0.036614, 0.009674, 0.0056418, 0.004696],[0.0083151, 0.0044855, 0.0046082, 0.0033099]]\r\n\t\tlegendlbl_lst = ['Peg BT - PV1', 'Peg BT - PV2']\r\n\t\t\r\n\t\tfor s in range(len(ydata)):\r\n\t\t\tax.plot(xdata, ydata[s], label=legendlbl_lst[s])\r\n\t\t\t\r\n\t\tax.spines['right'].set_visible(False)\r\n\t\tax.spines['top'].set_visible(False)\r\n\t\tax.set_ylabel('IDL pg')\r\n\t\tax.set_xlabel('Optimized Detector Voltage Offset (volts)')\r\n\t\tplt.legend()\r\n\t\tplt.suptitle('IDL vs Detector Voltage Offset\\nOFN 0.02 pg On Column\\nQuant Mass = 271.99', fontsize=20)\r\n\t\tplt.savefig('OFN_20fg_IDL_Plot', bbox_inches='tight')\r\n\t\t\r\n\tdef Manual_GO_Plot(self):\r\n\t\tfig = plt.figure(figsize=(25,9))\r\n\t\tax = fig.add_subplot(1,1,1)\r\n\t\tax.set_prop_cycle('color',plt.cm.spectral(np.linspace(0.25,0.84,2))) #@UndefinedVariable\r\n\t\t\r\n\t\txdata = [0,150,250,350]\r\n\t\tydata = [[-7.7, 26.5, 42.8, 66.1],[-8, 4.1, 13.5, 48.4]]\r\n\t\tlegendlbl_lst = ['Peg BT - PV1', 'Peg BT - PV2']\r\n\t\t\r\n\t\tfor s in range(len(ydata)):\r\n\t\t\tax.plot(xdata, ydata[s], label=legendlbl_lst[s])\r\n\t\t\t\r\n\t\tax.spines['right'].set_visible(False)\r\n\t\tax.spines['top'].set_visible(False)\r\n\t\tax.set_ylabel('Change in Optimized Detector Voltage')\r\n\t\tax.set_xlabel('Optimized Detector Voltage Offset (volts)')\r\n\t\tplt.legend()\r\n# \t\tplt.suptitle('Change in Optimized Detector Voltage\\nFrom the Beginning to the End of a Data Set', fontsize=20)\r\n\t\tplt.savefig('GO_Delta_Plot', bbox_inches='tight')\r\n\t\tplt.show()",
"step-ids": [
5,
6,
8,
9,
10
]
}
|
[
5,
6,
8,
9,
10
] |
from django.db import models
from home.models import MainUser
from product.models import Product
# Create your models here.
class Cart(models.Model):
    user = models.ForeignKey(MainUser, on_delete=models.CASCADE)
    item = models.ForeignKey(Product, on_delete=models.CASCADE)
    quantity = models.PositiveIntegerField(default=1)
    parchased = models.BooleanField(default=False)
    created = models.DateTimeField(auto_now_add=True)
    updated = models.DateTimeField(auto_now=True)

    def __str__(self):
        return f'{self.item}x{self.quantity}'

    def get_total(self):
        total = self.item.price * self.quantity
        f_total = format(total, '0.2f')
        return f_total


class Order(models.Model):
    orderitems = models.ManyToManyField(Cart)
    user = models.ForeignKey(MainUser, on_delete=models.CASCADE)
    ordered = models.BooleanField(default=False)
    created = models.DateTimeField(auto_now_add=True)
    payment_id = models.CharField(max_length=300, blank=True, null=True)
    orderid = models.CharField(max_length=300, blank=True, null=True)
|
normal
|
{
"blob_id": "454d210c1b1a41e4a645ef7ccb24f80ee20a451c",
"index": 2224,
"step-1": "<mask token>\n\n\nclass Cart(models.Model):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def get_total(self):\n total = self.item.price * self.quantity\n f_total = format(total, '0.2f')\n return f_total\n\n\nclass Order(models.Model):\n orderitems = models.ManyToManyField(Cart)\n user = models.ForeignKey(MainUser, on_delete=models.CASCADE)\n ordered = models.BooleanField(default=False)\n created = models.DateTimeField(auto_now_add=True)\n payment_id = models.CharField(max_length=300, blank=True, null=True)\n orderid = models.CharField(max_length=300, blank=True, null=True)\n",
"step-2": "<mask token>\n\n\nclass Cart(models.Model):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def __str__(self):\n return f'{self.item}x{self.quantity}'\n\n def get_total(self):\n total = self.item.price * self.quantity\n f_total = format(total, '0.2f')\n return f_total\n\n\nclass Order(models.Model):\n orderitems = models.ManyToManyField(Cart)\n user = models.ForeignKey(MainUser, on_delete=models.CASCADE)\n ordered = models.BooleanField(default=False)\n created = models.DateTimeField(auto_now_add=True)\n payment_id = models.CharField(max_length=300, blank=True, null=True)\n orderid = models.CharField(max_length=300, blank=True, null=True)\n",
"step-3": "<mask token>\n\n\nclass Cart(models.Model):\n user = models.ForeignKey(MainUser, on_delete=models.CASCADE)\n item = models.ForeignKey(Product, on_delete=models.CASCADE)\n quantity = models.PositiveIntegerField(default=1)\n parchased = models.BooleanField(default=False)\n created = models.DateTimeField(auto_now_add=True)\n updated = models.DateTimeField(auto_now=True)\n\n def __str__(self):\n return f'{self.item}x{self.quantity}'\n\n def get_total(self):\n total = self.item.price * self.quantity\n f_total = format(total, '0.2f')\n return f_total\n\n\nclass Order(models.Model):\n orderitems = models.ManyToManyField(Cart)\n user = models.ForeignKey(MainUser, on_delete=models.CASCADE)\n ordered = models.BooleanField(default=False)\n created = models.DateTimeField(auto_now_add=True)\n payment_id = models.CharField(max_length=300, blank=True, null=True)\n orderid = models.CharField(max_length=300, blank=True, null=True)\n",
"step-4": "from django.db import models\nfrom home.models import MainUser\nfrom product.models import Product\n\n\nclass Cart(models.Model):\n user = models.ForeignKey(MainUser, on_delete=models.CASCADE)\n item = models.ForeignKey(Product, on_delete=models.CASCADE)\n quantity = models.PositiveIntegerField(default=1)\n parchased = models.BooleanField(default=False)\n created = models.DateTimeField(auto_now_add=True)\n updated = models.DateTimeField(auto_now=True)\n\n def __str__(self):\n return f'{self.item}x{self.quantity}'\n\n def get_total(self):\n total = self.item.price * self.quantity\n f_total = format(total, '0.2f')\n return f_total\n\n\nclass Order(models.Model):\n orderitems = models.ManyToManyField(Cart)\n user = models.ForeignKey(MainUser, on_delete=models.CASCADE)\n ordered = models.BooleanField(default=False)\n created = models.DateTimeField(auto_now_add=True)\n payment_id = models.CharField(max_length=300, blank=True, null=True)\n orderid = models.CharField(max_length=300, blank=True, null=True)\n",
"step-5": "from django.db import models\nfrom home.models import MainUser\nfrom product.models import Product\n# Create your models here.\nclass Cart(models.Model):\n user = models.ForeignKey(MainUser,on_delete=models.CASCADE)\n item = models.ForeignKey(Product, on_delete=models.CASCADE)\n\n quantity = models.PositiveIntegerField(default=1)\n parchased=models.BooleanField(default=False)\n\n created = models.DateTimeField(auto_now_add=True)\n updated = models.DateTimeField(auto_now=True)\n \n\n def __str__(self):\n return f'{self.item}x{self.quantity}'\n\n\n def get_total(self):\n total=self.item.price *self.quantity \n f_total=format(total,'0.2f')\n return f_total\n \nclass Order(models.Model):\n orderitems = models.ManyToManyField(Cart)\n user=models.ForeignKey(MainUser,on_delete=models.CASCADE)\n ordered=models.BooleanField(default=False)\n\n created = models.DateTimeField(auto_now_add=True)\n payment_id=models.CharField(max_length=300,blank=True,null=True)\n orderid=models.CharField(max_length=300,blank=True,null=True)\n\n \n\n ",
"step-ids": [
4,
5,
6,
7,
8
]
}
|
[
4,
5,
6,
7,
8
] |
#
# PySNMP MIB module ADTRAN-ATLAS-HSSI-V35-MIB (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/ADTRAN-ATLAS-HSSI-V35-MIB
# Produced by pysmi-0.3.4 at Mon Apr 29 16:59:09 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
adATLASModuleInfoFPStatus, = mibBuilder.importSymbols("ADTRAN-ATLAS-MODULE-MIB", "adATLASModuleInfoFPStatus")
adATLASUnitSlotAddress, adATLASUnitFPStatus, adATLASUnitPortAddress = mibBuilder.importSymbols("ADTRAN-ATLAS-UNIT-MIB", "adATLASUnitSlotAddress", "adATLASUnitFPStatus", "adATLASUnitPortAddress")
ObjectIdentifier, Integer, OctetString = mibBuilder.importSymbols("ASN1", "ObjectIdentifier", "Integer", "OctetString")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
SingleValueConstraint, ValueRangeConstraint, ConstraintsUnion, ConstraintsIntersection, ValueSizeConstraint = mibBuilder.importSymbols("ASN1-REFINEMENT", "SingleValueConstraint", "ValueRangeConstraint", "ConstraintsUnion", "ConstraintsIntersection", "ValueSizeConstraint")
ifIndex, = mibBuilder.importSymbols("IF-MIB", "ifIndex")
ModuleCompliance, NotificationGroup = mibBuilder.importSymbols("SNMPv2-CONF", "ModuleCompliance", "NotificationGroup")
MibScalar, MibTable, MibTableRow, MibTableColumn, Bits, Gauge32, Integer32, Counter64, IpAddress, ModuleIdentity, ObjectIdentity, iso, Unsigned32, Counter32, MibIdentifier, NotificationType, NotificationType, enterprises, TimeTicks = mibBuilder.importSymbols("SNMPv2-SMI", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "Bits", "Gauge32", "Integer32", "Counter64", "IpAddress", "ModuleIdentity", "ObjectIdentity", "iso", "Unsigned32", "Counter32", "MibIdentifier", "NotificationType", "NotificationType", "enterprises", "TimeTicks")
DisplayString, TextualConvention = mibBuilder.importSymbols("SNMPv2-TC", "DisplayString", "TextualConvention")
adtran = MibIdentifier((1, 3, 6, 1, 4, 1, 664))
adMgmt = MibIdentifier((1, 3, 6, 1, 4, 1, 664, 2))
adATLASmg = MibIdentifier((1, 3, 6, 1, 4, 1, 664, 2, 154))
adGenATLASmg = MibIdentifier((1, 3, 6, 1, 4, 1, 664, 2, 154, 1))
adATLASHSSIV35mg = MibIdentifier((1, 3, 6, 1, 4, 1, 664, 2, 154, 1, 11))
adATLASHSSIV35IfceDeact = NotificationType((1, 3, 6, 1, 4, 1, 664, 2, 154) + (0,15401100)).setObjects(("IF-MIB", "ifIndex"), ("ADTRAN-ATLAS-UNIT-MIB", "adATLASUnitSlotAddress"), ("ADTRAN-ATLAS-UNIT-MIB", "adATLASUnitPortAddress"), ("ADTRAN-ATLAS-MODULE-MIB", "adATLASModuleInfoFPStatus"), ("ADTRAN-ATLAS-UNIT-MIB", "adATLASUnitFPStatus"))
adATLASHSSIV35IfceReact = NotificationType((1, 3, 6, 1, 4, 1, 664, 2, 154) + (0,15401101)).setObjects(("IF-MIB", "ifIndex"), ("ADTRAN-ATLAS-UNIT-MIB", "adATLASUnitSlotAddress"), ("ADTRAN-ATLAS-UNIT-MIB", "adATLASUnitPortAddress"), ("ADTRAN-ATLAS-MODULE-MIB", "adATLASModuleInfoFPStatus"), ("ADTRAN-ATLAS-UNIT-MIB", "adATLASUnitFPStatus"))
mibBuilder.exportSymbols("ADTRAN-ATLAS-HSSI-V35-MIB", adtran=adtran, adMgmt=adMgmt, adATLASHSSIV35IfceReact=adATLASHSSIV35IfceReact, adGenATLASmg=adGenATLASmg, adATLASmg=adATLASmg, adATLASHSSIV35IfceDeact=adATLASHSSIV35IfceDeact, adATLASHSSIV35mg=adATLASHSSIV35mg)
|
normal
|
{
"blob_id": "309807e04bfbf6c32b7105fe87d6ad1247ae411a",
"index": 3192,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nmibBuilder.exportSymbols('ADTRAN-ATLAS-HSSI-V35-MIB', adtran=adtran, adMgmt\n =adMgmt, adATLASHSSIV35IfceReact=adATLASHSSIV35IfceReact, adGenATLASmg=\n adGenATLASmg, adATLASmg=adATLASmg, adATLASHSSIV35IfceDeact=\n adATLASHSSIV35IfceDeact, adATLASHSSIV35mg=adATLASHSSIV35mg)\n",
"step-3": "adATLASModuleInfoFPStatus, = mibBuilder.importSymbols('ADTRAN-ATLAS-MODULE-MIB'\n , 'adATLASModuleInfoFPStatus')\nadATLASUnitSlotAddress, adATLASUnitFPStatus, adATLASUnitPortAddress = (\n mibBuilder.importSymbols('ADTRAN-ATLAS-UNIT-MIB',\n 'adATLASUnitSlotAddress', 'adATLASUnitFPStatus', 'adATLASUnitPortAddress'))\nObjectIdentifier, Integer, OctetString = mibBuilder.importSymbols('ASN1',\n 'ObjectIdentifier', 'Integer', 'OctetString')\nNamedValues, = mibBuilder.importSymbols('ASN1-ENUMERATION', 'NamedValues')\n(SingleValueConstraint, ValueRangeConstraint, ConstraintsUnion,\n ConstraintsIntersection, ValueSizeConstraint) = (mibBuilder.\n importSymbols('ASN1-REFINEMENT', 'SingleValueConstraint',\n 'ValueRangeConstraint', 'ConstraintsUnion', 'ConstraintsIntersection',\n 'ValueSizeConstraint'))\nifIndex, = mibBuilder.importSymbols('IF-MIB', 'ifIndex')\nModuleCompliance, NotificationGroup = mibBuilder.importSymbols('SNMPv2-CONF',\n 'ModuleCompliance', 'NotificationGroup')\n(MibScalar, MibTable, MibTableRow, MibTableColumn, Bits, Gauge32, Integer32,\n Counter64, IpAddress, ModuleIdentity, ObjectIdentity, iso, Unsigned32,\n Counter32, MibIdentifier, NotificationType, NotificationType,\n enterprises, TimeTicks) = (mibBuilder.importSymbols('SNMPv2-SMI',\n 'MibScalar', 'MibTable', 'MibTableRow', 'MibTableColumn', 'Bits',\n 'Gauge32', 'Integer32', 'Counter64', 'IpAddress', 'ModuleIdentity',\n 'ObjectIdentity', 'iso', 'Unsigned32', 'Counter32', 'MibIdentifier',\n 'NotificationType', 'NotificationType', 'enterprises', 'TimeTicks'))\nDisplayString, TextualConvention = mibBuilder.importSymbols('SNMPv2-TC',\n 'DisplayString', 'TextualConvention')\nadtran = MibIdentifier((1, 3, 6, 1, 4, 1, 664))\nadMgmt = MibIdentifier((1, 3, 6, 1, 4, 1, 664, 2))\nadATLASmg = MibIdentifier((1, 3, 6, 1, 4, 1, 664, 2, 154))\nadGenATLASmg = MibIdentifier((1, 3, 6, 1, 4, 1, 664, 2, 154, 1))\nadATLASHSSIV35mg = MibIdentifier((1, 3, 6, 1, 4, 1, 664, 2, 154, 1, 11))\nadATLASHSSIV35IfceDeact = NotificationType((1, 3, 6, 1, 4, 1, 664, 2, 154) +\n (0, 15401100)).setObjects(('IF-MIB', 'ifIndex'), (\n 'ADTRAN-ATLAS-UNIT-MIB', 'adATLASUnitSlotAddress'), (\n 'ADTRAN-ATLAS-UNIT-MIB', 'adATLASUnitPortAddress'), (\n 'ADTRAN-ATLAS-MODULE-MIB', 'adATLASModuleInfoFPStatus'), (\n 'ADTRAN-ATLAS-UNIT-MIB', 'adATLASUnitFPStatus'))\nadATLASHSSIV35IfceReact = NotificationType((1, 3, 6, 1, 4, 1, 664, 2, 154) +\n (0, 15401101)).setObjects(('IF-MIB', 'ifIndex'), (\n 'ADTRAN-ATLAS-UNIT-MIB', 'adATLASUnitSlotAddress'), (\n 'ADTRAN-ATLAS-UNIT-MIB', 'adATLASUnitPortAddress'), (\n 'ADTRAN-ATLAS-MODULE-MIB', 'adATLASModuleInfoFPStatus'), (\n 'ADTRAN-ATLAS-UNIT-MIB', 'adATLASUnitFPStatus'))\nmibBuilder.exportSymbols('ADTRAN-ATLAS-HSSI-V35-MIB', adtran=adtran, adMgmt\n =adMgmt, adATLASHSSIV35IfceReact=adATLASHSSIV35IfceReact, adGenATLASmg=\n adGenATLASmg, adATLASmg=adATLASmg, adATLASHSSIV35IfceDeact=\n adATLASHSSIV35IfceDeact, adATLASHSSIV35mg=adATLASHSSIV35mg)\n",
"step-4": "#\n# PySNMP MIB module ADTRAN-ATLAS-HSSI-V35-MIB (http://snmplabs.com/pysmi)\n# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/ADTRAN-ATLAS-HSSI-V35-MIB\n# Produced by pysmi-0.3.4 at Mon Apr 29 16:59:09 2019\n# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4\n# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15) \n#\nadATLASModuleInfoFPStatus, = mibBuilder.importSymbols(\"ADTRAN-ATLAS-MODULE-MIB\", \"adATLASModuleInfoFPStatus\")\nadATLASUnitSlotAddress, adATLASUnitFPStatus, adATLASUnitPortAddress = mibBuilder.importSymbols(\"ADTRAN-ATLAS-UNIT-MIB\", \"adATLASUnitSlotAddress\", \"adATLASUnitFPStatus\", \"adATLASUnitPortAddress\")\nObjectIdentifier, Integer, OctetString = mibBuilder.importSymbols(\"ASN1\", \"ObjectIdentifier\", \"Integer\", \"OctetString\")\nNamedValues, = mibBuilder.importSymbols(\"ASN1-ENUMERATION\", \"NamedValues\")\nSingleValueConstraint, ValueRangeConstraint, ConstraintsUnion, ConstraintsIntersection, ValueSizeConstraint = mibBuilder.importSymbols(\"ASN1-REFINEMENT\", \"SingleValueConstraint\", \"ValueRangeConstraint\", \"ConstraintsUnion\", \"ConstraintsIntersection\", \"ValueSizeConstraint\")\nifIndex, = mibBuilder.importSymbols(\"IF-MIB\", \"ifIndex\")\nModuleCompliance, NotificationGroup = mibBuilder.importSymbols(\"SNMPv2-CONF\", \"ModuleCompliance\", \"NotificationGroup\")\nMibScalar, MibTable, MibTableRow, MibTableColumn, Bits, Gauge32, Integer32, Counter64, IpAddress, ModuleIdentity, ObjectIdentity, iso, Unsigned32, Counter32, MibIdentifier, NotificationType, NotificationType, enterprises, TimeTicks = mibBuilder.importSymbols(\"SNMPv2-SMI\", \"MibScalar\", \"MibTable\", \"MibTableRow\", \"MibTableColumn\", \"Bits\", \"Gauge32\", \"Integer32\", \"Counter64\", \"IpAddress\", \"ModuleIdentity\", \"ObjectIdentity\", \"iso\", \"Unsigned32\", \"Counter32\", \"MibIdentifier\", \"NotificationType\", \"NotificationType\", \"enterprises\", \"TimeTicks\")\nDisplayString, TextualConvention = mibBuilder.importSymbols(\"SNMPv2-TC\", \"DisplayString\", \"TextualConvention\")\nadtran = MibIdentifier((1, 3, 6, 1, 4, 1, 664))\nadMgmt = MibIdentifier((1, 3, 6, 1, 4, 1, 664, 2))\nadATLASmg = MibIdentifier((1, 3, 6, 1, 4, 1, 664, 2, 154))\nadGenATLASmg = MibIdentifier((1, 3, 6, 1, 4, 1, 664, 2, 154, 1))\nadATLASHSSIV35mg = MibIdentifier((1, 3, 6, 1, 4, 1, 664, 2, 154, 1, 11))\nadATLASHSSIV35IfceDeact = NotificationType((1, 3, 6, 1, 4, 1, 664, 2, 154) + (0,15401100)).setObjects((\"IF-MIB\", \"ifIndex\"), (\"ADTRAN-ATLAS-UNIT-MIB\", \"adATLASUnitSlotAddress\"), (\"ADTRAN-ATLAS-UNIT-MIB\", \"adATLASUnitPortAddress\"), (\"ADTRAN-ATLAS-MODULE-MIB\", \"adATLASModuleInfoFPStatus\"), (\"ADTRAN-ATLAS-UNIT-MIB\", \"adATLASUnitFPStatus\"))\nadATLASHSSIV35IfceReact = NotificationType((1, 3, 6, 1, 4, 1, 664, 2, 154) + (0,15401101)).setObjects((\"IF-MIB\", \"ifIndex\"), (\"ADTRAN-ATLAS-UNIT-MIB\", \"adATLASUnitSlotAddress\"), (\"ADTRAN-ATLAS-UNIT-MIB\", \"adATLASUnitPortAddress\"), (\"ADTRAN-ATLAS-MODULE-MIB\", \"adATLASModuleInfoFPStatus\"), (\"ADTRAN-ATLAS-UNIT-MIB\", \"adATLASUnitFPStatus\"))\nmibBuilder.exportSymbols(\"ADTRAN-ATLAS-HSSI-V35-MIB\", adtran=adtran, adMgmt=adMgmt, adATLASHSSIV35IfceReact=adATLASHSSIV35IfceReact, adGenATLASmg=adGenATLASmg, adATLASmg=adATLASmg, adATLASHSSIV35IfceDeact=adATLASHSSIV35IfceDeact, adATLASHSSIV35mg=adATLASHSSIV35mg)\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
import sys
from PySide6.QtCore import *
from PySide6.QtWidgets import *
from PySide6.QtGui import *
from simple_drawing_window import *
class simple_drawing_window1(simple_drawing_window):
def __init__(self):
super().__init__()
def paintEvent(self, e):
p = QPainter()
p.begin(self)
"""
p.setPen(QColor(0,0,0))
p.setBrush(QColor(0,127,0))
p.drawPolygon(
[QPoint(70,100), QPoint(100,110),
QPoint(130, 100), QPoint(100,150),]
)
"""
p.setPen(QColor(255,127,0))
p.setBrush(QColor(255,127,0))
p.drawPolygon(
[QPoint(50,100), QPoint(200,100),QPoint(200,400), QPoint(50,400),]
)
p.drawPixmap(QRect(400,150,200,200), self.rabbit)
p.end()
|
normal
|
{
"blob_id": "6fc43919f521234d0dc9e167bb72f014e9c0bf17",
"index": 2102,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass simple_drawing_window1(simple_drawing_window):\n <mask token>\n\n def paintEvent(self, e):\n p = QPainter()\n p.begin(self)\n \"\"\"\n\t\tp.setPen(QColor(0,0,0))\n\t\tp.setBrush(QColor(0,127,0))\n\t\tp.drawPolygon(\n\t\t\t[QPoint(70,100), QPoint(100,110), \n\t\t\tQPoint(130, 100), QPoint(100,150),]\n\t\t)\n\t\t\"\"\"\n p.setPen(QColor(255, 127, 0))\n p.setBrush(QColor(255, 127, 0))\n p.drawPolygon([QPoint(50, 100), QPoint(200, 100), QPoint(200, 400),\n QPoint(50, 400)])\n p.drawPixmap(QRect(400, 150, 200, 200), self.rabbit)\n p.end()\n",
"step-3": "<mask token>\n\n\nclass simple_drawing_window1(simple_drawing_window):\n\n def __init__(self):\n super().__init__()\n\n def paintEvent(self, e):\n p = QPainter()\n p.begin(self)\n \"\"\"\n\t\tp.setPen(QColor(0,0,0))\n\t\tp.setBrush(QColor(0,127,0))\n\t\tp.drawPolygon(\n\t\t\t[QPoint(70,100), QPoint(100,110), \n\t\t\tQPoint(130, 100), QPoint(100,150),]\n\t\t)\n\t\t\"\"\"\n p.setPen(QColor(255, 127, 0))\n p.setBrush(QColor(255, 127, 0))\n p.drawPolygon([QPoint(50, 100), QPoint(200, 100), QPoint(200, 400),\n QPoint(50, 400)])\n p.drawPixmap(QRect(400, 150, 200, 200), self.rabbit)\n p.end()\n",
"step-4": "import sys\nfrom PySide6.QtCore import *\nfrom PySide6.QtWidgets import *\nfrom PySide6.QtGui import *\nfrom simple_drawing_window import *\n\n\nclass simple_drawing_window1(simple_drawing_window):\n\n def __init__(self):\n super().__init__()\n\n def paintEvent(self, e):\n p = QPainter()\n p.begin(self)\n \"\"\"\n\t\tp.setPen(QColor(0,0,0))\n\t\tp.setBrush(QColor(0,127,0))\n\t\tp.drawPolygon(\n\t\t\t[QPoint(70,100), QPoint(100,110), \n\t\t\tQPoint(130, 100), QPoint(100,150),]\n\t\t)\n\t\t\"\"\"\n p.setPen(QColor(255, 127, 0))\n p.setBrush(QColor(255, 127, 0))\n p.drawPolygon([QPoint(50, 100), QPoint(200, 100), QPoint(200, 400),\n QPoint(50, 400)])\n p.drawPixmap(QRect(400, 150, 200, 200), self.rabbit)\n p.end()\n",
"step-5": "\nimport sys\nfrom PySide6.QtCore import *\nfrom PySide6.QtWidgets import *\nfrom PySide6.QtGui import *\nfrom simple_drawing_window import *\n\nclass simple_drawing_window1( simple_drawing_window):\n\tdef __init__(self):\n\t\tsuper().__init__()\n \n\tdef paintEvent(self, e):\n\t\tp = QPainter()\n\t\tp.begin(self)\n\t\t\"\"\"\n\t\tp.setPen(QColor(0,0,0))\n\t\tp.setBrush(QColor(0,127,0))\n\t\tp.drawPolygon(\n\t\t\t[QPoint(70,100), QPoint(100,110), \n\t\t\tQPoint(130, 100), QPoint(100,150),]\n\t\t)\n\t\t\"\"\"\n\n\t\tp.setPen(QColor(255,127,0))\n\t\tp.setBrush(QColor(255,127,0))\n \n\t\t\n \n\t\tp.drawPolygon(\n\t\t\t[QPoint(50,100), QPoint(200,100),QPoint(200,400), QPoint(50,400),]\n\t\t)\n\t\t\n\t\tp.drawPixmap(QRect(400,150,200,200), self.rabbit)\n \n\t\tp.end()\n\n",
"step-ids": [
0,
2,
3,
4,
5
]
}
|
[
0,
2,
3,
4,
5
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def getHashcode(string):
for i in range(10000000000):
hash_md5 = hashlib.md5(str(i).encode('utf-8'))
res = hash_md5.hexdigest()
if res[0:len(string)] == string:
print(i)
exit()
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def getHashcode(string):
for i in range(10000000000):
hash_md5 = hashlib.md5(str(i).encode('utf-8'))
res = hash_md5.hexdigest()
if res[0:len(string)] == string:
print(i)
exit()
if __name__ == '__main__':
getHashcode(sys.argv[1])
<|reserved_special_token_1|>
import hashlib
import sys
def getHashcode(string):
for i in range(10000000000):
hash_md5 = hashlib.md5(str(i).encode('utf-8'))
res = hash_md5.hexdigest()
if res[0:len(string)] == string:
print(i)
exit()
if __name__ == '__main__':
getHashcode(sys.argv[1])
|
flexible
|
{
"blob_id": "4c8e3c21dd478606cf09f2e97dc9deed6597dae5",
"index": 4375,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef getHashcode(string):\n for i in range(10000000000):\n hash_md5 = hashlib.md5(str(i).encode('utf-8'))\n res = hash_md5.hexdigest()\n if res[0:len(string)] == string:\n print(i)\n exit()\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef getHashcode(string):\n for i in range(10000000000):\n hash_md5 = hashlib.md5(str(i).encode('utf-8'))\n res = hash_md5.hexdigest()\n if res[0:len(string)] == string:\n print(i)\n exit()\n\n\nif __name__ == '__main__':\n getHashcode(sys.argv[1])\n",
"step-4": "import hashlib\nimport sys\n\n\ndef getHashcode(string):\n for i in range(10000000000):\n hash_md5 = hashlib.md5(str(i).encode('utf-8'))\n res = hash_md5.hexdigest()\n if res[0:len(string)] == string:\n print(i)\n exit()\n\n\nif __name__ == '__main__':\n getHashcode(sys.argv[1])\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
if operacion == 'SUMA':
resultado = numero1 + numero2
elif operacion == 'RESTA':
resultado = numero1 - numero2
elif operacion == 'DIVISION':
resultado = numero1 / numero2
elif operacion == 'MULTIPLICACION':
resultado = numero1 * numero2
print('Resultado : {}'.format(resultado))
<|reserved_special_token_1|>
numero1 = 0
numero2 = 0
operacion = input(
'¿Qué operación quiere realizar (Suma / Resta / Division / Multiplicacion)?: '
).upper()
numero1 = int(input('Introduzca el valor 1: '))
numero2 = int(input('Introduzca el valor 2: '))
if operacion == 'SUMA':
resultado = numero1 + numero2
elif operacion == 'RESTA':
resultado = numero1 - numero2
elif operacion == 'DIVISION':
resultado = numero1 / numero2
elif operacion == 'MULTIPLICACION':
resultado = numero1 * numero2
print('Resultado : {}'.format(resultado))
<|reserved_special_token_1|>
#Calculadora mediante el terminal
numero1 = 0
numero2 = 0
#Preguntamos los valores
operacion = input("¿Qué operación quiere realizar (Suma / Resta / Division / Multiplicacion)?: ").upper()
numero1 = int(input("Introduzca el valor 1: "))
numero2 = int(input("Introduzca el valor 2: "))
#Realizamos las operaciones
if operacion == "SUMA":
resultado = numero1 + numero2
elif operacion == "RESTA":
resultado = numero1 - numero2
elif operacion == "DIVISION":
resultado = numero1 / numero2
elif operacion == "MULTIPLICACION":
resultado = numero1 * numero2
#Mostramos en pantalla el resultado
print("Resultado : {}".format(resultado))
|
flexible
|
{
"blob_id": "5d618acc0962447554807cbb9d3546cd4e0b3572",
"index": 3005,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nif operacion == 'SUMA':\n resultado = numero1 + numero2\nelif operacion == 'RESTA':\n resultado = numero1 - numero2\nelif operacion == 'DIVISION':\n resultado = numero1 / numero2\nelif operacion == 'MULTIPLICACION':\n resultado = numero1 * numero2\nprint('Resultado : {}'.format(resultado))\n",
"step-3": "numero1 = 0\nnumero2 = 0\noperacion = input(\n '¿Qué operación quiere realizar (Suma / Resta / Division / Multiplicacion)?: '\n ).upper()\nnumero1 = int(input('Introduzca el valor 1: '))\nnumero2 = int(input('Introduzca el valor 2: '))\nif operacion == 'SUMA':\n resultado = numero1 + numero2\nelif operacion == 'RESTA':\n resultado = numero1 - numero2\nelif operacion == 'DIVISION':\n resultado = numero1 / numero2\nelif operacion == 'MULTIPLICACION':\n resultado = numero1 * numero2\nprint('Resultado : {}'.format(resultado))\n",
"step-4": "#Calculadora mediante el terminal\n\nnumero1 = 0\nnumero2 = 0\n\n\n#Preguntamos los valores\n\noperacion = input(\"¿Qué operación quiere realizar (Suma / Resta / Division / Multiplicacion)?: \").upper()\n\nnumero1 = int(input(\"Introduzca el valor 1: \"))\nnumero2 = int(input(\"Introduzca el valor 2: \"))\n\n\n#Realizamos las operaciones\nif operacion == \"SUMA\":\n resultado = numero1 + numero2\nelif operacion == \"RESTA\":\n resultado = numero1 - numero2\nelif operacion == \"DIVISION\":\n resultado = numero1 / numero2\nelif operacion == \"MULTIPLICACION\":\n resultado = numero1 * numero2\n\n\n#Mostramos en pantalla el resultado\nprint(\"Resultado : {}\".format(resultado))",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
class ScrapySpiderPipeline(object):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class ScrapySpiderPipeline(object):
def __init__(self):
engine = db_connect()
create_table(engine)
self.Session = sessionmaker(bind=engine)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class ScrapySpiderPipeline(object):
def __init__(self):
engine = db_connect()
create_table(engine)
self.Session = sessionmaker(bind=engine)
def process_item(self, item, spider):
session = self.Session()
ım_db = IMDB_DATABASE()
ım_db.MOVIE_CODE = item['MOVIE_CODE']
ım_db.MOVIE_NAME = item['MOVIE_NAME']
ım_db.YEAR = item['YEAR']
ım_db.RANK = item['RANK']
ım_db.IMDB_RATING = item['IMDB_RATING']
try:
session.add(ım_db)
session.commit()
except:
session.rollback()
raise
finally:
session.close()
return item
<|reserved_special_token_1|>
from sqlalchemy.orm import sessionmaker
from IMDB.spiders.models import IMDB_DATABASE, db_connect, create_table
class ScrapySpiderPipeline(object):
def __init__(self):
engine = db_connect()
create_table(engine)
self.Session = sessionmaker(bind=engine)
def process_item(self, item, spider):
session = self.Session()
ım_db = IMDB_DATABASE()
ım_db.MOVIE_CODE = item['MOVIE_CODE']
ım_db.MOVIE_NAME = item['MOVIE_NAME']
ım_db.YEAR = item['YEAR']
ım_db.RANK = item['RANK']
ım_db.IMDB_RATING = item['IMDB_RATING']
try:
session.add(ım_db)
session.commit()
except:
session.rollback()
raise
finally:
session.close()
return item
<|reserved_special_token_1|>
from sqlalchemy.orm import sessionmaker
from IMDB.spiders.models import IMDB_DATABASE, db_connect, create_table
class ScrapySpiderPipeline(object):
# Bu Fonksiyon Veritabanı bağlantısını ve oturum oluşturucuyu başlatır ve bir İlişkisel Veritabanı tablosu oluşturur.
def __init__(self):
engine = db_connect()
create_table(engine)
self.Session = sessionmaker(bind=engine)
# Bu Fonksiyon Spiderdan Gelen Dataları Models.py Dosyasındaki Model Şablonuna Göre İşleme Sokarak Verileri Database İçine Kaydeder
def process_item(self, item, spider):
session = self.Session()
ım_db = IMDB_DATABASE()
ım_db.MOVIE_CODE = item["MOVIE_CODE"]
ım_db.MOVIE_NAME = item["MOVIE_NAME"]
ım_db.YEAR = item["YEAR"]
ım_db.RANK = item["RANK"]
ım_db.IMDB_RATING = item["IMDB_RATING"]
# Buradaki Try Except istisna blokları datalar kaydedilirken varsa oluşan hataları ayıklayarak bizlere mesaj olarak döner
try:
session.add(ım_db)
session.commit()
except:
session.rollback()
raise
finally:
session.close()
return item
|
flexible
|
{
"blob_id": "16074fc1824a99b6fd1c4bf113d5b752308e8803",
"index": 5198,
"step-1": "<mask token>\n\n\nclass ScrapySpiderPipeline(object):\n <mask token>\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass ScrapySpiderPipeline(object):\n\n def __init__(self):\n engine = db_connect()\n create_table(engine)\n self.Session = sessionmaker(bind=engine)\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass ScrapySpiderPipeline(object):\n\n def __init__(self):\n engine = db_connect()\n create_table(engine)\n self.Session = sessionmaker(bind=engine)\n\n def process_item(self, item, spider):\n session = self.Session()\n ım_db = IMDB_DATABASE()\n ım_db.MOVIE_CODE = item['MOVIE_CODE']\n ım_db.MOVIE_NAME = item['MOVIE_NAME']\n ım_db.YEAR = item['YEAR']\n ım_db.RANK = item['RANK']\n ım_db.IMDB_RATING = item['IMDB_RATING']\n try:\n session.add(ım_db)\n session.commit()\n except:\n session.rollback()\n raise\n finally:\n session.close()\n return item\n",
"step-4": "from sqlalchemy.orm import sessionmaker\nfrom IMDB.spiders.models import IMDB_DATABASE, db_connect, create_table\n\n\nclass ScrapySpiderPipeline(object):\n\n def __init__(self):\n engine = db_connect()\n create_table(engine)\n self.Session = sessionmaker(bind=engine)\n\n def process_item(self, item, spider):\n session = self.Session()\n ım_db = IMDB_DATABASE()\n ım_db.MOVIE_CODE = item['MOVIE_CODE']\n ım_db.MOVIE_NAME = item['MOVIE_NAME']\n ım_db.YEAR = item['YEAR']\n ım_db.RANK = item['RANK']\n ım_db.IMDB_RATING = item['IMDB_RATING']\n try:\n session.add(ım_db)\n session.commit()\n except:\n session.rollback()\n raise\n finally:\n session.close()\n return item\n",
"step-5": "from sqlalchemy.orm import sessionmaker\nfrom IMDB.spiders.models import IMDB_DATABASE, db_connect, create_table\n\n\nclass ScrapySpiderPipeline(object):\n \n # Bu Fonksiyon Veritabanı bağlantısını ve oturum oluşturucuyu başlatır ve bir İlişkisel Veritabanı tablosu oluşturur.\n def __init__(self):\n \n engine = db_connect()\n create_table(engine)\n \n self.Session = sessionmaker(bind=engine)\n\n # Bu Fonksiyon Spiderdan Gelen Dataları Models.py Dosyasındaki Model Şablonuna Göre İşleme Sokarak Verileri Database İçine Kaydeder\n def process_item(self, item, spider):\n\n session = self.Session()\n \n ım_db = IMDB_DATABASE()\n \n ım_db.MOVIE_CODE = item[\"MOVIE_CODE\"]\n \n ım_db.MOVIE_NAME = item[\"MOVIE_NAME\"]\n\n ım_db.YEAR = item[\"YEAR\"]\n\n ım_db.RANK = item[\"RANK\"]\n\n ım_db.IMDB_RATING = item[\"IMDB_RATING\"]\n\n\n\n # Buradaki Try Except istisna blokları datalar kaydedilirken varsa oluşan hataları ayıklayarak bizlere mesaj olarak döner\n try:\n session.add(ım_db)\n session.commit()\n \n except:\n session.rollback()\n raise\n \n finally:\n session.close()\n\n return item\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
def strictly_greater_than(value):
if value : # Change this line
return "Greater than 100"
elif value : # Change this line
return "Greater than 10"
else:
return "10 or less"
# Change the value 1 below to experiment with different values
print(strictly_greater_than(1))
|
normal
|
{
"blob_id": "7620d76afc65ceb3b478f0b05339ace1f1531f7d",
"index": 6708,
"step-1": "<mask token>\n",
"step-2": "def strictly_greater_than(value):\n if value:\n return 'Greater than 100'\n elif value:\n return 'Greater than 10'\n else:\n return '10 or less'\n\n\n<mask token>\n",
"step-3": "def strictly_greater_than(value):\n if value:\n return 'Greater than 100'\n elif value:\n return 'Greater than 10'\n else:\n return '10 or less'\n\n\nprint(strictly_greater_than(1))\n",
"step-4": "def strictly_greater_than(value):\n if value : # Change this line\n return \"Greater than 100\"\n elif value : # Change this line\n return \"Greater than 10\"\n else:\n return \"10 or less\"\n\n# Change the value 1 below to experiment with different values\nprint(strictly_greater_than(1))\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
#!/usr/bin/python3
import requests, optparse


def get_link():
    parser = optparse.OptionParser()
    parser.add_option("-l", "--link", dest="url", help="direct link of file to download .pdf")
    (url, argument) = parser.parse_args()
    return url


def download(url):
    try:
        get_request = requests.get(url)
        name_url = url.split("/")[-1]
        print(name_url)
        with open(name_url, "wb") as file:
            file.write(get_request.content)
    except requests.exceptions.RequestException:
        print("[-] Please enter a valid link")


def start():
    url_link = get_link()
    # Fall back to an interactive prompt when no -l/--link flag was given
    if url_link.url:
        download(url_link.url)
    else:
        url_link = input("[+]Enter link: ")
        download(url_link)


start()
|
normal
|
{
"blob_id": "22ddae977afd2a1b0a729cf0d56783eaaca3b0a0",
"index": 9813,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef get_link():\n parser = optparse.OptionParser()\n parser.add_option('-l', '--link', dest='url', help=\n 'direct link of file to download .pdf')\n url, argument = parser.parse_args()\n return url\n\n\ndef download(url):\n try:\n get_request = requests.get(url)\n name_url = url.split('/')[-1]\n print(name_url)\n with open(name_url, 'wb') as file:\n file.write(get_request.content)\n except:\n print('[-]Print Valid Link')\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef get_link():\n parser = optparse.OptionParser()\n parser.add_option('-l', '--link', dest='url', help=\n 'direct link of file to download .pdf')\n url, argument = parser.parse_args()\n return url\n\n\ndef download(url):\n try:\n get_request = requests.get(url)\n name_url = url.split('/')[-1]\n print(name_url)\n with open(name_url, 'wb') as file:\n file.write(get_request.content)\n except:\n print('[-]Print Valid Link')\n\n\ndef start():\n url_link = get_link()\n try:\n download(url_link.url)\n except:\n url_link = input('[+]Enter link:')\n download(url_link)\n\n\n<mask token>\n",
"step-4": "<mask token>\n\n\ndef get_link():\n parser = optparse.OptionParser()\n parser.add_option('-l', '--link', dest='url', help=\n 'direct link of file to download .pdf')\n url, argument = parser.parse_args()\n return url\n\n\ndef download(url):\n try:\n get_request = requests.get(url)\n name_url = url.split('/')[-1]\n print(name_url)\n with open(name_url, 'wb') as file:\n file.write(get_request.content)\n except:\n print('[-]Print Valid Link')\n\n\ndef start():\n url_link = get_link()\n try:\n download(url_link.url)\n except:\n url_link = input('[+]Enter link:')\n download(url_link)\n\n\nstart()\n",
"step-5": "#!/usr/bin/python2\n\nimport requests ,optparse\n\n\ndef get_link():\n parser=optparse.OptionParser()\n parser.add_option(\"-l\",\"--link\",dest=\"url\",help=\"direct link of file to download .pdf\")\n (url,argument)=parser.parse_args()\n return url\n\ndef download(url):\n try:\n get_request=requests.get(url)\n name_url=url.split(\"/\")[-1]\n print(name_url)\n with open(name_url,\"wb\") as file:\n file.write(get_request.content)\n except:\n print(\"[-]Print Valid Link\")\n \n \n\n\ndef start():\n url_link=get_link()\n try:\t\n download(url_link.url)\n except:\n url_link=input(\"[+]Enter link:\")\n download(url_link)\n\nstart()\n\n\n\n",
"step-ids": [
0,
2,
3,
4,
6
]
}
|
[
0,
2,
3,
4,
6
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
for n in primos:
quadrado = n ** 2
if quadrado in intervalo:
is_magic.append(quadrado)
print(len(is_magic))
<|reserved_special_token_1|>
primos = [2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37]
intervalo = list(range(8, 27)) + list(range(49, 50))
is_magic = []
for n in primos:
quadrado = n ** 2
if quadrado in intervalo:
is_magic.append(quadrado)
print(len(is_magic))
<|reserved_special_token_1|>
primos = [2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37]
# números entre (8 - 26) e (44 - 44)
intervalo = list(range(8, 27)) + list(range(49, 50))
is_magic = []
for n in primos:
quadrado = n ** 2
if quadrado in intervalo:
is_magic.append(quadrado)
print(len(is_magic)) # 3
|
flexible
|
{
"blob_id": "b7f443521e165f327aae9ff5d7bbb7b8462abeb5",
"index": 2890,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nfor n in primos:\n quadrado = n ** 2\n if quadrado in intervalo:\n is_magic.append(quadrado)\nprint(len(is_magic))\n",
"step-3": "primos = [2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37]\nintervalo = list(range(8, 27)) + list(range(49, 50))\nis_magic = []\nfor n in primos:\n quadrado = n ** 2\n if quadrado in intervalo:\n is_magic.append(quadrado)\nprint(len(is_magic))\n",
"step-4": "primos = [2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37]\n# números entre (8 - 26) e (44 - 44)\nintervalo = list(range(8, 27)) + list(range(49, 50))\nis_magic = []\nfor n in primos:\n quadrado = n ** 2\n if quadrado in intervalo:\n is_magic.append(quadrado)\n\nprint(len(is_magic)) # 3",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
def gen_ft_parser():
ft_parser = argparse.ArgumentParser(description=
'Generate a Character-Feature Translation Table')
ft_parser.add_argument('alphabet_file', metavar='alphabet_file', type=
str, help=
'A file contianing all the characters that will appear in the translation table.'
)
ft_parser.add_argument('save_file', metavar='save_path', type=str, help
='The feature table filename.')
return ft_parser
def construct_alphabet(alpha_string):
symbols = set(alpha_string)
alphabet = ''.join(sorted(c for c in string.printable if c in symbols))
return numpy.array(list(alphabet))
def load_alphabet(alphabet_file):
with open(alphabet_file) as alphabet:
alphabet = alphabet.read(100000).replace('\n', ' ')
return construct_alphabet(alphabet)
def gen_row(c, key):
row = [False] * (len(key) + 1)
row[key[c.lower()]] = True
row[-1] = c.isupper()
return row
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def gen_ft_parser():
ft_parser = argparse.ArgumentParser(description=
'Generate a Character-Feature Translation Table')
ft_parser.add_argument('alphabet_file', metavar='alphabet_file', type=
str, help=
'A file contianing all the characters that will appear in the translation table.'
)
ft_parser.add_argument('save_file', metavar='save_path', type=str, help
='The feature table filename.')
return ft_parser
def construct_alphabet(alpha_string):
symbols = set(alpha_string)
alphabet = ''.join(sorted(c for c in string.printable if c in symbols))
return numpy.array(list(alphabet))
def load_alphabet(alphabet_file):
with open(alphabet_file) as alphabet:
alphabet = alphabet.read(100000).replace('\n', ' ')
return construct_alphabet(alphabet)
def gen_row(c, key):
row = [False] * (len(key) + 1)
row[key[c.lower()]] = True
row[-1] = c.isupper()
return row
def build_table(alphabet):
code = ''.join(sorted(set(''.join(alphabet).lower())))
key = {c: i for i, c in enumerate(code)}
table = numpy.zeros((len(alphabet), len(key) + 1))
for i, c in enumerate(alphabet):
table[i] = gen_row(c, key)
return table
def main(args):
table = build_table(load_alphabet(args.alphabet_file))
numpy.save(args.save_file, table)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def gen_ft_parser():
ft_parser = argparse.ArgumentParser(description=
'Generate a Character-Feature Translation Table')
ft_parser.add_argument('alphabet_file', metavar='alphabet_file', type=
str, help=
'A file contianing all the characters that will appear in the translation table.'
)
ft_parser.add_argument('save_file', metavar='save_path', type=str, help
='The feature table filename.')
return ft_parser
def construct_alphabet(alpha_string):
symbols = set(alpha_string)
alphabet = ''.join(sorted(c for c in string.printable if c in symbols))
return numpy.array(list(alphabet))
def load_alphabet(alphabet_file):
with open(alphabet_file) as alphabet:
alphabet = alphabet.read(100000).replace('\n', ' ')
return construct_alphabet(alphabet)
def gen_row(c, key):
row = [False] * (len(key) + 1)
row[key[c.lower()]] = True
row[-1] = c.isupper()
return row
def build_table(alphabet):
code = ''.join(sorted(set(''.join(alphabet).lower())))
key = {c: i for i, c in enumerate(code)}
table = numpy.zeros((len(alphabet), len(key) + 1))
for i, c in enumerate(alphabet):
table[i] = gen_row(c, key)
return table
def main(args):
table = build_table(load_alphabet(args.alphabet_file))
numpy.save(args.save_file, table)
if __name__ == '__main__':
main(gen_ft_parser().parse_args())
<|reserved_special_token_1|>
import argparse
import string
import numpy
def gen_ft_parser():
ft_parser = argparse.ArgumentParser(description=
'Generate a Character-Feature Translation Table')
ft_parser.add_argument('alphabet_file', metavar='alphabet_file', type=
str, help=
'A file contianing all the characters that will appear in the translation table.'
)
ft_parser.add_argument('save_file', metavar='save_path', type=str, help
='The feature table filename.')
return ft_parser
def construct_alphabet(alpha_string):
symbols = set(alpha_string)
alphabet = ''.join(sorted(c for c in string.printable if c in symbols))
return numpy.array(list(alphabet))
def load_alphabet(alphabet_file):
with open(alphabet_file) as alphabet:
alphabet = alphabet.read(100000).replace('\n', ' ')
return construct_alphabet(alphabet)
def gen_row(c, key):
row = [False] * (len(key) + 1)
row[key[c.lower()]] = True
row[-1] = c.isupper()
return row
def build_table(alphabet):
code = ''.join(sorted(set(''.join(alphabet).lower())))
key = {c: i for i, c in enumerate(code)}
table = numpy.zeros((len(alphabet), len(key) + 1))
for i, c in enumerate(alphabet):
table[i] = gen_row(c, key)
return table
def main(args):
table = build_table(load_alphabet(args.alphabet_file))
numpy.save(args.save_file, table)
if __name__ == '__main__':
main(gen_ft_parser().parse_args())
<|reserved_special_token_1|>
#!/usr/bin/python
import argparse
import string
import numpy
def gen_ft_parser():
ft_parser = argparse.ArgumentParser(
description='Generate a Character-Feature Translation Table')
ft_parser.add_argument('alphabet_file', metavar='alphabet_file',
type=str, help='A file contianing all the characters that will '
'appear in the translation table.')
ft_parser.add_argument('save_file', metavar='save_path',
type=str, help='The feature table filename.')
return ft_parser
def construct_alphabet(alpha_string):
symbols = set(alpha_string)
alphabet = ''.join(sorted(c for c in string.printable if c in symbols))
return numpy.array(list(alphabet))
def load_alphabet(alphabet_file):
with open(alphabet_file) as alphabet:
alphabet = alphabet.read(100000).replace('\n', ' ')
return construct_alphabet(alphabet)
def gen_row(c, key):
row = [False] * (len(key) + 1)
row[key[c.lower()]] = True
row[-1] = c.isupper()
return row
def build_table(alphabet):
code = ''.join(sorted(set(''.join(alphabet).lower())))
key = {c:i for i, c in enumerate(code)}
table = numpy.zeros((len(alphabet), len(key) + 1))
for i, c in enumerate(alphabet):
table[i] = gen_row(c, key)
return table
def main(args):
table = build_table(load_alphabet(args.alphabet_file))
numpy.save(args.save_file, table)
if __name__ == "__main__":
main(gen_ft_parser().parse_args())
|
flexible
|
{
"blob_id": "f4d4be174bed2704c0ad12eea2f0cd64eaaa0aaa",
"index": 1973,
"step-1": "<mask token>\n\n\ndef gen_ft_parser():\n ft_parser = argparse.ArgumentParser(description=\n 'Generate a Character-Feature Translation Table')\n ft_parser.add_argument('alphabet_file', metavar='alphabet_file', type=\n str, help=\n 'A file contianing all the characters that will appear in the translation table.'\n )\n ft_parser.add_argument('save_file', metavar='save_path', type=str, help\n ='The feature table filename.')\n return ft_parser\n\n\ndef construct_alphabet(alpha_string):\n symbols = set(alpha_string)\n alphabet = ''.join(sorted(c for c in string.printable if c in symbols))\n return numpy.array(list(alphabet))\n\n\ndef load_alphabet(alphabet_file):\n with open(alphabet_file) as alphabet:\n alphabet = alphabet.read(100000).replace('\\n', ' ')\n return construct_alphabet(alphabet)\n\n\ndef gen_row(c, key):\n row = [False] * (len(key) + 1)\n row[key[c.lower()]] = True\n row[-1] = c.isupper()\n return row\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef gen_ft_parser():\n ft_parser = argparse.ArgumentParser(description=\n 'Generate a Character-Feature Translation Table')\n ft_parser.add_argument('alphabet_file', metavar='alphabet_file', type=\n str, help=\n 'A file contianing all the characters that will appear in the translation table.'\n )\n ft_parser.add_argument('save_file', metavar='save_path', type=str, help\n ='The feature table filename.')\n return ft_parser\n\n\ndef construct_alphabet(alpha_string):\n symbols = set(alpha_string)\n alphabet = ''.join(sorted(c for c in string.printable if c in symbols))\n return numpy.array(list(alphabet))\n\n\ndef load_alphabet(alphabet_file):\n with open(alphabet_file) as alphabet:\n alphabet = alphabet.read(100000).replace('\\n', ' ')\n return construct_alphabet(alphabet)\n\n\ndef gen_row(c, key):\n row = [False] * (len(key) + 1)\n row[key[c.lower()]] = True\n row[-1] = c.isupper()\n return row\n\n\ndef build_table(alphabet):\n code = ''.join(sorted(set(''.join(alphabet).lower())))\n key = {c: i for i, c in enumerate(code)}\n table = numpy.zeros((len(alphabet), len(key) + 1))\n for i, c in enumerate(alphabet):\n table[i] = gen_row(c, key)\n return table\n\n\ndef main(args):\n table = build_table(load_alphabet(args.alphabet_file))\n numpy.save(args.save_file, table)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef gen_ft_parser():\n ft_parser = argparse.ArgumentParser(description=\n 'Generate a Character-Feature Translation Table')\n ft_parser.add_argument('alphabet_file', metavar='alphabet_file', type=\n str, help=\n 'A file contianing all the characters that will appear in the translation table.'\n )\n ft_parser.add_argument('save_file', metavar='save_path', type=str, help\n ='The feature table filename.')\n return ft_parser\n\n\ndef construct_alphabet(alpha_string):\n symbols = set(alpha_string)\n alphabet = ''.join(sorted(c for c in string.printable if c in symbols))\n return numpy.array(list(alphabet))\n\n\ndef load_alphabet(alphabet_file):\n with open(alphabet_file) as alphabet:\n alphabet = alphabet.read(100000).replace('\\n', ' ')\n return construct_alphabet(alphabet)\n\n\ndef gen_row(c, key):\n row = [False] * (len(key) + 1)\n row[key[c.lower()]] = True\n row[-1] = c.isupper()\n return row\n\n\ndef build_table(alphabet):\n code = ''.join(sorted(set(''.join(alphabet).lower())))\n key = {c: i for i, c in enumerate(code)}\n table = numpy.zeros((len(alphabet), len(key) + 1))\n for i, c in enumerate(alphabet):\n table[i] = gen_row(c, key)\n return table\n\n\ndef main(args):\n table = build_table(load_alphabet(args.alphabet_file))\n numpy.save(args.save_file, table)\n\n\nif __name__ == '__main__':\n main(gen_ft_parser().parse_args())\n",
"step-4": "import argparse\nimport string\nimport numpy\n\n\ndef gen_ft_parser():\n ft_parser = argparse.ArgumentParser(description=\n 'Generate a Character-Feature Translation Table')\n ft_parser.add_argument('alphabet_file', metavar='alphabet_file', type=\n str, help=\n 'A file contianing all the characters that will appear in the translation table.'\n )\n ft_parser.add_argument('save_file', metavar='save_path', type=str, help\n ='The feature table filename.')\n return ft_parser\n\n\ndef construct_alphabet(alpha_string):\n symbols = set(alpha_string)\n alphabet = ''.join(sorted(c for c in string.printable if c in symbols))\n return numpy.array(list(alphabet))\n\n\ndef load_alphabet(alphabet_file):\n with open(alphabet_file) as alphabet:\n alphabet = alphabet.read(100000).replace('\\n', ' ')\n return construct_alphabet(alphabet)\n\n\ndef gen_row(c, key):\n row = [False] * (len(key) + 1)\n row[key[c.lower()]] = True\n row[-1] = c.isupper()\n return row\n\n\ndef build_table(alphabet):\n code = ''.join(sorted(set(''.join(alphabet).lower())))\n key = {c: i for i, c in enumerate(code)}\n table = numpy.zeros((len(alphabet), len(key) + 1))\n for i, c in enumerate(alphabet):\n table[i] = gen_row(c, key)\n return table\n\n\ndef main(args):\n table = build_table(load_alphabet(args.alphabet_file))\n numpy.save(args.save_file, table)\n\n\nif __name__ == '__main__':\n main(gen_ft_parser().parse_args())\n",
"step-5": "#!/usr/bin/python\n\nimport argparse\nimport string\nimport numpy\n\n\ndef gen_ft_parser():\n ft_parser = argparse.ArgumentParser(\n description='Generate a Character-Feature Translation Table')\n ft_parser.add_argument('alphabet_file', metavar='alphabet_file', \n type=str, help='A file contianing all the characters that will '\n 'appear in the translation table.')\n ft_parser.add_argument('save_file', metavar='save_path',\n type=str, help='The feature table filename.')\n return ft_parser\n\ndef construct_alphabet(alpha_string):\n symbols = set(alpha_string)\n alphabet = ''.join(sorted(c for c in string.printable if c in symbols))\n return numpy.array(list(alphabet))\n\ndef load_alphabet(alphabet_file):\n with open(alphabet_file) as alphabet:\n alphabet = alphabet.read(100000).replace('\\n', ' ')\n return construct_alphabet(alphabet)\n\ndef gen_row(c, key):\n row = [False] * (len(key) + 1)\n row[key[c.lower()]] = True\n row[-1] = c.isupper()\n return row\n\ndef build_table(alphabet):\n code = ''.join(sorted(set(''.join(alphabet).lower())))\n key = {c:i for i, c in enumerate(code)}\n table = numpy.zeros((len(alphabet), len(key) + 1))\n for i, c in enumerate(alphabet):\n table[i] = gen_row(c, key)\n return table\n\ndef main(args):\n table = build_table(load_alphabet(args.alphabet_file))\n numpy.save(args.save_file, table)\n\nif __name__ == \"__main__\":\n main(gen_ft_parser().parse_args())\n\n",
"step-ids": [
4,
6,
7,
8,
9
]
}
|
[
4,
6,
7,
8,
9
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
install_requires.append('tensorflow-gpu==1.13.1' if has_cuda else
'tensorflow==1.13.1')
<|reserved_special_token_0|>
setup(name='easybert', version=version, url=
'https://github.com/robrua/easy-bert', author='Rob Rua', author_email=
'[email protected]', description=
'A Dead Simple BERT API (https://github.com/google-research/bert)',
keywords=['BERT', 'Natural Language Processing', 'NLP',
'Language Model', 'Language Models', 'Machine Learning', 'ML',
'TensorFlow', 'Embeddings', 'Word Embeddings', 'Sentence Embeddings'],
classifiers=['Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python :: 3'], license='MIT', packages=
find_packages(), entry_points={'console_scripts': [
'bert=easybert.__main__:_main']}, zip_safe=True, install_requires=
install_requires, include_package_data=True)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
install_requires = ['numpy', 'tensorflow-hub==0.4.0',
'bert-tensorflow==1.0.1', 'click']
has_cuda = any('CUDA' in name.split('_') for name in os.environ.keys())
install_requires.append('tensorflow-gpu==1.13.1' if has_cuda else
'tensorflow==1.13.1')
version_file = Path(__file__).parent.joinpath('easybert', 'VERSION.txt')
version = version_file.read_text(encoding='UTF-8').strip()
setup(name='easybert', version=version, url=
'https://github.com/robrua/easy-bert', author='Rob Rua', author_email=
'[email protected]', description=
'A Dead Simple BERT API (https://github.com/google-research/bert)',
keywords=['BERT', 'Natural Language Processing', 'NLP',
'Language Model', 'Language Models', 'Machine Learning', 'ML',
'TensorFlow', 'Embeddings', 'Word Embeddings', 'Sentence Embeddings'],
classifiers=['Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python :: 3'], license='MIT', packages=
find_packages(), entry_points={'console_scripts': [
'bert=easybert.__main__:_main']}, zip_safe=True, install_requires=
install_requires, include_package_data=True)
<|reserved_special_token_1|>
from pathlib import Path
import os
from setuptools import setup, find_packages
install_requires = ['numpy', 'tensorflow-hub==0.4.0',
'bert-tensorflow==1.0.1', 'click']
has_cuda = any('CUDA' in name.split('_') for name in os.environ.keys())
install_requires.append('tensorflow-gpu==1.13.1' if has_cuda else
'tensorflow==1.13.1')
version_file = Path(__file__).parent.joinpath('easybert', 'VERSION.txt')
version = version_file.read_text(encoding='UTF-8').strip()
setup(name='easybert', version=version, url=
'https://github.com/robrua/easy-bert', author='Rob Rua', author_email=
'[email protected]', description=
'A Dead Simple BERT API (https://github.com/google-research/bert)',
keywords=['BERT', 'Natural Language Processing', 'NLP',
'Language Model', 'Language Models', 'Machine Learning', 'ML',
'TensorFlow', 'Embeddings', 'Word Embeddings', 'Sentence Embeddings'],
classifiers=['Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python :: 3'], license='MIT', packages=
find_packages(), entry_points={'console_scripts': [
'bert=easybert.__main__:_main']}, zip_safe=True, install_requires=
install_requires, include_package_data=True)
<|reserved_special_token_1|>
#!/usr/bin/env python
from pathlib import Path
import os
from setuptools import setup, find_packages
install_requires = [
"numpy",
"tensorflow-hub==0.4.0",
"bert-tensorflow==1.0.1",
"click"
]
# Hacky check for whether CUDA is installed
has_cuda = any("CUDA" in name.split("_") for name in os.environ.keys())
install_requires.append("tensorflow-gpu==1.13.1" if has_cuda else "tensorflow==1.13.1")
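# Read the package version from easybert/VERSION.txt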
version_file = Path(__file__).parent.joinpath("easybert", "VERSION.txt")
version = version_file.read_text(encoding="UTF-8").strip()
setup(
name="easybert",
version=version,
url="https://github.com/robrua/easy-bert",
author="Rob Rua",
author_email="[email protected]",
description="A Dead Simple BERT API (https://github.com/google-research/bert)",
keywords=["BERT", "Natural Language Processing", "NLP", "Language Model", "Language Models", "Machine Learning", "ML", "TensorFlow", "Embeddings", "Word Embeddings", "Sentence Embeddings"],
classifiers=[
"Development Status :: 4 - Beta",
"Intended Audience :: Developers",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
"Programming Language :: Python :: 3"
],
license="MIT",
packages=find_packages(),
entry_points={"console_scripts": ["bert=easybert.__main__:_main"]},
zip_safe=True,
install_requires=install_requires,
include_package_data=True
)
|
flexible
|
{
"blob_id": "a1141e6aae6992a5037d53093378f0d346f2ca29",
"index": 7666,
"step-1": "<mask token>\n",
"step-2": "<mask token>\ninstall_requires.append('tensorflow-gpu==1.13.1' if has_cuda else\n 'tensorflow==1.13.1')\n<mask token>\nsetup(name='easybert', version=version, url=\n 'https://github.com/robrua/easy-bert', author='Rob Rua', author_email=\n '[email protected]', description=\n 'A Dead Simple BERT API (https://github.com/google-research/bert)',\n keywords=['BERT', 'Natural Language Processing', 'NLP',\n 'Language Model', 'Language Models', 'Machine Learning', 'ML',\n 'TensorFlow', 'Embeddings', 'Word Embeddings', 'Sentence Embeddings'],\n classifiers=['Development Status :: 4 - Beta',\n 'Intended Audience :: Developers',\n 'License :: OSI Approved :: MIT License',\n 'Operating System :: OS Independent',\n 'Programming Language :: Python :: 3'], license='MIT', packages=\n find_packages(), entry_points={'console_scripts': [\n 'bert=easybert.__main__:_main']}, zip_safe=True, install_requires=\n install_requires, include_package_data=True)\n",
"step-3": "<mask token>\ninstall_requires = ['numpy', 'tensorflow-hub==0.4.0',\n 'bert-tensorflow==1.0.1', 'click']\nhas_cuda = any('CUDA' in name.split('_') for name in os.environ.keys())\ninstall_requires.append('tensorflow-gpu==1.13.1' if has_cuda else\n 'tensorflow==1.13.1')\nversion_file = Path(__file__).parent.joinpath('easybert', 'VERSION.txt')\nversion = version_file.read_text(encoding='UTF-8').strip()\nsetup(name='easybert', version=version, url=\n 'https://github.com/robrua/easy-bert', author='Rob Rua', author_email=\n '[email protected]', description=\n 'A Dead Simple BERT API (https://github.com/google-research/bert)',\n keywords=['BERT', 'Natural Language Processing', 'NLP',\n 'Language Model', 'Language Models', 'Machine Learning', 'ML',\n 'TensorFlow', 'Embeddings', 'Word Embeddings', 'Sentence Embeddings'],\n classifiers=['Development Status :: 4 - Beta',\n 'Intended Audience :: Developers',\n 'License :: OSI Approved :: MIT License',\n 'Operating System :: OS Independent',\n 'Programming Language :: Python :: 3'], license='MIT', packages=\n find_packages(), entry_points={'console_scripts': [\n 'bert=easybert.__main__:_main']}, zip_safe=True, install_requires=\n install_requires, include_package_data=True)\n",
"step-4": "from pathlib import Path\nimport os\nfrom setuptools import setup, find_packages\ninstall_requires = ['numpy', 'tensorflow-hub==0.4.0',\n 'bert-tensorflow==1.0.1', 'click']\nhas_cuda = any('CUDA' in name.split('_') for name in os.environ.keys())\ninstall_requires.append('tensorflow-gpu==1.13.1' if has_cuda else\n 'tensorflow==1.13.1')\nversion_file = Path(__file__).parent.joinpath('easybert', 'VERSION.txt')\nversion = version_file.read_text(encoding='UTF-8').strip()\nsetup(name='easybert', version=version, url=\n 'https://github.com/robrua/easy-bert', author='Rob Rua', author_email=\n '[email protected]', description=\n 'A Dead Simple BERT API (https://github.com/google-research/bert)',\n keywords=['BERT', 'Natural Language Processing', 'NLP',\n 'Language Model', 'Language Models', 'Machine Learning', 'ML',\n 'TensorFlow', 'Embeddings', 'Word Embeddings', 'Sentence Embeddings'],\n classifiers=['Development Status :: 4 - Beta',\n 'Intended Audience :: Developers',\n 'License :: OSI Approved :: MIT License',\n 'Operating System :: OS Independent',\n 'Programming Language :: Python :: 3'], license='MIT', packages=\n find_packages(), entry_points={'console_scripts': [\n 'bert=easybert.__main__:_main']}, zip_safe=True, install_requires=\n install_requires, include_package_data=True)\n",
"step-5": "#!/usr/bin/env python\nfrom pathlib import Path\nimport os\n\nfrom setuptools import setup, find_packages\n\n\ninstall_requires = [\n \"numpy\",\n \"tensorflow-hub==0.4.0\",\n \"bert-tensorflow==1.0.1\",\n \"click\"\n]\n\n# Hacky check for whether CUDA is installed\nhas_cuda = any(\"CUDA\" in name.split(\"_\") for name in os.environ.keys())\ninstall_requires.append(\"tensorflow-gpu==1.13.1\" if has_cuda else \"tensorflow==1.13.1\")\n\nversion_file = Path(__file__).parent.joinpath(\"easybert\", \"VERSION.txt\")\nversion = version_file.read_text(encoding=\"UTF-8\").strip()\n\nsetup(\n name=\"easybert\",\n version=version,\n url=\"https://github.com/robrua/easy-bert\",\n author=\"Rob Rua\",\n author_email=\"[email protected]\",\n description=\"A Dead Simple BERT API (https://github.com/google-research/bert)\",\n keywords=[\"BERT\", \"Natural Language Processing\", \"NLP\", \"Language Model\", \"Language Models\", \"Machine Learning\", \"ML\", \"TensorFlow\", \"Embeddings\", \"Word Embeddings\", \"Sentence Embeddings\"],\n classifiers=[\n \"Development Status :: 4 - Beta\",\n \"Intended Audience :: Developers\",\n \"License :: OSI Approved :: MIT License\",\n \"Operating System :: OS Independent\",\n \"Programming Language :: Python :: 3\"\n ],\n license=\"MIT\",\n packages=find_packages(),\n entry_points={\"console_scripts\": [\"bert=easybert.__main__:_main\"]},\n zip_safe=True,\n install_requires=install_requires,\n include_package_data=True\n)\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
print(filelist)
for infile in filelist:
outfile = os.path.splitext(infile)[0] + '.jpg'
if infile != outfile:
try:
Image.open(infile).save(outfile)
except IOError:
print('cannot convert', infile)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
cwd = os.getcwd()
filelist = get_imlist(os.getcwd())
print(filelist)
for infile in filelist:
outfile = os.path.splitext(infile)[0] + '.jpg'
if infile != outfile:
try:
Image.open(infile).save(outfile)
except IOError:
print('cannot convert', infile)
<|reserved_special_token_1|>
from PIL import Image
from imtools import *
import os
cwd = os.getcwd()
filelist = get_imlist(os.getcwd())
print(filelist)
for infile in filelist:
outfile = os.path.splitext(infile)[0] + '.jpg'
if infile != outfile:
try:
Image.open(infile).save(outfile)
except IOError:
print('cannot convert', infile)
<|reserved_special_token_1|>
#! /usr/bin/env python3
from PIL import Image
from imtools import *
import os
cwd = os.getcwd()
filelist = get_imlist(os.getcwd())
print(filelist)
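# Convert each listed image to JPEG, skipping files that already end in .jpg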
for infile in filelist:
outfile = os.path.splitext(infile)[0] + ".jpg"
if infile != outfile:
try:
Image.open(infile).save(outfile)
except IOError:
print("cannot convert", infile)
|
flexible
|
{
"blob_id": "31416f1ba9f3c44a7aa740365e05b5db49e70444",
"index": 9106,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nprint(filelist)\nfor infile in filelist:\n outfile = os.path.splitext(infile)[0] + '.jpg'\n if infile != outfile:\n try:\n Image.open(infile).save(outfile)\n except IOError:\n print('cannot convert', infile)\n",
"step-3": "<mask token>\ncwd = os.getcwd()\nfilelist = get_imlist(os.getcwd())\nprint(filelist)\nfor infile in filelist:\n outfile = os.path.splitext(infile)[0] + '.jpg'\n if infile != outfile:\n try:\n Image.open(infile).save(outfile)\n except IOError:\n print('cannot convert', infile)\n",
"step-4": "from PIL import Image\nfrom imtools import *\nimport os\ncwd = os.getcwd()\nfilelist = get_imlist(os.getcwd())\nprint(filelist)\nfor infile in filelist:\n outfile = os.path.splitext(infile)[0] + '.jpg'\n if infile != outfile:\n try:\n Image.open(infile).save(outfile)\n except IOError:\n print('cannot convert', infile)\n",
"step-5": "#! /usr/bin/env python3\n\nfrom PIL import Image\nfrom imtools import *\nimport os\n\ncwd = os.getcwd()\n\nfilelist = get_imlist(os.getcwd())\n\nprint(filelist)\n\nfor infile in filelist:\n\toutfile = os.path.splitext(infile)[0] + \".jpg\"\n\tif infile != outfile:\n\t\ttry:\n\t\t\tImage.open(infile).save(outfile)\n\t\texcept IOError:\n\t\t\tprint(\"cannot convert\", infile)\n\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
class Solution:
def evalRPN(self, tokens: List[str]) -> int:
def operation(op1,op2,op):
if op == "+":
return op1 + op2
if op == "-":
return op1 - op2
if op == "*":
return op1 * op2
if op == "/":
return int(op1/op2)
stack = []
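        # Scan tokens left to right: push numbers; on an operator, pop two operands, apply it, and push the result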
for char in tokens:
if char in ["+", "-", "*", "/"]:
op2 = stack.pop()
op1 = stack.pop()
res = operation(op1,op2,char)
stack.append(int(res))
else:
stack.append(int(char))
return stack.pop()
|
normal
|
{
"blob_id": "6b597f1570c022d17e4476e2ab8817e724a166a7",
"index": 1096,
"step-1": "<mask token>\n",
"step-2": "class Solution:\n <mask token>\n",
"step-3": "class Solution:\n\n def evalRPN(self, tokens: List[str]) ->int:\n\n def operation(op1, op2, op):\n if op == '+':\n return op1 + op2\n if op == '-':\n return op1 - op2\n if op == '*':\n return op1 * op2\n if op == '/':\n return int(op1 / op2)\n stack = []\n for char in tokens:\n if char in ['+', '-', '*', '/']:\n op2 = stack.pop()\n op1 = stack.pop()\n res = operation(op1, op2, char)\n stack.append(int(res))\n else:\n stack.append(int(char))\n return stack.pop()\n",
"step-4": "class Solution:\r\n def evalRPN(self, tokens: List[str]) -> int:\r\n def operation(op1,op2,op):\r\n if op == \"+\":\r\n return op1 + op2\r\n if op == \"-\":\r\n return op1 - op2\r\n if op == \"*\":\r\n return op1 * op2\r\n if op == \"/\":\r\n return int(op1/op2)\r\n \r\n stack = []\r\n for char in tokens:\r\n if char in [\"+\", \"-\", \"*\", \"/\"]:\r\n op2 = stack.pop()\r\n op1 = stack.pop()\r\n res = operation(op1,op2,char)\r\n stack.append(int(res))\r\n else:\r\n stack.append(int(char))\r\n return stack.pop()",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
print(random.randint(1, 100))
<|reserved_special_token_1|>
<|reserved_special_token_0|>
import random
print(random.randint(1, 100))
<|reserved_special_token_1|>
"""
CP1404 - Practical
Code that produces a random number between 1 and 100 inclusive
Rhys Simpson
"""
# 1.
# smallest number 5; largest number 20
# 2.
# smallest number 3; largest number 9
# no it can only produce 3, 5, 7, 9
# 3.
# smallest number 2.5000000000000000; largest number 5.5000000000000000
import random
print(random.randint(1, 100))
|
flexible
|
{
"blob_id": "46696ee9576d74c087ae435bfd304c8346530ab2",
"index": 9804,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nprint(random.randint(1, 100))\n",
"step-3": "<mask token>\nimport random\nprint(random.randint(1, 100))\n",
"step-4": "\"\"\"\nCP1404 - Practical\nCode that produces a random number between 1 and 100 inclusive\n\nRhys Simpson\n\"\"\"\n# 1.\n# smallest number 5; largest number 20\n\n# 2.\n# smallest number 3; largest number 9\n# no it can only produce 3, 5, 7, 9\n\n# 3.\n# smallest number 2.5000000000000000; largest number 5.5000000000000000\n\nimport random\nprint(random.randint(1, 100))\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
class Config(object):
def __init__(self, name=None):
"""
Load config for colin.
:param name: str (name of the config file (without .json), default is "default"
"""
self.name = name or 'default'
config_path = os.path.join(get_config_directory(), self.name + JSON)
try:
with open(config_path, mode='r') as config_file:
self.config_dict = json.load(config_file)
except Exception as ex:
raise ColinConfigException("Config file '{}' cannot be loaded."
.format(config_path))
def get_checks(self, target_type, group=None, severity=None, tags=None):
"""
Get all checks for given type/group/severity/tags.
:param target_type: TargetType enum
:param group: str (if not group, get checks from all groups/directories)
:param severity: str (optional x required)
:param tags: list of str
:return: list of check instances
"""
check_files = self._get_check_files(group=group, severity=severity)
groups = {}
for group, check_files in iteritems(check_files):
checks = []
for severity, check_file in check_files:
check_classes = load_check_implementation(path=check_file,
severity=severity)
for check_class in check_classes:
if is_compatible(target_type, check_class, severity, tags):
checks.append(check_class)
groups[group] = checks
return groups
@staticmethod
def get_check_file(group, name):
"""
Get the check file from given group with given name.
:param group: str
:param name: str
:return: str (path)
"""
return os.path.join(get_checks_path(), group, name + '.py')
<|reserved_special_token_0|>
def _get_check_groups(self, group=None):
"""
Get check group to validate
:param group: str (if None, all from the config will be used)
:return: list of str (group names)
"""
groups = [g for g in self.config_dict]
if group:
if group in groups:
check_groups = [group]
else:
check_groups = []
else:
check_groups = groups
return check_groups
def _get_check_files(self, group=None, severity=None):
"""
Get file names with checks filtered by group and severity.
:param group: str (if None, all groups will be used)
:param severity: str (if None, all severities will be used)
:return: list of str (absolute paths)
"""
groups = {}
for g in self._get_check_groups(group):
check_files = []
for sev, files in iteritems(self.config_dict[g]):
if not severity or severity == sev:
check_files += Config.get_check_files(group=g, names=
files, severity=sev)
groups[g] = check_files
return groups
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Config(object):
def __init__(self, name=None):
"""
Load config for colin.
:param name: str (name of the config file (without .json), default is "default"
"""
self.name = name or 'default'
config_path = os.path.join(get_config_directory(), self.name + JSON)
try:
with open(config_path, mode='r') as config_file:
self.config_dict = json.load(config_file)
except Exception as ex:
raise ColinConfigException("Config file '{}' cannot be loaded."
.format(config_path))
def get_checks(self, target_type, group=None, severity=None, tags=None):
"""
Get all checks for given type/group/severity/tags.
:param target_type: TargetType enum
:param group: str (if not group, get checks from all groups/directories)
:param severity: str (optional x required)
:param tags: list of str
:return: list of check instances
"""
check_files = self._get_check_files(group=group, severity=severity)
groups = {}
for group, check_files in iteritems(check_files):
checks = []
for severity, check_file in check_files:
check_classes = load_check_implementation(path=check_file,
severity=severity)
for check_class in check_classes:
if is_compatible(target_type, check_class, severity, tags):
checks.append(check_class)
groups[group] = checks
return groups
@staticmethod
def get_check_file(group, name):
"""
Get the check file from given group with given name.
:param group: str
:param name: str
:return: str (path)
"""
return os.path.join(get_checks_path(), group, name + '.py')
@staticmethod
def get_check_files(group, names, severity):
"""
Get the check files from given group with given names.
:param severity: str
:param group: str
:param names: list of str
:return: list of str (paths)
"""
check_files = []
for f in names:
check_file = Config.get_check_file(group=group, name=f)
check_files.append((severity, check_file))
return check_files
def _get_check_groups(self, group=None):
"""
Get check group to validate
:param group: str (if None, all from the config will be used)
:return: list of str (group names)
"""
groups = [g for g in self.config_dict]
if group:
if group in groups:
check_groups = [group]
else:
check_groups = []
else:
check_groups = groups
return check_groups
def _get_check_files(self, group=None, severity=None):
"""
Get file names with checks filtered by group and severity.
:param group: str (if None, all groups will be used)
:param severity: str (if None, all severities will be used)
:return: list of str (absolute paths)
"""
groups = {}
for g in self._get_check_groups(group):
check_files = []
for sev, files in iteritems(self.config_dict[g]):
if not severity or severity == sev:
check_files += Config.get_check_files(group=g, names=
files, severity=sev)
groups[g] = check_files
return groups
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Config(object):
def __init__(self, name=None):
"""
Load config for colin.
:param name: str (name of the config file (without .json), default is "default"
"""
self.name = name or 'default'
config_path = os.path.join(get_config_directory(), self.name + JSON)
try:
with open(config_path, mode='r') as config_file:
self.config_dict = json.load(config_file)
except Exception as ex:
raise ColinConfigException("Config file '{}' cannot be loaded."
.format(config_path))
def get_checks(self, target_type, group=None, severity=None, tags=None):
"""
Get all checks for given type/group/severity/tags.
:param target_type: TargetType enum
:param group: str (if not group, get checks from all groups/directories)
:param severity: str (optional x required)
:param tags: list of str
:return: list of check instances
"""
check_files = self._get_check_files(group=group, severity=severity)
groups = {}
for group, check_files in iteritems(check_files):
checks = []
for severity, check_file in check_files:
check_classes = load_check_implementation(path=check_file,
severity=severity)
for check_class in check_classes:
if is_compatible(target_type, check_class, severity, tags):
checks.append(check_class)
groups[group] = checks
return groups
@staticmethod
def get_check_file(group, name):
"""
Get the check file from given group with given name.
:param group: str
:param name: str
:return: str (path)
"""
return os.path.join(get_checks_path(), group, name + '.py')
@staticmethod
def get_check_files(group, names, severity):
"""
Get the check files from given group with given names.
:param severity: str
:param group: str
:param names: list of str
:return: list of str (paths)
"""
check_files = []
for f in names:
check_file = Config.get_check_file(group=group, name=f)
check_files.append((severity, check_file))
return check_files
def _get_check_groups(self, group=None):
"""
Get check group to validate
:param group: str (if None, all from the config will be used)
:return: list of str (group names)
"""
groups = [g for g in self.config_dict]
if group:
if group in groups:
check_groups = [group]
else:
check_groups = []
else:
check_groups = groups
return check_groups
def _get_check_files(self, group=None, severity=None):
"""
Get file names with checks filtered by group and severity.
:param group: str (if None, all groups will be used)
:param severity: str (if None, all severities will be used)
:return: list of str (absolute paths)
"""
groups = {}
for g in self._get_check_groups(group):
check_files = []
for sev, files in iteritems(self.config_dict[g]):
if not severity or severity == sev:
check_files += Config.get_check_files(group=g, names=
files, severity=sev)
groups[g] = check_files
return groups
def get_checks_path():
"""
Get path to checks.
:return: str (absolute path of directory with checks)
"""
rel_path = os.path.join(os.pardir, os.pardir, os.pardir, 'checks')
return os.path.abspath(os.path.join(__file__, rel_path))
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Config(object):
def __init__(self, name=None):
"""
Load config for colin.
:param name: str (name of the config file (without .json), default is "default"
"""
self.name = name or 'default'
config_path = os.path.join(get_config_directory(), self.name + JSON)
try:
with open(config_path, mode='r') as config_file:
self.config_dict = json.load(config_file)
except Exception as ex:
raise ColinConfigException("Config file '{}' cannot be loaded."
.format(config_path))
def get_checks(self, target_type, group=None, severity=None, tags=None):
"""
Get all checks for given type/group/severity/tags.
:param target_type: TargetType enum
:param group: str (if not group, get checks from all groups/directories)
:param severity: str (optional x required)
:param tags: list of str
:return: list of check instances
"""
check_files = self._get_check_files(group=group, severity=severity)
groups = {}
for group, check_files in iteritems(check_files):
checks = []
for severity, check_file in check_files:
check_classes = load_check_implementation(path=check_file,
severity=severity)
for check_class in check_classes:
if is_compatible(target_type, check_class, severity, tags):
checks.append(check_class)
groups[group] = checks
return groups
@staticmethod
def get_check_file(group, name):
"""
Get the check file from given group with given name.
:param group: str
:param name: str
:return: str (path)
"""
return os.path.join(get_checks_path(), group, name + '.py')
@staticmethod
def get_check_files(group, names, severity):
"""
Get the check files from given group with given names.
:param severity: str
:param group: str
:param names: list of str
:return: list of str (paths)
"""
check_files = []
for f in names:
check_file = Config.get_check_file(group=group, name=f)
check_files.append((severity, check_file))
return check_files
def _get_check_groups(self, group=None):
"""
Get check group to validate
:param group: str (if None, all from the config will be used)
:return: list of str (group names)
"""
groups = [g for g in self.config_dict]
if group:
if group in groups:
check_groups = [group]
else:
check_groups = []
else:
check_groups = groups
return check_groups
def _get_check_files(self, group=None, severity=None):
"""
Get file names with checks filtered by group and severity.
:param group: str (if None, all groups will be used)
:param severity: str (if None, all severities will be used)
:return: list of str (absolute paths)
"""
groups = {}
for g in self._get_check_groups(group):
check_files = []
for sev, files in iteritems(self.config_dict[g]):
if not severity or severity == sev:
check_files += Config.get_check_files(group=g, names=
files, severity=sev)
groups[g] = check_files
return groups
def get_checks_path():
"""
Get path to checks.
:return: str (absolute path of directory with checks)
"""
rel_path = os.path.join(os.pardir, os.pardir, os.pardir, 'checks')
return os.path.abspath(os.path.join(__file__, rel_path))
def get_config_directory():
"""
Get the directory with config files
:return: str
"""
local_share = os.path.join(os.path.expanduser('~'), '.local',
CONFIG_DIRECTORY)
if os.path.isdir(local_share) and os.path.exists(local_share):
return local_share
usr_local_share = os.path.join('/usr/local', CONFIG_DIRECTORY)
if os.path.isdir(usr_local_share) and os.path.exists(usr_local_share):
return usr_local_share
raise ColinConfigException('Config directory cannot be found.')
<|reserved_special_token_1|>
import json
import os
from six import iteritems
from ..exceptions import ColinConfigException
from ..constant import CONFIG_DIRECTORY, JSON
from ..loader import load_check_implementation
from ..target import is_compatible
class Config(object):
def __init__(self, name=None):
"""
Load config for colin.
:param name: str (name of the config file (without .json), default is "default"
"""
self.name = name or "default"
config_path = os.path.join(get_config_directory(), self.name + JSON)
try:
with open(config_path, mode='r') as config_file:
self.config_dict = json.load(config_file)
except Exception as ex:
raise ColinConfigException("Config file '{}' cannot be loaded.".format(config_path))
def get_checks(self, target_type, group=None, severity=None, tags=None):
"""
Get all checks for given type/group/severity/tags.
:param target_type: TargetType enum
:param group: str (if not group, get checks from all groups/directories)
:param severity: str (optional x required)
:param tags: list of str
:return: list of check instances
"""
check_files = self._get_check_files(group=group,
severity=severity)
groups = {}
for (group, check_files) in iteritems(check_files):
checks = []
for severity, check_file in check_files:
check_classes = load_check_implementation(path=check_file, severity=severity)
for check_class in check_classes:
if is_compatible(target_type, check_class, severity, tags):
checks.append(check_class)
groups[group] = checks
return groups
@staticmethod
def get_check_file(group, name):
"""
Get the check file from given group with given name.
:param group: str
:param name: str
:return: str (path)
"""
return os.path.join(get_checks_path(), group, name + ".py")
@staticmethod
def get_check_files(group, names, severity):
"""
Get the check files from given group with given names.
:param severity: str
:param group: str
:param names: list of str
:return: list of str (paths)
"""
check_files = []
for f in names:
check_file = Config.get_check_file(group=group,
name=f)
check_files.append((severity, check_file))
return check_files
def _get_check_groups(self, group=None):
"""
Get check group to validate
:param group: str (if None, all from the config will be used)
:return: list of str (group names)
"""
groups = [g for g in self.config_dict]
if group:
if group in groups:
check_groups = [group]
else:
check_groups = []
else:
check_groups = groups
return check_groups
def _get_check_files(self, group=None, severity=None):
"""
Get file names with checks filtered by group and severity.
:param group: str (if None, all groups will be used)
:param severity: str (if None, all severities will be used)
:return: list of str (absolute paths)
"""
groups = {}
for g in self._get_check_groups(group):
check_files = []
for sev, files in iteritems(self.config_dict[g]):
if (not severity) or severity == sev:
check_files += Config.get_check_files(group=g,
names=files,
severity=sev)
groups[g] = check_files
return groups
def get_checks_path():
"""
Get path to checks.
:return: str (absolute path of directory with checks)
"""
rel_path = os.path.join(os.pardir, os.pardir, os.pardir, "checks")
return os.path.abspath(os.path.join(__file__, rel_path))
def get_config_directory():
"""
Get the directory with config files
:return: str
"""
local_share = os.path.join(os.path.expanduser("~"),
".local",
CONFIG_DIRECTORY)
if os.path.isdir(local_share) and os.path.exists(local_share):
return local_share
usr_local_share = os.path.join("/usr/local", CONFIG_DIRECTORY)
if os.path.isdir(usr_local_share) and os.path.exists(usr_local_share):
return usr_local_share
raise ColinConfigException("Config directory cannot be found.")
|
flexible
|
{
"blob_id": "7bb9455e6f0c15ab0be6963cff06ff41df73e6e0",
"index": 2583,
"step-1": "<mask token>\n\n\nclass Config(object):\n\n def __init__(self, name=None):\n \"\"\"\n Load config for colin.\n\n :param name: str (name of the config file (without .json), default is \"default\"\n \"\"\"\n self.name = name or 'default'\n config_path = os.path.join(get_config_directory(), self.name + JSON)\n try:\n with open(config_path, mode='r') as config_file:\n self.config_dict = json.load(config_file)\n except Exception as ex:\n raise ColinConfigException(\"Config file '{}' cannot be loaded.\"\n .format(config_path))\n\n def get_checks(self, target_type, group=None, severity=None, tags=None):\n \"\"\"\n Get all checks for given type/group/severity/tags.\n\n :param target_type: TargetType enum\n :param group: str (if not group, get checks from all groups/directories)\n :param severity: str (optional x required)\n :param tags: list of str\n :return: list of check instances\n \"\"\"\n check_files = self._get_check_files(group=group, severity=severity)\n groups = {}\n for group, check_files in iteritems(check_files):\n checks = []\n for severity, check_file in check_files:\n check_classes = load_check_implementation(path=check_file,\n severity=severity)\n for check_class in check_classes:\n if is_compatible(target_type, check_class, severity, tags):\n checks.append(check_class)\n groups[group] = checks\n return groups\n\n @staticmethod\n def get_check_file(group, name):\n \"\"\"\n Get the check file from given group with given name.\n\n :param group: str\n :param name: str\n :return: str (path)\n \"\"\"\n return os.path.join(get_checks_path(), group, name + '.py')\n <mask token>\n\n def _get_check_groups(self, group=None):\n \"\"\"\n Get check group to validate\n\n :param group: str (if None, all from the config will be used)\n :return: list of str (group names)\n \"\"\"\n groups = [g for g in self.config_dict]\n if group:\n if group in groups:\n check_groups = [group]\n else:\n check_groups = []\n else:\n check_groups = groups\n return check_groups\n\n def _get_check_files(self, group=None, severity=None):\n \"\"\"\n Get file names with checks filtered by group and severity.\n\n :param group: str (if None, all groups will be used)\n :param severity: str (if None, all severities will be used)\n :return: list of str (absolute paths)\n \"\"\"\n groups = {}\n for g in self._get_check_groups(group):\n check_files = []\n for sev, files in iteritems(self.config_dict[g]):\n if not severity or severity == sev:\n check_files += Config.get_check_files(group=g, names=\n files, severity=sev)\n groups[g] = check_files\n return groups\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass Config(object):\n\n def __init__(self, name=None):\n \"\"\"\n Load config for colin.\n\n :param name: str (name of the config file (without .json), default is \"default\"\n \"\"\"\n self.name = name or 'default'\n config_path = os.path.join(get_config_directory(), self.name + JSON)\n try:\n with open(config_path, mode='r') as config_file:\n self.config_dict = json.load(config_file)\n except Exception as ex:\n raise ColinConfigException(\"Config file '{}' cannot be loaded.\"\n .format(config_path))\n\n def get_checks(self, target_type, group=None, severity=None, tags=None):\n \"\"\"\n Get all checks for given type/group/severity/tags.\n\n :param target_type: TargetType enum\n :param group: str (if not group, get checks from all groups/directories)\n :param severity: str (optional x required)\n :param tags: list of str\n :return: list of check instances\n \"\"\"\n check_files = self._get_check_files(group=group, severity=severity)\n groups = {}\n for group, check_files in iteritems(check_files):\n checks = []\n for severity, check_file in check_files:\n check_classes = load_check_implementation(path=check_file,\n severity=severity)\n for check_class in check_classes:\n if is_compatible(target_type, check_class, severity, tags):\n checks.append(check_class)\n groups[group] = checks\n return groups\n\n @staticmethod\n def get_check_file(group, name):\n \"\"\"\n Get the check file from given group with given name.\n\n :param group: str\n :param name: str\n :return: str (path)\n \"\"\"\n return os.path.join(get_checks_path(), group, name + '.py')\n\n @staticmethod\n def get_check_files(group, names, severity):\n \"\"\"\n Get the check files from given group with given names.\n\n :param severity: str\n :param group: str\n :param names: list of str\n :return: list of str (paths)\n \"\"\"\n check_files = []\n for f in names:\n check_file = Config.get_check_file(group=group, name=f)\n check_files.append((severity, check_file))\n return check_files\n\n def _get_check_groups(self, group=None):\n \"\"\"\n Get check group to validate\n\n :param group: str (if None, all from the config will be used)\n :return: list of str (group names)\n \"\"\"\n groups = [g for g in self.config_dict]\n if group:\n if group in groups:\n check_groups = [group]\n else:\n check_groups = []\n else:\n check_groups = groups\n return check_groups\n\n def _get_check_files(self, group=None, severity=None):\n \"\"\"\n Get file names with checks filtered by group and severity.\n\n :param group: str (if None, all groups will be used)\n :param severity: str (if None, all severities will be used)\n :return: list of str (absolute paths)\n \"\"\"\n groups = {}\n for g in self._get_check_groups(group):\n check_files = []\n for sev, files in iteritems(self.config_dict[g]):\n if not severity or severity == sev:\n check_files += Config.get_check_files(group=g, names=\n files, severity=sev)\n groups[g] = check_files\n return groups\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass Config(object):\n\n def __init__(self, name=None):\n \"\"\"\n Load config for colin.\n\n :param name: str (name of the config file (without .json), default is \"default\"\n \"\"\"\n self.name = name or 'default'\n config_path = os.path.join(get_config_directory(), self.name + JSON)\n try:\n with open(config_path, mode='r') as config_file:\n self.config_dict = json.load(config_file)\n except Exception as ex:\n raise ColinConfigException(\"Config file '{}' cannot be loaded.\"\n .format(config_path))\n\n def get_checks(self, target_type, group=None, severity=None, tags=None):\n \"\"\"\n Get all checks for given type/group/severity/tags.\n\n :param target_type: TargetType enum\n :param group: str (if not group, get checks from all groups/directories)\n :param severity: str (optional x required)\n :param tags: list of str\n :return: list of check instances\n \"\"\"\n check_files = self._get_check_files(group=group, severity=severity)\n groups = {}\n for group, check_files in iteritems(check_files):\n checks = []\n for severity, check_file in check_files:\n check_classes = load_check_implementation(path=check_file,\n severity=severity)\n for check_class in check_classes:\n if is_compatible(target_type, check_class, severity, tags):\n checks.append(check_class)\n groups[group] = checks\n return groups\n\n @staticmethod\n def get_check_file(group, name):\n \"\"\"\n Get the check file from given group with given name.\n\n :param group: str\n :param name: str\n :return: str (path)\n \"\"\"\n return os.path.join(get_checks_path(), group, name + '.py')\n\n @staticmethod\n def get_check_files(group, names, severity):\n \"\"\"\n Get the check files from given group with given names.\n\n :param severity: str\n :param group: str\n :param names: list of str\n :return: list of str (paths)\n \"\"\"\n check_files = []\n for f in names:\n check_file = Config.get_check_file(group=group, name=f)\n check_files.append((severity, check_file))\n return check_files\n\n def _get_check_groups(self, group=None):\n \"\"\"\n Get check group to validate\n\n :param group: str (if None, all from the config will be used)\n :return: list of str (group names)\n \"\"\"\n groups = [g for g in self.config_dict]\n if group:\n if group in groups:\n check_groups = [group]\n else:\n check_groups = []\n else:\n check_groups = groups\n return check_groups\n\n def _get_check_files(self, group=None, severity=None):\n \"\"\"\n Get file names with checks filtered by group and severity.\n\n :param group: str (if None, all groups will be used)\n :param severity: str (if None, all severities will be used)\n :return: list of str (absolute paths)\n \"\"\"\n groups = {}\n for g in self._get_check_groups(group):\n check_files = []\n for sev, files in iteritems(self.config_dict[g]):\n if not severity or severity == sev:\n check_files += Config.get_check_files(group=g, names=\n files, severity=sev)\n groups[g] = check_files\n return groups\n\n\ndef get_checks_path():\n \"\"\"\n Get path to checks.\n\n :return: str (absolute path of directory with checks)\n \"\"\"\n rel_path = os.path.join(os.pardir, os.pardir, os.pardir, 'checks')\n return os.path.abspath(os.path.join(__file__, rel_path))\n\n\n<mask token>\n",
"step-4": "<mask token>\n\n\nclass Config(object):\n\n def __init__(self, name=None):\n \"\"\"\n Load config for colin.\n\n :param name: str (name of the config file (without .json), default is \"default\"\n \"\"\"\n self.name = name or 'default'\n config_path = os.path.join(get_config_directory(), self.name + JSON)\n try:\n with open(config_path, mode='r') as config_file:\n self.config_dict = json.load(config_file)\n except Exception as ex:\n raise ColinConfigException(\"Config file '{}' cannot be loaded.\"\n .format(config_path))\n\n def get_checks(self, target_type, group=None, severity=None, tags=None):\n \"\"\"\n Get all checks for given type/group/severity/tags.\n\n :param target_type: TargetType enum\n :param group: str (if not group, get checks from all groups/directories)\n :param severity: str (optional x required)\n :param tags: list of str\n :return: list of check instances\n \"\"\"\n check_files = self._get_check_files(group=group, severity=severity)\n groups = {}\n for group, check_files in iteritems(check_files):\n checks = []\n for severity, check_file in check_files:\n check_classes = load_check_implementation(path=check_file,\n severity=severity)\n for check_class in check_classes:\n if is_compatible(target_type, check_class, severity, tags):\n checks.append(check_class)\n groups[group] = checks\n return groups\n\n @staticmethod\n def get_check_file(group, name):\n \"\"\"\n Get the check file from given group with given name.\n\n :param group: str\n :param name: str\n :return: str (path)\n \"\"\"\n return os.path.join(get_checks_path(), group, name + '.py')\n\n @staticmethod\n def get_check_files(group, names, severity):\n \"\"\"\n Get the check files from given group with given names.\n\n :param severity: str\n :param group: str\n :param names: list of str\n :return: list of str (paths)\n \"\"\"\n check_files = []\n for f in names:\n check_file = Config.get_check_file(group=group, name=f)\n check_files.append((severity, check_file))\n return check_files\n\n def _get_check_groups(self, group=None):\n \"\"\"\n Get check group to validate\n\n :param group: str (if None, all from the config will be used)\n :return: list of str (group names)\n \"\"\"\n groups = [g for g in self.config_dict]\n if group:\n if group in groups:\n check_groups = [group]\n else:\n check_groups = []\n else:\n check_groups = groups\n return check_groups\n\n def _get_check_files(self, group=None, severity=None):\n \"\"\"\n Get file names with checks filtered by group and severity.\n\n :param group: str (if None, all groups will be used)\n :param severity: str (if None, all severities will be used)\n :return: list of str (absolute paths)\n \"\"\"\n groups = {}\n for g in self._get_check_groups(group):\n check_files = []\n for sev, files in iteritems(self.config_dict[g]):\n if not severity or severity == sev:\n check_files += Config.get_check_files(group=g, names=\n files, severity=sev)\n groups[g] = check_files\n return groups\n\n\ndef get_checks_path():\n \"\"\"\n Get path to checks.\n\n :return: str (absolute path of directory with checks)\n \"\"\"\n rel_path = os.path.join(os.pardir, os.pardir, os.pardir, 'checks')\n return os.path.abspath(os.path.join(__file__, rel_path))\n\n\ndef get_config_directory():\n \"\"\"\n Get the directory with config files\n\n :return: str\n \"\"\"\n local_share = os.path.join(os.path.expanduser('~'), '.local',\n CONFIG_DIRECTORY)\n if os.path.isdir(local_share) and os.path.exists(local_share):\n return local_share\n usr_local_share = os.path.join('/usr/local', 
CONFIG_DIRECTORY)\n if os.path.isdir(usr_local_share) and os.path.exists(usr_local_share):\n return usr_local_share\n raise ColinConfigException('Config directory cannot be found.')\n",
"step-5": "import json\nimport os\n\nfrom six import iteritems\n\nfrom ..exceptions import ColinConfigException\nfrom ..constant import CONFIG_DIRECTORY, JSON\nfrom ..loader import load_check_implementation\nfrom ..target import is_compatible\n\n\nclass Config(object):\n\n def __init__(self, name=None):\n \"\"\"\n Load config for colin.\n\n :param name: str (name of the config file (without .json), default is \"default\"\n \"\"\"\n self.name = name or \"default\"\n config_path = os.path.join(get_config_directory(), self.name + JSON)\n try:\n with open(config_path, mode='r') as config_file:\n self.config_dict = json.load(config_file)\n except Exception as ex:\n raise ColinConfigException(\"Config file '{}' cannot be loaded.\".format(config_path))\n\n def get_checks(self, target_type, group=None, severity=None, tags=None):\n \"\"\"\n Get all checks for given type/group/severity/tags.\n\n :param target_type: TargetType enum\n :param group: str (if not group, get checks from all groups/directories)\n :param severity: str (optional x required)\n :param tags: list of str\n :return: list of check instances\n \"\"\"\n check_files = self._get_check_files(group=group,\n severity=severity)\n groups = {}\n for (group, check_files) in iteritems(check_files):\n checks = []\n for severity, check_file in check_files:\n\n check_classes = load_check_implementation(path=check_file, severity=severity)\n for check_class in check_classes:\n if is_compatible(target_type, check_class, severity, tags):\n checks.append(check_class)\n\n groups[group] = checks\n return groups\n\n @staticmethod\n def get_check_file(group, name):\n \"\"\"\n Get the check file from given group with given name.\n\n :param group: str\n :param name: str\n :return: str (path)\n \"\"\"\n return os.path.join(get_checks_path(), group, name + \".py\")\n\n @staticmethod\n def get_check_files(group, names, severity):\n \"\"\"\n Get the check files from given group with given names.\n\n :param severity: str\n :param group: str\n :param names: list of str\n :return: list of str (paths)\n \"\"\"\n check_files = []\n for f in names:\n check_file = Config.get_check_file(group=group,\n name=f)\n check_files.append((severity, check_file))\n return check_files\n\n def _get_check_groups(self, group=None):\n \"\"\"\n Get check group to validate\n\n :param group: str (if None, all from the config will be used)\n :return: list of str (group names)\n \"\"\"\n groups = [g for g in self.config_dict]\n if group:\n if group in groups:\n check_groups = [group]\n else:\n check_groups = []\n else:\n check_groups = groups\n return check_groups\n\n def _get_check_files(self, group=None, severity=None):\n \"\"\"\n Get file names with checks filtered by group and severity.\n\n :param group: str (if None, all groups will be used)\n :param severity: str (if None, all severities will be used)\n :return: list of str (absolute paths)\n \"\"\"\n groups = {}\n for g in self._get_check_groups(group):\n check_files = []\n for sev, files in iteritems(self.config_dict[g]):\n if (not severity) or severity == sev:\n check_files += Config.get_check_files(group=g,\n names=files,\n severity=sev)\n groups[g] = check_files\n return groups\n\n\ndef get_checks_path():\n \"\"\"\n Get path to checks.\n\n :return: str (absolute path of directory with checks)\n \"\"\"\n rel_path = os.path.join(os.pardir, os.pardir, os.pardir, \"checks\")\n return os.path.abspath(os.path.join(__file__, rel_path))\n\n\ndef get_config_directory():\n \"\"\"\n Get the directory with config files\n\n :return: str\n 
\"\"\"\n local_share = os.path.join(os.path.expanduser(\"~\"),\n \".local\",\n CONFIG_DIRECTORY)\n if os.path.isdir(local_share) and os.path.exists(local_share):\n return local_share\n\n usr_local_share = os.path.join(\"/usr/local\", CONFIG_DIRECTORY)\n if os.path.isdir(usr_local_share) and os.path.exists(usr_local_share):\n return usr_local_share\n\n raise ColinConfigException(\"Config directory cannot be found.\")\n",
"step-ids": [
6,
7,
8,
9,
11
]
}
|
[
6,
7,
8,
9,
11
] |
import os
import pandas as pd
import numpy as np
from dataloader import *
from keras.optimizers import Adam, SGD
from mylib.models.misc import set_gpu_usage
set_gpu_usage()
from mylib.models import densesharp, metrics, losses
from keras.callbacks import ModelCheckpoint, CSVLogger, TensorBoard, EarlyStopping, ReduceLROnPlateau, \
LearningRateScheduler
os.environ['CUDA_VISIBLE_DEVICES'] = '/gpu:0'
def main(batch_size, crop_size, learning_rate, segmentation_task_ratio, weight_decay, save_folder, epochs,
alpha):
print(learning_rate)
print(alpha)
print(weight_decay)
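    # Training batches use mixup augmentation; validation comes from a separate subset without mixup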
train_dataset = ClfSegDataset(subset=[0, 1])
train_loader = get_mixup_loader(train_dataset, batch_size=batch_size, alpha=alpha)
val_dataset = ClfvalSegDataset(crop_size=crop_size, move=None, subset=[2])
val_loader = get_loader(val_dataset, batch_size=batch_size)
model = densesharp.get_compiled(output_size=1,
optimizer=Adam(lr=learning_rate),
loss={"clf": 'binary_crossentropy',
"seg": losses.DiceLoss()},
metrics={'clf': ['accuracy', metrics.precision, metrics.recall, metrics.fmeasure,
metrics.auc],
'seg': [metrics.precision, metrics.recall, metrics.fmeasure]},
loss_weights={"clf": 1., "seg": segmentation_task_ratio},
weight_decay=weight_decay, weights='tmp/test/weights42_222639.h5')
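    # Callbacks: periodic weight checkpoints, CSV/TensorBoard logging, best-accuracy keeper, early stopping, and LR reduction on plateau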
checkpointer = ModelCheckpoint(filepath='tmp/%s/weights.{epoch:02d}.h5' % save_folder, verbose=1,
period=1, save_weights_only=True)
csv_logger = CSVLogger('tmp/%s/training.csv' % save_folder)
tensorboard = TensorBoard(log_dir='tmp/%s/logs/' % save_folder)
best_keeper = ModelCheckpoint(filepath='tmp/%s/best.h5' % save_folder, verbose=1, save_weights_only=True,
monitor='val_clf_acc', save_best_only=True, period=1, mode='max')
early_stopping = EarlyStopping(monitor='val_clf_acc', min_delta=0, mode='max',
patience=20, verbose=1)
lr_reducer = ReduceLROnPlateau(monitor='val_loss', factor=0.334, patience=10,
verbose=1, mode='min', epsilon=1.e-5, cooldown=2, min_lr=0)
model.fit_generator(generator=train_loader, steps_per_epoch=50, max_queue_size=10, workers=1,
validation_data=val_loader, epochs=epochs, validation_steps=50,
callbacks=[checkpointer, csv_logger, best_keeper, early_stopping, lr_reducer, tensorboard])
if __name__ == '__main__':
main(batch_size=32,
crop_size=[32, 32, 32],
learning_rate=1.e-5,
segmentation_task_ratio=0.2,
weight_decay=0.0,
save_folder='test',
epochs=10,
alpha=1.0)
|
normal
|
{
"blob_id": "94b3fa700d7da0ca913adeb0ad5324d1fec0be50",
"index": 7104,
"step-1": "<mask token>\n\n\ndef main(batch_size, crop_size, learning_rate, segmentation_task_ratio,\n weight_decay, save_folder, epochs, alpha):\n print(learning_rate)\n print(alpha)\n print(weight_decay)\n train_dataset = ClfSegDataset(subset=[0, 1])\n train_loader = get_mixup_loader(train_dataset, batch_size=batch_size,\n alpha=alpha)\n val_dataset = ClfvalSegDataset(crop_size=crop_size, move=None, subset=[2])\n val_loader = get_loader(val_dataset, batch_size=batch_size)\n model = densesharp.get_compiled(output_size=1, optimizer=Adam(lr=\n learning_rate), loss={'clf': 'binary_crossentropy', 'seg': losses.\n DiceLoss()}, metrics={'clf': ['accuracy', metrics.precision,\n metrics.recall, metrics.fmeasure, metrics.auc], 'seg': [metrics.\n precision, metrics.recall, metrics.fmeasure]}, loss_weights={'clf':\n 1.0, 'seg': segmentation_task_ratio}, weight_decay=weight_decay,\n weights='tmp/test/weights42_222639.h5')\n checkpointer = ModelCheckpoint(filepath='tmp/%s/weights.{epoch:02d}.h5' %\n save_folder, verbose=1, period=1, save_weights_only=True)\n csv_logger = CSVLogger('tmp/%s/training.csv' % save_folder)\n tensorboard = TensorBoard(log_dir='tmp/%s/logs/' % save_folder)\n best_keeper = ModelCheckpoint(filepath='tmp/%s/best.h5' % save_folder,\n verbose=1, save_weights_only=True, monitor='val_clf_acc',\n save_best_only=True, period=1, mode='max')\n early_stopping = EarlyStopping(monitor='val_clf_acc', min_delta=0, mode\n ='max', patience=20, verbose=1)\n lr_reducer = ReduceLROnPlateau(monitor='val_loss', factor=0.334,\n patience=10, verbose=1, mode='min', epsilon=1e-05, cooldown=2, min_lr=0\n )\n model.fit_generator(generator=train_loader, steps_per_epoch=50,\n max_queue_size=10, workers=1, validation_data=val_loader, epochs=\n epochs, validation_steps=50, callbacks=[checkpointer, csv_logger,\n best_keeper, early_stopping, lr_reducer, tensorboard])\n\n\n<mask token>\n",
"step-2": "<mask token>\nset_gpu_usage()\n<mask token>\n\n\ndef main(batch_size, crop_size, learning_rate, segmentation_task_ratio,\n weight_decay, save_folder, epochs, alpha):\n print(learning_rate)\n print(alpha)\n print(weight_decay)\n train_dataset = ClfSegDataset(subset=[0, 1])\n train_loader = get_mixup_loader(train_dataset, batch_size=batch_size,\n alpha=alpha)\n val_dataset = ClfvalSegDataset(crop_size=crop_size, move=None, subset=[2])\n val_loader = get_loader(val_dataset, batch_size=batch_size)\n model = densesharp.get_compiled(output_size=1, optimizer=Adam(lr=\n learning_rate), loss={'clf': 'binary_crossentropy', 'seg': losses.\n DiceLoss()}, metrics={'clf': ['accuracy', metrics.precision,\n metrics.recall, metrics.fmeasure, metrics.auc], 'seg': [metrics.\n precision, metrics.recall, metrics.fmeasure]}, loss_weights={'clf':\n 1.0, 'seg': segmentation_task_ratio}, weight_decay=weight_decay,\n weights='tmp/test/weights42_222639.h5')\n checkpointer = ModelCheckpoint(filepath='tmp/%s/weights.{epoch:02d}.h5' %\n save_folder, verbose=1, period=1, save_weights_only=True)\n csv_logger = CSVLogger('tmp/%s/training.csv' % save_folder)\n tensorboard = TensorBoard(log_dir='tmp/%s/logs/' % save_folder)\n best_keeper = ModelCheckpoint(filepath='tmp/%s/best.h5' % save_folder,\n verbose=1, save_weights_only=True, monitor='val_clf_acc',\n save_best_only=True, period=1, mode='max')\n early_stopping = EarlyStopping(monitor='val_clf_acc', min_delta=0, mode\n ='max', patience=20, verbose=1)\n lr_reducer = ReduceLROnPlateau(monitor='val_loss', factor=0.334,\n patience=10, verbose=1, mode='min', epsilon=1e-05, cooldown=2, min_lr=0\n )\n model.fit_generator(generator=train_loader, steps_per_epoch=50,\n max_queue_size=10, workers=1, validation_data=val_loader, epochs=\n epochs, validation_steps=50, callbacks=[checkpointer, csv_logger,\n best_keeper, early_stopping, lr_reducer, tensorboard])\n\n\nif __name__ == '__main__':\n main(batch_size=32, crop_size=[32, 32, 32], learning_rate=1e-05,\n segmentation_task_ratio=0.2, weight_decay=0.0, save_folder='test',\n epochs=10, alpha=1.0)\n",
"step-3": "<mask token>\nset_gpu_usage()\n<mask token>\nos.environ['CUDA_VISIBLE_DEVICES'] = '/gpu:0'\n\n\ndef main(batch_size, crop_size, learning_rate, segmentation_task_ratio,\n weight_decay, save_folder, epochs, alpha):\n print(learning_rate)\n print(alpha)\n print(weight_decay)\n train_dataset = ClfSegDataset(subset=[0, 1])\n train_loader = get_mixup_loader(train_dataset, batch_size=batch_size,\n alpha=alpha)\n val_dataset = ClfvalSegDataset(crop_size=crop_size, move=None, subset=[2])\n val_loader = get_loader(val_dataset, batch_size=batch_size)\n model = densesharp.get_compiled(output_size=1, optimizer=Adam(lr=\n learning_rate), loss={'clf': 'binary_crossentropy', 'seg': losses.\n DiceLoss()}, metrics={'clf': ['accuracy', metrics.precision,\n metrics.recall, metrics.fmeasure, metrics.auc], 'seg': [metrics.\n precision, metrics.recall, metrics.fmeasure]}, loss_weights={'clf':\n 1.0, 'seg': segmentation_task_ratio}, weight_decay=weight_decay,\n weights='tmp/test/weights42_222639.h5')\n checkpointer = ModelCheckpoint(filepath='tmp/%s/weights.{epoch:02d}.h5' %\n save_folder, verbose=1, period=1, save_weights_only=True)\n csv_logger = CSVLogger('tmp/%s/training.csv' % save_folder)\n tensorboard = TensorBoard(log_dir='tmp/%s/logs/' % save_folder)\n best_keeper = ModelCheckpoint(filepath='tmp/%s/best.h5' % save_folder,\n verbose=1, save_weights_only=True, monitor='val_clf_acc',\n save_best_only=True, period=1, mode='max')\n early_stopping = EarlyStopping(monitor='val_clf_acc', min_delta=0, mode\n ='max', patience=20, verbose=1)\n lr_reducer = ReduceLROnPlateau(monitor='val_loss', factor=0.334,\n patience=10, verbose=1, mode='min', epsilon=1e-05, cooldown=2, min_lr=0\n )\n model.fit_generator(generator=train_loader, steps_per_epoch=50,\n max_queue_size=10, workers=1, validation_data=val_loader, epochs=\n epochs, validation_steps=50, callbacks=[checkpointer, csv_logger,\n best_keeper, early_stopping, lr_reducer, tensorboard])\n\n\nif __name__ == '__main__':\n main(batch_size=32, crop_size=[32, 32, 32], learning_rate=1e-05,\n segmentation_task_ratio=0.2, weight_decay=0.0, save_folder='test',\n epochs=10, alpha=1.0)\n",
"step-4": "import os\nimport pandas as pd\nimport numpy as np\nfrom dataloader import *\nfrom keras.optimizers import Adam, SGD\nfrom mylib.models.misc import set_gpu_usage\nset_gpu_usage()\nfrom mylib.models import densesharp, metrics, losses\nfrom keras.callbacks import ModelCheckpoint, CSVLogger, TensorBoard, EarlyStopping, ReduceLROnPlateau, LearningRateScheduler\nos.environ['CUDA_VISIBLE_DEVICES'] = '/gpu:0'\n\n\ndef main(batch_size, crop_size, learning_rate, segmentation_task_ratio,\n weight_decay, save_folder, epochs, alpha):\n print(learning_rate)\n print(alpha)\n print(weight_decay)\n train_dataset = ClfSegDataset(subset=[0, 1])\n train_loader = get_mixup_loader(train_dataset, batch_size=batch_size,\n alpha=alpha)\n val_dataset = ClfvalSegDataset(crop_size=crop_size, move=None, subset=[2])\n val_loader = get_loader(val_dataset, batch_size=batch_size)\n model = densesharp.get_compiled(output_size=1, optimizer=Adam(lr=\n learning_rate), loss={'clf': 'binary_crossentropy', 'seg': losses.\n DiceLoss()}, metrics={'clf': ['accuracy', metrics.precision,\n metrics.recall, metrics.fmeasure, metrics.auc], 'seg': [metrics.\n precision, metrics.recall, metrics.fmeasure]}, loss_weights={'clf':\n 1.0, 'seg': segmentation_task_ratio}, weight_decay=weight_decay,\n weights='tmp/test/weights42_222639.h5')\n checkpointer = ModelCheckpoint(filepath='tmp/%s/weights.{epoch:02d}.h5' %\n save_folder, verbose=1, period=1, save_weights_only=True)\n csv_logger = CSVLogger('tmp/%s/training.csv' % save_folder)\n tensorboard = TensorBoard(log_dir='tmp/%s/logs/' % save_folder)\n best_keeper = ModelCheckpoint(filepath='tmp/%s/best.h5' % save_folder,\n verbose=1, save_weights_only=True, monitor='val_clf_acc',\n save_best_only=True, period=1, mode='max')\n early_stopping = EarlyStopping(monitor='val_clf_acc', min_delta=0, mode\n ='max', patience=20, verbose=1)\n lr_reducer = ReduceLROnPlateau(monitor='val_loss', factor=0.334,\n patience=10, verbose=1, mode='min', epsilon=1e-05, cooldown=2, min_lr=0\n )\n model.fit_generator(generator=train_loader, steps_per_epoch=50,\n max_queue_size=10, workers=1, validation_data=val_loader, epochs=\n epochs, validation_steps=50, callbacks=[checkpointer, csv_logger,\n best_keeper, early_stopping, lr_reducer, tensorboard])\n\n\nif __name__ == '__main__':\n main(batch_size=32, crop_size=[32, 32, 32], learning_rate=1e-05,\n segmentation_task_ratio=0.2, weight_decay=0.0, save_folder='test',\n epochs=10, alpha=1.0)\n",
"step-5": "import os\nimport pandas as pd\nimport numpy as np\n\nfrom dataloader import *\nfrom keras.optimizers import Adam, SGD\nfrom mylib.models.misc import set_gpu_usage\n\nset_gpu_usage()\n\nfrom mylib.models import densesharp, metrics, losses\nfrom keras.callbacks import ModelCheckpoint, CSVLogger, TensorBoard, EarlyStopping, ReduceLROnPlateau, \\\n LearningRateScheduler\n\nos.environ['CUDA_VISIBLE_DEVICES'] = '/gpu:0'\n\n\ndef main(batch_size, crop_size, learning_rate, segmentation_task_ratio, weight_decay, save_folder, epochs,\n alpha):\n\n print(learning_rate)\n print(alpha)\n print(weight_decay)\n\n train_dataset = ClfSegDataset(subset=[0, 1])\n train_loader = get_mixup_loader(train_dataset, batch_size=batch_size, alpha=alpha)\n\n val_dataset = ClfvalSegDataset(crop_size=crop_size, move=None, subset=[2])\n val_loader = get_loader(val_dataset, batch_size=batch_size)\n\n model = densesharp.get_compiled(output_size=1,\n optimizer=Adam(lr=learning_rate),\n loss={\"clf\": 'binary_crossentropy',\n \"seg\": losses.DiceLoss()},\n metrics={'clf': ['accuracy', metrics.precision, metrics.recall, metrics.fmeasure,\n metrics.auc],\n 'seg': [metrics.precision, metrics.recall, metrics.fmeasure]},\n loss_weights={\"clf\": 1., \"seg\": segmentation_task_ratio},\n weight_decay=weight_decay, weights='tmp/test/weights42_222639.h5')\n\n checkpointer = ModelCheckpoint(filepath='tmp/%s/weights.{epoch:02d}.h5' % save_folder, verbose=1,\n period=1, save_weights_only=True)\n csv_logger = CSVLogger('tmp/%s/training.csv' % save_folder)\n tensorboard = TensorBoard(log_dir='tmp/%s/logs/' % save_folder)\n\n best_keeper = ModelCheckpoint(filepath='tmp/%s/best.h5' % save_folder, verbose=1, save_weights_only=True,\n monitor='val_clf_acc', save_best_only=True, period=1, mode='max')\n\n early_stopping = EarlyStopping(monitor='val_clf_acc', min_delta=0, mode='max',\n patience=20, verbose=1)\n\n lr_reducer = ReduceLROnPlateau(monitor='val_loss', factor=0.334, patience=10,\n verbose=1, mode='min', epsilon=1.e-5, cooldown=2, min_lr=0)\n\n model.fit_generator(generator=train_loader, steps_per_epoch=50, max_queue_size=10, workers=1,\n validation_data=val_loader, epochs=epochs, validation_steps=50,\n callbacks=[checkpointer, csv_logger, best_keeper, early_stopping, lr_reducer, tensorboard])\n\n\nif __name__ == '__main__':\n main(batch_size=32,\n crop_size=[32, 32, 32],\n learning_rate=1.e-5,\n segmentation_task_ratio=0.2,\n weight_decay=0.0,\n save_folder='test',\n epochs=10,\n alpha=1.0)",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
import unittest
"""
Find the largest 0 to 9 pandigital that can be formed by concatenating products
Take the number 6 and multiply it by each of 1273 and 9854:
6 × 1273 = 7638
6 × 9854 = 59124
By concatenating these products we get the 1 to 9 pandigital 763859124. We will call 763859124 the "concatenated product of 6 and (1273,9854)". Notice too, that the concatenation of the input numbers, 612739854, is also 1 to 9 pandigital.
The same can be done for 0 to 9 pandigital numbers.
What is the largest 0 to 9 pandigital 10-digit concatenated product of an integer with two or more other integers, such that the concatenation of the input numbers is also a 0 to 9 pandigital 10-digit number?
"""
class Test(unittest.TestCase):
def test(self):
pass
|
normal
|
{
"blob_id": "cb08b95e3b9c80fb74d4415b3798ddbb36cd76e7",
"index": 419,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass Test(unittest.TestCase):\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass Test(unittest.TestCase):\n\n def test(self):\n pass\n",
"step-4": "import unittest\n<mask token>\n\n\nclass Test(unittest.TestCase):\n\n def test(self):\n pass\n",
"step-5": "import unittest\n\n\"\"\"\nFind the largest 0 to 9 pandigital that can be formed by concatenating products\n\nTake the number 6 and multiply it by each of 1273 and 9854:\n6 × 1273 = 7638\n6 × 9854 = 59124\nBy concatenating these products we get the 1 to 9 pandigital 763859124. We will call 763859124 the \"concatenated product of 6 and (1273,9854)\". Notice too, that the concatenation of the input numbers, 612739854, is also 1 to 9 pandigital.\nThe same can be done for 0 to 9 pandigital numbers.\nWhat is the largest 0 to 9 pandigital 10-digit concatenated product of an integer with two or more other integers, such that the concatenation of the input numbers is also a 0 to 9 pandigital 10-digit number?\n\"\"\"\n\n\nclass Test(unittest.TestCase):\n def test(self):\n pass\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
class TestNurse(unittest.TestCase):
<|reserved_special_token_0|>
def setUp(self):
self.n1 = n.Nurse('Tess', 18, '5436890982', 3200, 25)
self.n2 = n.Nurse('Melissa', 40, '8920953924', 9000, 5)
<|reserved_special_token_0|>
def test_display(self):
self.assertEqual(self.n1.display(),
"""Nurse {} is {} years old.
The best number to reach out is {}.
The nurse's salary is {}.
The nurse has treated {} patients.
"""
.format('Tess', 18, '5436890982', 3200, 25))
def test_change_in_phone_num(self):
self.n1.change_in_phone_num('1234567890')
self.n2.change_in_phone_num('0987654321')
self.assertEqual(self.n1.phone_num, '1234567890')
self.assertEqual(self.n2.phone_num, '0987654321')
self.n1.change_in_phone_num('3254678313')
self.n2.change_in_phone_num('0928495820')
self.assertEqual(self.n1.phone_num, '3254678313')
self.assertEqual(self.n2.phone_num, '0928495820')
def test_change_in_salary(self):
self.n1.change_in_salary(9000)
self.n2.change_in_salary(10000)
self.assertEqual(self.n1.salary, 9000)
self.assertEqual(self.n1.change_in_salary(-50), 'Invalid salary.')
self.assertEqual(self.n2.salary, 10000)
self.n1.change_in_salary(20)
self.assertEqual(self.n1.salary, 20)
def test_bonus(self):
self.n1.bonus()
self.n2.bonus()
self.assertEqual(self.n1.salary, 3450)
self.assertEqual(self.n2.salary, 9050)
def tearDown(self):
self.n1 = None
self.n2 = None
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class TestNurse(unittest.TestCase):
<|reserved_special_token_0|>
def setUp(self):
self.n1 = n.Nurse('Tess', 18, '5436890982', 3200, 25)
self.n2 = n.Nurse('Melissa', 40, '8920953924', 9000, 5)
<|reserved_special_token_0|>
def test_display(self):
self.assertEqual(self.n1.display(),
"""Nurse {} is {} years old.
The best number to reach out is {}.
The nurse's salary is {}.
The nurse has treated {} patients.
"""
.format('Tess', 18, '5436890982', 3200, 25))
def test_change_in_phone_num(self):
self.n1.change_in_phone_num('1234567890')
self.n2.change_in_phone_num('0987654321')
self.assertEqual(self.n1.phone_num, '1234567890')
self.assertEqual(self.n2.phone_num, '0987654321')
self.n1.change_in_phone_num('3254678313')
self.n2.change_in_phone_num('0928495820')
self.assertEqual(self.n1.phone_num, '3254678313')
self.assertEqual(self.n2.phone_num, '0928495820')
def test_change_in_salary(self):
self.n1.change_in_salary(9000)
self.n2.change_in_salary(10000)
self.assertEqual(self.n1.salary, 9000)
self.assertEqual(self.n1.change_in_salary(-50), 'Invalid salary.')
self.assertEqual(self.n2.salary, 10000)
self.n1.change_in_salary(20)
self.assertEqual(self.n1.salary, 20)
def test_bonus(self):
self.n1.bonus()
self.n2.bonus()
self.assertEqual(self.n1.salary, 3450)
self.assertEqual(self.n2.salary, 9050)
def tearDown(self):
self.n1 = None
self.n2 = None
@classmethod
def tearDownClass(cls):
print('Finish test nurse')
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class TestNurse(unittest.TestCase):
<|reserved_special_token_0|>
def setUp(self):
self.n1 = n.Nurse('Tess', 18, '5436890982', 3200, 25)
self.n2 = n.Nurse('Melissa', 40, '8920953924', 9000, 5)
def test_init(self):
self.assertEqual(self.n1.name, 'Tess')
self.assertEqual(self.n1.age, 18)
self.assertEqual(self.n1.phone_num, '5436890982')
self.assertEqual(self.n1.salary, 3200)
self.assertEqual(self.n1.number_treated, 25)
def test_display(self):
self.assertEqual(self.n1.display(),
"""Nurse {} is {} years old.
The best number to reach out is {}.
The nurse's salary is {}.
The nurse has treated {} patients.
"""
.format('Tess', 18, '5436890982', 3200, 25))
def test_change_in_phone_num(self):
self.n1.change_in_phone_num('1234567890')
self.n2.change_in_phone_num('0987654321')
self.assertEqual(self.n1.phone_num, '1234567890')
self.assertEqual(self.n2.phone_num, '0987654321')
self.n1.change_in_phone_num('3254678313')
self.n2.change_in_phone_num('0928495820')
self.assertEqual(self.n1.phone_num, '3254678313')
self.assertEqual(self.n2.phone_num, '0928495820')
def test_change_in_salary(self):
self.n1.change_in_salary(9000)
self.n2.change_in_salary(10000)
self.assertEqual(self.n1.salary, 9000)
self.assertEqual(self.n1.change_in_salary(-50), 'Invalid salary.')
self.assertEqual(self.n2.salary, 10000)
self.n1.change_in_salary(20)
self.assertEqual(self.n1.salary, 20)
def test_bonus(self):
self.n1.bonus()
self.n2.bonus()
self.assertEqual(self.n1.salary, 3450)
self.assertEqual(self.n2.salary, 9050)
def tearDown(self):
self.n1 = None
self.n2 = None
@classmethod
def tearDownClass(cls):
print('Finish test nurse')
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class TestNurse(unittest.TestCase):
@classmethod
def setUpClass(cls):
print('Start testing nurse')
def setUp(self):
self.n1 = n.Nurse('Tess', 18, '5436890982', 3200, 25)
self.n2 = n.Nurse('Melissa', 40, '8920953924', 9000, 5)
def test_init(self):
self.assertEqual(self.n1.name, 'Tess')
self.assertEqual(self.n1.age, 18)
self.assertEqual(self.n1.phone_num, '5436890982')
self.assertEqual(self.n1.salary, 3200)
self.assertEqual(self.n1.number_treated, 25)
def test_display(self):
self.assertEqual(self.n1.display(),
"""Nurse {} is {} years old.
The best number to reach out is {}.
The nurse's salary is {}.
The nurse has treated {} patients.
"""
.format('Tess', 18, '5436890982', 3200, 25))
def test_change_in_phone_num(self):
self.n1.change_in_phone_num('1234567890')
self.n2.change_in_phone_num('0987654321')
self.assertEqual(self.n1.phone_num, '1234567890')
self.assertEqual(self.n2.phone_num, '0987654321')
self.n1.change_in_phone_num('3254678313')
self.n2.change_in_phone_num('0928495820')
self.assertEqual(self.n1.phone_num, '3254678313')
self.assertEqual(self.n2.phone_num, '0928495820')
def test_change_in_salary(self):
self.n1.change_in_salary(9000)
self.n2.change_in_salary(10000)
self.assertEqual(self.n1.salary, 9000)
self.assertEqual(self.n1.change_in_salary(-50), 'Invalid salary.')
self.assertEqual(self.n2.salary, 10000)
self.n1.change_in_salary(20)
self.assertEqual(self.n1.salary, 20)
def test_bonus(self):
self.n1.bonus()
self.n2.bonus()
self.assertEqual(self.n1.salary, 3450)
self.assertEqual(self.n2.salary, 9050)
def tearDown(self):
self.n1 = None
self.n2 = None
@classmethod
def tearDownClass(cls):
print('Finish test nurse')
unittest.main(argv=[''], verbosity=2, exit=False)
<|reserved_special_token_1|>
import unittest
import hospital.employee.nurse as n
class TestNurse(unittest.TestCase):
@classmethod
def setUpClass(cls):
print('Start testing nurse')
def setUp(self):
self.n1 = n.Nurse('Tess',18,"5436890982",3200,25)
self.n2 = n.Nurse('Melissa',40,"8920953924",9000,5)
def test_init(self):
self.assertEqual(self.n1.name,"Tess")
self.assertEqual(self.n1.age,18)
self.assertEqual(self.n1.phone_num,"5436890982")
self.assertEqual(self.n1.salary,3200)
self.assertEqual(self.n1.number_treated,25)
def test_display(self):
self.assertEqual(self.n1.display(),"Nurse {} is {} years old. \nThe best number to reach out is {}. \nThe nurse's salary is {}. \nThe nurse has treated {} patients.\n".format('Tess',18,"5436890982",3200,25))
def test_change_in_phone_num(self):
self.n1.change_in_phone_num("1234567890")
self.n2.change_in_phone_num("0987654321")
self.assertEqual(self.n1.phone_num,"1234567890")
self.assertEqual(self.n2.phone_num,"0987654321")
self.n1.change_in_phone_num("3254678313")
self.n2.change_in_phone_num("0928495820")
self.assertEqual(self.n1.phone_num,"3254678313")
self.assertEqual(self.n2.phone_num,"0928495820")
def test_change_in_salary(self):
self.n1.change_in_salary(9000)
self.n2.change_in_salary(10000)
self.assertEqual(self.n1.salary,9000)
self.assertEqual(self.n1.change_in_salary(-50),"Invalid salary.")
self.assertEqual(self.n2.salary,10000)
self.n1.change_in_salary(20)
self.assertEqual(self.n1.salary,20)
def test_bonus(self):
self.n1.bonus()
self.n2.bonus()
self.assertEqual(self.n1.salary,3450)
self.assertEqual(self.n2.salary,9050)
def tearDown(self):
self.n1 = None
self.n2 = None
@classmethod
def tearDownClass(cls):
print("Finish test nurse")
unittest.main(argv=[''], verbosity=2, exit=False)
|
flexible
|
{
"blob_id": "f24075ea70851ce95bb6b3cd87b6417f8141d546",
"index": 9112,
"step-1": "<mask token>\n\n\nclass TestNurse(unittest.TestCase):\n <mask token>\n\n def setUp(self):\n self.n1 = n.Nurse('Tess', 18, '5436890982', 3200, 25)\n self.n2 = n.Nurse('Melissa', 40, '8920953924', 9000, 5)\n <mask token>\n\n def test_display(self):\n self.assertEqual(self.n1.display(),\n \"\"\"Nurse {} is {} years old. \nThe best number to reach out is {}. \nThe nurse's salary is {}. \nThe nurse has treated {} patients.\n\"\"\"\n .format('Tess', 18, '5436890982', 3200, 25))\n\n def test_change_in_phone_num(self):\n self.n1.change_in_phone_num('1234567890')\n self.n2.change_in_phone_num('0987654321')\n self.assertEqual(self.n1.phone_num, '1234567890')\n self.assertEqual(self.n2.phone_num, '0987654321')\n self.n1.change_in_phone_num('3254678313')\n self.n2.change_in_phone_num('0928495820')\n self.assertEqual(self.n1.phone_num, '3254678313')\n self.assertEqual(self.n2.phone_num, '0928495820')\n\n def test_change_in_salary(self):\n self.n1.change_in_salary(9000)\n self.n2.change_in_salary(10000)\n self.assertEqual(self.n1.salary, 9000)\n self.assertEqual(self.n1.change_in_salary(-50), 'Invalid salary.')\n self.assertEqual(self.n2.salary, 10000)\n self.n1.change_in_salary(20)\n self.assertEqual(self.n1.salary, 20)\n\n def test_bonus(self):\n self.n1.bonus()\n self.n2.bonus()\n self.assertEqual(self.n1.salary, 3450)\n self.assertEqual(self.n2.salary, 9050)\n\n def tearDown(self):\n self.n1 = None\n self.n2 = None\n <mask token>\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass TestNurse(unittest.TestCase):\n <mask token>\n\n def setUp(self):\n self.n1 = n.Nurse('Tess', 18, '5436890982', 3200, 25)\n self.n2 = n.Nurse('Melissa', 40, '8920953924', 9000, 5)\n <mask token>\n\n def test_display(self):\n self.assertEqual(self.n1.display(),\n \"\"\"Nurse {} is {} years old. \nThe best number to reach out is {}. \nThe nurse's salary is {}. \nThe nurse has treated {} patients.\n\"\"\"\n .format('Tess', 18, '5436890982', 3200, 25))\n\n def test_change_in_phone_num(self):\n self.n1.change_in_phone_num('1234567890')\n self.n2.change_in_phone_num('0987654321')\n self.assertEqual(self.n1.phone_num, '1234567890')\n self.assertEqual(self.n2.phone_num, '0987654321')\n self.n1.change_in_phone_num('3254678313')\n self.n2.change_in_phone_num('0928495820')\n self.assertEqual(self.n1.phone_num, '3254678313')\n self.assertEqual(self.n2.phone_num, '0928495820')\n\n def test_change_in_salary(self):\n self.n1.change_in_salary(9000)\n self.n2.change_in_salary(10000)\n self.assertEqual(self.n1.salary, 9000)\n self.assertEqual(self.n1.change_in_salary(-50), 'Invalid salary.')\n self.assertEqual(self.n2.salary, 10000)\n self.n1.change_in_salary(20)\n self.assertEqual(self.n1.salary, 20)\n\n def test_bonus(self):\n self.n1.bonus()\n self.n2.bonus()\n self.assertEqual(self.n1.salary, 3450)\n self.assertEqual(self.n2.salary, 9050)\n\n def tearDown(self):\n self.n1 = None\n self.n2 = None\n\n @classmethod\n def tearDownClass(cls):\n print('Finish test nurse')\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass TestNurse(unittest.TestCase):\n <mask token>\n\n def setUp(self):\n self.n1 = n.Nurse('Tess', 18, '5436890982', 3200, 25)\n self.n2 = n.Nurse('Melissa', 40, '8920953924', 9000, 5)\n\n def test_init(self):\n self.assertEqual(self.n1.name, 'Tess')\n self.assertEqual(self.n1.age, 18)\n self.assertEqual(self.n1.phone_num, '5436890982')\n self.assertEqual(self.n1.salary, 3200)\n self.assertEqual(self.n1.number_treated, 25)\n\n def test_display(self):\n self.assertEqual(self.n1.display(),\n \"\"\"Nurse {} is {} years old. \nThe best number to reach out is {}. \nThe nurse's salary is {}. \nThe nurse has treated {} patients.\n\"\"\"\n .format('Tess', 18, '5436890982', 3200, 25))\n\n def test_change_in_phone_num(self):\n self.n1.change_in_phone_num('1234567890')\n self.n2.change_in_phone_num('0987654321')\n self.assertEqual(self.n1.phone_num, '1234567890')\n self.assertEqual(self.n2.phone_num, '0987654321')\n self.n1.change_in_phone_num('3254678313')\n self.n2.change_in_phone_num('0928495820')\n self.assertEqual(self.n1.phone_num, '3254678313')\n self.assertEqual(self.n2.phone_num, '0928495820')\n\n def test_change_in_salary(self):\n self.n1.change_in_salary(9000)\n self.n2.change_in_salary(10000)\n self.assertEqual(self.n1.salary, 9000)\n self.assertEqual(self.n1.change_in_salary(-50), 'Invalid salary.')\n self.assertEqual(self.n2.salary, 10000)\n self.n1.change_in_salary(20)\n self.assertEqual(self.n1.salary, 20)\n\n def test_bonus(self):\n self.n1.bonus()\n self.n2.bonus()\n self.assertEqual(self.n1.salary, 3450)\n self.assertEqual(self.n2.salary, 9050)\n\n def tearDown(self):\n self.n1 = None\n self.n2 = None\n\n @classmethod\n def tearDownClass(cls):\n print('Finish test nurse')\n\n\n<mask token>\n",
"step-4": "<mask token>\n\n\nclass TestNurse(unittest.TestCase):\n\n @classmethod\n def setUpClass(cls):\n print('Start testing nurse')\n\n def setUp(self):\n self.n1 = n.Nurse('Tess', 18, '5436890982', 3200, 25)\n self.n2 = n.Nurse('Melissa', 40, '8920953924', 9000, 5)\n\n def test_init(self):\n self.assertEqual(self.n1.name, 'Tess')\n self.assertEqual(self.n1.age, 18)\n self.assertEqual(self.n1.phone_num, '5436890982')\n self.assertEqual(self.n1.salary, 3200)\n self.assertEqual(self.n1.number_treated, 25)\n\n def test_display(self):\n self.assertEqual(self.n1.display(),\n \"\"\"Nurse {} is {} years old. \nThe best number to reach out is {}. \nThe nurse's salary is {}. \nThe nurse has treated {} patients.\n\"\"\"\n .format('Tess', 18, '5436890982', 3200, 25))\n\n def test_change_in_phone_num(self):\n self.n1.change_in_phone_num('1234567890')\n self.n2.change_in_phone_num('0987654321')\n self.assertEqual(self.n1.phone_num, '1234567890')\n self.assertEqual(self.n2.phone_num, '0987654321')\n self.n1.change_in_phone_num('3254678313')\n self.n2.change_in_phone_num('0928495820')\n self.assertEqual(self.n1.phone_num, '3254678313')\n self.assertEqual(self.n2.phone_num, '0928495820')\n\n def test_change_in_salary(self):\n self.n1.change_in_salary(9000)\n self.n2.change_in_salary(10000)\n self.assertEqual(self.n1.salary, 9000)\n self.assertEqual(self.n1.change_in_salary(-50), 'Invalid salary.')\n self.assertEqual(self.n2.salary, 10000)\n self.n1.change_in_salary(20)\n self.assertEqual(self.n1.salary, 20)\n\n def test_bonus(self):\n self.n1.bonus()\n self.n2.bonus()\n self.assertEqual(self.n1.salary, 3450)\n self.assertEqual(self.n2.salary, 9050)\n\n def tearDown(self):\n self.n1 = None\n self.n2 = None\n\n @classmethod\n def tearDownClass(cls):\n print('Finish test nurse')\n\n\nunittest.main(argv=[''], verbosity=2, exit=False)\n",
"step-5": "import unittest\nimport hospital.employee.nurse as n\n\nclass TestNurse(unittest.TestCase):\n @classmethod\n def setUpClass(cls):\n print('Start testing nurse')\n \n def setUp(self):\n self.n1 = n.Nurse('Tess',18,\"5436890982\",3200,25)\n self.n2 = n.Nurse('Melissa',40,\"8920953924\",9000,5)\n\n def test_init(self):\n self.assertEqual(self.n1.name,\"Tess\")\n self.assertEqual(self.n1.age,18)\n self.assertEqual(self.n1.phone_num,\"5436890982\")\n self.assertEqual(self.n1.salary,3200)\n self.assertEqual(self.n1.number_treated,25)\n\n def test_display(self):\n self.assertEqual(self.n1.display(),\"Nurse {} is {} years old. \\nThe best number to reach out is {}. \\nThe nurse's salary is {}. \\nThe nurse has treated {} patients.\\n\".format('Tess',18,\"5436890982\",3200,25))\n\n def test_change_in_phone_num(self):\n self.n1.change_in_phone_num(\"1234567890\")\n self.n2.change_in_phone_num(\"0987654321\")\n self.assertEqual(self.n1.phone_num,\"1234567890\")\n self.assertEqual(self.n2.phone_num,\"0987654321\")\n self.n1.change_in_phone_num(\"3254678313\")\n self.n2.change_in_phone_num(\"0928495820\")\n self.assertEqual(self.n1.phone_num,\"3254678313\")\n self.assertEqual(self.n2.phone_num,\"0928495820\")\n\n def test_change_in_salary(self):\n self.n1.change_in_salary(9000)\n self.n2.change_in_salary(10000)\n self.assertEqual(self.n1.salary,9000)\n self.assertEqual(self.n1.change_in_salary(-50),\"Invalid salary.\")\n self.assertEqual(self.n2.salary,10000)\n self.n1.change_in_salary(20)\n self.assertEqual(self.n1.salary,20)\n\n def test_bonus(self):\n self.n1.bonus()\n self.n2.bonus()\n self.assertEqual(self.n1.salary,3450)\n self.assertEqual(self.n2.salary,9050)\n \n\n def tearDown(self):\n self.n1 = None\n self.n2 = None\n \n @classmethod\n def tearDownClass(cls):\n print(\"Finish test nurse\")\n\nunittest.main(argv=[''], verbosity=2, exit=False)\n",
"step-ids": [
7,
8,
9,
11,
13
]
}
|
[
7,
8,
9,
11,
13
] |
import sys
import os
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from uraeus.nmbd.python import simulation
from uraeus.nmbd.python.engine.numerics.math_funcs import A, B
database_directory = os.path.abspath('../../')
sys.path.append(database_directory)
from uraeus_fsae.simenv.assemblies import asurt_FS17_v1 as num_assm
from controllers import speed_controller, stanley_controller
num_model = num_assm.num_model
dt = num_assm.dt
TR = 254
def generate_circular_path(radius, offset):
theta = np.deg2rad(np.linspace(0, 360, 360))
x_data = radius * np.sin(theta) + offset[0]
y_data = radius * np.cos(theta) + offset[1]
radii = radius * np.ones((360,))
return x_data, y_data, radii
x_data, y_data, radii = generate_circular_path(10.5, (0, -10.5))
path_data = np.zeros((360, 3))
path_data[:, 0] = -1e3 * x_data
path_data[:, 1] = 1e3 * y_data
path_data[:, 2] = 1e3 * radii
plt.figure(figsize=(10, 5))
plt.plot(path_data[:, 0], path_data[:, 1])
plt.grid()
plt.show()
logitudinal_controller = speed_controller(35, dt)
lateral_controller = stanley_controller(path_data, 25)
def terrain_state(x, y):
local_normal = np.array([[0],[0],[1]], dtype=np.float64)
hieght = 0
return [local_normal, hieght]
def torque_function(t):
P_ch = num_model.Subsystems.CH.P_rbs_chassis
Rd = num_model.Subsystems.CH.Rd_rbs_chassis
factor = logitudinal_controller.get_torque_factor(P_ch, Rd)
return factor
def RR_Torque(t):
factor = torque_function(t)
torque = -factor*(70*9.81)*1e6*TR
return torque
def RL_Torque(t):
factor = torque_function(t)
torque = -factor*(70*9.81)*1e6*TR
return torque
def steering_function(t):
R_ch = num_model.Subsystems.CH.R_rbs_chassis
P_ch = num_model.Subsystems.CH.P_rbs_chassis
Rd_ch = num_model.Subsystems.CH.Rd_rbs_chassis
Pd_ch = num_model.Subsystems.CH.Pd_rbs_chassis
rbar_ax1 = np.array([[-800], [0], [0]], dtype=np.float64)
r_ax1 = R_ch + A(P_ch)@rbar_ax1
vel = (A(P_ch).T @ (Rd_ch + B(P_ch, rbar_ax1)@Pd_ch))[0,0]
delta = lateral_controller.get_steer_factor(r_ax1, P_ch, Pd_ch, vel)
travel = delta * 18
#print('Travel = %s'%travel)
return travel
def zero_func(t):
return np.zeros((3,1), dtype=np.float64)
num_assm.terrain_data.get_state = terrain_state
num_assm.ST1_config.UF_mcs_rack_act = steering_function
num_assm.AX1_config.UF_far_drive = RR_Torque
num_assm.AX1_config.UF_fal_drive = RL_Torque
#num_assm.DR2_config.UF_far_drive = RR_Torque
#num_assm.DR2_config.UF_fal_drive = RL_Torque
num_assm.CH_config.UF_fas_aero_drag_F = zero_func
num_assm.CH_config.UF_fas_aero_drag_T = zero_func
# =============================================================================
# Setting and Starting Simulation
# =============================================================================
sim = simulation('sim', num_model, 'dds')
sim.set_time_array(15, dt)
# Getting Equilibrium results as initial conditions to this simulation
# ====================================================================
sim.set_initial_states('results/equilibrium_v4.npz')
sim.solve()
sim.save_as_csv('results', 'constant_radius_v8', 'pos')
sim.save_as_npz('results', 'constant_radius_v8')
#=============================================================================
# Plotting Simulation Results
# =============================================================================
import matplotlib.pyplot as plt
sim.soln.pos_dataframe.plot(x='CH.rbs_chassis.x', y='CH.rbs_chassis.y', grid=True)
sim.soln.vel_dataframe.plot(x='time', y='CH.rbs_chassis.x', grid=True)
sim.soln.pos_dataframe.plot(x='time', y='CH.rbs_chassis.z', grid=True)
sim.soln.vel_dataframe.plot(x='time', y='CH.rbs_chassis.z', grid=True)
sim.soln.acc_dataframe.plot(x='time', y='CH.rbs_chassis.z', grid=True)
sim.soln.pos_dataframe.plot(x='time', y='CH.rbs_chassis.e0', grid=True)
sim.soln.pos_dataframe.plot(x='time', y='CH.rbs_chassis.e1', grid=True)
sim.soln.pos_dataframe.plot(x='time', y='CH.rbs_chassis.e2', grid=True)
sim.soln.pos_dataframe.plot(x='time', y='CH.rbs_chassis.e3', grid=True)
plt.show()
|
normal
|
{
"blob_id": "e0541c377eb6631e4ef5eb79b1204612ce8af48c",
"index": 6107,
"step-1": "<mask token>\n\n\ndef generate_circular_path(radius, offset):\n theta = np.deg2rad(np.linspace(0, 360, 360))\n x_data = radius * np.sin(theta) + offset[0]\n y_data = radius * np.cos(theta) + offset[1]\n radii = radius * np.ones((360,))\n return x_data, y_data, radii\n\n\n<mask token>\n\n\ndef terrain_state(x, y):\n local_normal = np.array([[0], [0], [1]], dtype=np.float64)\n hieght = 0\n return [local_normal, hieght]\n\n\n<mask token>\n\n\ndef steering_function(t):\n R_ch = num_model.Subsystems.CH.R_rbs_chassis\n P_ch = num_model.Subsystems.CH.P_rbs_chassis\n Rd_ch = num_model.Subsystems.CH.Rd_rbs_chassis\n Pd_ch = num_model.Subsystems.CH.Pd_rbs_chassis\n rbar_ax1 = np.array([[-800], [0], [0]], dtype=np.float64)\n r_ax1 = R_ch + A(P_ch) @ rbar_ax1\n vel = (A(P_ch).T @ (Rd_ch + B(P_ch, rbar_ax1) @ Pd_ch))[0, 0]\n delta = lateral_controller.get_steer_factor(r_ax1, P_ch, Pd_ch, vel)\n travel = delta * 18\n return travel\n\n\n<mask token>\n",
"step-2": "<mask token>\nsys.path.append(database_directory)\n<mask token>\n\n\ndef generate_circular_path(radius, offset):\n theta = np.deg2rad(np.linspace(0, 360, 360))\n x_data = radius * np.sin(theta) + offset[0]\n y_data = radius * np.cos(theta) + offset[1]\n radii = radius * np.ones((360,))\n return x_data, y_data, radii\n\n\n<mask token>\nplt.figure(figsize=(10, 5))\nplt.plot(path_data[:, 0], path_data[:, 1])\nplt.grid()\nplt.show()\n<mask token>\n\n\ndef terrain_state(x, y):\n local_normal = np.array([[0], [0], [1]], dtype=np.float64)\n hieght = 0\n return [local_normal, hieght]\n\n\ndef torque_function(t):\n P_ch = num_model.Subsystems.CH.P_rbs_chassis\n Rd = num_model.Subsystems.CH.Rd_rbs_chassis\n factor = logitudinal_controller.get_torque_factor(P_ch, Rd)\n return factor\n\n\ndef RR_Torque(t):\n factor = torque_function(t)\n torque = -factor * (70 * 9.81) * 1000000.0 * TR\n return torque\n\n\ndef RL_Torque(t):\n factor = torque_function(t)\n torque = -factor * (70 * 9.81) * 1000000.0 * TR\n return torque\n\n\ndef steering_function(t):\n R_ch = num_model.Subsystems.CH.R_rbs_chassis\n P_ch = num_model.Subsystems.CH.P_rbs_chassis\n Rd_ch = num_model.Subsystems.CH.Rd_rbs_chassis\n Pd_ch = num_model.Subsystems.CH.Pd_rbs_chassis\n rbar_ax1 = np.array([[-800], [0], [0]], dtype=np.float64)\n r_ax1 = R_ch + A(P_ch) @ rbar_ax1\n vel = (A(P_ch).T @ (Rd_ch + B(P_ch, rbar_ax1) @ Pd_ch))[0, 0]\n delta = lateral_controller.get_steer_factor(r_ax1, P_ch, Pd_ch, vel)\n travel = delta * 18\n return travel\n\n\ndef zero_func(t):\n return np.zeros((3, 1), dtype=np.float64)\n\n\n<mask token>\nsim.set_time_array(15, dt)\nsim.set_initial_states('results/equilibrium_v4.npz')\nsim.solve()\nsim.save_as_csv('results', 'constant_radius_v8', 'pos')\nsim.save_as_npz('results', 'constant_radius_v8')\n<mask token>\nsim.soln.pos_dataframe.plot(x='CH.rbs_chassis.x', y='CH.rbs_chassis.y',\n grid=True)\nsim.soln.vel_dataframe.plot(x='time', y='CH.rbs_chassis.x', grid=True)\nsim.soln.pos_dataframe.plot(x='time', y='CH.rbs_chassis.z', grid=True)\nsim.soln.vel_dataframe.plot(x='time', y='CH.rbs_chassis.z', grid=True)\nsim.soln.acc_dataframe.plot(x='time', y='CH.rbs_chassis.z', grid=True)\nsim.soln.pos_dataframe.plot(x='time', y='CH.rbs_chassis.e0', grid=True)\nsim.soln.pos_dataframe.plot(x='time', y='CH.rbs_chassis.e1', grid=True)\nsim.soln.pos_dataframe.plot(x='time', y='CH.rbs_chassis.e2', grid=True)\nsim.soln.pos_dataframe.plot(x='time', y='CH.rbs_chassis.e3', grid=True)\nplt.show()\n",
"step-3": "<mask token>\ndatabase_directory = os.path.abspath('../../')\nsys.path.append(database_directory)\n<mask token>\nnum_model = num_assm.num_model\ndt = num_assm.dt\nTR = 254\n\n\ndef generate_circular_path(radius, offset):\n theta = np.deg2rad(np.linspace(0, 360, 360))\n x_data = radius * np.sin(theta) + offset[0]\n y_data = radius * np.cos(theta) + offset[1]\n radii = radius * np.ones((360,))\n return x_data, y_data, radii\n\n\nx_data, y_data, radii = generate_circular_path(10.5, (0, -10.5))\npath_data = np.zeros((360, 3))\npath_data[:, 0] = -1000.0 * x_data\npath_data[:, 1] = 1000.0 * y_data\npath_data[:, 2] = 1000.0 * radii\nplt.figure(figsize=(10, 5))\nplt.plot(path_data[:, 0], path_data[:, 1])\nplt.grid()\nplt.show()\nlogitudinal_controller = speed_controller(35, dt)\nlateral_controller = stanley_controller(path_data, 25)\n\n\ndef terrain_state(x, y):\n local_normal = np.array([[0], [0], [1]], dtype=np.float64)\n hieght = 0\n return [local_normal, hieght]\n\n\ndef torque_function(t):\n P_ch = num_model.Subsystems.CH.P_rbs_chassis\n Rd = num_model.Subsystems.CH.Rd_rbs_chassis\n factor = logitudinal_controller.get_torque_factor(P_ch, Rd)\n return factor\n\n\ndef RR_Torque(t):\n factor = torque_function(t)\n torque = -factor * (70 * 9.81) * 1000000.0 * TR\n return torque\n\n\ndef RL_Torque(t):\n factor = torque_function(t)\n torque = -factor * (70 * 9.81) * 1000000.0 * TR\n return torque\n\n\ndef steering_function(t):\n R_ch = num_model.Subsystems.CH.R_rbs_chassis\n P_ch = num_model.Subsystems.CH.P_rbs_chassis\n Rd_ch = num_model.Subsystems.CH.Rd_rbs_chassis\n Pd_ch = num_model.Subsystems.CH.Pd_rbs_chassis\n rbar_ax1 = np.array([[-800], [0], [0]], dtype=np.float64)\n r_ax1 = R_ch + A(P_ch) @ rbar_ax1\n vel = (A(P_ch).T @ (Rd_ch + B(P_ch, rbar_ax1) @ Pd_ch))[0, 0]\n delta = lateral_controller.get_steer_factor(r_ax1, P_ch, Pd_ch, vel)\n travel = delta * 18\n return travel\n\n\ndef zero_func(t):\n return np.zeros((3, 1), dtype=np.float64)\n\n\nnum_assm.terrain_data.get_state = terrain_state\nnum_assm.ST1_config.UF_mcs_rack_act = steering_function\nnum_assm.AX1_config.UF_far_drive = RR_Torque\nnum_assm.AX1_config.UF_fal_drive = RL_Torque\nnum_assm.CH_config.UF_fas_aero_drag_F = zero_func\nnum_assm.CH_config.UF_fas_aero_drag_T = zero_func\nsim = simulation('sim', num_model, 'dds')\nsim.set_time_array(15, dt)\nsim.set_initial_states('results/equilibrium_v4.npz')\nsim.solve()\nsim.save_as_csv('results', 'constant_radius_v8', 'pos')\nsim.save_as_npz('results', 'constant_radius_v8')\n<mask token>\nsim.soln.pos_dataframe.plot(x='CH.rbs_chassis.x', y='CH.rbs_chassis.y',\n grid=True)\nsim.soln.vel_dataframe.plot(x='time', y='CH.rbs_chassis.x', grid=True)\nsim.soln.pos_dataframe.plot(x='time', y='CH.rbs_chassis.z', grid=True)\nsim.soln.vel_dataframe.plot(x='time', y='CH.rbs_chassis.z', grid=True)\nsim.soln.acc_dataframe.plot(x='time', y='CH.rbs_chassis.z', grid=True)\nsim.soln.pos_dataframe.plot(x='time', y='CH.rbs_chassis.e0', grid=True)\nsim.soln.pos_dataframe.plot(x='time', y='CH.rbs_chassis.e1', grid=True)\nsim.soln.pos_dataframe.plot(x='time', y='CH.rbs_chassis.e2', grid=True)\nsim.soln.pos_dataframe.plot(x='time', y='CH.rbs_chassis.e3', grid=True)\nplt.show()\n",
"step-4": "import sys\nimport os\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nfrom uraeus.nmbd.python import simulation\nfrom uraeus.nmbd.python.engine.numerics.math_funcs import A, B\ndatabase_directory = os.path.abspath('../../')\nsys.path.append(database_directory)\nfrom uraeus_fsae.simenv.assemblies import asurt_FS17_v1 as num_assm\nfrom controllers import speed_controller, stanley_controller\nnum_model = num_assm.num_model\ndt = num_assm.dt\nTR = 254\n\n\ndef generate_circular_path(radius, offset):\n theta = np.deg2rad(np.linspace(0, 360, 360))\n x_data = radius * np.sin(theta) + offset[0]\n y_data = radius * np.cos(theta) + offset[1]\n radii = radius * np.ones((360,))\n return x_data, y_data, radii\n\n\nx_data, y_data, radii = generate_circular_path(10.5, (0, -10.5))\npath_data = np.zeros((360, 3))\npath_data[:, 0] = -1000.0 * x_data\npath_data[:, 1] = 1000.0 * y_data\npath_data[:, 2] = 1000.0 * radii\nplt.figure(figsize=(10, 5))\nplt.plot(path_data[:, 0], path_data[:, 1])\nplt.grid()\nplt.show()\nlogitudinal_controller = speed_controller(35, dt)\nlateral_controller = stanley_controller(path_data, 25)\n\n\ndef terrain_state(x, y):\n local_normal = np.array([[0], [0], [1]], dtype=np.float64)\n hieght = 0\n return [local_normal, hieght]\n\n\ndef torque_function(t):\n P_ch = num_model.Subsystems.CH.P_rbs_chassis\n Rd = num_model.Subsystems.CH.Rd_rbs_chassis\n factor = logitudinal_controller.get_torque_factor(P_ch, Rd)\n return factor\n\n\ndef RR_Torque(t):\n factor = torque_function(t)\n torque = -factor * (70 * 9.81) * 1000000.0 * TR\n return torque\n\n\ndef RL_Torque(t):\n factor = torque_function(t)\n torque = -factor * (70 * 9.81) * 1000000.0 * TR\n return torque\n\n\ndef steering_function(t):\n R_ch = num_model.Subsystems.CH.R_rbs_chassis\n P_ch = num_model.Subsystems.CH.P_rbs_chassis\n Rd_ch = num_model.Subsystems.CH.Rd_rbs_chassis\n Pd_ch = num_model.Subsystems.CH.Pd_rbs_chassis\n rbar_ax1 = np.array([[-800], [0], [0]], dtype=np.float64)\n r_ax1 = R_ch + A(P_ch) @ rbar_ax1\n vel = (A(P_ch).T @ (Rd_ch + B(P_ch, rbar_ax1) @ Pd_ch))[0, 0]\n delta = lateral_controller.get_steer_factor(r_ax1, P_ch, Pd_ch, vel)\n travel = delta * 18\n return travel\n\n\ndef zero_func(t):\n return np.zeros((3, 1), dtype=np.float64)\n\n\nnum_assm.terrain_data.get_state = terrain_state\nnum_assm.ST1_config.UF_mcs_rack_act = steering_function\nnum_assm.AX1_config.UF_far_drive = RR_Torque\nnum_assm.AX1_config.UF_fal_drive = RL_Torque\nnum_assm.CH_config.UF_fas_aero_drag_F = zero_func\nnum_assm.CH_config.UF_fas_aero_drag_T = zero_func\nsim = simulation('sim', num_model, 'dds')\nsim.set_time_array(15, dt)\nsim.set_initial_states('results/equilibrium_v4.npz')\nsim.solve()\nsim.save_as_csv('results', 'constant_radius_v8', 'pos')\nsim.save_as_npz('results', 'constant_radius_v8')\nimport matplotlib.pyplot as plt\nsim.soln.pos_dataframe.plot(x='CH.rbs_chassis.x', y='CH.rbs_chassis.y',\n grid=True)\nsim.soln.vel_dataframe.plot(x='time', y='CH.rbs_chassis.x', grid=True)\nsim.soln.pos_dataframe.plot(x='time', y='CH.rbs_chassis.z', grid=True)\nsim.soln.vel_dataframe.plot(x='time', y='CH.rbs_chassis.z', grid=True)\nsim.soln.acc_dataframe.plot(x='time', y='CH.rbs_chassis.z', grid=True)\nsim.soln.pos_dataframe.plot(x='time', y='CH.rbs_chassis.e0', grid=True)\nsim.soln.pos_dataframe.plot(x='time', y='CH.rbs_chassis.e1', grid=True)\nsim.soln.pos_dataframe.plot(x='time', y='CH.rbs_chassis.e2', grid=True)\nsim.soln.pos_dataframe.plot(x='time', y='CH.rbs_chassis.e3', grid=True)\nplt.show()\n",
"step-5": "import sys\nimport os\n\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\n\nfrom uraeus.nmbd.python import simulation\nfrom uraeus.nmbd.python.engine.numerics.math_funcs import A, B\n\ndatabase_directory = os.path.abspath('../../')\nsys.path.append(database_directory)\n\nfrom uraeus_fsae.simenv.assemblies import asurt_FS17_v1 as num_assm\nfrom controllers import speed_controller, stanley_controller\n\nnum_model = num_assm.num_model\n\ndt = num_assm.dt\nTR = 254\n\ndef generate_circular_path(radius, offset):\n theta = np.deg2rad(np.linspace(0, 360, 360))\n x_data = radius * np.sin(theta) + offset[0]\n y_data = radius * np.cos(theta) + offset[1]\n radii = radius * np.ones((360,))\n return x_data, y_data, radii\n\n\nx_data, y_data, radii = generate_circular_path(10.5, (0, -10.5))\n\npath_data = np.zeros((360, 3))\npath_data[:, 0] = -1e3 * x_data\npath_data[:, 1] = 1e3 * y_data\npath_data[:, 2] = 1e3 * radii\n\nplt.figure(figsize=(10, 5))\nplt.plot(path_data[:, 0], path_data[:, 1])\nplt.grid()\nplt.show()\n\nlogitudinal_controller = speed_controller(35, dt)\nlateral_controller = stanley_controller(path_data, 25)\n\n\ndef terrain_state(x, y):\n local_normal = np.array([[0],[0],[1]], dtype=np.float64)\n hieght = 0\n return [local_normal, hieght]\n\n\ndef torque_function(t):\n P_ch = num_model.Subsystems.CH.P_rbs_chassis\n Rd = num_model.Subsystems.CH.Rd_rbs_chassis\n factor = logitudinal_controller.get_torque_factor(P_ch, Rd)\n return factor\n\ndef RR_Torque(t):\n factor = torque_function(t)\n torque = -factor*(70*9.81)*1e6*TR\n return torque\n\ndef RL_Torque(t):\n factor = torque_function(t)\n torque = -factor*(70*9.81)*1e6*TR\n return torque\n\ndef steering_function(t):\n R_ch = num_model.Subsystems.CH.R_rbs_chassis\n P_ch = num_model.Subsystems.CH.P_rbs_chassis\n Rd_ch = num_model.Subsystems.CH.Rd_rbs_chassis\n Pd_ch = num_model.Subsystems.CH.Pd_rbs_chassis\n\n rbar_ax1 = np.array([[-800], [0], [0]], dtype=np.float64)\n r_ax1 = R_ch + A(P_ch)@rbar_ax1\n vel = (A(P_ch).T @ (Rd_ch + B(P_ch, rbar_ax1)@Pd_ch))[0,0]\n\n delta = lateral_controller.get_steer_factor(r_ax1, P_ch, Pd_ch, vel)\n\n travel = delta * 18\n #print('Travel = %s'%travel)\n return travel\n\n\ndef zero_func(t):\n return np.zeros((3,1), dtype=np.float64)\n\n\nnum_assm.terrain_data.get_state = terrain_state\n\nnum_assm.ST1_config.UF_mcs_rack_act = steering_function\n\nnum_assm.AX1_config.UF_far_drive = RR_Torque\nnum_assm.AX1_config.UF_fal_drive = RL_Torque\n\n#num_assm.DR2_config.UF_far_drive = RR_Torque\n#num_assm.DR2_config.UF_fal_drive = RL_Torque\n\nnum_assm.CH_config.UF_fas_aero_drag_F = zero_func\nnum_assm.CH_config.UF_fas_aero_drag_T = zero_func\n# =============================================================================\n# Setting and Starting Simulation\n# =============================================================================\n\nsim = simulation('sim', num_model, 'dds')\nsim.set_time_array(15, dt)\n\n# Getting Equilibrium results as initial conditions to this simulation\n# ====================================================================\nsim.set_initial_states('results/equilibrium_v4.npz')\n\nsim.solve()\n\nsim.save_as_csv('results', 'constant_radius_v8', 'pos')\nsim.save_as_npz('results', 'constant_radius_v8')\n\n#=============================================================================\n# Plotting Simulation Results\n# =============================================================================\n\nimport matplotlib.pyplot as 
plt\n\nsim.soln.pos_dataframe.plot(x='CH.rbs_chassis.x', y='CH.rbs_chassis.y', grid=True)\n\nsim.soln.vel_dataframe.plot(x='time', y='CH.rbs_chassis.x', grid=True)\n\nsim.soln.pos_dataframe.plot(x='time', y='CH.rbs_chassis.z', grid=True)\nsim.soln.vel_dataframe.plot(x='time', y='CH.rbs_chassis.z', grid=True)\nsim.soln.acc_dataframe.plot(x='time', y='CH.rbs_chassis.z', grid=True)\n\nsim.soln.pos_dataframe.plot(x='time', y='CH.rbs_chassis.e0', grid=True)\nsim.soln.pos_dataframe.plot(x='time', y='CH.rbs_chassis.e1', grid=True)\nsim.soln.pos_dataframe.plot(x='time', y='CH.rbs_chassis.e2', grid=True)\nsim.soln.pos_dataframe.plot(x='time', y='CH.rbs_chassis.e3', grid=True)\n\nplt.show()\n",
"step-ids": [
3,
8,
9,
10,
11
]
}
|
[
3,
8,
9,
10,
11
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
for i in d:
packs[i % k] += 1
<|reserved_special_token_0|>
if k % 2 == 0:
counter += packs[k // 2] // 2
for i in range(1, ceil(k / 2)):
counter += min(packs[i], packs[k - i])
print(counter * 2)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
n, k = map(int, input().split())
d = list(map(int, input().split()))
packs = [0] * k
for i in d:
packs[i % k] += 1
counter = packs[0] // 2
if k % 2 == 0:
counter += packs[k // 2] // 2
for i in range(1, ceil(k / 2)):
counter += min(packs[i], packs[k - i])
print(counter * 2)
<|reserved_special_token_1|>
from math import ceil
n, k = map(int, input().split())
d = list(map(int, input().split()))
packs = [0] * k
for i in d:
packs[i % k] += 1
counter = packs[0] // 2
if k % 2 == 0:
counter += packs[k // 2] // 2
for i in range(1, ceil(k / 2)):
counter += min(packs[i], packs[k - i])
print(counter * 2)
<|reserved_special_token_1|>
from math import ceil
n, k = map(int, input().split())
d = list(map(int, input().split()))
packs = [0]*k
for i in d:
packs[i%k] += 1
counter = packs[0]//2
if (k % 2) == 0:
counter += packs[k//2]//2
for i in range(1, ceil(k/2)):
counter += min(packs[i], packs[k-i])
print(counter*2)
|
flexible
|
{
"blob_id": "2226382c494af33957a44d9f1682f7deacf574a2",
"index": 2075,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nfor i in d:\n packs[i % k] += 1\n<mask token>\nif k % 2 == 0:\n counter += packs[k // 2] // 2\nfor i in range(1, ceil(k / 2)):\n counter += min(packs[i], packs[k - i])\nprint(counter * 2)\n",
"step-3": "<mask token>\nn, k = map(int, input().split())\nd = list(map(int, input().split()))\npacks = [0] * k\nfor i in d:\n packs[i % k] += 1\ncounter = packs[0] // 2\nif k % 2 == 0:\n counter += packs[k // 2] // 2\nfor i in range(1, ceil(k / 2)):\n counter += min(packs[i], packs[k - i])\nprint(counter * 2)\n",
"step-4": "from math import ceil\nn, k = map(int, input().split())\nd = list(map(int, input().split()))\npacks = [0] * k\nfor i in d:\n packs[i % k] += 1\ncounter = packs[0] // 2\nif k % 2 == 0:\n counter += packs[k // 2] // 2\nfor i in range(1, ceil(k / 2)):\n counter += min(packs[i], packs[k - i])\nprint(counter * 2)\n",
"step-5": "from math import ceil\n\nn, k = map(int, input().split())\nd = list(map(int, input().split()))\n\npacks = [0]*k\nfor i in d:\n packs[i%k] += 1\n\ncounter = packs[0]//2\nif (k % 2) == 0:\n counter += packs[k//2]//2\nfor i in range(1, ceil(k/2)):\n counter += min(packs[i], packs[k-i])\n\nprint(counter*2)\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
pygame.display.set_caption('Space Force Prime')
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
img_dir = path.join(path.dirname(__file__), 'img')
WIDTH = 720
HEIGHT = 720
FPS = 30
RED = 255, 0, 0
GREEN = 0, 255, 0
BLUE = 0, 0, 255
BLACK = 0, 0, 0
YELLOW = 255, 255, 0
BROWN = 165, 42, 42
WHITE = 255, 255, 255
screen = pygame.display.set_mode((WIDTH, HEIGHT))
background = pygame.Surface(screen.get_size())
pygame.display.set_caption('Space Force Prime')
clock = pygame.time.Clock()
<|reserved_special_token_1|>
import pygame
from os import path
img_dir = path.join(path.dirname(__file__), 'img')
WIDTH = 720
HEIGHT = 720
FPS = 30
RED = 255, 0, 0
GREEN = 0, 255, 0
BLUE = 0, 0, 255
BLACK = 0, 0, 0
YELLOW = 255, 255, 0
BROWN = 165, 42, 42
WHITE = 255, 255, 255
screen = pygame.display.set_mode((WIDTH, HEIGHT))
background = pygame.Surface(screen.get_size())
pygame.display.set_caption('Space Force Prime')
clock = pygame.time.Clock()
<|reserved_special_token_1|>
import pygame
# import random
# import text_scroll
from os import path
img_dir = path.join(path.dirname(__file__), 'img')
# define screen and refresh rate
WIDTH = 720
HEIGHT = 720
FPS = 30
# define colors
RED = (255, 0, 0)
GREEN = (0, 255, 0)
BLUE = (0, 0, 255)
BLACK = (0, 0, 0)
YELLOW = (255, 255, 0)
BROWN = (165, 42, 42)
WHITE = (255, 255, 255)
# define runtime settings
screen = pygame.display.set_mode((WIDTH, HEIGHT))
background = pygame.Surface(screen.get_size())
pygame.display.set_caption('Space Force Prime')
clock = pygame.time.Clock()
|
flexible
|
{
"blob_id": "88dfb422b1c9f9a9a8f497e1dbba5598c2710e9b",
"index": 5718,
"step-1": "<mask token>\n",
"step-2": "<mask token>\npygame.display.set_caption('Space Force Prime')\n<mask token>\n",
"step-3": "<mask token>\nimg_dir = path.join(path.dirname(__file__), 'img')\nWIDTH = 720\nHEIGHT = 720\nFPS = 30\nRED = 255, 0, 0\nGREEN = 0, 255, 0\nBLUE = 0, 0, 255\nBLACK = 0, 0, 0\nYELLOW = 255, 255, 0\nBROWN = 165, 42, 42\nWHITE = 255, 255, 255\nscreen = pygame.display.set_mode((WIDTH, HEIGHT))\nbackground = pygame.Surface(screen.get_size())\npygame.display.set_caption('Space Force Prime')\nclock = pygame.time.Clock()\n",
"step-4": "import pygame\nfrom os import path\nimg_dir = path.join(path.dirname(__file__), 'img')\nWIDTH = 720\nHEIGHT = 720\nFPS = 30\nRED = 255, 0, 0\nGREEN = 0, 255, 0\nBLUE = 0, 0, 255\nBLACK = 0, 0, 0\nYELLOW = 255, 255, 0\nBROWN = 165, 42, 42\nWHITE = 255, 255, 255\nscreen = pygame.display.set_mode((WIDTH, HEIGHT))\nbackground = pygame.Surface(screen.get_size())\npygame.display.set_caption('Space Force Prime')\nclock = pygame.time.Clock()\n",
"step-5": "import pygame\n# import random\n# import text_scroll\n\nfrom os import path\nimg_dir = path.join(path.dirname(__file__), 'img')\n\n# define screen and refresh rate\nWIDTH = 720\nHEIGHT = 720\nFPS = 30\n\n# define colors\nRED = (255, 0, 0)\nGREEN = (0, 255, 0)\nBLUE = (0, 0, 255)\nBLACK = (0, 0, 0)\nYELLOW = (255, 255, 0)\nBROWN = (165, 42, 42)\nWHITE = (255, 255, 255)\n\n# define runtime settings\nscreen = pygame.display.set_mode((WIDTH, HEIGHT))\nbackground = pygame.Surface(screen.get_size())\npygame.display.set_caption('Space Force Prime')\nclock = pygame.time.Clock()",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
STATUS_CHOICES = (-1, 'Eliminado'), (0, 'Inactivo'), (1, 'Activo')
USERTYPES_CHOICES = ()
ACTIVATION_CHOICES = (1, 'Activacion'), (2, 'Solicitud Password'), (3,
'Invitacion')
ACTIVATIONSTATUS_CHOICES = (-1, 'Expirado'), (0, 'Enviado'), (1, 'Activado')
<|reserved_special_token_1|>
STATUS_CHOICES = (
(-1, 'Eliminado'),
(0, 'Inactivo'),
(1, 'Activo'),
)
USERTYPES_CHOICES = ()
#-- Activation Request Values
ACTIVATION_CHOICES = (
(1, 'Activacion'),
(2, 'Solicitud Password'),
(3, 'Invitacion'),
)
#-- Activation Status Values
ACTIVATIONSTATUS_CHOICES = (
(-1, 'Expirado'),
(0, 'Enviado'),
(1, 'Activado'),
)
|
flexible
|
{
"blob_id": "200552b638d6b1a6879b455837677b82689e0069",
"index": 5479,
"step-1": "<mask token>\n",
"step-2": "STATUS_CHOICES = (-1, 'Eliminado'), (0, 'Inactivo'), (1, 'Activo')\nUSERTYPES_CHOICES = ()\nACTIVATION_CHOICES = (1, 'Activacion'), (2, 'Solicitud Password'), (3,\n 'Invitacion')\nACTIVATIONSTATUS_CHOICES = (-1, 'Expirado'), (0, 'Enviado'), (1, 'Activado')\n",
"step-3": "\n\nSTATUS_CHOICES = (\n (-1, 'Eliminado'),\n (0, 'Inactivo'),\n (1, 'Activo'),\n)\n\nUSERTYPES_CHOICES = ()\n\n#-- Activation Request Values\nACTIVATION_CHOICES = (\n (1, 'Activacion'),\n (2, 'Solicitud Password'),\n (3, 'Invitacion'),\n)\n\n#-- Activation Status Values\nACTIVATIONSTATUS_CHOICES = (\n (-1, 'Expirado'),\n (0, 'Enviado'),\n (1, 'Activado'),\n)",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
# use local image
import io
import os
from google.cloud import vision
from google.oauth2 import service_account
creds = service_account.Credentials.from_service_account_file('./key.json')
client = vision.ImageAnnotatorClient(
credentials=creds,
)
# The name of the image file to annotate
file_name = os.path.join(
os.path.dirname(__file__),
"./dog.jpg")
# Loads the image into memory
with io.open(file_name, 'rb') as image_file:
content = image_file.read()
request = {
"image": {
"content": content
},
"features": [
{
"max_results": 2,
"type": "LABEL_DETECTION"
},
{
"type": "SAFE_SEARCH_DETECTION"
}
]
}
response = client.annotate_image(request)
print(response)
print(response.safe_search_annotation.adult)
for label in response.label_annotations:
print(label.description)
|
normal
|
{
"blob_id": "800573786913ff2fc37845193b5584a0a815533f",
"index": 8340,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nwith io.open(file_name, 'rb') as image_file:\n content = image_file.read()\n<mask token>\nprint(response)\nprint(response.safe_search_annotation.adult)\nfor label in response.label_annotations:\n print(label.description)\n",
"step-3": "<mask token>\ncreds = service_account.Credentials.from_service_account_file('./key.json')\nclient = vision.ImageAnnotatorClient(credentials=creds)\nfile_name = os.path.join(os.path.dirname(__file__), './dog.jpg')\nwith io.open(file_name, 'rb') as image_file:\n content = image_file.read()\nrequest = {'image': {'content': content}, 'features': [{'max_results': 2,\n 'type': 'LABEL_DETECTION'}, {'type': 'SAFE_SEARCH_DETECTION'}]}\nresponse = client.annotate_image(request)\nprint(response)\nprint(response.safe_search_annotation.adult)\nfor label in response.label_annotations:\n print(label.description)\n",
"step-4": "import io\nimport os\nfrom google.cloud import vision\nfrom google.oauth2 import service_account\ncreds = service_account.Credentials.from_service_account_file('./key.json')\nclient = vision.ImageAnnotatorClient(credentials=creds)\nfile_name = os.path.join(os.path.dirname(__file__), './dog.jpg')\nwith io.open(file_name, 'rb') as image_file:\n content = image_file.read()\nrequest = {'image': {'content': content}, 'features': [{'max_results': 2,\n 'type': 'LABEL_DETECTION'}, {'type': 'SAFE_SEARCH_DETECTION'}]}\nresponse = client.annotate_image(request)\nprint(response)\nprint(response.safe_search_annotation.adult)\nfor label in response.label_annotations:\n print(label.description)\n",
"step-5": "# use local image\n\nimport io\nimport os\n\nfrom google.cloud import vision\nfrom google.oauth2 import service_account\n\ncreds = service_account.Credentials.from_service_account_file('./key.json')\n\nclient = vision.ImageAnnotatorClient(\n credentials=creds,\n)\n\n# The name of the image file to annotate\nfile_name = os.path.join(\n os.path.dirname(__file__),\n \"./dog.jpg\")\n\n# Loads the image into memory\nwith io.open(file_name, 'rb') as image_file:\n content = image_file.read()\n\nrequest = {\n \"image\": {\n \"content\": content\n }, \n \"features\": [\n {\n \"max_results\": 2,\n \"type\": \"LABEL_DETECTION\"\n },\n {\n \"type\": \"SAFE_SEARCH_DETECTION\"\n }\n ]\n}\n\nresponse = client.annotate_image(request)\n\nprint(response)\n\nprint(response.safe_search_annotation.adult)\n\nfor label in response.label_annotations:\n print(label.description)",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
json_data.close()
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
json_data = open('eventnotipy/config.json')
data = json.load(json_data)
json_data.close()
username = data['dbuser']
password = data['password']
host = data['dbhost']
db_name = data['database']
email_host = data['email_host']
email_localhost = data['email_localhost']
sms_host = data['sms_host']
sms_localhost = data['sms_localhost']
app.config['SQLALCHEMY_DATABASE_URI'] = 'mysql+pymysql://%s:%s@%s/%s' % (
username, password, host, db_name)
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
app.config['SQLALCHEMY_ECHO'] = False
app.secret_key = data['session_key']
<|reserved_special_token_1|>
from eventnotipy import app
import json
json_data = open('eventnotipy/config.json')
data = json.load(json_data)
json_data.close()
username = data['dbuser']
password = data['password']
host = data['dbhost']
db_name = data['database']
email_host = data['email_host']
email_localhost = data['email_localhost']
sms_host = data['sms_host']
sms_localhost = data['sms_localhost']
app.config['SQLALCHEMY_DATABASE_URI'] = 'mysql+pymysql://%s:%s@%s/%s' % (
username, password, host, db_name)
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
app.config['SQLALCHEMY_ECHO'] = False
app.secret_key = data['session_key']
|
flexible
|
{
"blob_id": "1f0680c45afb36439c56a1d202537261df5f9afc",
"index": 5895,
"step-1": "<mask token>\n",
"step-2": "<mask token>\njson_data.close()\n<mask token>\n",
"step-3": "<mask token>\njson_data = open('eventnotipy/config.json')\ndata = json.load(json_data)\njson_data.close()\nusername = data['dbuser']\npassword = data['password']\nhost = data['dbhost']\ndb_name = data['database']\nemail_host = data['email_host']\nemail_localhost = data['email_localhost']\nsms_host = data['sms_host']\nsms_localhost = data['sms_localhost']\napp.config['SQLALCHEMY_DATABASE_URI'] = 'mysql+pymysql://%s:%s@%s/%s' % (\n username, password, host, db_name)\napp.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False\napp.config['SQLALCHEMY_ECHO'] = False\napp.secret_key = data['session_key']\n",
"step-4": "from eventnotipy import app\nimport json\njson_data = open('eventnotipy/config.json')\ndata = json.load(json_data)\njson_data.close()\nusername = data['dbuser']\npassword = data['password']\nhost = data['dbhost']\ndb_name = data['database']\nemail_host = data['email_host']\nemail_localhost = data['email_localhost']\nsms_host = data['sms_host']\nsms_localhost = data['sms_localhost']\napp.config['SQLALCHEMY_DATABASE_URI'] = 'mysql+pymysql://%s:%s@%s/%s' % (\n username, password, host, db_name)\napp.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False\napp.config['SQLALCHEMY_ECHO'] = False\napp.secret_key = data['session_key']\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
gmap.heatmap(latitude, longitude)
gmap.scatter(latitude, longitude, c='r', marker=True)
<|reserved_special_token_0|>
gmap.draw('c:\\users\\jackc\\desktop\\country_heatmap.html')
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
latitude = (np.random.random_sample(size=700) - 0.5) * 180
longitude = (np.random.random_sample(size=700) - 0.5) * 360
gmap = gmplot.GoogleMapPlotter(0, 0, 2)
gmap.heatmap(latitude, longitude)
gmap.scatter(latitude, longitude, c='r', marker=True)
gmap.apikey = 'AIzaSyAid6Kk6DZVnu0VNsrDJsmhKwH1pqyiu00'
gmap.draw('c:\\users\\jackc\\desktop\\country_heatmap.html')
<|reserved_special_token_0|>
<|reserved_special_token_1|>
import gmplot
import numpy as np
latitude = (np.random.random_sample(size=700) - 0.5) * 180
longitude = (np.random.random_sample(size=700) - 0.5) * 360
gmap = gmplot.GoogleMapPlotter(0, 0, 2)
gmap.heatmap(latitude, longitude)
gmap.scatter(latitude, longitude, c='r', marker=True)
gmap.apikey = 'AIzaSyAid6Kk6DZVnu0VNsrDJsmhKwH1pqyiu00'
gmap.draw('c:\\users\\jackc\\desktop\\country_heatmap.html')
<|reserved_special_token_0|>
<|reserved_special_token_1|>
# import gmplot package
import gmplot
import numpy as np
# generate 700 random lats and lons
latitude = (np.random.random_sample(size = 700) - 0.5) * 180
longitude = (np.random.random_sample(size = 700) - 0.5) * 360
# declare the center of the map, and how much we want the map zoomed in
gmap = gmplot.GoogleMapPlotter(0, 0, 2)
# plot heatmap
gmap.heatmap(latitude, longitude)
gmap.scatter(latitude, longitude, c='r', marker=True)
#Your Google_API_Key
gmap.apikey = "AIzaSyAid6Kk6DZVnu0VNsrDJsmhKwH1pqyiu00"
# save it to html
gmap.draw("c:\\users\\jackc\desktop\\country_heatmap.html")
'''
import csv
import pandas as pd
from operator import itemgetter
import matplotlib.pyplot as plt
import numpy as np
import mplcursors
import gmplot
def outputScatter():
data = pd.read_csv('C:\\Users\\jackc\\Desktop\\ctran\dataMerge.csv')
df = data.groupby('location_id')
gmap = gmplot.GoogleMapPlotter(0,0,2)
counter = 0
result = []
result_lon = []
result_lat = []
result_calculation = []
result_lon_static = []
result_lat_static = []
result_toSCV = []
above50ft = 0
above70ft = 0
above90ft = 0
above150ft = 0
index = 0
colors = ['r','y','g','b']
for x,y in df:
for z in range(y.location_distance.values.size):
result_lon_static.append(y.y_coordinate.values[z])
result_lat_static.append(y.x_coordinate.values[z])
if(y.location_distance.values[z] > 30):
counter = counter + 1
if(y.location_distance.values[z] > 50):
above50ft = above50ft + 1
if(y.location_distance.values[z] > 70):
above70ft = above70ft + 1
if(y.location_distance.values[z] > 90):
above90ft = above90ft + 1
if(y.location_distance.values[z] > 150):
above150ft = above150ft + 1
cal=counter/(y.location_distance.values.size)
result.append([y.stop_code.values[0], cal, y.stop_lat.values[0], y.stop_lon.values[0]])
result_lat.append(y.stop_lat.values[0])
result_lon.append(y.stop_lon.values[0])
result_calculation.append(cal)
result_toSCV.append([y.stop_code.values[0], cal, y.location_distance.values.size, counter, above50ft, above70ft, above90ft, above150ft])
index = index+1
above50ft = 0
above70ft = 0
above90ft = 0
above150ft = 0
counter = 0
result = sorted(result,key=itemgetter(1), reverse=True)
result_toSCV = sorted(result_toSCV, key=itemgetter(1), reverse=True)
plt.scatter(result_lat_static,result_lon_static, c='black')
code_id = []
for x in result:
#code_id.append(x[0])
#result_calculation.append(x[1])
#result_lat.append(x[2])
#result_lon.append(x[3])
if x[1] > 0.9:
red = plt.scatter(x[3],x[2], c=colors[0], label='>90%')
#red = plt.scatter(x[3],x[2], c=colors[0], label=x[0])
elif x[1] > 0.8:
yellow = plt.scatter(x[3],x[2], c=colors[1], label='>80%')
#yellow = plt.scatter(x[3],x[2], c=colors[1], label=x[0])
elif x[1] > 0.7:
green = plt.scatter(x[3],x[2], c=colors[2], label='>70%')
#green = plt.scatter(x[3],x[2], c=colors[2], label=x[0])
else:
blue = plt.scatter(x[3],x[2], c=colors[3], label='>60%')
#blue = plt.scatter(x[3],x[2], c=colors[3], label=x[0])
with open('C:\\Users\\Jackc\\Desktop\\Ctran\\outputPercentError.csv', mode='w', newline='') as file:
writer = csv.writer(file)
writer.writerow(['location_id', 'percent_Error', 'total_count', 'above30ft', 'above50ft', 'above70ft', 'above90ft', 'above150ft'])
for x in result_toSCV:
writer.writerow(x)
'''
|
flexible
|
{
"blob_id": "1cc77ed1c5da025d1b539df202bbd3310a174eac",
"index": 3902,
"step-1": "<mask token>\n",
"step-2": "<mask token>\ngmap.heatmap(latitude, longitude)\ngmap.scatter(latitude, longitude, c='r', marker=True)\n<mask token>\ngmap.draw('c:\\\\users\\\\jackc\\\\desktop\\\\country_heatmap.html')\n<mask token>\n",
"step-3": "<mask token>\nlatitude = (np.random.random_sample(size=700) - 0.5) * 180\nlongitude = (np.random.random_sample(size=700) - 0.5) * 360\ngmap = gmplot.GoogleMapPlotter(0, 0, 2)\ngmap.heatmap(latitude, longitude)\ngmap.scatter(latitude, longitude, c='r', marker=True)\ngmap.apikey = 'AIzaSyAid6Kk6DZVnu0VNsrDJsmhKwH1pqyiu00'\ngmap.draw('c:\\\\users\\\\jackc\\\\desktop\\\\country_heatmap.html')\n<mask token>\n",
"step-4": "import gmplot\nimport numpy as np\nlatitude = (np.random.random_sample(size=700) - 0.5) * 180\nlongitude = (np.random.random_sample(size=700) - 0.5) * 360\ngmap = gmplot.GoogleMapPlotter(0, 0, 2)\ngmap.heatmap(latitude, longitude)\ngmap.scatter(latitude, longitude, c='r', marker=True)\ngmap.apikey = 'AIzaSyAid6Kk6DZVnu0VNsrDJsmhKwH1pqyiu00'\ngmap.draw('c:\\\\users\\\\jackc\\\\desktop\\\\country_heatmap.html')\n<mask token>\n",
"step-5": "# import gmplot package\nimport gmplot\nimport numpy as np\n# generate 700 random lats and lons\nlatitude = (np.random.random_sample(size = 700) - 0.5) * 180\nlongitude = (np.random.random_sample(size = 700) - 0.5) * 360\n# declare the center of the map, and how much we want the map zoomed in\ngmap = gmplot.GoogleMapPlotter(0, 0, 2)\n# plot heatmap\ngmap.heatmap(latitude, longitude)\ngmap.scatter(latitude, longitude, c='r', marker=True)\n#Your Google_API_Key\ngmap.apikey = \"AIzaSyAid6Kk6DZVnu0VNsrDJsmhKwH1pqyiu00\"\n# save it to html\ngmap.draw(\"c:\\\\users\\\\jackc\\desktop\\\\country_heatmap.html\")\n\n'''\nimport csv\nimport pandas as pd\nfrom operator import itemgetter\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport mplcursors\nimport gmplot\n\ndef outputScatter():\n data = pd.read_csv('C:\\\\Users\\\\jackc\\\\Desktop\\\\ctran\\dataMerge.csv')\n df = data.groupby('location_id')\n\tgmap = gmplot.GoogleMapPlotter(0,0,2)\n counter = 0\n result = []\n result_lon = []\n result_lat = []\n result_calculation = []\n result_lon_static = []\n result_lat_static = []\n result_toSCV = []\n above50ft = 0\n above70ft = 0\n above90ft = 0\n above150ft = 0\n index = 0\n colors = ['r','y','g','b']\n\n for x,y in df:\n for z in range(y.location_distance.values.size):\n result_lon_static.append(y.y_coordinate.values[z])\n result_lat_static.append(y.x_coordinate.values[z])\n if(y.location_distance.values[z] > 30):\n counter = counter + 1\n if(y.location_distance.values[z] > 50):\n above50ft = above50ft + 1\n if(y.location_distance.values[z] > 70):\n above70ft = above70ft + 1\n if(y.location_distance.values[z] > 90):\n above90ft = above90ft + 1\n if(y.location_distance.values[z] > 150):\n above150ft = above150ft + 1\n\n cal=counter/(y.location_distance.values.size)\n result.append([y.stop_code.values[0], cal, y.stop_lat.values[0], y.stop_lon.values[0]])\n result_lat.append(y.stop_lat.values[0])\n result_lon.append(y.stop_lon.values[0])\n result_calculation.append(cal)\n result_toSCV.append([y.stop_code.values[0], cal, y.location_distance.values.size, counter, above50ft, above70ft, above90ft, above150ft])\n index = index+1\n above50ft = 0\n above70ft = 0\n above90ft = 0\n above150ft = 0\n counter = 0\n result = sorted(result,key=itemgetter(1), reverse=True)\n result_toSCV = sorted(result_toSCV, key=itemgetter(1), reverse=True)\n plt.scatter(result_lat_static,result_lon_static, c='black')\n\n code_id = []\n for x in result:\n #code_id.append(x[0])\n #result_calculation.append(x[1])\n #result_lat.append(x[2])\n #result_lon.append(x[3])\n if x[1] > 0.9:\n red = plt.scatter(x[3],x[2], c=colors[0], label='>90%')\n #red = plt.scatter(x[3],x[2], c=colors[0], label=x[0])\n\n elif x[1] > 0.8:\n yellow = plt.scatter(x[3],x[2], c=colors[1], label='>80%')\n #yellow = plt.scatter(x[3],x[2], c=colors[1], label=x[0])\n elif x[1] > 0.7:\n green = plt.scatter(x[3],x[2], c=colors[2], label='>70%')\n #green = plt.scatter(x[3],x[2], c=colors[2], label=x[0])\n else:\n blue = plt.scatter(x[3],x[2], c=colors[3], label='>60%')\n #blue = plt.scatter(x[3],x[2], c=colors[3], label=x[0])\n\n\n with open('C:\\\\Users\\\\Jackc\\\\Desktop\\\\Ctran\\\\outputPercentError.csv', mode='w', newline='') as file:\n writer = csv.writer(file)\n writer.writerow(['location_id', 'percent_Error', 'total_count', 'above30ft', 'above50ft', 'above70ft', 'above90ft', 'above150ft'])\n for x in result_toSCV:\n writer.writerow(x)\n\n'''\n\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
class IndexedDBTimelineMetric(timeline_based_metric.TimelineBasedMetric):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class IndexedDBTimelineMetric(timeline_based_metric.TimelineBasedMetric):
<|reserved_special_token_0|>
def __init__(self):
super(IndexedDBTimelineMetric, self).__init__()
self._stats = TraceEventStats()
self._stats.AddInput(TraceEventStatsInput(event_category=
'IndexedDB', event_name='IndexedDBDatabase::GetOperation',
metric_name='idb-gets', metric_description=
'The duration of all "get" ops in IndexedDB', units='ms',
process_name='Browser'))
self._stats.AddInput(TraceEventStatsInput(event_category=
'IndexedDB', event_name='IndexedDBDatabase::PutOperation',
metric_name='idb-puts', metric_description=
'The duration of all "put" ops in IndexedDB', units='ms',
process_name='Browser'))
self._stats.AddInput(TraceEventStatsInput(event_category=
'IndexedDB', event_name='IndexedDBFactoryImpl::Open',
metric_name='idb-opens', metric_description=
'The duration of all "open" ops in IndexedDB', units='ms',
process_name='Browser'))
self._stats.AddInput(TraceEventStatsInput(event_category=
'IndexedDB', event_name='IndexedDBTransaction::Commit',
metric_name='idb-transaction-commits', metric_description=
'The duration of all "commit" ops of ' +
'transactions in IndexedDB.', units='ms', process_name='Browser'))
self._stats.AddInput(TraceEventStatsInput(event_category=
'IndexedDB', event_name='IndexedDBFactoryImpl::DeleteDatabase',
metric_name='idb-database-deletes', metric_description=
'The duration of all "delete" ops of ' + 'IndexedDB databases.',
units='ms', process_name='Browser'))
self._stats.AddInput(TraceEventStatsInput(event_category=
'IndexedDB', event_name=
'IndexedDBDatabase::OpenCursorOperation', metric_name=
'idb-cursor-opens', metric_description=
'The duration of all "open" ops of ' + 'IndexedDB cursors.',
units='ms', process_name='Browser'))
self._stats.AddInput(TraceEventStatsInput(event_category=
'IndexedDB', event_name=
'IndexedDBCursor::CursorIterationOperation', metric_name=
'idb-cursor-iterations', metric_description=
'The duration of all "iteration" ops of ' +
'IndexedDB cursors.', units='ms', process_name='Browser'))
def AddResults(self, model, renderer_process, interactions, results):
self._stats.AddResults(model, renderer_process, interactions, results)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class IndexedDBTimelineMetric(timeline_based_metric.TimelineBasedMetric):
"""Metrics for IndexedDB operations.
"""
def __init__(self):
super(IndexedDBTimelineMetric, self).__init__()
self._stats = TraceEventStats()
self._stats.AddInput(TraceEventStatsInput(event_category=
'IndexedDB', event_name='IndexedDBDatabase::GetOperation',
metric_name='idb-gets', metric_description=
'The duration of all "get" ops in IndexedDB', units='ms',
process_name='Browser'))
self._stats.AddInput(TraceEventStatsInput(event_category=
'IndexedDB', event_name='IndexedDBDatabase::PutOperation',
metric_name='idb-puts', metric_description=
'The duration of all "put" ops in IndexedDB', units='ms',
process_name='Browser'))
self._stats.AddInput(TraceEventStatsInput(event_category=
'IndexedDB', event_name='IndexedDBFactoryImpl::Open',
metric_name='idb-opens', metric_description=
'The duration of all "open" ops in IndexedDB', units='ms',
process_name='Browser'))
self._stats.AddInput(TraceEventStatsInput(event_category=
'IndexedDB', event_name='IndexedDBTransaction::Commit',
metric_name='idb-transaction-commits', metric_description=
'The duration of all "commit" ops of ' +
'transactions in IndexedDB.', units='ms', process_name='Browser'))
self._stats.AddInput(TraceEventStatsInput(event_category=
'IndexedDB', event_name='IndexedDBFactoryImpl::DeleteDatabase',
metric_name='idb-database-deletes', metric_description=
'The duration of all "delete" ops of ' + 'IndexedDB databases.',
units='ms', process_name='Browser'))
self._stats.AddInput(TraceEventStatsInput(event_category=
'IndexedDB', event_name=
'IndexedDBDatabase::OpenCursorOperation', metric_name=
'idb-cursor-opens', metric_description=
'The duration of all "open" ops of ' + 'IndexedDB cursors.',
units='ms', process_name='Browser'))
self._stats.AddInput(TraceEventStatsInput(event_category=
'IndexedDB', event_name=
'IndexedDBCursor::CursorIterationOperation', metric_name=
'idb-cursor-iterations', metric_description=
'The duration of all "iteration" ops of ' +
'IndexedDB cursors.', units='ms', process_name='Browser'))
def AddResults(self, model, renderer_process, interactions, results):
self._stats.AddResults(model, renderer_process, interactions, results)
<|reserved_special_token_1|>
from telemetry.web_perf.metrics import timeline_based_metric
from telemetry.web_perf.metrics.trace_event_stats import TraceEventStats
from telemetry.web_perf.metrics.trace_event_stats import TraceEventStatsInput
class IndexedDBTimelineMetric(timeline_based_metric.TimelineBasedMetric):
"""Metrics for IndexedDB operations.
"""
def __init__(self):
super(IndexedDBTimelineMetric, self).__init__()
self._stats = TraceEventStats()
self._stats.AddInput(TraceEventStatsInput(event_category=
'IndexedDB', event_name='IndexedDBDatabase::GetOperation',
metric_name='idb-gets', metric_description=
'The duration of all "get" ops in IndexedDB', units='ms',
process_name='Browser'))
self._stats.AddInput(TraceEventStatsInput(event_category=
'IndexedDB', event_name='IndexedDBDatabase::PutOperation',
metric_name='idb-puts', metric_description=
'The duration of all "put" ops in IndexedDB', units='ms',
process_name='Browser'))
self._stats.AddInput(TraceEventStatsInput(event_category=
'IndexedDB', event_name='IndexedDBFactoryImpl::Open',
metric_name='idb-opens', metric_description=
'The duration of all "open" ops in IndexedDB', units='ms',
process_name='Browser'))
self._stats.AddInput(TraceEventStatsInput(event_category=
'IndexedDB', event_name='IndexedDBTransaction::Commit',
metric_name='idb-transaction-commits', metric_description=
'The duration of all "commit" ops of ' +
'transactions in IndexedDB.', units='ms', process_name='Browser'))
self._stats.AddInput(TraceEventStatsInput(event_category=
'IndexedDB', event_name='IndexedDBFactoryImpl::DeleteDatabase',
metric_name='idb-database-deletes', metric_description=
'The duration of all "delete" ops of ' + 'IndexedDB databases.',
units='ms', process_name='Browser'))
self._stats.AddInput(TraceEventStatsInput(event_category=
'IndexedDB', event_name=
'IndexedDBDatabase::OpenCursorOperation', metric_name=
'idb-cursor-opens', metric_description=
'The duration of all "open" ops of ' + 'IndexedDB cursors.',
units='ms', process_name='Browser'))
self._stats.AddInput(TraceEventStatsInput(event_category=
'IndexedDB', event_name=
'IndexedDBCursor::CursorIterationOperation', metric_name=
'idb-cursor-iterations', metric_description=
'The duration of all "iteration" ops of ' +
'IndexedDB cursors.', units='ms', process_name='Browser'))
def AddResults(self, model, renderer_process, interactions, results):
self._stats.AddResults(model, renderer_process, interactions, results)
<|reserved_special_token_1|>
# Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from telemetry.web_perf.metrics import timeline_based_metric
from telemetry.web_perf.metrics.trace_event_stats import TraceEventStats
from telemetry.web_perf.metrics.trace_event_stats import TraceEventStatsInput
class IndexedDBTimelineMetric(timeline_based_metric.TimelineBasedMetric):
"""Metrics for IndexedDB operations.
"""
def __init__(self):
super(IndexedDBTimelineMetric, self).__init__()
self._stats = TraceEventStats()
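    # Register one TraceEventStatsInput per IndexedDB operation whose event durations are aggregated.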
self._stats.AddInput(TraceEventStatsInput(
event_category='IndexedDB',
event_name='IndexedDBDatabase::GetOperation',
metric_name='idb-gets',
metric_description='The duration of all "get" ops in IndexedDB',
units='ms',
process_name='Browser'))
self._stats.AddInput(TraceEventStatsInput(
event_category='IndexedDB',
event_name='IndexedDBDatabase::PutOperation',
metric_name='idb-puts',
metric_description='The duration of all "put" ops in IndexedDB',
units='ms',
process_name='Browser'))
self._stats.AddInput(TraceEventStatsInput(
event_category='IndexedDB',
event_name='IndexedDBFactoryImpl::Open',
metric_name='idb-opens',
metric_description='The duration of all "open" ops in IndexedDB',
units='ms',
process_name='Browser'))
self._stats.AddInput(TraceEventStatsInput(
event_category='IndexedDB',
event_name='IndexedDBTransaction::Commit',
metric_name='idb-transaction-commits',
metric_description=('The duration of all "commit" ops of ' +
'transactions in IndexedDB.'),
units='ms',
process_name='Browser'))
self._stats.AddInput(TraceEventStatsInput(
event_category='IndexedDB',
event_name='IndexedDBFactoryImpl::DeleteDatabase',
metric_name='idb-database-deletes',
metric_description=('The duration of all "delete" ops of ' +
'IndexedDB databases.'),
units='ms',
process_name='Browser'))
self._stats.AddInput(TraceEventStatsInput(
event_category='IndexedDB',
event_name='IndexedDBDatabase::OpenCursorOperation',
metric_name='idb-cursor-opens',
metric_description=('The duration of all "open" ops of ' +
'IndexedDB cursors.'),
units='ms',
process_name='Browser'))
self._stats.AddInput(TraceEventStatsInput(
event_category='IndexedDB',
event_name='IndexedDBCursor::CursorIterationOperation',
metric_name='idb-cursor-iterations',
metric_description=('The duration of all "iteration" ops of ' +
'IndexedDB cursors.'),
units='ms',
process_name='Browser'))
def AddResults(self, model, renderer_process, interactions, results):
self._stats.AddResults(model, renderer_process, interactions, results)
|
flexible
|
{
"blob_id": "47f88bc3836490e08f464f71351096b54118420e",
"index": 5297,
"step-1": "<mask token>\n\n\nclass IndexedDBTimelineMetric(timeline_based_metric.TimelineBasedMetric):\n <mask token>\n <mask token>\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass IndexedDBTimelineMetric(timeline_based_metric.TimelineBasedMetric):\n <mask token>\n\n def __init__(self):\n super(IndexedDBTimelineMetric, self).__init__()\n self._stats = TraceEventStats()\n self._stats.AddInput(TraceEventStatsInput(event_category=\n 'IndexedDB', event_name='IndexedDBDatabase::GetOperation',\n metric_name='idb-gets', metric_description=\n 'The duration of all \"get\" ops in IndexedDB', units='ms',\n process_name='Browser'))\n self._stats.AddInput(TraceEventStatsInput(event_category=\n 'IndexedDB', event_name='IndexedDBDatabase::PutOperation',\n metric_name='idb-puts', metric_description=\n 'The duration of all \"put\" ops in IndexedDB', units='ms',\n process_name='Browser'))\n self._stats.AddInput(TraceEventStatsInput(event_category=\n 'IndexedDB', event_name='IndexedDBFactoryImpl::Open',\n metric_name='idb-opens', metric_description=\n 'The duration of all \"open\" ops in IndexedDB', units='ms',\n process_name='Browser'))\n self._stats.AddInput(TraceEventStatsInput(event_category=\n 'IndexedDB', event_name='IndexedDBTransaction::Commit',\n metric_name='idb-transaction-commits', metric_description=\n 'The duration of all \"commit\" ops of ' +\n 'transactions in IndexedDB.', units='ms', process_name='Browser'))\n self._stats.AddInput(TraceEventStatsInput(event_category=\n 'IndexedDB', event_name='IndexedDBFactoryImpl::DeleteDatabase',\n metric_name='idb-database-deletes', metric_description=\n 'The duration of all \"delete\" ops of ' + 'IndexedDB databases.',\n units='ms', process_name='Browser'))\n self._stats.AddInput(TraceEventStatsInput(event_category=\n 'IndexedDB', event_name=\n 'IndexedDBDatabase::OpenCursorOperation', metric_name=\n 'idb-cursor-opens', metric_description=\n 'The duration of all \"open\" ops of ' + 'IndexedDB cursors.',\n units='ms', process_name='Browser'))\n self._stats.AddInput(TraceEventStatsInput(event_category=\n 'IndexedDB', event_name=\n 'IndexedDBCursor::CursorIterationOperation', metric_name=\n 'idb-cursor-iterations', metric_description=\n 'The duration of all \"iteration\" ops of ' +\n 'IndexedDB cursors.', units='ms', process_name='Browser'))\n\n def AddResults(self, model, renderer_process, interactions, results):\n self._stats.AddResults(model, renderer_process, interactions, results)\n",
"step-3": "<mask token>\n\n\nclass IndexedDBTimelineMetric(timeline_based_metric.TimelineBasedMetric):\n \"\"\"Metrics for IndexedDB operations.\n \"\"\"\n\n def __init__(self):\n super(IndexedDBTimelineMetric, self).__init__()\n self._stats = TraceEventStats()\n self._stats.AddInput(TraceEventStatsInput(event_category=\n 'IndexedDB', event_name='IndexedDBDatabase::GetOperation',\n metric_name='idb-gets', metric_description=\n 'The duration of all \"get\" ops in IndexedDB', units='ms',\n process_name='Browser'))\n self._stats.AddInput(TraceEventStatsInput(event_category=\n 'IndexedDB', event_name='IndexedDBDatabase::PutOperation',\n metric_name='idb-puts', metric_description=\n 'The duration of all \"put\" ops in IndexedDB', units='ms',\n process_name='Browser'))\n self._stats.AddInput(TraceEventStatsInput(event_category=\n 'IndexedDB', event_name='IndexedDBFactoryImpl::Open',\n metric_name='idb-opens', metric_description=\n 'The duration of all \"open\" ops in IndexedDB', units='ms',\n process_name='Browser'))\n self._stats.AddInput(TraceEventStatsInput(event_category=\n 'IndexedDB', event_name='IndexedDBTransaction::Commit',\n metric_name='idb-transaction-commits', metric_description=\n 'The duration of all \"commit\" ops of ' +\n 'transactions in IndexedDB.', units='ms', process_name='Browser'))\n self._stats.AddInput(TraceEventStatsInput(event_category=\n 'IndexedDB', event_name='IndexedDBFactoryImpl::DeleteDatabase',\n metric_name='idb-database-deletes', metric_description=\n 'The duration of all \"delete\" ops of ' + 'IndexedDB databases.',\n units='ms', process_name='Browser'))\n self._stats.AddInput(TraceEventStatsInput(event_category=\n 'IndexedDB', event_name=\n 'IndexedDBDatabase::OpenCursorOperation', metric_name=\n 'idb-cursor-opens', metric_description=\n 'The duration of all \"open\" ops of ' + 'IndexedDB cursors.',\n units='ms', process_name='Browser'))\n self._stats.AddInput(TraceEventStatsInput(event_category=\n 'IndexedDB', event_name=\n 'IndexedDBCursor::CursorIterationOperation', metric_name=\n 'idb-cursor-iterations', metric_description=\n 'The duration of all \"iteration\" ops of ' +\n 'IndexedDB cursors.', units='ms', process_name='Browser'))\n\n def AddResults(self, model, renderer_process, interactions, results):\n self._stats.AddResults(model, renderer_process, interactions, results)\n",
"step-4": "from telemetry.web_perf.metrics import timeline_based_metric\nfrom telemetry.web_perf.metrics.trace_event_stats import TraceEventStats\nfrom telemetry.web_perf.metrics.trace_event_stats import TraceEventStatsInput\n\n\nclass IndexedDBTimelineMetric(timeline_based_metric.TimelineBasedMetric):\n \"\"\"Metrics for IndexedDB operations.\n \"\"\"\n\n def __init__(self):\n super(IndexedDBTimelineMetric, self).__init__()\n self._stats = TraceEventStats()\n self._stats.AddInput(TraceEventStatsInput(event_category=\n 'IndexedDB', event_name='IndexedDBDatabase::GetOperation',\n metric_name='idb-gets', metric_description=\n 'The duration of all \"get\" ops in IndexedDB', units='ms',\n process_name='Browser'))\n self._stats.AddInput(TraceEventStatsInput(event_category=\n 'IndexedDB', event_name='IndexedDBDatabase::PutOperation',\n metric_name='idb-puts', metric_description=\n 'The duration of all \"put\" ops in IndexedDB', units='ms',\n process_name='Browser'))\n self._stats.AddInput(TraceEventStatsInput(event_category=\n 'IndexedDB', event_name='IndexedDBFactoryImpl::Open',\n metric_name='idb-opens', metric_description=\n 'The duration of all \"open\" ops in IndexedDB', units='ms',\n process_name='Browser'))\n self._stats.AddInput(TraceEventStatsInput(event_category=\n 'IndexedDB', event_name='IndexedDBTransaction::Commit',\n metric_name='idb-transaction-commits', metric_description=\n 'The duration of all \"commit\" ops of ' +\n 'transactions in IndexedDB.', units='ms', process_name='Browser'))\n self._stats.AddInput(TraceEventStatsInput(event_category=\n 'IndexedDB', event_name='IndexedDBFactoryImpl::DeleteDatabase',\n metric_name='idb-database-deletes', metric_description=\n 'The duration of all \"delete\" ops of ' + 'IndexedDB databases.',\n units='ms', process_name='Browser'))\n self._stats.AddInput(TraceEventStatsInput(event_category=\n 'IndexedDB', event_name=\n 'IndexedDBDatabase::OpenCursorOperation', metric_name=\n 'idb-cursor-opens', metric_description=\n 'The duration of all \"open\" ops of ' + 'IndexedDB cursors.',\n units='ms', process_name='Browser'))\n self._stats.AddInput(TraceEventStatsInput(event_category=\n 'IndexedDB', event_name=\n 'IndexedDBCursor::CursorIterationOperation', metric_name=\n 'idb-cursor-iterations', metric_description=\n 'The duration of all \"iteration\" ops of ' +\n 'IndexedDB cursors.', units='ms', process_name='Browser'))\n\n def AddResults(self, model, renderer_process, interactions, results):\n self._stats.AddResults(model, renderer_process, interactions, results)\n",
"step-5": "# Copyright 2015 The Chromium Authors. All rights reserved.\n# Use of this source code is governed by a BSD-style license that can be\n# found in the LICENSE file.\n\n\nfrom telemetry.web_perf.metrics import timeline_based_metric\nfrom telemetry.web_perf.metrics.trace_event_stats import TraceEventStats\nfrom telemetry.web_perf.metrics.trace_event_stats import TraceEventStatsInput\n\n\nclass IndexedDBTimelineMetric(timeline_based_metric.TimelineBasedMetric):\n \"\"\"Metrics for IndexedDB operations.\n \"\"\"\n\n def __init__(self):\n super(IndexedDBTimelineMetric, self).__init__()\n self._stats = TraceEventStats()\n\n self._stats.AddInput(TraceEventStatsInput(\n event_category='IndexedDB',\n event_name='IndexedDBDatabase::GetOperation',\n metric_name='idb-gets',\n metric_description='The duration of all \"get\" ops in IndexedDB',\n units='ms',\n process_name='Browser'))\n\n self._stats.AddInput(TraceEventStatsInput(\n event_category='IndexedDB',\n event_name='IndexedDBDatabase::PutOperation',\n metric_name='idb-puts',\n metric_description='The duration of all \"put\" ops in IndexedDB',\n units='ms',\n process_name='Browser'))\n\n self._stats.AddInput(TraceEventStatsInput(\n event_category='IndexedDB',\n event_name='IndexedDBFactoryImpl::Open',\n metric_name='idb-opens',\n metric_description='The duration of all \"open\" ops in IndexedDB',\n units='ms',\n process_name='Browser'))\n\n self._stats.AddInput(TraceEventStatsInput(\n event_category='IndexedDB',\n event_name='IndexedDBTransaction::Commit',\n metric_name='idb-transaction-commits',\n metric_description=('The duration of all \"commit\" ops of ' +\n 'transactions in IndexedDB.'),\n units='ms',\n process_name='Browser'))\n\n self._stats.AddInput(TraceEventStatsInput(\n event_category='IndexedDB',\n event_name='IndexedDBFactoryImpl::DeleteDatabase',\n metric_name='idb-database-deletes',\n metric_description=('The duration of all \"delete\" ops of ' +\n 'IndexedDB databases.'),\n units='ms',\n process_name='Browser'))\n\n self._stats.AddInput(TraceEventStatsInput(\n event_category='IndexedDB',\n event_name='IndexedDBDatabase::OpenCursorOperation',\n metric_name='idb-cursor-opens',\n metric_description=('The duration of all \"open\" ops of ' +\n 'IndexedDB cursors.'),\n units='ms',\n process_name='Browser'))\n\n self._stats.AddInput(TraceEventStatsInput(\n event_category='IndexedDB',\n event_name='IndexedDBCursor::CursorIterationOperation',\n metric_name='idb-cursor-iterations',\n metric_description=('The duration of all \"iteration\" ops of ' +\n 'IndexedDB cursors.'),\n units='ms',\n process_name='Browser'))\n\n def AddResults(self, model, renderer_process, interactions, results):\n self._stats.AddResults(model, renderer_process, interactions, results)\n",
"step-ids": [
1,
3,
4,
5,
6
]
}
|
[
1,
3,
4,
5,
6
] |
from pyparsing import ParseException
from pytest import raises
from easymql.expressions import Expression as exp
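# CMP(a, b) should translate to {'$cmp': [a, b]}, reject any arity other than two, and accept nested arithmetic.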
class TestComparisonExpression:
def test_cmp(self):
assert exp.parse('CMP(1, 2)') == {'$cmp': [1, 2]}
with raises(ParseException):
exp.parse('CMP(1)')
with raises(ParseException):
exp.parse('CMP(1, 2, 3)')
assert exp.parse('CMP(1, 3 + 2)') == {'$cmp': [1, {'$add': [3, 2]}]}
|
normal
|
{
"blob_id": "91959f6621f05b1b814a025f0b95c55cf683ded3",
"index": 5856,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass TestComparisonExpression:\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass TestComparisonExpression:\n\n def test_cmp(self):\n assert exp.parse('CMP(1, 2)') == {'$cmp': [1, 2]}\n with raises(ParseException):\n exp.parse('CMP(1)')\n with raises(ParseException):\n exp.parse('CMP(1, 2, 3)')\n assert exp.parse('CMP(1, 3 + 2)') == {'$cmp': [1, {'$add': [3, 2]}]}\n",
"step-4": "from pyparsing import ParseException\nfrom pytest import raises\nfrom easymql.expressions import Expression as exp\n\n\nclass TestComparisonExpression:\n\n def test_cmp(self):\n assert exp.parse('CMP(1, 2)') == {'$cmp': [1, 2]}\n with raises(ParseException):\n exp.parse('CMP(1)')\n with raises(ParseException):\n exp.parse('CMP(1, 2, 3)')\n assert exp.parse('CMP(1, 3 + 2)') == {'$cmp': [1, {'$add': [3, 2]}]}\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
# VGGNet
import numpy as np
np.random.seed(317)
from glob import glob
from itertools import cycle
from keras.applications.vgg19 import VGG19
from keras.optimizers import Adam
from keras.models import Model
from keras.layers import Input, BatchNormalization, Flatten, Dropout, Dense
from keras.utils import plot_model
from keras.callbacks import ModelCheckpoint, ReduceLROnPlateau, CSVLogger, EarlyStopping, Callback
from keras.losses import kullback_leibler_divergence
from math import ceil
from os import path, mkdir, listdir
from skimage.transform import resize
from scipy.misc import imread, imsave
from time import time
import argparse
import logging
import keras.backend as K
import pandas as pd
import tifffile as tif
import sys
sys.path.append('.')
from planet.utils.data_utils import tagset_to_ints, random_transforms
from planet.utils.keras_utils import HistoryPlot
from planet.utils.runtime import funcname
class VGGNet(object):
def __init__(self, checkpoint_name='VGGNet'):
self.config = {
'image_shape': [256, 256, 3],
'input_shape': [224, 224, 3],
'output_shape': [17, ],
'batch_size': 60,
'trn_steps': 680,
'trn_nb_epochs': 200,
'trn_transform': True,
'trn_imgs_csv': 'data/train_v2.csv',
'trn_imgs_dir': 'data/train-jpg',
'tst_imgs_csv': 'data/sample_submission_v2.csv',
'tst_imgs_dir': 'data/test-jpg'
}
self.checkpoint_name = checkpoint_name
self.imgs = []
self.lbls = []
self.net = None
self.rng = np.random
@property
def cpdir(self):
cpdir = 'checkpoints/%s_%s/' % (self.checkpoint_name, '_'.join([str(x) for x in self.config['input_shape']]))
if not path.exists(cpdir):
mkdir(cpdir)
return cpdir
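    # Build a VGG19 backbone with a sigmoid multi-label head and compile it with custom metrics.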
def create_net(self):
x = inputs = Input(shape=self.config['input_shape'])
vgg = VGG19(include_top=False, input_tensor=x)
outputs = Flatten()(vgg.output)
outputs = Dropout(0.1)(outputs)
outputs = Dense(self.config['output_shape'][0], activation='sigmoid')(outputs)
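        # Fraction of label slots whose ground truth is positive (batch-level diagnostic).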
def true_pos(yt, yp):
return K.sum(K.round(yt)) / K.sum(K.clip(yt, 1, 1))
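        # Fraction of label slots predicted positive after rounding at 0.5.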
def pred_pos(yt, yp):
return K.sum(K.round(yp)) / K.sum(K.clip(yt, 1, 1))
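        # F-beta score with beta=2 (recall weighted above precision), computed on rounded predictions.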
def F2(yt, yp):
yt, yp = K.round(yt), K.round(yp)
tp = K.sum(yt * yp)
fp = K.sum(K.clip(yp - yt, 0, 1))
fn = K.sum(K.clip(yt - yp, 0, 1))
p = tp / (tp + fp)
r = tp / (tp + fn)
b = 2.0
return (1 + b**2) * ((p * r) / (b**2 * p + r + K.epsilon()))
self.net = Model(inputs, outputs)
self.net.compile(optimizer=Adam(0.001), loss='binary_crossentropy',
metrics=['binary_accuracy', F2, true_pos, pred_pos])
self.net.summary()
plot_model(self.net, to_file='%s/net.png' % self.cpdir)
return
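    # Train from the batch generator with checkpointing, LR scheduling and early stopping on F2.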
def train(self):
batch_gen = self.train_batch_gen(self.config['trn_imgs_csv'], self.config[
'trn_imgs_dir'], self.config['trn_transform'])
cb = [
HistoryPlot('%s/history.png' % self.cpdir),
CSVLogger('%s/history.csv' % self.cpdir),
ModelCheckpoint('%s/loss.weights' % self.cpdir, monitor='loss', verbose=1,
save_best_only=True, mode='min', save_weights_only=True),
ModelCheckpoint('%s/F2.weights' % self.cpdir, monitor='F2',
verbose=1, save_best_only=True, mode='max', save_weights_only=True),
            ReduceLROnPlateau(monitor='F2', factor=0.8, patience=2, epsilon=0.005, verbose=1, mode='max'),  # F2 is maximized, so track plateaus with mode='max'
EarlyStopping(monitor='F2', min_delta=0.01, patience=10, verbose=1, mode='max')
]
self.net.fit_generator(batch_gen, steps_per_epoch=self.config['trn_steps'], verbose=1, callbacks=cb,
epochs=self.config['trn_nb_epochs'], workers=2, pickle_safe=True)
return
def get_mean_img(self, imgs_paths, mean_img_path):
'''Compute the mean image from the given paths and save it to the given path.'''
logger = logging.getLogger(funcname())
if not path.exists(mean_img_path):
mean_img = np.zeros(self.config['image_shape'], dtype=np.float32)
for idx, img_path in enumerate(imgs_paths):
mean_img += imread(img_path, mode='RGB').astype(np.float32) / len(imgs_paths)
if idx % 1000 == 0:
logger.info('%d/%d' % (idx, len(imgs_paths)))
imsave(mean_img_path, mean_img)
return imread(mean_img_path)
def train_batch_gen(self, imgs_csv, imgs_dir, transform):
logger = logging.getLogger(funcname())
# Read the CSV and extract image names and tags.
df = pd.read_csv(imgs_csv)
imgs_paths = ['%s/%s.jpg' % (imgs_dir, n) for n in df['image_name'].values]
tag_sets = [set(t.strip().split(' ')) for t in df['tags'].values]
# Compute the mean image for pre-processing.
mean_img = self.get_mean_img(imgs_paths, '%s/mean_img_trn.jpg' % self.cpdir)
mean_img = mean_img.astype(np.float32) / 255.
mean_img_mean = np.mean(mean_img)
img_preprocess = lambda img: img.astype(np.float32) / 255. - mean_img_mean
while True:
imgs_batch = np.zeros([self.config['batch_size'], ] + self.config['input_shape'])
tags_batch = np.zeros([self.config['batch_size'], ] + self.config['output_shape'])
random_idxs = cycle(np.random.choice(np.arange(len(imgs_paths)), len(imgs_paths)))
for batch_idx in range(self.config['batch_size']):
data_idx = next(random_idxs)
img = imread(imgs_paths[data_idx], mode='RGB')
img = img_preprocess(img)
img = resize(img, self.config['input_shape'], preserve_range=True, mode='constant')
if transform:
img = random_transforms(img, nb_min=0, nb_max=6)
imgs_batch[batch_idx] = img
tags_batch[batch_idx] = tagset_to_ints(tag_sets[data_idx])
yield imgs_batch, tags_batch
def predict(self, img_batch):
# Get the mean image
imgs_paths = listdir(self.config['trn_imgs_dir'])
mean_img_path = '%s/mean_img_trn.jpg' % self.cpdir
mean_img = self.get_mean_img(imgs_paths, mean_img_path).astype(np.float32) / 255.
mean_img_mean = np.mean(mean_img)
img_preprocess = lambda img: img.astype(np.float32) / 255. - mean_img_mean
for idx in range(len(img_batch)):
img_batch[idx] = img_preprocess(img_batch[idx])
tags_pred = self.net.predict(img_batch)
tags_pred = tags_pred.round().astype(np.uint8)
return tags_pred
if __name__ == "__main__":
from planet.model_runner import model_runner
model = VGGNet()
model_runner(model)
|
normal
|
{
"blob_id": "c6a4d566460a06504abf7e2c54be4f2ea36e01fb",
"index": 7735,
"step-1": "<mask token>\n\n\nclass VGGNet(object):\n\n def __init__(self, checkpoint_name='VGGNet'):\n self.config = {'image_shape': [256, 256, 3], 'input_shape': [224, \n 224, 3], 'output_shape': [17], 'batch_size': 60, 'trn_steps': \n 680, 'trn_nb_epochs': 200, 'trn_transform': True,\n 'trn_imgs_csv': 'data/train_v2.csv', 'trn_imgs_dir':\n 'data/train-jpg', 'tst_imgs_csv':\n 'data/sample_submission_v2.csv', 'tst_imgs_dir': 'data/test-jpg'}\n self.checkpoint_name = checkpoint_name\n self.imgs = []\n self.lbls = []\n self.net = None\n self.rng = np.random\n\n @property\n def cpdir(self):\n cpdir = 'checkpoints/%s_%s/' % (self.checkpoint_name, '_'.join([str\n (x) for x in self.config['input_shape']]))\n if not path.exists(cpdir):\n mkdir(cpdir)\n return cpdir\n\n def create_net(self):\n x = inputs = Input(shape=self.config['input_shape'])\n vgg = VGG19(include_top=False, input_tensor=x)\n outputs = Flatten()(vgg.output)\n outputs = Dropout(0.1)(outputs)\n outputs = Dense(self.config['output_shape'][0], activation='sigmoid')(\n outputs)\n\n def true_pos(yt, yp):\n return K.sum(K.round(yt)) / K.sum(K.clip(yt, 1, 1))\n\n def pred_pos(yt, yp):\n return K.sum(K.round(yp)) / K.sum(K.clip(yt, 1, 1))\n\n def F2(yt, yp):\n yt, yp = K.round(yt), K.round(yp)\n tp = K.sum(yt * yp)\n fp = K.sum(K.clip(yp - yt, 0, 1))\n fn = K.sum(K.clip(yt - yp, 0, 1))\n p = tp / (tp + fp)\n r = tp / (tp + fn)\n b = 2.0\n return (1 + b ** 2) * (p * r / (b ** 2 * p + r + K.epsilon()))\n self.net = Model(inputs, outputs)\n self.net.compile(optimizer=Adam(0.001), loss='binary_crossentropy',\n metrics=['binary_accuracy', F2, true_pos, pred_pos])\n self.net.summary()\n plot_model(self.net, to_file='%s/net.png' % self.cpdir)\n return\n <mask token>\n\n def get_mean_img(self, imgs_paths, mean_img_path):\n \"\"\"Compute the mean image from the given paths and save it to the given path.\"\"\"\n logger = logging.getLogger(funcname())\n if not path.exists(mean_img_path):\n mean_img = np.zeros(self.config['image_shape'], dtype=np.float32)\n for idx, img_path in enumerate(imgs_paths):\n mean_img += imread(img_path, mode='RGB').astype(np.float32\n ) / len(imgs_paths)\n if idx % 1000 == 0:\n logger.info('%d/%d' % (idx, len(imgs_paths)))\n imsave(mean_img_path, mean_img)\n return imread(mean_img_path)\n\n def train_batch_gen(self, imgs_csv, imgs_dir, transform):\n logger = logging.getLogger(funcname())\n df = pd.read_csv(imgs_csv)\n imgs_paths = [('%s/%s.jpg' % (imgs_dir, n)) for n in df[\n 'image_name'].values]\n tag_sets = [set(t.strip().split(' ')) for t in df['tags'].values]\n mean_img = self.get_mean_img(imgs_paths, '%s/mean_img_trn.jpg' %\n self.cpdir)\n mean_img = mean_img.astype(np.float32) / 255.0\n mean_img_mean = np.mean(mean_img)\n img_preprocess = lambda img: img.astype(np.float32\n ) / 255.0 - mean_img_mean\n while True:\n imgs_batch = np.zeros([self.config['batch_size']] + self.config\n ['input_shape'])\n tags_batch = np.zeros([self.config['batch_size']] + self.config\n ['output_shape'])\n random_idxs = cycle(np.random.choice(np.arange(len(imgs_paths)),\n len(imgs_paths)))\n for batch_idx in range(self.config['batch_size']):\n data_idx = next(random_idxs)\n img = imread(imgs_paths[data_idx], mode='RGB')\n img = img_preprocess(img)\n img = resize(img, self.config['input_shape'],\n preserve_range=True, mode='constant')\n if transform:\n img = random_transforms(img, nb_min=0, nb_max=6)\n imgs_batch[batch_idx] = img\n tags_batch[batch_idx] = tagset_to_ints(tag_sets[data_idx])\n yield imgs_batch, tags_batch\n\n def 
predict(self, img_batch):\n imgs_paths = listdir(self.config['trn_imgs_dir'])\n mean_img_path = '%s/mean_img_trn.jpg' % self.cpdir\n mean_img = self.get_mean_img(imgs_paths, mean_img_path).astype(np.\n float32) / 255.0\n mean_img_mean = np.mean(mean_img)\n img_preprocess = lambda img: img.astype(np.float32\n ) / 255.0 - mean_img_mean\n for idx in range(len(img_batch)):\n img_batch[idx] = img_preprocess(img_batch[idx])\n tags_pred = self.net.predict(img_batch)\n tags_pred = tags_pred.round().astype(np.uint8)\n return tags_pred\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass VGGNet(object):\n\n def __init__(self, checkpoint_name='VGGNet'):\n self.config = {'image_shape': [256, 256, 3], 'input_shape': [224, \n 224, 3], 'output_shape': [17], 'batch_size': 60, 'trn_steps': \n 680, 'trn_nb_epochs': 200, 'trn_transform': True,\n 'trn_imgs_csv': 'data/train_v2.csv', 'trn_imgs_dir':\n 'data/train-jpg', 'tst_imgs_csv':\n 'data/sample_submission_v2.csv', 'tst_imgs_dir': 'data/test-jpg'}\n self.checkpoint_name = checkpoint_name\n self.imgs = []\n self.lbls = []\n self.net = None\n self.rng = np.random\n\n @property\n def cpdir(self):\n cpdir = 'checkpoints/%s_%s/' % (self.checkpoint_name, '_'.join([str\n (x) for x in self.config['input_shape']]))\n if not path.exists(cpdir):\n mkdir(cpdir)\n return cpdir\n\n def create_net(self):\n x = inputs = Input(shape=self.config['input_shape'])\n vgg = VGG19(include_top=False, input_tensor=x)\n outputs = Flatten()(vgg.output)\n outputs = Dropout(0.1)(outputs)\n outputs = Dense(self.config['output_shape'][0], activation='sigmoid')(\n outputs)\n\n def true_pos(yt, yp):\n return K.sum(K.round(yt)) / K.sum(K.clip(yt, 1, 1))\n\n def pred_pos(yt, yp):\n return K.sum(K.round(yp)) / K.sum(K.clip(yt, 1, 1))\n\n def F2(yt, yp):\n yt, yp = K.round(yt), K.round(yp)\n tp = K.sum(yt * yp)\n fp = K.sum(K.clip(yp - yt, 0, 1))\n fn = K.sum(K.clip(yt - yp, 0, 1))\n p = tp / (tp + fp)\n r = tp / (tp + fn)\n b = 2.0\n return (1 + b ** 2) * (p * r / (b ** 2 * p + r + K.epsilon()))\n self.net = Model(inputs, outputs)\n self.net.compile(optimizer=Adam(0.001), loss='binary_crossentropy',\n metrics=['binary_accuracy', F2, true_pos, pred_pos])\n self.net.summary()\n plot_model(self.net, to_file='%s/net.png' % self.cpdir)\n return\n\n def train(self):\n batch_gen = self.train_batch_gen(self.config['trn_imgs_csv'], self.\n config['trn_imgs_dir'], self.config['trn_transform'])\n cb = [HistoryPlot('%s/history.png' % self.cpdir), CSVLogger(\n '%s/history.csv' % self.cpdir), ModelCheckpoint(\n '%s/loss.weights' % self.cpdir, monitor='loss', verbose=1,\n save_best_only=True, mode='min', save_weights_only=True),\n ModelCheckpoint('%s/F2.weights' % self.cpdir, monitor='F2',\n verbose=1, save_best_only=True, mode='max', save_weights_only=\n True), ReduceLROnPlateau(monitor='F2', factor=0.8, patience=2,\n epsilon=0.005, verbose=1, mode='min'), EarlyStopping(monitor=\n 'F2', min_delta=0.01, patience=10, verbose=1, mode='max')]\n self.net.fit_generator(batch_gen, steps_per_epoch=self.config[\n 'trn_steps'], verbose=1, callbacks=cb, epochs=self.config[\n 'trn_nb_epochs'], workers=2, pickle_safe=True)\n return\n\n def get_mean_img(self, imgs_paths, mean_img_path):\n \"\"\"Compute the mean image from the given paths and save it to the given path.\"\"\"\n logger = logging.getLogger(funcname())\n if not path.exists(mean_img_path):\n mean_img = np.zeros(self.config['image_shape'], dtype=np.float32)\n for idx, img_path in enumerate(imgs_paths):\n mean_img += imread(img_path, mode='RGB').astype(np.float32\n ) / len(imgs_paths)\n if idx % 1000 == 0:\n logger.info('%d/%d' % (idx, len(imgs_paths)))\n imsave(mean_img_path, mean_img)\n return imread(mean_img_path)\n\n def train_batch_gen(self, imgs_csv, imgs_dir, transform):\n logger = logging.getLogger(funcname())\n df = pd.read_csv(imgs_csv)\n imgs_paths = [('%s/%s.jpg' % (imgs_dir, n)) for n in df[\n 'image_name'].values]\n tag_sets = [set(t.strip().split(' ')) for t in df['tags'].values]\n mean_img = self.get_mean_img(imgs_paths, '%s/mean_img_trn.jpg' %\n self.cpdir)\n mean_img = 
mean_img.astype(np.float32) / 255.0\n mean_img_mean = np.mean(mean_img)\n img_preprocess = lambda img: img.astype(np.float32\n ) / 255.0 - mean_img_mean\n while True:\n imgs_batch = np.zeros([self.config['batch_size']] + self.config\n ['input_shape'])\n tags_batch = np.zeros([self.config['batch_size']] + self.config\n ['output_shape'])\n random_idxs = cycle(np.random.choice(np.arange(len(imgs_paths)),\n len(imgs_paths)))\n for batch_idx in range(self.config['batch_size']):\n data_idx = next(random_idxs)\n img = imread(imgs_paths[data_idx], mode='RGB')\n img = img_preprocess(img)\n img = resize(img, self.config['input_shape'],\n preserve_range=True, mode='constant')\n if transform:\n img = random_transforms(img, nb_min=0, nb_max=6)\n imgs_batch[batch_idx] = img\n tags_batch[batch_idx] = tagset_to_ints(tag_sets[data_idx])\n yield imgs_batch, tags_batch\n\n def predict(self, img_batch):\n imgs_paths = listdir(self.config['trn_imgs_dir'])\n mean_img_path = '%s/mean_img_trn.jpg' % self.cpdir\n mean_img = self.get_mean_img(imgs_paths, mean_img_path).astype(np.\n float32) / 255.0\n mean_img_mean = np.mean(mean_img)\n img_preprocess = lambda img: img.astype(np.float32\n ) / 255.0 - mean_img_mean\n for idx in range(len(img_batch)):\n img_batch[idx] = img_preprocess(img_batch[idx])\n tags_pred = self.net.predict(img_batch)\n tags_pred = tags_pred.round().astype(np.uint8)\n return tags_pred\n\n\n<mask token>\n",
"step-3": "<mask token>\nnp.random.seed(317)\n<mask token>\nsys.path.append('.')\n<mask token>\n\n\nclass VGGNet(object):\n\n def __init__(self, checkpoint_name='VGGNet'):\n self.config = {'image_shape': [256, 256, 3], 'input_shape': [224, \n 224, 3], 'output_shape': [17], 'batch_size': 60, 'trn_steps': \n 680, 'trn_nb_epochs': 200, 'trn_transform': True,\n 'trn_imgs_csv': 'data/train_v2.csv', 'trn_imgs_dir':\n 'data/train-jpg', 'tst_imgs_csv':\n 'data/sample_submission_v2.csv', 'tst_imgs_dir': 'data/test-jpg'}\n self.checkpoint_name = checkpoint_name\n self.imgs = []\n self.lbls = []\n self.net = None\n self.rng = np.random\n\n @property\n def cpdir(self):\n cpdir = 'checkpoints/%s_%s/' % (self.checkpoint_name, '_'.join([str\n (x) for x in self.config['input_shape']]))\n if not path.exists(cpdir):\n mkdir(cpdir)\n return cpdir\n\n def create_net(self):\n x = inputs = Input(shape=self.config['input_shape'])\n vgg = VGG19(include_top=False, input_tensor=x)\n outputs = Flatten()(vgg.output)\n outputs = Dropout(0.1)(outputs)\n outputs = Dense(self.config['output_shape'][0], activation='sigmoid')(\n outputs)\n\n def true_pos(yt, yp):\n return K.sum(K.round(yt)) / K.sum(K.clip(yt, 1, 1))\n\n def pred_pos(yt, yp):\n return K.sum(K.round(yp)) / K.sum(K.clip(yt, 1, 1))\n\n def F2(yt, yp):\n yt, yp = K.round(yt), K.round(yp)\n tp = K.sum(yt * yp)\n fp = K.sum(K.clip(yp - yt, 0, 1))\n fn = K.sum(K.clip(yt - yp, 0, 1))\n p = tp / (tp + fp)\n r = tp / (tp + fn)\n b = 2.0\n return (1 + b ** 2) * (p * r / (b ** 2 * p + r + K.epsilon()))\n self.net = Model(inputs, outputs)\n self.net.compile(optimizer=Adam(0.001), loss='binary_crossentropy',\n metrics=['binary_accuracy', F2, true_pos, pred_pos])\n self.net.summary()\n plot_model(self.net, to_file='%s/net.png' % self.cpdir)\n return\n\n def train(self):\n batch_gen = self.train_batch_gen(self.config['trn_imgs_csv'], self.\n config['trn_imgs_dir'], self.config['trn_transform'])\n cb = [HistoryPlot('%s/history.png' % self.cpdir), CSVLogger(\n '%s/history.csv' % self.cpdir), ModelCheckpoint(\n '%s/loss.weights' % self.cpdir, monitor='loss', verbose=1,\n save_best_only=True, mode='min', save_weights_only=True),\n ModelCheckpoint('%s/F2.weights' % self.cpdir, monitor='F2',\n verbose=1, save_best_only=True, mode='max', save_weights_only=\n True), ReduceLROnPlateau(monitor='F2', factor=0.8, patience=2,\n epsilon=0.005, verbose=1, mode='min'), EarlyStopping(monitor=\n 'F2', min_delta=0.01, patience=10, verbose=1, mode='max')]\n self.net.fit_generator(batch_gen, steps_per_epoch=self.config[\n 'trn_steps'], verbose=1, callbacks=cb, epochs=self.config[\n 'trn_nb_epochs'], workers=2, pickle_safe=True)\n return\n\n def get_mean_img(self, imgs_paths, mean_img_path):\n \"\"\"Compute the mean image from the given paths and save it to the given path.\"\"\"\n logger = logging.getLogger(funcname())\n if not path.exists(mean_img_path):\n mean_img = np.zeros(self.config['image_shape'], dtype=np.float32)\n for idx, img_path in enumerate(imgs_paths):\n mean_img += imread(img_path, mode='RGB').astype(np.float32\n ) / len(imgs_paths)\n if idx % 1000 == 0:\n logger.info('%d/%d' % (idx, len(imgs_paths)))\n imsave(mean_img_path, mean_img)\n return imread(mean_img_path)\n\n def train_batch_gen(self, imgs_csv, imgs_dir, transform):\n logger = logging.getLogger(funcname())\n df = pd.read_csv(imgs_csv)\n imgs_paths = [('%s/%s.jpg' % (imgs_dir, n)) for n in df[\n 'image_name'].values]\n tag_sets = [set(t.strip().split(' ')) for t in df['tags'].values]\n mean_img = 
self.get_mean_img(imgs_paths, '%s/mean_img_trn.jpg' %\n self.cpdir)\n mean_img = mean_img.astype(np.float32) / 255.0\n mean_img_mean = np.mean(mean_img)\n img_preprocess = lambda img: img.astype(np.float32\n ) / 255.0 - mean_img_mean\n while True:\n imgs_batch = np.zeros([self.config['batch_size']] + self.config\n ['input_shape'])\n tags_batch = np.zeros([self.config['batch_size']] + self.config\n ['output_shape'])\n random_idxs = cycle(np.random.choice(np.arange(len(imgs_paths)),\n len(imgs_paths)))\n for batch_idx in range(self.config['batch_size']):\n data_idx = next(random_idxs)\n img = imread(imgs_paths[data_idx], mode='RGB')\n img = img_preprocess(img)\n img = resize(img, self.config['input_shape'],\n preserve_range=True, mode='constant')\n if transform:\n img = random_transforms(img, nb_min=0, nb_max=6)\n imgs_batch[batch_idx] = img\n tags_batch[batch_idx] = tagset_to_ints(tag_sets[data_idx])\n yield imgs_batch, tags_batch\n\n def predict(self, img_batch):\n imgs_paths = listdir(self.config['trn_imgs_dir'])\n mean_img_path = '%s/mean_img_trn.jpg' % self.cpdir\n mean_img = self.get_mean_img(imgs_paths, mean_img_path).astype(np.\n float32) / 255.0\n mean_img_mean = np.mean(mean_img)\n img_preprocess = lambda img: img.astype(np.float32\n ) / 255.0 - mean_img_mean\n for idx in range(len(img_batch)):\n img_batch[idx] = img_preprocess(img_batch[idx])\n tags_pred = self.net.predict(img_batch)\n tags_pred = tags_pred.round().astype(np.uint8)\n return tags_pred\n\n\nif __name__ == '__main__':\n from planet.model_runner import model_runner\n model = VGGNet()\n model_runner(model)\n",
"step-4": "import numpy as np\nnp.random.seed(317)\nfrom glob import glob\nfrom itertools import cycle\nfrom keras.applications.vgg19 import VGG19\nfrom keras.optimizers import Adam\nfrom keras.models import Model\nfrom keras.layers import Input, BatchNormalization, Flatten, Dropout, Dense\nfrom keras.utils import plot_model\nfrom keras.callbacks import ModelCheckpoint, ReduceLROnPlateau, CSVLogger, EarlyStopping, Callback\nfrom keras.losses import kullback_leibler_divergence\nfrom math import ceil\nfrom os import path, mkdir, listdir\nfrom skimage.transform import resize\nfrom scipy.misc import imread, imsave\nfrom time import time\nimport argparse\nimport logging\nimport keras.backend as K\nimport pandas as pd\nimport tifffile as tif\nimport sys\nsys.path.append('.')\nfrom planet.utils.data_utils import tagset_to_ints, random_transforms\nfrom planet.utils.keras_utils import HistoryPlot\nfrom planet.utils.runtime import funcname\n\n\nclass VGGNet(object):\n\n def __init__(self, checkpoint_name='VGGNet'):\n self.config = {'image_shape': [256, 256, 3], 'input_shape': [224, \n 224, 3], 'output_shape': [17], 'batch_size': 60, 'trn_steps': \n 680, 'trn_nb_epochs': 200, 'trn_transform': True,\n 'trn_imgs_csv': 'data/train_v2.csv', 'trn_imgs_dir':\n 'data/train-jpg', 'tst_imgs_csv':\n 'data/sample_submission_v2.csv', 'tst_imgs_dir': 'data/test-jpg'}\n self.checkpoint_name = checkpoint_name\n self.imgs = []\n self.lbls = []\n self.net = None\n self.rng = np.random\n\n @property\n def cpdir(self):\n cpdir = 'checkpoints/%s_%s/' % (self.checkpoint_name, '_'.join([str\n (x) for x in self.config['input_shape']]))\n if not path.exists(cpdir):\n mkdir(cpdir)\n return cpdir\n\n def create_net(self):\n x = inputs = Input(shape=self.config['input_shape'])\n vgg = VGG19(include_top=False, input_tensor=x)\n outputs = Flatten()(vgg.output)\n outputs = Dropout(0.1)(outputs)\n outputs = Dense(self.config['output_shape'][0], activation='sigmoid')(\n outputs)\n\n def true_pos(yt, yp):\n return K.sum(K.round(yt)) / K.sum(K.clip(yt, 1, 1))\n\n def pred_pos(yt, yp):\n return K.sum(K.round(yp)) / K.sum(K.clip(yt, 1, 1))\n\n def F2(yt, yp):\n yt, yp = K.round(yt), K.round(yp)\n tp = K.sum(yt * yp)\n fp = K.sum(K.clip(yp - yt, 0, 1))\n fn = K.sum(K.clip(yt - yp, 0, 1))\n p = tp / (tp + fp)\n r = tp / (tp + fn)\n b = 2.0\n return (1 + b ** 2) * (p * r / (b ** 2 * p + r + K.epsilon()))\n self.net = Model(inputs, outputs)\n self.net.compile(optimizer=Adam(0.001), loss='binary_crossentropy',\n metrics=['binary_accuracy', F2, true_pos, pred_pos])\n self.net.summary()\n plot_model(self.net, to_file='%s/net.png' % self.cpdir)\n return\n\n def train(self):\n batch_gen = self.train_batch_gen(self.config['trn_imgs_csv'], self.\n config['trn_imgs_dir'], self.config['trn_transform'])\n cb = [HistoryPlot('%s/history.png' % self.cpdir), CSVLogger(\n '%s/history.csv' % self.cpdir), ModelCheckpoint(\n '%s/loss.weights' % self.cpdir, monitor='loss', verbose=1,\n save_best_only=True, mode='min', save_weights_only=True),\n ModelCheckpoint('%s/F2.weights' % self.cpdir, monitor='F2',\n verbose=1, save_best_only=True, mode='max', save_weights_only=\n True), ReduceLROnPlateau(monitor='F2', factor=0.8, patience=2,\n epsilon=0.005, verbose=1, mode='min'), EarlyStopping(monitor=\n 'F2', min_delta=0.01, patience=10, verbose=1, mode='max')]\n self.net.fit_generator(batch_gen, steps_per_epoch=self.config[\n 'trn_steps'], verbose=1, callbacks=cb, epochs=self.config[\n 'trn_nb_epochs'], workers=2, pickle_safe=True)\n return\n\n def 
get_mean_img(self, imgs_paths, mean_img_path):\n \"\"\"Compute the mean image from the given paths and save it to the given path.\"\"\"\n logger = logging.getLogger(funcname())\n if not path.exists(mean_img_path):\n mean_img = np.zeros(self.config['image_shape'], dtype=np.float32)\n for idx, img_path in enumerate(imgs_paths):\n mean_img += imread(img_path, mode='RGB').astype(np.float32\n ) / len(imgs_paths)\n if idx % 1000 == 0:\n logger.info('%d/%d' % (idx, len(imgs_paths)))\n imsave(mean_img_path, mean_img)\n return imread(mean_img_path)\n\n def train_batch_gen(self, imgs_csv, imgs_dir, transform):\n logger = logging.getLogger(funcname())\n df = pd.read_csv(imgs_csv)\n imgs_paths = [('%s/%s.jpg' % (imgs_dir, n)) for n in df[\n 'image_name'].values]\n tag_sets = [set(t.strip().split(' ')) for t in df['tags'].values]\n mean_img = self.get_mean_img(imgs_paths, '%s/mean_img_trn.jpg' %\n self.cpdir)\n mean_img = mean_img.astype(np.float32) / 255.0\n mean_img_mean = np.mean(mean_img)\n img_preprocess = lambda img: img.astype(np.float32\n ) / 255.0 - mean_img_mean\n while True:\n imgs_batch = np.zeros([self.config['batch_size']] + self.config\n ['input_shape'])\n tags_batch = np.zeros([self.config['batch_size']] + self.config\n ['output_shape'])\n random_idxs = cycle(np.random.choice(np.arange(len(imgs_paths)),\n len(imgs_paths)))\n for batch_idx in range(self.config['batch_size']):\n data_idx = next(random_idxs)\n img = imread(imgs_paths[data_idx], mode='RGB')\n img = img_preprocess(img)\n img = resize(img, self.config['input_shape'],\n preserve_range=True, mode='constant')\n if transform:\n img = random_transforms(img, nb_min=0, nb_max=6)\n imgs_batch[batch_idx] = img\n tags_batch[batch_idx] = tagset_to_ints(tag_sets[data_idx])\n yield imgs_batch, tags_batch\n\n def predict(self, img_batch):\n imgs_paths = listdir(self.config['trn_imgs_dir'])\n mean_img_path = '%s/mean_img_trn.jpg' % self.cpdir\n mean_img = self.get_mean_img(imgs_paths, mean_img_path).astype(np.\n float32) / 255.0\n mean_img_mean = np.mean(mean_img)\n img_preprocess = lambda img: img.astype(np.float32\n ) / 255.0 - mean_img_mean\n for idx in range(len(img_batch)):\n img_batch[idx] = img_preprocess(img_batch[idx])\n tags_pred = self.net.predict(img_batch)\n tags_pred = tags_pred.round().astype(np.uint8)\n return tags_pred\n\n\nif __name__ == '__main__':\n from planet.model_runner import model_runner\n model = VGGNet()\n model_runner(model)\n",
"step-5": "# VGGNet\nimport numpy as np\nnp.random.seed(317)\n\nfrom glob import glob\nfrom itertools import cycle\nfrom keras.applications.vgg19 import VGG19\nfrom keras.optimizers import Adam\nfrom keras.models import Model\nfrom keras.layers import Input, BatchNormalization, Flatten, Dropout, Dense\nfrom keras.utils import plot_model\nfrom keras.callbacks import ModelCheckpoint, ReduceLROnPlateau, CSVLogger, EarlyStopping, Callback\nfrom keras.losses import kullback_leibler_divergence\nfrom math import ceil\nfrom os import path, mkdir, listdir\nfrom skimage.transform import resize\nfrom scipy.misc import imread, imsave\nfrom time import time\nimport argparse\nimport logging\nimport keras.backend as K\nimport pandas as pd\nimport tifffile as tif\n\nimport sys\nsys.path.append('.')\nfrom planet.utils.data_utils import tagset_to_ints, random_transforms\nfrom planet.utils.keras_utils import HistoryPlot\nfrom planet.utils.runtime import funcname\n\n\nclass VGGNet(object):\n\n def __init__(self, checkpoint_name='VGGNet'):\n\n self.config = {\n 'image_shape': [256, 256, 3],\n 'input_shape': [224, 224, 3],\n 'output_shape': [17, ],\n 'batch_size': 60,\n 'trn_steps': 680,\n 'trn_nb_epochs': 200,\n 'trn_transform': True,\n 'trn_imgs_csv': 'data/train_v2.csv',\n 'trn_imgs_dir': 'data/train-jpg',\n 'tst_imgs_csv': 'data/sample_submission_v2.csv',\n 'tst_imgs_dir': 'data/test-jpg'\n }\n\n self.checkpoint_name = checkpoint_name\n self.imgs = []\n self.lbls = []\n self.net = None\n self.rng = np.random\n\n @property\n def cpdir(self):\n cpdir = 'checkpoints/%s_%s/' % (self.checkpoint_name, '_'.join([str(x) for x in self.config['input_shape']]))\n if not path.exists(cpdir):\n mkdir(cpdir)\n return cpdir\n\n def create_net(self):\n\n x = inputs = Input(shape=self.config['input_shape'])\n vgg = VGG19(include_top=False, input_tensor=x)\n\n outputs = Flatten()(vgg.output)\n outputs = Dropout(0.1)(outputs)\n outputs = Dense(self.config['output_shape'][0], activation='sigmoid')(outputs)\n\n def true_pos(yt, yp):\n return K.sum(K.round(yt)) / K.sum(K.clip(yt, 1, 1))\n\n def pred_pos(yt, yp):\n return K.sum(K.round(yp)) / K.sum(K.clip(yt, 1, 1))\n\n def F2(yt, yp):\n yt, yp = K.round(yt), K.round(yp)\n tp = K.sum(yt * yp)\n fp = K.sum(K.clip(yp - yt, 0, 1))\n fn = K.sum(K.clip(yt - yp, 0, 1))\n p = tp / (tp + fp)\n r = tp / (tp + fn)\n b = 2.0\n return (1 + b**2) * ((p * r) / (b**2 * p + r + K.epsilon()))\n\n self.net = Model(inputs, outputs)\n self.net.compile(optimizer=Adam(0.001), loss='binary_crossentropy',\n metrics=['binary_accuracy', F2, true_pos, pred_pos])\n self.net.summary()\n plot_model(self.net, to_file='%s/net.png' % self.cpdir)\n return\n\n def train(self):\n\n batch_gen = self.train_batch_gen(self.config['trn_imgs_csv'], self.config[\n 'trn_imgs_dir'], self.config['trn_transform'])\n\n cb = [\n HistoryPlot('%s/history.png' % self.cpdir),\n CSVLogger('%s/history.csv' % self.cpdir),\n ModelCheckpoint('%s/loss.weights' % self.cpdir, monitor='loss', verbose=1,\n save_best_only=True, mode='min', save_weights_only=True),\n ModelCheckpoint('%s/F2.weights' % self.cpdir, monitor='F2',\n verbose=1, save_best_only=True, mode='max', save_weights_only=True),\n ReduceLROnPlateau(monitor='F2', factor=0.8, patience=2, epsilon=0.005, verbose=1, mode='min'),\n EarlyStopping(monitor='F2', min_delta=0.01, patience=10, verbose=1, mode='max')\n ]\n\n self.net.fit_generator(batch_gen, steps_per_epoch=self.config['trn_steps'], verbose=1, callbacks=cb,\n epochs=self.config['trn_nb_epochs'], workers=2, 
pickle_safe=True)\n\n return\n\n def get_mean_img(self, imgs_paths, mean_img_path):\n '''Compute the mean image from the given paths and save it to the given path.'''\n logger = logging.getLogger(funcname())\n if not path.exists(mean_img_path):\n mean_img = np.zeros(self.config['image_shape'], dtype=np.float32)\n for idx, img_path in enumerate(imgs_paths):\n mean_img += imread(img_path, mode='RGB').astype(np.float32) / len(imgs_paths)\n if idx % 1000 == 0:\n logger.info('%d/%d' % (idx, len(imgs_paths)))\n imsave(mean_img_path, mean_img)\n return imread(mean_img_path)\n\n def train_batch_gen(self, imgs_csv, imgs_dir, transform):\n\n logger = logging.getLogger(funcname())\n\n # Read the CSV and extract image names and tags.\n df = pd.read_csv(imgs_csv)\n imgs_paths = ['%s/%s.jpg' % (imgs_dir, n) for n in df['image_name'].values]\n tag_sets = [set(t.strip().split(' ')) for t in df['tags'].values]\n\n # Compute the mean image for pre-processing.\n mean_img = self.get_mean_img(imgs_paths, '%s/mean_img_trn.jpg' % self.cpdir)\n mean_img = mean_img.astype(np.float32) / 255.\n mean_img_mean = np.mean(mean_img)\n img_preprocess = lambda img: img.astype(np.float32) / 255. - mean_img_mean\n\n while True:\n\n imgs_batch = np.zeros([self.config['batch_size'], ] + self.config['input_shape'])\n tags_batch = np.zeros([self.config['batch_size'], ] + self.config['output_shape'])\n random_idxs = cycle(np.random.choice(np.arange(len(imgs_paths)), len(imgs_paths)))\n\n for batch_idx in range(self.config['batch_size']):\n data_idx = next(random_idxs)\n img = imread(imgs_paths[data_idx], mode='RGB')\n img = img_preprocess(img)\n img = resize(img, self.config['input_shape'], preserve_range=True, mode='constant')\n if transform:\n img = random_transforms(img, nb_min=0, nb_max=6)\n imgs_batch[batch_idx] = img\n tags_batch[batch_idx] = tagset_to_ints(tag_sets[data_idx])\n\n yield imgs_batch, tags_batch\n\n def predict(self, img_batch):\n\n # Get the mean image\n imgs_paths = listdir(self.config['trn_imgs_dir'])\n mean_img_path = '%s/mean_img_trn.jpg' % self.cpdir\n mean_img = self.get_mean_img(imgs_paths, mean_img_path).astype(np.float32) / 255.\n mean_img_mean = np.mean(mean_img)\n img_preprocess = lambda img: img.astype(np.float32) / 255. - mean_img_mean\n\n for idx in range(len(img_batch)):\n img_batch[idx] = img_preprocess(img_batch[idx])\n\n tags_pred = self.net.predict(img_batch)\n tags_pred = tags_pred.round().astype(np.uint8)\n return tags_pred\n\nif __name__ == \"__main__\":\n from planet.model_runner import model_runner\n model = VGGNet()\n model_runner(model)\n",
"step-ids": [
7,
8,
9,
10,
11
]
}
|
[
7,
8,
9,
10,
11
] |
<|reserved_special_token_0|>
class PageOne(tk.Frame):
def __init__(self, master):
tk.Frame.__init__(self, master)
frame_left = Frame(self)
self.frame_left = frame_left
frame_left.pack(fill=BOTH, side=LEFT)
self.label = tk.Label(frame_left, text='', font=('Helvetica', 10),
fg='red')
self.label.pack()
self.bagniere_bleu = tk.Canvas(frame_left, width=50, height=3)
self.bagniere_bleu.pack(side='top', anchor='c')
self.bagniere_bleu.create_rectangle(0, 3, 50, 0, fill='blue')
self.Nombre_1 = Entry(frame_left)
self.Nombre_1.pack(side='top', anchor='w')
self.bagniere_bleu = tk.Canvas(frame_left, width=50, height=3)
self.bagniere_bleu.pack(side='top', anchor='c')
self.bagniere_bleu.create_rectangle(0, 3, 50, 0, fill='red')
self.Nombre_2 = Entry(frame_left)
self.Nombre_2.pack(side='top', anchor='w')
tk.Button(frame_left, text='Go back to start page', command=lambda :
master.switch_frame(StartPage)).pack(side='bottom')
self.frame1 = Frame(self)
self.frame1.pack(fill='x')
self.rectangle = tk.Canvas(self.frame1)
self.rectangle.pack()
self.create_ret(self.rectangle)
self.master = master
self.commencer_un_jeu()
<|reserved_special_token_0|>
<|reserved_special_token_0|>
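    # Refresh the elapsed-time label and reschedule itself every second while a round is running.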
def update_clock(self):
self.temps_de_rect = time.time() - self.debut
self.temps_de_rect = time.strftime('%H:%M:%S', time.gmtime(self.
temps_de_rect))
self.label.configure(text=self.temps_de_rect)
if self.fin:
self.master.after(1000, self.update_clock)
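    # Start a new round: reset the timer and board, then draw a random number of red and blue circles.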
def commencer_un_jeu(self):
self.fin = True
try:
self.rejouer.destroy()
self.label.config(text='')
self.Nombre_2.delete(0, END)
self.Nombre_1.delete(0, END)
except:
pass
self.bt_valider = tk.Button(self.frame_left, text='valider',
command=lambda : self.fin_du_jeu())
self.bt_valider.pack(side='top', anchor='w')
self.debut = time.time()
self.temps_de_rect = time.time() - self.debut
self.temps_de_rect = time.strftime('%H:%M:%S', time.gmtime(self.
temps_de_rect))
self.label.configure(text=self.temps_de_rect)
self.update_clock()
self.rectangle.destroy()
self.rectangle = tk.Canvas(self.frame1)
self.rectangle.pack()
self.create_ret(self.rectangle)
self.nombre_j1 = random.randint(1, 10)
self.nombre_j2 = random.randint(1, 10)
for _ in range(self.nombre_j2):
self.create_circle(20, self.rectangle, 'red')
for _ in range(self.nombre_j1):
self.create_circle(20, self.rectangle, 'blue')
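    # Check the typed counts against the actual circle counts and display the result on the canvas.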
def fin_du_jeu(self):
self.fin = False
if int(self.Nombre_1.get()) == self.nombre_j1 and int(self.Nombre_2
.get()) == self.nombre_j2:
self.bt_valider.destroy()
self.rejouer = Button(self.frame_left, text='Rejouer', command=
lambda : self.commencer_un_jeu())
self.rejouer.pack(side='top', fill='x')
self.temps_de_rect = time.time() - self.debut
self.temps_de_rect = time.strftime('%H:%M:%S', time.gmtime(self
.temps_de_rect))
self.label.configure(text=self.temps_de_rect)
self.rectangle.create_text(200, 150, fill='darkblue', font=
'Times 20 italic bold', text='Victoire')
else:
self.bt_valider.destroy()
self.rejouer = Button(self.frame_left, text='Rejouer', command=
lambda : self.commencer_un_jeu())
self.rejouer.pack(side='top', fill='x')
self.temps_de_rect = time.time() - self.debut
self.temps_de_rect = time.strftime('%H:%M:%S', time.gmtime(self
.temps_de_rect))
self.label.configure(text=self.temps_de_rect)
self.rectangle.create_text(200, 150, fill='darkblue', font=
'Times 20 italic bold', text='Defaite')
class SampleApp(tk.Tk):
def __init__(self):
tk.Tk.__init__(self)
self._frame = None
self.switch_frame(StartPage)
def timer(self, frame_game):
self.after(1000, frame_game.update_clock)
def switch_frame(self, frame_class, num=False):
new_frame = frame_class(self)
if self._frame is not None:
self._frame.destroy()
self._frame = new_frame
self._frame.pack()
class PageTwo(tk.Frame):
def __init__(self, master):
tk.Frame.__init__(self, master)
tk.Frame.configure(self, bg='red')
tk.Label(self, text='Page two', font=('Helvetica', 18, 'bold')).pack(
side='top', fill='x', pady=5)
tk.Button(self, text='Go back to start page', command=lambda :
master.switch_frame(StartPage)).pack()
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class PageOne(tk.Frame):
def __init__(self, master):
tk.Frame.__init__(self, master)
frame_left = Frame(self)
self.frame_left = frame_left
frame_left.pack(fill=BOTH, side=LEFT)
self.label = tk.Label(frame_left, text='', font=('Helvetica', 10),
fg='red')
self.label.pack()
self.bagniere_bleu = tk.Canvas(frame_left, width=50, height=3)
self.bagniere_bleu.pack(side='top', anchor='c')
self.bagniere_bleu.create_rectangle(0, 3, 50, 0, fill='blue')
self.Nombre_1 = Entry(frame_left)
self.Nombre_1.pack(side='top', anchor='w')
self.bagniere_bleu = tk.Canvas(frame_left, width=50, height=3)
self.bagniere_bleu.pack(side='top', anchor='c')
self.bagniere_bleu.create_rectangle(0, 3, 50, 0, fill='red')
self.Nombre_2 = Entry(frame_left)
self.Nombre_2.pack(side='top', anchor='w')
tk.Button(frame_left, text='Go back to start page', command=lambda :
master.switch_frame(StartPage)).pack(side='bottom')
self.frame1 = Frame(self)
self.frame1.pack(fill='x')
self.rectangle = tk.Canvas(self.frame1)
self.rectangle.pack()
self.create_ret(self.rectangle)
self.master = master
self.commencer_un_jeu()
<|reserved_special_token_0|>
def create_ret(self, canvas):
return canvas.create_rectangle(0, 500, 500, 0, fill='#fdffdb')
def update_clock(self):
self.temps_de_rect = time.time() - self.debut
self.temps_de_rect = time.strftime('%H:%M:%S', time.gmtime(self.
temps_de_rect))
self.label.configure(text=self.temps_de_rect)
if self.fin:
self.master.after(1000, self.update_clock)
def commencer_un_jeu(self):
self.fin = True
try:
self.rejouer.destroy()
self.label.config(text='')
self.Nombre_2.delete(0, END)
self.Nombre_1.delete(0, END)
except:
pass
self.bt_valider = tk.Button(self.frame_left, text='valider',
command=lambda : self.fin_du_jeu())
self.bt_valider.pack(side='top', anchor='w')
self.debut = time.time()
self.temps_de_rect = time.time() - self.debut
self.temps_de_rect = time.strftime('%H:%M:%S', time.gmtime(self.
temps_de_rect))
self.label.configure(text=self.temps_de_rect)
self.update_clock()
self.rectangle.destroy()
self.rectangle = tk.Canvas(self.frame1)
self.rectangle.pack()
self.create_ret(self.rectangle)
self.nombre_j1 = random.randint(1, 10)
self.nombre_j2 = random.randint(1, 10)
for _ in range(self.nombre_j2):
self.create_circle(20, self.rectangle, 'red')
for _ in range(self.nombre_j1):
self.create_circle(20, self.rectangle, 'blue')
def fin_du_jeu(self):
self.fin = False
if int(self.Nombre_1.get()) == self.nombre_j1 and int(self.Nombre_2
.get()) == self.nombre_j2:
self.bt_valider.destroy()
self.rejouer = Button(self.frame_left, text='Rejouer', command=
lambda : self.commencer_un_jeu())
self.rejouer.pack(side='top', fill='x')
self.temps_de_rect = time.time() - self.debut
self.temps_de_rect = time.strftime('%H:%M:%S', time.gmtime(self
.temps_de_rect))
self.label.configure(text=self.temps_de_rect)
self.rectangle.create_text(200, 150, fill='darkblue', font=
'Times 20 italic bold', text='Victoire')
else:
self.bt_valider.destroy()
self.rejouer = Button(self.frame_left, text='Rejouer', command=
lambda : self.commencer_un_jeu())
self.rejouer.pack(side='top', fill='x')
self.temps_de_rect = time.time() - self.debut
self.temps_de_rect = time.strftime('%H:%M:%S', time.gmtime(self
.temps_de_rect))
self.label.configure(text=self.temps_de_rect)
self.rectangle.create_text(200, 150, fill='darkblue', font=
'Times 20 italic bold', text='Defaite')
class SampleApp(tk.Tk):
def __init__(self):
tk.Tk.__init__(self)
self._frame = None
self.switch_frame(StartPage)
def timer(self, frame_game):
self.after(1000, frame_game.update_clock)
def switch_frame(self, frame_class, num=False):
new_frame = frame_class(self)
if self._frame is not None:
self._frame.destroy()
self._frame = new_frame
self._frame.pack()
class PageTwo(tk.Frame):
def __init__(self, master):
tk.Frame.__init__(self, master)
tk.Frame.configure(self, bg='red')
tk.Label(self, text='Page two', font=('Helvetica', 18, 'bold')).pack(
side='top', fill='x', pady=5)
tk.Button(self, text='Go back to start page', command=lambda :
master.switch_frame(StartPage)).pack()
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class StartPage(tk.Frame):
def __init__(self, master):
tk.Frame.__init__(self, master)
tk.Frame.configure(self, bg='#d0a3d8', height=200, width=200)
tk.Label(self, text='Mini Jeu: \n P-0', font=('Helvetica', 18, 'bold')
).pack(side='top', fill='x', pady=5)
bt = Button(self, text='Jouer', command=lambda : master.
switch_frame(PageOne, num=True))
bt.pack(fill=BOTH, expand=True)
class PageOne(tk.Frame):
def __init__(self, master):
tk.Frame.__init__(self, master)
frame_left = Frame(self)
self.frame_left = frame_left
frame_left.pack(fill=BOTH, side=LEFT)
self.label = tk.Label(frame_left, text='', font=('Helvetica', 10),
fg='red')
self.label.pack()
self.bagniere_bleu = tk.Canvas(frame_left, width=50, height=3)
self.bagniere_bleu.pack(side='top', anchor='c')
self.bagniere_bleu.create_rectangle(0, 3, 50, 0, fill='blue')
self.Nombre_1 = Entry(frame_left)
self.Nombre_1.pack(side='top', anchor='w')
self.bagniere_bleu = tk.Canvas(frame_left, width=50, height=3)
self.bagniere_bleu.pack(side='top', anchor='c')
self.bagniere_bleu.create_rectangle(0, 3, 50, 0, fill='red')
self.Nombre_2 = Entry(frame_left)
self.Nombre_2.pack(side='top', anchor='w')
tk.Button(frame_left, text='Go back to start page', command=lambda :
master.switch_frame(StartPage)).pack(side='bottom')
self.frame1 = Frame(self)
self.frame1.pack(fill='x')
self.rectangle = tk.Canvas(self.frame1)
self.rectangle.pack()
self.create_ret(self.rectangle)
self.master = master
self.commencer_un_jeu()
def create_circle(self, r, canvasName, color):
x = random.randint(20, 300)
y = random.randint(20, 250)
x0 = x - r
y0 = y - r
x1 = x + r
y1 = y + r
return canvasName.create_oval(x0, y0, x1, y1, fill=color)
def create_ret(self, canvas):
return canvas.create_rectangle(0, 500, 500, 0, fill='#fdffdb')
def update_clock(self):
self.temps_de_rect = time.time() - self.debut
self.temps_de_rect = time.strftime('%H:%M:%S', time.gmtime(self.
temps_de_rect))
self.label.configure(text=self.temps_de_rect)
if self.fin:
self.master.after(1000, self.update_clock)
def commencer_un_jeu(self):
self.fin = True
try:
self.rejouer.destroy()
self.label.config(text='')
self.Nombre_2.delete(0, END)
self.Nombre_1.delete(0, END)
except:
pass
self.bt_valider = tk.Button(self.frame_left, text='valider',
command=lambda : self.fin_du_jeu())
self.bt_valider.pack(side='top', anchor='w')
self.debut = time.time()
self.temps_de_rect = time.time() - self.debut
self.temps_de_rect = time.strftime('%H:%M:%S', time.gmtime(self.
temps_de_rect))
self.label.configure(text=self.temps_de_rect)
self.update_clock()
self.rectangle.destroy()
self.rectangle = tk.Canvas(self.frame1)
self.rectangle.pack()
self.create_ret(self.rectangle)
self.nombre_j1 = random.randint(1, 10)
self.nombre_j2 = random.randint(1, 10)
for _ in range(self.nombre_j2):
self.create_circle(20, self.rectangle, 'red')
for _ in range(self.nombre_j1):
self.create_circle(20, self.rectangle, 'blue')
def fin_du_jeu(self):
self.fin = False
if int(self.Nombre_1.get()) == self.nombre_j1 and int(self.Nombre_2
.get()) == self.nombre_j2:
self.bt_valider.destroy()
self.rejouer = Button(self.frame_left, text='Rejouer', command=
lambda : self.commencer_un_jeu())
self.rejouer.pack(side='top', fill='x')
self.temps_de_rect = time.time() - self.debut
self.temps_de_rect = time.strftime('%H:%M:%S', time.gmtime(self
.temps_de_rect))
self.label.configure(text=self.temps_de_rect)
self.rectangle.create_text(200, 150, fill='darkblue', font=
'Times 20 italic bold', text='Victoire')
else:
self.bt_valider.destroy()
self.rejouer = Button(self.frame_left, text='Rejouer', command=
lambda : self.commencer_un_jeu())
self.rejouer.pack(side='top', fill='x')
self.temps_de_rect = time.time() - self.debut
self.temps_de_rect = time.strftime('%H:%M:%S', time.gmtime(self
.temps_de_rect))
self.label.configure(text=self.temps_de_rect)
self.rectangle.create_text(200, 150, fill='darkblue', font=
'Times 20 italic bold', text='Defaite')
class SampleApp(tk.Tk):
def __init__(self):
tk.Tk.__init__(self)
self._frame = None
self.switch_frame(StartPage)
def timer(self, frame_game):
self.after(1000, frame_game.update_clock)
def switch_frame(self, frame_class, num=False):
new_frame = frame_class(self)
if self._frame is not None:
self._frame.destroy()
self._frame = new_frame
self._frame.pack()
class PageTwo(tk.Frame):
def __init__(self, master):
tk.Frame.__init__(self, master)
tk.Frame.configure(self, bg='red')
tk.Label(self, text='Page two', font=('Helvetica', 18, 'bold')).pack(
side='top', fill='x', pady=5)
tk.Button(self, text='Go back to start page', command=lambda :
master.switch_frame(StartPage)).pack()
<|reserved_special_token_0|>
<|reserved_special_token_1|>
import tkinter as tk
from tkinter import Tk, BOTH, RIGHT, LEFT, END
from tkinter.ttk import Frame, Label, Style, Entry
from tkinter.ttk import Frame, Button, Style
import random
import time
class StartPage(tk.Frame):
def __init__(self, master):
tk.Frame.__init__(self, master)
tk.Frame.configure(self, bg='#d0a3d8', height=200, width=200)
tk.Label(self, text='Mini Jeu: \n P-0', font=('Helvetica', 18, 'bold')
).pack(side='top', fill='x', pady=5)
bt = Button(self, text='Jouer', command=lambda : master.
switch_frame(PageOne, num=True))
bt.pack(fill=BOTH, expand=True)
class PageOne(tk.Frame):
def __init__(self, master):
tk.Frame.__init__(self, master)
frame_left = Frame(self)
self.frame_left = frame_left
frame_left.pack(fill=BOTH, side=LEFT)
self.label = tk.Label(frame_left, text='', font=('Helvetica', 10),
fg='red')
self.label.pack()
self.bagniere_bleu = tk.Canvas(frame_left, width=50, height=3)
self.bagniere_bleu.pack(side='top', anchor='c')
self.bagniere_bleu.create_rectangle(0, 3, 50, 0, fill='blue')
self.Nombre_1 = Entry(frame_left)
self.Nombre_1.pack(side='top', anchor='w')
self.bagniere_bleu = tk.Canvas(frame_left, width=50, height=3)
self.bagniere_bleu.pack(side='top', anchor='c')
self.bagniere_bleu.create_rectangle(0, 3, 50, 0, fill='red')
self.Nombre_2 = Entry(frame_left)
self.Nombre_2.pack(side='top', anchor='w')
tk.Button(frame_left, text='Go back to start page', command=lambda :
master.switch_frame(StartPage)).pack(side='bottom')
self.frame1 = Frame(self)
self.frame1.pack(fill='x')
self.rectangle = tk.Canvas(self.frame1)
self.rectangle.pack()
self.create_ret(self.rectangle)
self.master = master
self.commencer_un_jeu()
def create_circle(self, r, canvasName, color):
x = random.randint(20, 300)
y = random.randint(20, 250)
x0 = x - r
y0 = y - r
x1 = x + r
y1 = y + r
return canvasName.create_oval(x0, y0, x1, y1, fill=color)
def create_ret(self, canvas):
return canvas.create_rectangle(0, 500, 500, 0, fill='#fdffdb')
def update_clock(self):
self.temps_de_rect = time.time() - self.debut
self.temps_de_rect = time.strftime('%H:%M:%S', time.gmtime(self.
temps_de_rect))
self.label.configure(text=self.temps_de_rect)
if self.fin:
self.master.after(1000, self.update_clock)
def commencer_un_jeu(self):
self.fin = True
try:
self.rejouer.destroy()
self.label.config(text='')
self.Nombre_2.delete(0, END)
self.Nombre_1.delete(0, END)
except:
pass
self.bt_valider = tk.Button(self.frame_left, text='valider',
command=lambda : self.fin_du_jeu())
self.bt_valider.pack(side='top', anchor='w')
self.debut = time.time()
self.temps_de_rect = time.time() - self.debut
self.temps_de_rect = time.strftime('%H:%M:%S', time.gmtime(self.
temps_de_rect))
self.label.configure(text=self.temps_de_rect)
self.update_clock()
self.rectangle.destroy()
self.rectangle = tk.Canvas(self.frame1)
self.rectangle.pack()
self.create_ret(self.rectangle)
self.nombre_j1 = random.randint(1, 10)
self.nombre_j2 = random.randint(1, 10)
for _ in range(self.nombre_j2):
self.create_circle(20, self.rectangle, 'red')
for _ in range(self.nombre_j1):
self.create_circle(20, self.rectangle, 'blue')
def fin_du_jeu(self):
self.fin = False
if int(self.Nombre_1.get()) == self.nombre_j1 and int(self.Nombre_2
.get()) == self.nombre_j2:
self.bt_valider.destroy()
self.rejouer = Button(self.frame_left, text='Rejouer', command=
lambda : self.commencer_un_jeu())
self.rejouer.pack(side='top', fill='x')
self.temps_de_rect = time.time() - self.debut
self.temps_de_rect = time.strftime('%H:%M:%S', time.gmtime(self
.temps_de_rect))
self.label.configure(text=self.temps_de_rect)
self.rectangle.create_text(200, 150, fill='darkblue', font=
'Times 20 italic bold', text='Victoire')
else:
self.bt_valider.destroy()
self.rejouer = Button(self.frame_left, text='Rejouer', command=
lambda : self.commencer_un_jeu())
self.rejouer.pack(side='top', fill='x')
self.temps_de_rect = time.time() - self.debut
self.temps_de_rect = time.strftime('%H:%M:%S', time.gmtime(self
.temps_de_rect))
self.label.configure(text=self.temps_de_rect)
self.rectangle.create_text(200, 150, fill='darkblue', font=
'Times 20 italic bold', text='Defaite')
class SampleApp(tk.Tk):
def __init__(self):
tk.Tk.__init__(self)
self._frame = None
self.switch_frame(StartPage)
def timer(self, frame_game):
self.after(1000, frame_game.update_clock)
def switch_frame(self, frame_class, num=False):
new_frame = frame_class(self)
if self._frame is not None:
self._frame.destroy()
self._frame = new_frame
self._frame.pack()
class PageTwo(tk.Frame):
def __init__(self, master):
tk.Frame.__init__(self, master)
tk.Frame.configure(self, bg='red')
tk.Label(self, text='Page two', font=('Helvetica', 18, 'bold')).pack(
side='top', fill='x', pady=5)
tk.Button(self, text='Go back to start page', command=lambda :
master.switch_frame(StartPage)).pack()
if __name__ == '__main__':
app = SampleApp()
app.geometry('800x800')
app.mainloop()
<|reserved_special_token_1|>
import tkinter as tk
from tkinter import Tk, BOTH,RIGHT,LEFT,END
from tkinter.ttk import Frame, Label, Style,Entry
from tkinter.ttk import Frame, Button, Style
import random
import time
class StartPage(tk.Frame):
def __init__(self, master):
tk.Frame.__init__(self, master)
tk.Frame.configure(self,bg="#d0a3d8",height=200,width=200)
tk.Label(self, text="Mini Jeu: \n P-0", font=('Helvetica', 18, "bold")).pack(side="top", fill="x", pady=5)
bt=Button(self, text="Jouer",
command=lambda: master.switch_frame(PageOne,num=True))
bt.pack(fill=BOTH,expand=True)
# tk.Button(self, text="Go to page two",
# command=lambda: master.switch_frame(PageTwo)).pack()
class PageOne(tk.Frame):
def __init__(self, master):
tk.Frame.__init__(self, master)
# tk.Frame.configure(self,bg='blue')
# tk.Label(self, text="Page de jeu", font=('Helvetica', 18, "bold")).pack(side="top", fill=BOTH, pady=5)
frame_left=Frame(self)
self.frame_left=frame_left
frame_left.pack(fill=BOTH,side=LEFT)
# add entry to this frame
self.label=tk.Label(frame_left , text="", font=('Helvetica', 10), fg='red')
self.label.pack()
self.bagniere_bleu=tk.Canvas(frame_left,width=50,height=3)
self.bagniere_bleu.pack(side='top',anchor='c')
self.bagniere_bleu.create_rectangle(0,3,50,0,fill='blue')
self.Nombre_1=Entry(frame_left)
self.Nombre_1.pack(side='top',anchor='w')
        # banner to distinguish the colors
self.bagniere_bleu=tk.Canvas(frame_left,width=50,height=3)
self.bagniere_bleu.pack(side='top',anchor='c')
self.bagniere_bleu.create_rectangle(0,3,50,0,fill='red')
self.Nombre_2=Entry(frame_left)
self.Nombre_2.pack(side='top',anchor='w')
tk.Button(frame_left, text="Go back to start page",
command=lambda: master.switch_frame(StartPage)).pack(side='bottom')
self.frame1 = Frame(self)
self.frame1.pack(fill='x')
self.rectangle=tk.Canvas(self.frame1)
self.rectangle.pack()
self.create_ret(self.rectangle)
# self.update_clock()
self.master=master
self.commencer_un_jeu()
def create_circle(self,r, canvasName,color): #center coordinates, radius
x=random.randint(20,300)
y=random.randint(20,250)
x0 = x - r
y0 = y - r
x1 = x + r
y1 = y + r
return canvasName.create_oval(x0, y0, x1, y1,fill=color)
def create_ret(self,canvas):
return canvas.create_rectangle(0,500,500,0,fill="#fdffdb")
def update_clock(self):
self.temps_de_rect=(time.time()-self.debut)
self.temps_de_rect=time.strftime("%H:%M:%S", time.gmtime(self.temps_de_rect))
self.label.configure(text=self.temps_de_rect)
if self.fin:
self.master.after(1000,self.update_clock)
def commencer_un_jeu(self):
self.fin=True
try :
self.rejouer.destroy()
self.label.config(text='')
self.Nombre_2.delete(0,END)
self.Nombre_1.delete(0,END)
except:
pass
self.bt_valider=tk.Button(self.frame_left,text='valider', command=lambda: self.fin_du_jeu())
self. bt_valider.pack(side='top',anchor='w')
self.debut=time.time()
self.temps_de_rect=(time.time()-self.debut)
self.temps_de_rect=time.strftime("%H:%M:%S", time.gmtime(self.temps_de_rect))
self.label.configure(text=self.temps_de_rect)
self.update_clock()
self.rectangle.destroy()
self.rectangle=tk.Canvas(self.frame1)
self.rectangle.pack()
self.create_ret(self.rectangle)
self.nombre_j1=random.randint(1,10)
self.nombre_j2=random.randint(1,10)
for _ in range(self.nombre_j2):
self.create_circle(20,self.rectangle,'red')
for _ in range(self.nombre_j1):
self.create_circle(20,self.rectangle,'blue')
def fin_du_jeu(self):
self.fin=False
if(int(self.Nombre_1.get())==self.nombre_j1 ) and (int(self.Nombre_2.get())==self.nombre_j2):
            # game won
self.bt_valider.destroy()
self.rejouer=Button(self.frame_left, text="Rejouer",
command=lambda: self.commencer_un_jeu())
self.rejouer.pack(side='top',fill='x')
self.temps_de_rect=(time.time()-self.debut)
self.temps_de_rect=time.strftime("%H:%M:%S", time.gmtime(self.temps_de_rect))
self.label.configure(text=self.temps_de_rect)
self.rectangle.create_text(200,150,fill="darkblue",font="Times 20 italic bold",
text="Victoire")
else:
self.bt_valider.destroy()
self.rejouer=Button(self.frame_left, text="Rejouer",
command=lambda: self.commencer_un_jeu())
self.rejouer.pack(side='top',fill='x')
self.temps_de_rect=(time.time()-self.debut)
self.temps_de_rect=time.strftime("%H:%M:%S", time.gmtime(self.temps_de_rect))
self.label.configure(text=self.temps_de_rect)
self.rectangle.create_text(200,150,fill="darkblue",font="Times 20 italic bold",
text="Defaite")
class SampleApp(tk.Tk):
def __init__(self):
tk.Tk.__init__(self)
self._frame = None
self.switch_frame(StartPage)
def timer(self,frame_game):
self.after(1000,frame_game.update_clock)
def switch_frame(self, frame_class,num=False):
new_frame = frame_class(self)
if self._frame is not None:
self._frame.destroy()
self._frame = new_frame
self._frame.pack()
# try:
# if num:
# print(frame_class)
# self.timer(frame_class)
# except:
# print("le frame n'est pas le bon")
class PageTwo(tk.Frame):
def __init__(self, master):
tk.Frame.__init__(self, master)
tk.Frame.configure(self,bg='red')
tk.Label(self, text="Page two", font=('Helvetica', 18, "bold")).pack(side="top", fill="x", pady=5)
tk.Button(self, text="Go back to start page",
command=lambda: master.switch_frame(StartPage)).pack()
if __name__ == "__main__":
app = SampleApp()
app.geometry('800x800')
app.mainloop()
|
flexible
|
{
"blob_id": "4e6401672d4762b444bb679e4cc39ada04193a26",
"index": 1882,
"step-1": "<mask token>\n\n\nclass PageOne(tk.Frame):\n\n def __init__(self, master):\n tk.Frame.__init__(self, master)\n frame_left = Frame(self)\n self.frame_left = frame_left\n frame_left.pack(fill=BOTH, side=LEFT)\n self.label = tk.Label(frame_left, text='', font=('Helvetica', 10),\n fg='red')\n self.label.pack()\n self.bagniere_bleu = tk.Canvas(frame_left, width=50, height=3)\n self.bagniere_bleu.pack(side='top', anchor='c')\n self.bagniere_bleu.create_rectangle(0, 3, 50, 0, fill='blue')\n self.Nombre_1 = Entry(frame_left)\n self.Nombre_1.pack(side='top', anchor='w')\n self.bagniere_bleu = tk.Canvas(frame_left, width=50, height=3)\n self.bagniere_bleu.pack(side='top', anchor='c')\n self.bagniere_bleu.create_rectangle(0, 3, 50, 0, fill='red')\n self.Nombre_2 = Entry(frame_left)\n self.Nombre_2.pack(side='top', anchor='w')\n tk.Button(frame_left, text='Go back to start page', command=lambda :\n master.switch_frame(StartPage)).pack(side='bottom')\n self.frame1 = Frame(self)\n self.frame1.pack(fill='x')\n self.rectangle = tk.Canvas(self.frame1)\n self.rectangle.pack()\n self.create_ret(self.rectangle)\n self.master = master\n self.commencer_un_jeu()\n <mask token>\n <mask token>\n\n def update_clock(self):\n self.temps_de_rect = time.time() - self.debut\n self.temps_de_rect = time.strftime('%H:%M:%S', time.gmtime(self.\n temps_de_rect))\n self.label.configure(text=self.temps_de_rect)\n if self.fin:\n self.master.after(1000, self.update_clock)\n\n def commencer_un_jeu(self):\n self.fin = True\n try:\n self.rejouer.destroy()\n self.label.config(text='')\n self.Nombre_2.delete(0, END)\n self.Nombre_1.delete(0, END)\n except:\n pass\n self.bt_valider = tk.Button(self.frame_left, text='valider',\n command=lambda : self.fin_du_jeu())\n self.bt_valider.pack(side='top', anchor='w')\n self.debut = time.time()\n self.temps_de_rect = time.time() - self.debut\n self.temps_de_rect = time.strftime('%H:%M:%S', time.gmtime(self.\n temps_de_rect))\n self.label.configure(text=self.temps_de_rect)\n self.update_clock()\n self.rectangle.destroy()\n self.rectangle = tk.Canvas(self.frame1)\n self.rectangle.pack()\n self.create_ret(self.rectangle)\n self.nombre_j1 = random.randint(1, 10)\n self.nombre_j2 = random.randint(1, 10)\n for _ in range(self.nombre_j2):\n self.create_circle(20, self.rectangle, 'red')\n for _ in range(self.nombre_j1):\n self.create_circle(20, self.rectangle, 'blue')\n\n def fin_du_jeu(self):\n self.fin = False\n if int(self.Nombre_1.get()) == self.nombre_j1 and int(self.Nombre_2\n .get()) == self.nombre_j2:\n self.bt_valider.destroy()\n self.rejouer = Button(self.frame_left, text='Rejouer', command=\n lambda : self.commencer_un_jeu())\n self.rejouer.pack(side='top', fill='x')\n self.temps_de_rect = time.time() - self.debut\n self.temps_de_rect = time.strftime('%H:%M:%S', time.gmtime(self\n .temps_de_rect))\n self.label.configure(text=self.temps_de_rect)\n self.rectangle.create_text(200, 150, fill='darkblue', font=\n 'Times 20 italic bold', text='Victoire')\n else:\n self.bt_valider.destroy()\n self.rejouer = Button(self.frame_left, text='Rejouer', command=\n lambda : self.commencer_un_jeu())\n self.rejouer.pack(side='top', fill='x')\n self.temps_de_rect = time.time() - self.debut\n self.temps_de_rect = time.strftime('%H:%M:%S', time.gmtime(self\n .temps_de_rect))\n self.label.configure(text=self.temps_de_rect)\n self.rectangle.create_text(200, 150, fill='darkblue', font=\n 'Times 20 italic bold', text='Defaite')\n\n\nclass SampleApp(tk.Tk):\n\n def __init__(self):\n tk.Tk.__init__(self)\n 
self._frame = None\n self.switch_frame(StartPage)\n\n def timer(self, frame_game):\n self.after(1000, frame_game.update_clock)\n\n def switch_frame(self, frame_class, num=False):\n new_frame = frame_class(self)\n if self._frame is not None:\n self._frame.destroy()\n self._frame = new_frame\n self._frame.pack()\n\n\nclass PageTwo(tk.Frame):\n\n def __init__(self, master):\n tk.Frame.__init__(self, master)\n tk.Frame.configure(self, bg='red')\n tk.Label(self, text='Page two', font=('Helvetica', 18, 'bold')).pack(\n side='top', fill='x', pady=5)\n tk.Button(self, text='Go back to start page', command=lambda :\n master.switch_frame(StartPage)).pack()\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass PageOne(tk.Frame):\n\n def __init__(self, master):\n tk.Frame.__init__(self, master)\n frame_left = Frame(self)\n self.frame_left = frame_left\n frame_left.pack(fill=BOTH, side=LEFT)\n self.label = tk.Label(frame_left, text='', font=('Helvetica', 10),\n fg='red')\n self.label.pack()\n self.bagniere_bleu = tk.Canvas(frame_left, width=50, height=3)\n self.bagniere_bleu.pack(side='top', anchor='c')\n self.bagniere_bleu.create_rectangle(0, 3, 50, 0, fill='blue')\n self.Nombre_1 = Entry(frame_left)\n self.Nombre_1.pack(side='top', anchor='w')\n self.bagniere_bleu = tk.Canvas(frame_left, width=50, height=3)\n self.bagniere_bleu.pack(side='top', anchor='c')\n self.bagniere_bleu.create_rectangle(0, 3, 50, 0, fill='red')\n self.Nombre_2 = Entry(frame_left)\n self.Nombre_2.pack(side='top', anchor='w')\n tk.Button(frame_left, text='Go back to start page', command=lambda :\n master.switch_frame(StartPage)).pack(side='bottom')\n self.frame1 = Frame(self)\n self.frame1.pack(fill='x')\n self.rectangle = tk.Canvas(self.frame1)\n self.rectangle.pack()\n self.create_ret(self.rectangle)\n self.master = master\n self.commencer_un_jeu()\n <mask token>\n\n def create_ret(self, canvas):\n return canvas.create_rectangle(0, 500, 500, 0, fill='#fdffdb')\n\n def update_clock(self):\n self.temps_de_rect = time.time() - self.debut\n self.temps_de_rect = time.strftime('%H:%M:%S', time.gmtime(self.\n temps_de_rect))\n self.label.configure(text=self.temps_de_rect)\n if self.fin:\n self.master.after(1000, self.update_clock)\n\n def commencer_un_jeu(self):\n self.fin = True\n try:\n self.rejouer.destroy()\n self.label.config(text='')\n self.Nombre_2.delete(0, END)\n self.Nombre_1.delete(0, END)\n except:\n pass\n self.bt_valider = tk.Button(self.frame_left, text='valider',\n command=lambda : self.fin_du_jeu())\n self.bt_valider.pack(side='top', anchor='w')\n self.debut = time.time()\n self.temps_de_rect = time.time() - self.debut\n self.temps_de_rect = time.strftime('%H:%M:%S', time.gmtime(self.\n temps_de_rect))\n self.label.configure(text=self.temps_de_rect)\n self.update_clock()\n self.rectangle.destroy()\n self.rectangle = tk.Canvas(self.frame1)\n self.rectangle.pack()\n self.create_ret(self.rectangle)\n self.nombre_j1 = random.randint(1, 10)\n self.nombre_j2 = random.randint(1, 10)\n for _ in range(self.nombre_j2):\n self.create_circle(20, self.rectangle, 'red')\n for _ in range(self.nombre_j1):\n self.create_circle(20, self.rectangle, 'blue')\n\n def fin_du_jeu(self):\n self.fin = False\n if int(self.Nombre_1.get()) == self.nombre_j1 and int(self.Nombre_2\n .get()) == self.nombre_j2:\n self.bt_valider.destroy()\n self.rejouer = Button(self.frame_left, text='Rejouer', command=\n lambda : self.commencer_un_jeu())\n self.rejouer.pack(side='top', fill='x')\n self.temps_de_rect = time.time() - self.debut\n self.temps_de_rect = time.strftime('%H:%M:%S', time.gmtime(self\n .temps_de_rect))\n self.label.configure(text=self.temps_de_rect)\n self.rectangle.create_text(200, 150, fill='darkblue', font=\n 'Times 20 italic bold', text='Victoire')\n else:\n self.bt_valider.destroy()\n self.rejouer = Button(self.frame_left, text='Rejouer', command=\n lambda : self.commencer_un_jeu())\n self.rejouer.pack(side='top', fill='x')\n self.temps_de_rect = time.time() - self.debut\n self.temps_de_rect = time.strftime('%H:%M:%S', time.gmtime(self\n .temps_de_rect))\n self.label.configure(text=self.temps_de_rect)\n self.rectangle.create_text(200, 150, fill='darkblue', font=\n 'Times 20 italic bold', 
text='Defaite')\n\n\nclass SampleApp(tk.Tk):\n\n def __init__(self):\n tk.Tk.__init__(self)\n self._frame = None\n self.switch_frame(StartPage)\n\n def timer(self, frame_game):\n self.after(1000, frame_game.update_clock)\n\n def switch_frame(self, frame_class, num=False):\n new_frame = frame_class(self)\n if self._frame is not None:\n self._frame.destroy()\n self._frame = new_frame\n self._frame.pack()\n\n\nclass PageTwo(tk.Frame):\n\n def __init__(self, master):\n tk.Frame.__init__(self, master)\n tk.Frame.configure(self, bg='red')\n tk.Label(self, text='Page two', font=('Helvetica', 18, 'bold')).pack(\n side='top', fill='x', pady=5)\n tk.Button(self, text='Go back to start page', command=lambda :\n master.switch_frame(StartPage)).pack()\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass StartPage(tk.Frame):\n\n def __init__(self, master):\n tk.Frame.__init__(self, master)\n tk.Frame.configure(self, bg='#d0a3d8', height=200, width=200)\n tk.Label(self, text='Mini Jeu: \\n P-0', font=('Helvetica', 18, 'bold')\n ).pack(side='top', fill='x', pady=5)\n bt = Button(self, text='Jouer', command=lambda : master.\n switch_frame(PageOne, num=True))\n bt.pack(fill=BOTH, expand=True)\n\n\nclass PageOne(tk.Frame):\n\n def __init__(self, master):\n tk.Frame.__init__(self, master)\n frame_left = Frame(self)\n self.frame_left = frame_left\n frame_left.pack(fill=BOTH, side=LEFT)\n self.label = tk.Label(frame_left, text='', font=('Helvetica', 10),\n fg='red')\n self.label.pack()\n self.bagniere_bleu = tk.Canvas(frame_left, width=50, height=3)\n self.bagniere_bleu.pack(side='top', anchor='c')\n self.bagniere_bleu.create_rectangle(0, 3, 50, 0, fill='blue')\n self.Nombre_1 = Entry(frame_left)\n self.Nombre_1.pack(side='top', anchor='w')\n self.bagniere_bleu = tk.Canvas(frame_left, width=50, height=3)\n self.bagniere_bleu.pack(side='top', anchor='c')\n self.bagniere_bleu.create_rectangle(0, 3, 50, 0, fill='red')\n self.Nombre_2 = Entry(frame_left)\n self.Nombre_2.pack(side='top', anchor='w')\n tk.Button(frame_left, text='Go back to start page', command=lambda :\n master.switch_frame(StartPage)).pack(side='bottom')\n self.frame1 = Frame(self)\n self.frame1.pack(fill='x')\n self.rectangle = tk.Canvas(self.frame1)\n self.rectangle.pack()\n self.create_ret(self.rectangle)\n self.master = master\n self.commencer_un_jeu()\n\n def create_circle(self, r, canvasName, color):\n x = random.randint(20, 300)\n y = random.randint(20, 250)\n x0 = x - r\n y0 = y - r\n x1 = x + r\n y1 = y + r\n return canvasName.create_oval(x0, y0, x1, y1, fill=color)\n\n def create_ret(self, canvas):\n return canvas.create_rectangle(0, 500, 500, 0, fill='#fdffdb')\n\n def update_clock(self):\n self.temps_de_rect = time.time() - self.debut\n self.temps_de_rect = time.strftime('%H:%M:%S', time.gmtime(self.\n temps_de_rect))\n self.label.configure(text=self.temps_de_rect)\n if self.fin:\n self.master.after(1000, self.update_clock)\n\n def commencer_un_jeu(self):\n self.fin = True\n try:\n self.rejouer.destroy()\n self.label.config(text='')\n self.Nombre_2.delete(0, END)\n self.Nombre_1.delete(0, END)\n except:\n pass\n self.bt_valider = tk.Button(self.frame_left, text='valider',\n command=lambda : self.fin_du_jeu())\n self.bt_valider.pack(side='top', anchor='w')\n self.debut = time.time()\n self.temps_de_rect = time.time() - self.debut\n self.temps_de_rect = time.strftime('%H:%M:%S', time.gmtime(self.\n temps_de_rect))\n self.label.configure(text=self.temps_de_rect)\n self.update_clock()\n self.rectangle.destroy()\n self.rectangle = tk.Canvas(self.frame1)\n self.rectangle.pack()\n self.create_ret(self.rectangle)\n self.nombre_j1 = random.randint(1, 10)\n self.nombre_j2 = random.randint(1, 10)\n for _ in range(self.nombre_j2):\n self.create_circle(20, self.rectangle, 'red')\n for _ in range(self.nombre_j1):\n self.create_circle(20, self.rectangle, 'blue')\n\n def fin_du_jeu(self):\n self.fin = False\n if int(self.Nombre_1.get()) == self.nombre_j1 and int(self.Nombre_2\n .get()) == self.nombre_j2:\n self.bt_valider.destroy()\n self.rejouer = Button(self.frame_left, text='Rejouer', command=\n lambda : self.commencer_un_jeu())\n self.rejouer.pack(side='top', fill='x')\n self.temps_de_rect = time.time() - self.debut\n self.temps_de_rect = time.strftime('%H:%M:%S', time.gmtime(self\n 
.temps_de_rect))\n self.label.configure(text=self.temps_de_rect)\n self.rectangle.create_text(200, 150, fill='darkblue', font=\n 'Times 20 italic bold', text='Victoire')\n else:\n self.bt_valider.destroy()\n self.rejouer = Button(self.frame_left, text='Rejouer', command=\n lambda : self.commencer_un_jeu())\n self.rejouer.pack(side='top', fill='x')\n self.temps_de_rect = time.time() - self.debut\n self.temps_de_rect = time.strftime('%H:%M:%S', time.gmtime(self\n .temps_de_rect))\n self.label.configure(text=self.temps_de_rect)\n self.rectangle.create_text(200, 150, fill='darkblue', font=\n 'Times 20 italic bold', text='Defaite')\n\n\nclass SampleApp(tk.Tk):\n\n def __init__(self):\n tk.Tk.__init__(self)\n self._frame = None\n self.switch_frame(StartPage)\n\n def timer(self, frame_game):\n self.after(1000, frame_game.update_clock)\n\n def switch_frame(self, frame_class, num=False):\n new_frame = frame_class(self)\n if self._frame is not None:\n self._frame.destroy()\n self._frame = new_frame\n self._frame.pack()\n\n\nclass PageTwo(tk.Frame):\n\n def __init__(self, master):\n tk.Frame.__init__(self, master)\n tk.Frame.configure(self, bg='red')\n tk.Label(self, text='Page two', font=('Helvetica', 18, 'bold')).pack(\n side='top', fill='x', pady=5)\n tk.Button(self, text='Go back to start page', command=lambda :\n master.switch_frame(StartPage)).pack()\n\n\n<mask token>\n",
"step-4": "import tkinter as tk\nfrom tkinter import Tk, BOTH, RIGHT, LEFT, END\nfrom tkinter.ttk import Frame, Label, Style, Entry\nfrom tkinter.ttk import Frame, Button, Style\nimport random\nimport time\n\n\nclass StartPage(tk.Frame):\n\n def __init__(self, master):\n tk.Frame.__init__(self, master)\n tk.Frame.configure(self, bg='#d0a3d8', height=200, width=200)\n tk.Label(self, text='Mini Jeu: \\n P-0', font=('Helvetica', 18, 'bold')\n ).pack(side='top', fill='x', pady=5)\n bt = Button(self, text='Jouer', command=lambda : master.\n switch_frame(PageOne, num=True))\n bt.pack(fill=BOTH, expand=True)\n\n\nclass PageOne(tk.Frame):\n\n def __init__(self, master):\n tk.Frame.__init__(self, master)\n frame_left = Frame(self)\n self.frame_left = frame_left\n frame_left.pack(fill=BOTH, side=LEFT)\n self.label = tk.Label(frame_left, text='', font=('Helvetica', 10),\n fg='red')\n self.label.pack()\n self.bagniere_bleu = tk.Canvas(frame_left, width=50, height=3)\n self.bagniere_bleu.pack(side='top', anchor='c')\n self.bagniere_bleu.create_rectangle(0, 3, 50, 0, fill='blue')\n self.Nombre_1 = Entry(frame_left)\n self.Nombre_1.pack(side='top', anchor='w')\n self.bagniere_bleu = tk.Canvas(frame_left, width=50, height=3)\n self.bagniere_bleu.pack(side='top', anchor='c')\n self.bagniere_bleu.create_rectangle(0, 3, 50, 0, fill='red')\n self.Nombre_2 = Entry(frame_left)\n self.Nombre_2.pack(side='top', anchor='w')\n tk.Button(frame_left, text='Go back to start page', command=lambda :\n master.switch_frame(StartPage)).pack(side='bottom')\n self.frame1 = Frame(self)\n self.frame1.pack(fill='x')\n self.rectangle = tk.Canvas(self.frame1)\n self.rectangle.pack()\n self.create_ret(self.rectangle)\n self.master = master\n self.commencer_un_jeu()\n\n def create_circle(self, r, canvasName, color):\n x = random.randint(20, 300)\n y = random.randint(20, 250)\n x0 = x - r\n y0 = y - r\n x1 = x + r\n y1 = y + r\n return canvasName.create_oval(x0, y0, x1, y1, fill=color)\n\n def create_ret(self, canvas):\n return canvas.create_rectangle(0, 500, 500, 0, fill='#fdffdb')\n\n def update_clock(self):\n self.temps_de_rect = time.time() - self.debut\n self.temps_de_rect = time.strftime('%H:%M:%S', time.gmtime(self.\n temps_de_rect))\n self.label.configure(text=self.temps_de_rect)\n if self.fin:\n self.master.after(1000, self.update_clock)\n\n def commencer_un_jeu(self):\n self.fin = True\n try:\n self.rejouer.destroy()\n self.label.config(text='')\n self.Nombre_2.delete(0, END)\n self.Nombre_1.delete(0, END)\n except:\n pass\n self.bt_valider = tk.Button(self.frame_left, text='valider',\n command=lambda : self.fin_du_jeu())\n self.bt_valider.pack(side='top', anchor='w')\n self.debut = time.time()\n self.temps_de_rect = time.time() - self.debut\n self.temps_de_rect = time.strftime('%H:%M:%S', time.gmtime(self.\n temps_de_rect))\n self.label.configure(text=self.temps_de_rect)\n self.update_clock()\n self.rectangle.destroy()\n self.rectangle = tk.Canvas(self.frame1)\n self.rectangle.pack()\n self.create_ret(self.rectangle)\n self.nombre_j1 = random.randint(1, 10)\n self.nombre_j2 = random.randint(1, 10)\n for _ in range(self.nombre_j2):\n self.create_circle(20, self.rectangle, 'red')\n for _ in range(self.nombre_j1):\n self.create_circle(20, self.rectangle, 'blue')\n\n def fin_du_jeu(self):\n self.fin = False\n if int(self.Nombre_1.get()) == self.nombre_j1 and int(self.Nombre_2\n .get()) == self.nombre_j2:\n self.bt_valider.destroy()\n self.rejouer = Button(self.frame_left, text='Rejouer', command=\n lambda : 
self.commencer_un_jeu())\n self.rejouer.pack(side='top', fill='x')\n self.temps_de_rect = time.time() - self.debut\n self.temps_de_rect = time.strftime('%H:%M:%S', time.gmtime(self\n .temps_de_rect))\n self.label.configure(text=self.temps_de_rect)\n self.rectangle.create_text(200, 150, fill='darkblue', font=\n 'Times 20 italic bold', text='Victoire')\n else:\n self.bt_valider.destroy()\n self.rejouer = Button(self.frame_left, text='Rejouer', command=\n lambda : self.commencer_un_jeu())\n self.rejouer.pack(side='top', fill='x')\n self.temps_de_rect = time.time() - self.debut\n self.temps_de_rect = time.strftime('%H:%M:%S', time.gmtime(self\n .temps_de_rect))\n self.label.configure(text=self.temps_de_rect)\n self.rectangle.create_text(200, 150, fill='darkblue', font=\n 'Times 20 italic bold', text='Defaite')\n\n\nclass SampleApp(tk.Tk):\n\n def __init__(self):\n tk.Tk.__init__(self)\n self._frame = None\n self.switch_frame(StartPage)\n\n def timer(self, frame_game):\n self.after(1000, frame_game.update_clock)\n\n def switch_frame(self, frame_class, num=False):\n new_frame = frame_class(self)\n if self._frame is not None:\n self._frame.destroy()\n self._frame = new_frame\n self._frame.pack()\n\n\nclass PageTwo(tk.Frame):\n\n def __init__(self, master):\n tk.Frame.__init__(self, master)\n tk.Frame.configure(self, bg='red')\n tk.Label(self, text='Page two', font=('Helvetica', 18, 'bold')).pack(\n side='top', fill='x', pady=5)\n tk.Button(self, text='Go back to start page', command=lambda :\n master.switch_frame(StartPage)).pack()\n\n\nif __name__ == '__main__':\n app = SampleApp()\n app.geometry('800x800')\n app.mainloop()\n",
"step-5": "\nimport tkinter as tk\nfrom tkinter import Tk, BOTH,RIGHT,LEFT,END\nfrom tkinter.ttk import Frame, Label, Style,Entry\nfrom tkinter.ttk import Frame, Button, Style\nimport random\nimport time\n\nclass StartPage(tk.Frame):\n def __init__(self, master):\n tk.Frame.__init__(self, master)\n \n tk.Frame.configure(self,bg=\"#d0a3d8\",height=200,width=200)\n\n tk.Label(self, text=\"Mini Jeu: \\n P-0\", font=('Helvetica', 18, \"bold\")).pack(side=\"top\", fill=\"x\", pady=5)\n bt=Button(self, text=\"Jouer\",\n command=lambda: master.switch_frame(PageOne,num=True))\n bt.pack(fill=BOTH,expand=True)\n\n \n # tk.Button(self, text=\"Go to page two\",\n # command=lambda: master.switch_frame(PageTwo)).pack()\n\nclass PageOne(tk.Frame):\n def __init__(self, master):\n \n\n tk.Frame.__init__(self, master)\n # tk.Frame.configure(self,bg='blue')\n # tk.Label(self, text=\"Page de jeu\", font=('Helvetica', 18, \"bold\")).pack(side=\"top\", fill=BOTH, pady=5)\n \n frame_left=Frame(self)\n self.frame_left=frame_left\n frame_left.pack(fill=BOTH,side=LEFT)\n\n\n # add entry to this frame \n self.label=tk.Label(frame_left , text=\"\", font=('Helvetica', 10), fg='red')\n self.label.pack()\n\n self.bagniere_bleu=tk.Canvas(frame_left,width=50,height=3)\n self.bagniere_bleu.pack(side='top',anchor='c')\n self.bagniere_bleu.create_rectangle(0,3,50,0,fill='blue')\n\n \n\n self.Nombre_1=Entry(frame_left)\n self.Nombre_1.pack(side='top',anchor='w')\n\n# bagnier pour differencier les couleurs\n self.bagniere_bleu=tk.Canvas(frame_left,width=50,height=3)\n self.bagniere_bleu.pack(side='top',anchor='c')\n self.bagniere_bleu.create_rectangle(0,3,50,0,fill='red')\n\n\n self.Nombre_2=Entry(frame_left)\n self.Nombre_2.pack(side='top',anchor='w')\n\n tk.Button(frame_left, text=\"Go back to start page\",\n command=lambda: master.switch_frame(StartPage)).pack(side='bottom')\n\n \n self.frame1 = Frame(self)\n self.frame1.pack(fill='x')\n self.rectangle=tk.Canvas(self.frame1)\n self.rectangle.pack()\n self.create_ret(self.rectangle)\n \n # self.update_clock()\n self.master=master\n self.commencer_un_jeu()\n\n \n def create_circle(self,r, canvasName,color): #center coordinates, radius\n x=random.randint(20,300)\n y=random.randint(20,250)\n x0 = x - r\n y0 = y - r\n x1 = x + r\n y1 = y + r\n return canvasName.create_oval(x0, y0, x1, y1,fill=color)\n def create_ret(self,canvas):\n return canvas.create_rectangle(0,500,500,0,fill=\"#fdffdb\")\n\n\n\n def update_clock(self):\n self.temps_de_rect=(time.time()-self.debut)\n self.temps_de_rect=time.strftime(\"%H:%M:%S\", time.gmtime(self.temps_de_rect))\n self.label.configure(text=self.temps_de_rect)\n if self.fin:\n self.master.after(1000,self.update_clock)\n\n def commencer_un_jeu(self):\n self.fin=True\n try :\n self.rejouer.destroy()\n self.label.config(text='')\n self.Nombre_2.delete(0,END)\n self.Nombre_1.delete(0,END)\n\n except:\n pass\n\n\n self.bt_valider=tk.Button(self.frame_left,text='valider', command=lambda: self.fin_du_jeu())\n self. 
bt_valider.pack(side='top',anchor='w')\n\n self.debut=time.time()\n self.temps_de_rect=(time.time()-self.debut)\n self.temps_de_rect=time.strftime(\"%H:%M:%S\", time.gmtime(self.temps_de_rect))\n self.label.configure(text=self.temps_de_rect)\n self.update_clock()\n \n\n self.rectangle.destroy()\n self.rectangle=tk.Canvas(self.frame1)\n self.rectangle.pack()\n self.create_ret(self.rectangle)\n\n self.nombre_j1=random.randint(1,10)\n self.nombre_j2=random.randint(1,10)\n for _ in range(self.nombre_j2):\n self.create_circle(20,self.rectangle,'red')\n for _ in range(self.nombre_j1):\n self.create_circle(20,self.rectangle,'blue')\n def fin_du_jeu(self):\n self.fin=False\n if(int(self.Nombre_1.get())==self.nombre_j1 ) and (int(self.Nombre_2.get())==self.nombre_j2):\n #jeu gagné\n \n self.bt_valider.destroy()\n self.rejouer=Button(self.frame_left, text=\"Rejouer\",\n command=lambda: self.commencer_un_jeu())\n \n self.rejouer.pack(side='top',fill='x')\n\n self.temps_de_rect=(time.time()-self.debut)\n self.temps_de_rect=time.strftime(\"%H:%M:%S\", time.gmtime(self.temps_de_rect))\n self.label.configure(text=self.temps_de_rect)\n self.rectangle.create_text(200,150,fill=\"darkblue\",font=\"Times 20 italic bold\",\n text=\"Victoire\")\n else:\n\n \n self.bt_valider.destroy()\n self.rejouer=Button(self.frame_left, text=\"Rejouer\",\n command=lambda: self.commencer_un_jeu())\n\n self.rejouer.pack(side='top',fill='x')\n\n self.temps_de_rect=(time.time()-self.debut)\n self.temps_de_rect=time.strftime(\"%H:%M:%S\", time.gmtime(self.temps_de_rect))\n self.label.configure(text=self.temps_de_rect)\n self.rectangle.create_text(200,150,fill=\"darkblue\",font=\"Times 20 italic bold\",\n text=\"Defaite\")\n\n\n \n\n \n\n\n\n\n \nclass SampleApp(tk.Tk):\n def __init__(self):\n\n tk.Tk.__init__(self)\n \n self._frame = None\n self.switch_frame(StartPage)\n \n\n def timer(self,frame_game):\n self.after(1000,frame_game.update_clock)\n\n\n def switch_frame(self, frame_class,num=False):\n new_frame = frame_class(self)\n if self._frame is not None:\n self._frame.destroy()\n self._frame = new_frame\n self._frame.pack()\n # try:\n \n # if num:\n # print(frame_class)\n # self.timer(frame_class) \n # except:\n # print(\"le frame n'est pas le bon\")\n\n\n\n\n\n\n\nclass PageTwo(tk.Frame):\n def __init__(self, master):\n tk.Frame.__init__(self, master)\n tk.Frame.configure(self,bg='red')\n tk.Label(self, text=\"Page two\", font=('Helvetica', 18, \"bold\")).pack(side=\"top\", fill=\"x\", pady=5)\n tk.Button(self, text=\"Go back to start page\",\n command=lambda: master.switch_frame(StartPage)).pack()\n\nif __name__ == \"__main__\":\n app = SampleApp()\n app.geometry('800x800')\n app.mainloop()",
"step-ids": [
11,
12,
15,
17,
18
]
}
|
[
11,
12,
15,
17,
18
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
if __name__ == '__main__':
n.notify('READY=1')
time.sleep(2)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
n = sdnotify.SystemdNotifier()
if __name__ == '__main__':
n.notify('READY=1')
time.sleep(2)
<|reserved_special_token_1|>
import signal
import time
import sdnotify
n = sdnotify.SystemdNotifier()
if __name__ == '__main__':
n.notify('READY=1')
time.sleep(2)
<|reserved_special_token_1|>
import signal
import time
import sdnotify
n = sdnotify.SystemdNotifier()
if __name__ == '__main__':
n.notify("READY=1")
time.sleep(2)
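    # Hedged note (not in the original script): with a systemd service of Type=notify,
    # the READY=1 message tells systemd that the service has finished starting up.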
|
flexible
|
{
"blob_id": "78dc2193c05ddb4cd4c80b1c0322890eca7fcf19",
"index": 789,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nif __name__ == '__main__':\n n.notify('READY=1')\n time.sleep(2)\n",
"step-3": "<mask token>\nn = sdnotify.SystemdNotifier()\nif __name__ == '__main__':\n n.notify('READY=1')\n time.sleep(2)\n",
"step-4": "import signal\nimport time\nimport sdnotify\nn = sdnotify.SystemdNotifier()\nif __name__ == '__main__':\n n.notify('READY=1')\n time.sleep(2)\n",
"step-5": "import signal\nimport time\n\nimport sdnotify\n\nn = sdnotify.SystemdNotifier()\n\nif __name__ == '__main__':\n\n n.notify(\"READY=1\")\n time.sleep(2)\n\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
# When writing Python web scrapers, the most troublesome targets are not the countless static sites but the ones that fetch their data through JavaScript. Python's own JS support is poor, so well-meaning developers contributed Selenium: it drives a real browser, keeps every feature a browser has, and loading JS becomes trivial.
# https://zhuanlan.zhihu.com/p/27115580
# C:\Users\hedy\AppData\Local\Programs\Python\Python36\Scripts\;C:\Users\hedy\AppData\Local\Programs\Python\Python36\
# Switching pip to a different package index (mirror)
# http://blog.csdn.net/lambert310/article/details/52412059
# Installing the full bundle (ipython, jupyter notebook)
# https://jingyan.baidu.com/article/cbcede070c8eac02f40b4d8e.html
# http://blog.csdn.net/sanshixia/article/details/53996126
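
# Illustrative sketch (not part of the original notes): a minimal Selenium example for a
# JavaScript-heavy page. Assumes the selenium package and a matching browser driver are
# installed; the URL is only a placeholder.
from selenium import webdriver

driver = webdriver.Chrome()
driver.get('https://example.com')   # the real browser executes the page's JavaScript
html_after_js = driver.page_source  # page HTML after the scripts have run
driver.quit()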
|
normal
|
{
"blob_id": "e2948c0ad78ce210b08d65b3e0f75d757e286ad9",
"index": 3883,
"step-1": "# 在写Python爬虫的时候,最麻烦的不是那些海量的静态网站,而是那些通过JavaScript获取数据的站点。Python本身对js的支持就不好,所以就有良心的开发者来做贡献了,这就是Selenium,他本身可以模拟真实的浏览器,浏览器所具有的功能他一个都不拉下,加载js更是小菜了\n# https://zhuanlan.zhihu.com/p/27115580\n# C:\\Users\\hedy\\AppData\\Local\\Programs\\Python\\Python36\\Scripts\\;C:\\Users\\hedy\\AppData\\Local\\Programs\\Python\\Python36\\\n\n# pip 换源\n# http://blog.csdn.net/lambert310/article/details/52412059\n\n# 安装全家桶(ipython,jupyter notebook)\n# https://jingyan.baidu.com/article/cbcede070c8eac02f40b4d8e.html\n# http://blog.csdn.net/sanshixia/article/details/53996126\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
1
]
}
|
[
1
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
setup(name='RedHatSecurityAdvisory', version='0.1', description=
'Script that automatically checks the RedHat security advisories to see if a CVE applies'
, author='Pieter-Jan Moreels', url=
'https://github.com/PidgeyL/RedHat-Advisory-Checker', entry_points={
'console_scripts': ['rhsa = RHSA:redhatAdvisory.main']}, packages=[
'RHSA'], license='Modified BSD license')
<|reserved_special_token_1|>
from setuptools import setup
setup(name='RedHatSecurityAdvisory', version='0.1', description=
'Script that automatically checks the RedHat security advisories to see if a CVE applies'
, author='Pieter-Jan Moreels', url=
'https://github.com/PidgeyL/RedHat-Advisory-Checker', entry_points={
'console_scripts': ['rhsa = RHSA:redhatAdvisory.main']}, packages=[
'RHSA'], license='Modified BSD license')
<|reserved_special_token_1|>
from setuptools import setup
setup(name='RedHatSecurityAdvisory',
version='0.1',
description='Script that automatically checks the RedHat security advisories to see if a CVE applies',
author='Pieter-Jan Moreels',
url='https://github.com/PidgeyL/RedHat-Advisory-Checker',
entry_points={'console_scripts': ['rhsa = RHSA:redhatAdvisory.main']},
packages=['RHSA'],
license="Modified BSD license",
)
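
# Illustrative note (not part of the original file): installing the package, e.g. with
# `pip install .` from the project root, registers a console command named `rhsa` that
# dispatches to RHSA.redhatAdvisory.main via the entry_points declared above.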
|
flexible
|
{
"blob_id": "3f8c13be547099aa6612365452926db95828b9a0",
"index": 554,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nsetup(name='RedHatSecurityAdvisory', version='0.1', description=\n 'Script that automatically checks the RedHat security advisories to see if a CVE applies'\n , author='Pieter-Jan Moreels', url=\n 'https://github.com/PidgeyL/RedHat-Advisory-Checker', entry_points={\n 'console_scripts': ['rhsa = RHSA:redhatAdvisory.main']}, packages=[\n 'RHSA'], license='Modified BSD license')\n",
"step-3": "from setuptools import setup\nsetup(name='RedHatSecurityAdvisory', version='0.1', description=\n 'Script that automatically checks the RedHat security advisories to see if a CVE applies'\n , author='Pieter-Jan Moreels', url=\n 'https://github.com/PidgeyL/RedHat-Advisory-Checker', entry_points={\n 'console_scripts': ['rhsa = RHSA:redhatAdvisory.main']}, packages=[\n 'RHSA'], license='Modified BSD license')\n",
"step-4": "from setuptools import setup\n\nsetup(name='RedHatSecurityAdvisory',\n version='0.1',\n description='Script that automatically checks the RedHat security advisories to see if a CVE applies',\n author='Pieter-Jan Moreels',\n url='https://github.com/PidgeyL/RedHat-Advisory-Checker',\n entry_points={'console_scripts': ['rhsa = RHSA:redhatAdvisory.main']},\n packages=['RHSA'],\n license=\"Modified BSD license\",\n )\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
#Uses python3
import sys
def lcs2(a, b):
    # Classic dynamic-programming longest common subsequence of sequences a and b.
    n, m = len(a), len(b)
    dp_result = [[0 for j in range(m+1)] for i in range(n+1)]
    for x in range(1, n+1):
        for y in range(1, m+1):
            if a[x-1] == b[y-1]:
                dp_result[x][y] = dp_result[x-1][y-1] + 1
            else:
                dp_result[x][y] = max(dp_result[x-1][y], dp_result[x][y-1])
    return dp_result[n][m]
if __name__ == '__main__':
input = sys.stdin.read()
data = list(map(int, input.split()))
n = data[0]
data = data[1:]
a = data[:n]
data = data[n:]
m = data[0]
data = data[1:]
b = data[:m]
print(lcs2(a, b))
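
    # Hedged sanity check (not in the original submission): the longest common
    # subsequence of [2, 7, 5] and [2, 5] is [2, 5], so lcs2 should return 2.
    assert lcs2([2, 7, 5], [2, 5]) == 2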
|
normal
|
{
"blob_id": "d20b336c6588c3cfc4393256b660d6e4ff56b84e",
"index": 1543,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef lcs2(a, b):\n dp_result = [[(0) for j in range(b + 1)] for i in range(a + 1)]\n for x in range(1, a + 1):\n for y in range(1, b + 1):\n if a[x - 1] == b[y - 1] and b[y - 1] == c[z - 1]:\n dp_result[x][y] = dp_result[x - 1][y - 1] + 1\n else:\n dp_result[x][y] = max(dp_result[x - 1][y], dp_result[x][y -\n 1], dp_result[x][y])\n return dp_result\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef lcs2(a, b):\n dp_result = [[(0) for j in range(b + 1)] for i in range(a + 1)]\n for x in range(1, a + 1):\n for y in range(1, b + 1):\n if a[x - 1] == b[y - 1] and b[y - 1] == c[z - 1]:\n dp_result[x][y] = dp_result[x - 1][y - 1] + 1\n else:\n dp_result[x][y] = max(dp_result[x - 1][y], dp_result[x][y -\n 1], dp_result[x][y])\n return dp_result\n\n\nif __name__ == '__main__':\n input = sys.stdin.read()\n data = list(map(int, input.split()))\n n = data[0]\n data = data[1:]\n a = data[:n]\n data = data[n:]\n m = data[0]\n data = data[1:]\n b = data[:m]\n print(lcs2(a, b))\n",
"step-4": "import sys\n\n\ndef lcs2(a, b):\n dp_result = [[(0) for j in range(b + 1)] for i in range(a + 1)]\n for x in range(1, a + 1):\n for y in range(1, b + 1):\n if a[x - 1] == b[y - 1] and b[y - 1] == c[z - 1]:\n dp_result[x][y] = dp_result[x - 1][y - 1] + 1\n else:\n dp_result[x][y] = max(dp_result[x - 1][y], dp_result[x][y -\n 1], dp_result[x][y])\n return dp_result\n\n\nif __name__ == '__main__':\n input = sys.stdin.read()\n data = list(map(int, input.split()))\n n = data[0]\n data = data[1:]\n a = data[:n]\n data = data[n:]\n m = data[0]\n data = data[1:]\n b = data[:m]\n print(lcs2(a, b))\n",
"step-5": "#Uses python3\n\nimport sys\n\ndef lcs2(a, b): \n dp_result = [[0 for j in range(b+1)] for i in range(a+1)]\n for x in range(1, a+1):\n for y in range(1, b+1):\n if a[x-1] == b[y-1] and b[y-1] == c[z-1]: \n dp_result[x][y] = dp_result[x-1][y-1] + 1\n else:\n dp_result[x][y] = max(dp_result[x-1][y], dp_result[x][y-1], dp_result[x][y])\n\n return dp_result\n\n\nif __name__ == '__main__':\n input = sys.stdin.read()\n data = list(map(int, input.split()))\n\n n = data[0]\n data = data[1:]\n a = data[:n]\n\n data = data[n:]\n m = data[0]\n data = data[1:]\n b = data[:m]\n\n print(lcs2(a, b))\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
"""
Array.diff
Our goal in this kata is to implement a difference function,
which subtracts one list from another and returns the result.
It should remove from list a all values that are present in list b, keeping the order of the remaining elements.
"""
from unittest import TestCase
def list_diff(a, b):
return [x for x in a if x not in b]
class TestListDiff(TestCase):
def test_one(self):
assert list_diff([1, 2], [1]) == [2]
def test_two(self):
assert list_diff([1, 2, 2, 2, 3], [2]) == [1, 3]
def test_three(self):
assert list_diff([1, 2, 2, 2, 3], [1, 3]) == [2, 2, 2]
def list_diff_left_right(a, b):
left = [x for x in a if x not in b]
right = [x for x in b if x not in a]
return left, right
class TestDiffLR(TestCase):
def test_one(self):
assert list_diff_left_right([1, 2], [1]) == ([2], [])
def test_two(self):
assert list_diff_left_right([1, 2, 2, 2, 3], [2]) == ([1, 3], [])
def test_three(self):
assert list_diff_left_right([1, 2, 2, 2], [1, 3, 3]) == ([2, 2, 2], [3, 3])
|
normal
|
{
"blob_id": "76526bdff7418997ac90f761936abccbb3468499",
"index": 6513,
"step-1": "<mask token>\n\n\nclass TestListDiff(TestCase):\n <mask token>\n\n def test_two(self):\n assert list_diff([1, 2, 2, 2, 3], [2]) == [1, 3]\n <mask token>\n\n\n<mask token>\n\n\nclass TestDiffLR(TestCase):\n\n def test_one(self):\n assert list_diff_left_right([1, 2], [1]) == ([2], [])\n\n def test_two(self):\n assert list_diff_left_right([1, 2, 2, 2, 3], [2]) == ([1, 3], [])\n\n def test_three(self):\n assert list_diff_left_right([1, 2, 2, 2], [1, 3, 3]) == ([2, 2, 2],\n [3, 3])\n",
"step-2": "<mask token>\n\n\nclass TestListDiff(TestCase):\n\n def test_one(self):\n assert list_diff([1, 2], [1]) == [2]\n\n def test_two(self):\n assert list_diff([1, 2, 2, 2, 3], [2]) == [1, 3]\n\n def test_three(self):\n assert list_diff([1, 2, 2, 2, 3], [1, 3]) == [2, 2, 2]\n\n\n<mask token>\n\n\nclass TestDiffLR(TestCase):\n\n def test_one(self):\n assert list_diff_left_right([1, 2], [1]) == ([2], [])\n\n def test_two(self):\n assert list_diff_left_right([1, 2, 2, 2, 3], [2]) == ([1, 3], [])\n\n def test_three(self):\n assert list_diff_left_right([1, 2, 2, 2], [1, 3, 3]) == ([2, 2, 2],\n [3, 3])\n",
"step-3": "<mask token>\n\n\nclass TestListDiff(TestCase):\n\n def test_one(self):\n assert list_diff([1, 2], [1]) == [2]\n\n def test_two(self):\n assert list_diff([1, 2, 2, 2, 3], [2]) == [1, 3]\n\n def test_three(self):\n assert list_diff([1, 2, 2, 2, 3], [1, 3]) == [2, 2, 2]\n\n\ndef list_diff_left_right(a, b):\n left = [x for x in a if x not in b]\n right = [x for x in b if x not in a]\n return left, right\n\n\nclass TestDiffLR(TestCase):\n\n def test_one(self):\n assert list_diff_left_right([1, 2], [1]) == ([2], [])\n\n def test_two(self):\n assert list_diff_left_right([1, 2, 2, 2, 3], [2]) == ([1, 3], [])\n\n def test_three(self):\n assert list_diff_left_right([1, 2, 2, 2], [1, 3, 3]) == ([2, 2, 2],\n [3, 3])\n",
"step-4": "<mask token>\n\n\ndef list_diff(a, b):\n return [x for x in a if x not in b]\n\n\nclass TestListDiff(TestCase):\n\n def test_one(self):\n assert list_diff([1, 2], [1]) == [2]\n\n def test_two(self):\n assert list_diff([1, 2, 2, 2, 3], [2]) == [1, 3]\n\n def test_three(self):\n assert list_diff([1, 2, 2, 2, 3], [1, 3]) == [2, 2, 2]\n\n\ndef list_diff_left_right(a, b):\n left = [x for x in a if x not in b]\n right = [x for x in b if x not in a]\n return left, right\n\n\nclass TestDiffLR(TestCase):\n\n def test_one(self):\n assert list_diff_left_right([1, 2], [1]) == ([2], [])\n\n def test_two(self):\n assert list_diff_left_right([1, 2, 2, 2, 3], [2]) == ([1, 3], [])\n\n def test_three(self):\n assert list_diff_left_right([1, 2, 2, 2], [1, 3, 3]) == ([2, 2, 2],\n [3, 3])\n",
"step-5": "\"\"\"\nArray.diff\nOur goal in this kata is to implement a difference function,\n which subtracts one list from another and returns the result.\nIt should remove all values from list a, which are present in list b keeping their order.\n\"\"\"\nfrom unittest import TestCase\n\n\ndef list_diff(a, b):\n return [x for x in a if x not in b]\n\n\nclass TestListDiff(TestCase):\n def test_one(self):\n assert list_diff([1, 2], [1]) == [2]\n\n def test_two(self):\n assert list_diff([1, 2, 2, 2, 3], [2]) == [1, 3]\n\n def test_three(self):\n assert list_diff([1, 2, 2, 2, 3], [1, 3]) == [2, 2, 2]\n\n\ndef list_diff_left_right(a, b):\n left = [x for x in a if x not in b]\n right = [x for x in b if x not in a]\n return left, right\n\n\nclass TestDiffLR(TestCase):\n def test_one(self):\n assert list_diff_left_right([1, 2], [1]) == ([2], [])\n\n def test_two(self):\n assert list_diff_left_right([1, 2, 2, 2, 3], [2]) == ([1, 3], [])\n\n def test_three(self):\n assert list_diff_left_right([1, 2, 2, 2], [1, 3, 3]) == ([2, 2, 2], [3, 3])\n",
"step-ids": [
6,
8,
9,
10,
12
]
}
|
[
6,
8,
9,
10,
12
] |
# Multiple Linear Regression
# To set the working directory, save this .py file in the same folder as the 50_Startups.csv file
# and then press the Run button. This will automatically set the working directory.
# Importing the libraries and the dataset (reusing the data preprocessing template)
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
dataset = pd.read_csv('50_Startups.csv')
# iloc is integer-location based [rows, columns]; ':' selects all rows and ':-1' selects every column except the last
X = dataset.iloc[:, :-1].values
# In Python indexes start from 0, while R starts from 1
y = dataset.iloc[:, 4].values
# Categorical Data
# Encoding Independent Data
from sklearn.preprocessing import LabelEncoder, OneHotEncoder
labelencoder_X = LabelEncoder()
X[:,3] = labelencoder_X.fit_transform(X[:,3])
onehotencoder = OneHotEncoder(categorical_features= [3])
X = onehotencoder.fit_transform(X).toarray()
# Avoiding Dummy Variable Trap
X = X[:, 1:]
# The slicing above keeps columns from index 1 to the end, dropping the first dummy column
#Splitting the dataset into Training set and Test set
from sklearn.cross_validation import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X,y, test_size = 0.2, random_state =0)
# Feature Scaling
# For a multi-line comment use """ This will not be executed """
"""from sklearn.preprocessing import StandardScaler
sc_X = StandardScaler()
X_train = sc_X.fit_transform(X_train)
X_test = sc_X.transform(X_test)"""
# Fitting Multiple Linear Regression to the Training set
from sklearn.linear_model import LinearRegression
regressor = LinearRegression()
regressor.fit(X_train, y_train)
# Predicting the Test set results
y_pred = regressor.predict(X_test)
# Building the model using Backward Elimination
import statsmodels.formula.api as sm
X = np.append(arr = np.ones((50,1)).astype(int), values = X, axis = 1)
X_opt = X[:, [0,1,2,3,4,5]]
regressor_OLS = sm.OLS(endog = y, exog = X_opt).fit()
regressor_OLS.summary()
# Omit the variable with the highest p-value above the significance level (SL = 0.05)
X_opt = X[:, [0,1,3,4,5]]
regressor_OLS = sm.OLS(endog = y, exog = X_opt).fit()
regressor_OLS.summary()
# Keep removing variables until every remaining one has P < SL
X_opt = X[:, [0,3,4,5]]
regressor_OLS = sm.OLS(endog = y, exog = X_opt).fit()
regressor_OLS.summary()
X_opt = X[:, [0,3,5]]
regressor_OLS = sm.OLS(endog = y, exog = X_opt).fit()
regressor_OLS.summary()
X_opt = X[:, [0,3]]
regressor_OLS = sm.OLS(endog = y, exog = X_opt).fit()
regressor_OLS.summary()
# End of Backward Elimination Algorithm
# I would like to visualize the performance of R&D vs Profit scale
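# The scatter plot below is a hedged, illustrative sketch of that visualization and is
# not part of the original analysis (assumption: the CSV has columns named 'R&D Spend'
# and 'Profit', as in the standard 50_Startups dataset).
plt.scatter(dataset['R&D Spend'], dataset['Profit'], color='blue')
plt.title('R&D Spend vs Profit')
plt.xlabel('R&D Spend')
plt.ylabel('Profit')
plt.show()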
|
normal
|
{
"blob_id": "4d722975b4ffc1bbfe7591e6ceccc758f67a5599",
"index": 6920,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nregressor.fit(X_train, y_train)\n<mask token>\nregressor_OLS.summary()\n<mask token>\nregressor_OLS.summary()\n<mask token>\nregressor_OLS.summary()\n<mask token>\nregressor_OLS.summary()\n<mask token>\nregressor_OLS.summary()\n",
"step-3": "<mask token>\ndataset = pd.read_csv('50_Startups.csv')\nX = dataset.iloc[:, :-1].values\ny = dataset.iloc[:, 4].values\n<mask token>\nlabelencoder_X = LabelEncoder()\nX[:, 3] = labelencoder_X.fit_transform(X[:, 3])\nonehotencoder = OneHotEncoder(categorical_features=[3])\nX = onehotencoder.fit_transform(X).toarray()\nX = X[:, 1:]\n<mask token>\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2,\n random_state=0)\n<mask token>\nregressor = LinearRegression()\nregressor.fit(X_train, y_train)\ny_pred = regressor.predict(X_test)\n<mask token>\nX = np.append(arr=np.ones((50, 1)).astype(int), values=X, axis=1)\nX_opt = X[:, [0, 1, 2, 3, 4, 5]]\nregressor_OLS = sm.OLS(endog=y, exog=X_opt).fit()\nregressor_OLS.summary()\nX_opt = X[:, [0, 1, 3, 4, 5]]\nregressor_OLS = sm.OLS(endog=y, exog=X_opt).fit()\nregressor_OLS.summary()\nX_opt = X[:, [0, 3, 4, 5]]\nregressor_OLS = sm.OLS(endog=y, exog=X_opt).fit()\nregressor_OLS.summary()\nX_opt = X[:, [0, 3, 5]]\nregressor_OLS = sm.OLS(endog=y, exog=X_opt).fit()\nregressor_OLS.summary()\nX_opt = X[:, [0, 3]]\nregressor_OLS = sm.OLS(endog=y, exog=X_opt).fit()\nregressor_OLS.summary()\n",
"step-4": "import numpy as np\nimport matplotlib.pyplot as plt\nimport pandas as pd\ndataset = pd.read_csv('50_Startups.csv')\nX = dataset.iloc[:, :-1].values\ny = dataset.iloc[:, 4].values\nfrom sklearn.preprocessing import LabelEncoder, OneHotEncoder\nlabelencoder_X = LabelEncoder()\nX[:, 3] = labelencoder_X.fit_transform(X[:, 3])\nonehotencoder = OneHotEncoder(categorical_features=[3])\nX = onehotencoder.fit_transform(X).toarray()\nX = X[:, 1:]\nfrom sklearn.cross_validation import train_test_split\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2,\n random_state=0)\n<mask token>\nfrom sklearn.linear_model import LinearRegression\nregressor = LinearRegression()\nregressor.fit(X_train, y_train)\ny_pred = regressor.predict(X_test)\nimport statsmodels.formula.api as sm\nX = np.append(arr=np.ones((50, 1)).astype(int), values=X, axis=1)\nX_opt = X[:, [0, 1, 2, 3, 4, 5]]\nregressor_OLS = sm.OLS(endog=y, exog=X_opt).fit()\nregressor_OLS.summary()\nX_opt = X[:, [0, 1, 3, 4, 5]]\nregressor_OLS = sm.OLS(endog=y, exog=X_opt).fit()\nregressor_OLS.summary()\nX_opt = X[:, [0, 3, 4, 5]]\nregressor_OLS = sm.OLS(endog=y, exog=X_opt).fit()\nregressor_OLS.summary()\nX_opt = X[:, [0, 3, 5]]\nregressor_OLS = sm.OLS(endog=y, exog=X_opt).fit()\nregressor_OLS.summary()\nX_opt = X[:, [0, 3]]\nregressor_OLS = sm.OLS(endog=y, exog=X_opt).fit()\nregressor_OLS.summary()\n",
"step-5": "# Multiple Linear Regression\n# To set the working directory save this .py file where we have the Data.csv file \n# and then press the Run button. This will automatically set the working directory.\n# Importing the data from preprocessing data\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport pandas as pd \n\ndataset = pd.read_csv('50_Startups.csv')\n\n# iloc integer location based [rows, columns] : means all rows :-1 all columns except last one\nX = dataset.iloc[:, :-1].values\n\n# In python indexes are started from 0 and R starts from 1\ny = dataset.iloc[:, 4].values\n\n# Categorical Data\n# Encoding Independent Data\nfrom sklearn.preprocessing import LabelEncoder, OneHotEncoder\nlabelencoder_X = LabelEncoder()\nX[:,3] = labelencoder_X.fit_transform(X[:,3])\nonehotencoder = OneHotEncoder(categorical_features= [3])\nX = onehotencoder.fit_transform(X).toarray()\n\n# Avoiding Dummy Variable Trap\nX = X[:, 1:] \n#In the above thing it The above column will start from 1 to end.\n\n#Splitting the dataset into Training set and Test set\nfrom sklearn.cross_validation import train_test_split\nX_train, X_test, y_train, y_test = train_test_split(X,y, test_size = 0.2, random_state =0)\n\n# Feature Scaling\n# For multi-comment line use \"\"\" This will not be executed \"\"\" \n\"\"\"from sklearn.preprocessing import StandardScaler\nsc_X = StandardScaler()\nX_train = sc_X.fit_transform(X_train)\nX_test = sc_X.transform(X_test)\"\"\"\n\n# Fitting Multiple Linear Regression to the Training set\nfrom sklearn.linear_model import LinearRegression\nregressor = LinearRegression()\nregressor.fit(X_train, y_train)\n\n# Predicting the Test set results\ny_pred = regressor.predict(X_test)\n\n# Building the model using Backword Elimination\nimport statsmodels.formula.api as sm\nX = np.append(arr = np.ones((50,1)).astype(int), values = X, axis = 1)\nX_opt = X[:, [0,1,2,3,4,5]]\nregressor_OLS = sm.OLS(endog = y, exog = X_opt).fit()\nregressor_OLS.summary()\n\n# Omit the variables which have prob more than .95\nX_opt = X[:, [0,1,3,4,5]]\nregressor_OLS = sm.OLS(endog = y, exog = X_opt).fit()\nregressor_OLS.summary()\n\n# Omit the variables until you have P < SL\nX_opt = X[:, [0,3,4,5]]\nregressor_OLS = sm.OLS(endog = y, exog = X_opt).fit()\nregressor_OLS.summary()\n\nX_opt = X[:, [0,3,5]]\nregressor_OLS = sm.OLS(endog = y, exog = X_opt).fit()\nregressor_OLS.summary()\n\nX_opt = X[:, [0,3]]\nregressor_OLS = sm.OLS(endog = y, exog = X_opt).fit()\nregressor_OLS.summary()\n\n# End of Backward ELimination Algorithm\n\n# I would like to visualize the performance of R&D vs Profit scale\n\n\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
def mini200(videopath, minipath, mod='train'):
with open(videopath, 'r') as video_f:
all_videos = video_f.readlines()
count = [(0) for _ in range(0, 200)]
with open(minipath, 'w') as f:
for video in all_videos:
path, label = video.split(',')
label = int(label)
if label < 200:
count[label] += 1
f.write(video)
for cls, i in enumerate(count):
print('{} class have : {}'.format(cls, i))
print('total {}'.format(sum(count)))
def exist_or_not(ann):
with open(ann, 'r') as f:
all = f.readlines()
for video in all:
path = video.split(',')[0]
if not os.path.isfile(path):
print(path)
print('all done!')
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def mini100(videopath, minipath, mod='train'):
with open(videopath, 'r') as video_f:
all_videos = video_f.readlines()
count = [(0) for _ in range(0, 100)]
with open(minipath, 'w') as f:
for video in all_videos:
path, label = video.split(',')
label = int(label)
if label < 100:
count[label] += 1
f.write(video)
for cls, i in enumerate(count):
print('{} class have : {}'.format(cls, i))
print('total {}'.format(sum(count)))
def mini200(videopath, minipath, mod='train'):
with open(videopath, 'r') as video_f:
all_videos = video_f.readlines()
count = [(0) for _ in range(0, 200)]
with open(minipath, 'w') as f:
for video in all_videos:
path, label = video.split(',')
label = int(label)
if label < 200:
count[label] += 1
f.write(video)
for cls, i in enumerate(count):
print('{} class have : {}'.format(cls, i))
print('total {}'.format(sum(count)))
def exist_or_not(ann):
with open(ann, 'r') as f:
all = f.readlines()
for video in all:
path = video.split(',')[0]
if not os.path.isfile(path):
print(path)
print('all done!')
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def mini100(videopath, minipath, mod='train'):
with open(videopath, 'r') as video_f:
all_videos = video_f.readlines()
count = [(0) for _ in range(0, 100)]
with open(minipath, 'w') as f:
for video in all_videos:
path, label = video.split(',')
label = int(label)
if label < 100:
count[label] += 1
f.write(video)
for cls, i in enumerate(count):
print('{} class have : {}'.format(cls, i))
print('total {}'.format(sum(count)))
def mini200(videopath, minipath, mod='train'):
with open(videopath, 'r') as video_f:
all_videos = video_f.readlines()
count = [(0) for _ in range(0, 200)]
with open(minipath, 'w') as f:
for video in all_videos:
path, label = video.split(',')
label = int(label)
if label < 200:
count[label] += 1
f.write(video)
for cls, i in enumerate(count):
print('{} class have : {}'.format(cls, i))
print('total {}'.format(sum(count)))
def exist_or_not(ann):
with open(ann, 'r') as f:
all = f.readlines()
for video in all:
path = video.split(',')[0]
if not os.path.isfile(path):
print(path)
print('all done!')
if __name__ == '__main__':
import fire
fire.Fire()
<|reserved_special_token_1|>
import os
def mini100(videopath, minipath, mod='train'):
with open(videopath, 'r') as video_f:
all_videos = video_f.readlines()
count = [(0) for _ in range(0, 100)]
with open(minipath, 'w') as f:
for video in all_videos:
path, label = video.split(',')
label = int(label)
if label < 100:
count[label] += 1
f.write(video)
for cls, i in enumerate(count):
print('{} class have : {}'.format(cls, i))
print('total {}'.format(sum(count)))
def mini200(videopath, minipath, mod='train'):
with open(videopath, 'r') as video_f:
all_videos = video_f.readlines()
count = [(0) for _ in range(0, 200)]
with open(minipath, 'w') as f:
for video in all_videos:
path, label = video.split(',')
label = int(label)
if label < 200:
count[label] += 1
f.write(video)
for cls, i in enumerate(count):
print('{} class have : {}'.format(cls, i))
print('total {}'.format(sum(count)))
def exist_or_not(ann):
with open(ann, 'r') as f:
all = f.readlines()
for video in all:
path = video.split(',')[0]
if not os.path.isfile(path):
print(path)
print('all done!')
if __name__ == '__main__':
import fire
fire.Fire()
<|reserved_special_token_1|>
import os
def mini100(videopath, minipath,mod='train'):
with open(videopath, 'r') as video_f:
all_videos = video_f.readlines()
#if mod=='train':
# count = [400 for _ in range(0,100)]
#else:
# count = [25 for _ in range(0,100)]
count = [0 for _ in range(0,100)]
with open(minipath,'w') as f:
for video in all_videos:
#print(video)
path, label = video.split(',')
label = int(label)
if label<100:
#if count[label]>0:
# count[label] -= 1
count[label] +=1
f.write(video)
for cls,i in enumerate(count):
#if i!=0:
print("{} class have : {}".format(cls,i))
print("total {}".format(sum(count)))
# assert i==0
def mini200(videopath, minipath,mod='train'):
with open(videopath, 'r') as video_f:
all_videos = video_f.readlines()
#if mod=='train':
# count = [400 for _ in range(0,100)]
#else:
# count = [25 for _ in range(0,100)]
count = [0 for _ in range(0,200)]
with open(minipath,'w') as f:
for video in all_videos:
#print(video)
path, label = video.split(',')
label = int(label)
if label<200:
#if count[label]>0:
# count[label] -= 1
count[label] +=1
f.write(video)
for cls,i in enumerate(count):
#if i!=0:
print("{} class have : {}".format(cls,i))
print("total {}".format(sum(count)))
# assert i==0
def exist_or_not(ann,):
with open(ann, 'r') as f:
all = f.readlines()
for video in all:
path =video.split(',')[0]
if not os.path.isfile(path):
print(path)
print("all done!")
if __name__ == "__main__":
import fire
fire.Fire()
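# Hedged usage sketch (script and list-file names below are illustrative assumptions):
#   python make_mini.py mini100 train_full.txt train_mini100.txt
#   python make_mini.py exist_or_not train_mini100.txt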
|
flexible
|
{
"blob_id": "f6d4208afee7aacd96ea5ae6c9e38d2876466703",
"index": 7417,
"step-1": "<mask token>\n\n\ndef mini200(videopath, minipath, mod='train'):\n with open(videopath, 'r') as video_f:\n all_videos = video_f.readlines()\n count = [(0) for _ in range(0, 200)]\n with open(minipath, 'w') as f:\n for video in all_videos:\n path, label = video.split(',')\n label = int(label)\n if label < 200:\n count[label] += 1\n f.write(video)\n for cls, i in enumerate(count):\n print('{} class have : {}'.format(cls, i))\n print('total {}'.format(sum(count)))\n\n\ndef exist_or_not(ann):\n with open(ann, 'r') as f:\n all = f.readlines()\n for video in all:\n path = video.split(',')[0]\n if not os.path.isfile(path):\n print(path)\n print('all done!')\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef mini100(videopath, minipath, mod='train'):\n with open(videopath, 'r') as video_f:\n all_videos = video_f.readlines()\n count = [(0) for _ in range(0, 100)]\n with open(minipath, 'w') as f:\n for video in all_videos:\n path, label = video.split(',')\n label = int(label)\n if label < 100:\n count[label] += 1\n f.write(video)\n for cls, i in enumerate(count):\n print('{} class have : {}'.format(cls, i))\n print('total {}'.format(sum(count)))\n\n\ndef mini200(videopath, minipath, mod='train'):\n with open(videopath, 'r') as video_f:\n all_videos = video_f.readlines()\n count = [(0) for _ in range(0, 200)]\n with open(minipath, 'w') as f:\n for video in all_videos:\n path, label = video.split(',')\n label = int(label)\n if label < 200:\n count[label] += 1\n f.write(video)\n for cls, i in enumerate(count):\n print('{} class have : {}'.format(cls, i))\n print('total {}'.format(sum(count)))\n\n\ndef exist_or_not(ann):\n with open(ann, 'r') as f:\n all = f.readlines()\n for video in all:\n path = video.split(',')[0]\n if not os.path.isfile(path):\n print(path)\n print('all done!')\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef mini100(videopath, minipath, mod='train'):\n with open(videopath, 'r') as video_f:\n all_videos = video_f.readlines()\n count = [(0) for _ in range(0, 100)]\n with open(minipath, 'w') as f:\n for video in all_videos:\n path, label = video.split(',')\n label = int(label)\n if label < 100:\n count[label] += 1\n f.write(video)\n for cls, i in enumerate(count):\n print('{} class have : {}'.format(cls, i))\n print('total {}'.format(sum(count)))\n\n\ndef mini200(videopath, minipath, mod='train'):\n with open(videopath, 'r') as video_f:\n all_videos = video_f.readlines()\n count = [(0) for _ in range(0, 200)]\n with open(minipath, 'w') as f:\n for video in all_videos:\n path, label = video.split(',')\n label = int(label)\n if label < 200:\n count[label] += 1\n f.write(video)\n for cls, i in enumerate(count):\n print('{} class have : {}'.format(cls, i))\n print('total {}'.format(sum(count)))\n\n\ndef exist_or_not(ann):\n with open(ann, 'r') as f:\n all = f.readlines()\n for video in all:\n path = video.split(',')[0]\n if not os.path.isfile(path):\n print(path)\n print('all done!')\n\n\nif __name__ == '__main__':\n import fire\n fire.Fire()\n",
"step-4": "import os\n\n\ndef mini100(videopath, minipath, mod='train'):\n with open(videopath, 'r') as video_f:\n all_videos = video_f.readlines()\n count = [(0) for _ in range(0, 100)]\n with open(minipath, 'w') as f:\n for video in all_videos:\n path, label = video.split(',')\n label = int(label)\n if label < 100:\n count[label] += 1\n f.write(video)\n for cls, i in enumerate(count):\n print('{} class have : {}'.format(cls, i))\n print('total {}'.format(sum(count)))\n\n\ndef mini200(videopath, minipath, mod='train'):\n with open(videopath, 'r') as video_f:\n all_videos = video_f.readlines()\n count = [(0) for _ in range(0, 200)]\n with open(minipath, 'w') as f:\n for video in all_videos:\n path, label = video.split(',')\n label = int(label)\n if label < 200:\n count[label] += 1\n f.write(video)\n for cls, i in enumerate(count):\n print('{} class have : {}'.format(cls, i))\n print('total {}'.format(sum(count)))\n\n\ndef exist_or_not(ann):\n with open(ann, 'r') as f:\n all = f.readlines()\n for video in all:\n path = video.split(',')[0]\n if not os.path.isfile(path):\n print(path)\n print('all done!')\n\n\nif __name__ == '__main__':\n import fire\n fire.Fire()\n",
"step-5": "import os\n\ndef mini100(videopath, minipath,mod='train'):\n with open(videopath, 'r') as video_f:\n all_videos = video_f.readlines()\n #if mod=='train':\n # count = [400 for _ in range(0,100)]\n #else:\n # count = [25 for _ in range(0,100)]\n count = [0 for _ in range(0,100)]\n with open(minipath,'w') as f:\n for video in all_videos:\n #print(video)\n path, label = video.split(',')\n label = int(label)\n if label<100:\n #if count[label]>0:\n # count[label] -= 1\n count[label] +=1\n \n f.write(video)\n \n for cls,i in enumerate(count):\n #if i!=0:\n print(\"{} class have : {}\".format(cls,i))\n print(\"total {}\".format(sum(count)))\n # assert i==0\n\ndef mini200(videopath, minipath,mod='train'):\n with open(videopath, 'r') as video_f:\n all_videos = video_f.readlines()\n #if mod=='train':\n # count = [400 for _ in range(0,100)]\n #else:\n # count = [25 for _ in range(0,100)]\n count = [0 for _ in range(0,200)]\n with open(minipath,'w') as f:\n for video in all_videos:\n #print(video)\n path, label = video.split(',')\n label = int(label)\n if label<200:\n #if count[label]>0:\n # count[label] -= 1\n count[label] +=1\n \n f.write(video)\n \n for cls,i in enumerate(count):\n #if i!=0:\n print(\"{} class have : {}\".format(cls,i))\n print(\"total {}\".format(sum(count)))\n # assert i==0\n\ndef exist_or_not(ann,):\n with open(ann, 'r') as f:\n all = f.readlines()\n for video in all:\n path =video.split(',')[0]\n if not os.path.isfile(path):\n print(path)\n print(\"all done!\")\n \nif __name__ == \"__main__\":\n import fire\n fire.Fire()\n\n",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
"""Module for the bot"""
from copy import deepcopy
from time import sleep
import mcpi.minecraft as minecraft
from mcpi.vec3 import Vec3
import mcpi.block as block
from search import SearchProblem, astar, bfs
from singleton import singleton
_AIR = block.AIR.id
_WATER = block.WATER.id
_LAVA = block.LAVA.id
_BEDROCK = block.BEDROCK.id
_DROP = 2 # It can drop at most this many
_DROP_PLUS_1 = _DROP + 1
_DELAY = 1
class _Vec3(Vec3):
"""A Vec3 that is hashable. Everything in this program should use this
class."""
def __hash__(self):
"""Return the hash."""
return hash((self.x, self.y, self.z))
def clone(self):
"""Return a clone."""
return _Vec3(self.x, self.y, self.z)
class _GenericBot:
"""A generic bot."""
def __init__(self, pos, inventory=None):
"""Initialize with an empty inventory.
inventory is a dictionary. If None, an empty one will be used."""
if inventory is None:
self._inventory = {}
else:
self._inventory = deepcopy(inventory)
self._pos = deepcopy(pos)
def take_action(self, action):
"""Take the action (acquired from _get_legal_actions)."""
getattr(self, action['func'])(
*action.get('args', ()),
**action.get('kwargs', {})
)
def take_actions(self, actions, seconds=None):
"""Take these actions. If seconds is not None, sleep 'seconds'
seconds.
"""
if not actions:
return
self.take_action(actions[0])
for action in actions[1:]:
if seconds is not None:
sleep(seconds)
self.take_action(action)
def get_pos(self):
"""Return the position."""
return deepcopy(self._pos)
def get_legal_actions(self, block_=None):
"""Return a list of legal actions.
If block_ is None, return all legal actions. Otherwise, return all
legal actions that don't involve placing the block."""
return self._get_move_actions(block_) + self._get_mine_actions() + \
self._get_placement_actions(block_)
def contains(self, block_):
"""Return whether or not the bot contains the block id."""
return block_ in self._inventory
def _get_block(self, pos):
"""Get the block at the position."""
raise NotImplementedError
def _place(self, loc, exclude=None, block_=None):
"""Place a block from the inventory only.
If exclude is not None, place a block that is not 'exclude'.
If block is not None, place that block only.
"""
if not self._inventory:
raise Exception('Inventory empty')
if block_ is None:
for key in self._inventory:
if key != exclude:
block_ = key
break
else:
raise Exception((
'You requested not to place %s, but it is the only '
'block in the inventory.' % exclude
))
if block_ not in self._inventory:
raise Exception('Block %s is not in the inventory' % block_)
if self._inventory[block_] == 1:
del self._inventory[block_]
else:
self._inventory[block_] -= 1
self._set_block(loc, block_)
def _move_down(self):
"""Move and mine the block below."""
new_pos = self._pos + _Vec3(0, -1, 0)
block_ = self._get_block(new_pos)
if block_ != _WATER:
self._add_to_inv(block_)
self._move(new_pos)
def _add_to_inv(self, block_):
"""Add the block to the inventory."""
if block_ in self._inventory:
self._inventory[block_] += 1
else:
self._inventory[block_] = 1
def _move_up(self, exclude=None):
"""Move and place a block below.
If exclude is not None, place a block that is not 'exclude'.
"""
self._move(self._pos + _Vec3(0, 1, 0))
self._place(self._pos + _Vec3(0, -1, 0), exclude)
def _mine(self, loc):
"""Mine the block."""
block_ = self._get_block(loc)
self._add_to_inv(block_)
self._set_block(loc, _AIR)
def _get_move_actions(self, exclude=None):
"""Return a list of legal movement actions.
exclude is the block to exclude.
"""
rtn = []
# Check for moving up
can_move_up = self._get_block(self._pos + _Vec3(0, 2, 0)) in {_AIR, _WATER}
if can_move_up:
if self._surrounded():
rtn.append({
'func': '_move',
'args': (self._pos + _Vec3(0, 1, 0),)
})
else:
rtn.append({
'func': '_move_up',
'args': (exclude,)
})
# Check for moving down
hidden_block = self._get_block(self._pos + _Vec3(0, -2, 0))
if hidden_block == _WATER or hidden_block not in {_AIR, _LAVA}:
rtn.append({'func': '_move_down'})
# Check for side moves
for dir_ in _adj_dirs():
rtn.extend(self._side_moves(dir_, can_move_up))
return rtn
def _side_moves(self, dir_, can_move_up):
"""Return the list of side moves.
dir_ is an adjacent direction.
can_move_up is a boolean for whether or not the bot can move up.
"""
rtn = []
base_pos = self._pos + dir_
base_block = self._get_block(base_pos)
empty_blocks = {_AIR, _WATER}
# Check if it can move up
if can_move_up and base_block not in {_AIR, _LAVA, _WATER}:
for vert_dir in [_Vec3(0, 1, 0), _Vec3(0, 2, 0)]:
if self._get_block(base_pos + vert_dir) not in empty_blocks:
break
else:
rtn.append({
'func': '_move',
'args': (base_pos + _Vec3(0, 1, 0),)
})
# Check if it can move in that direction
for vert_dir in [_Vec3(), _Vec3(0, 1, 0)]:
if self._get_block(base_pos + vert_dir) not in empty_blocks:
break
# Fall
else:
pos = base_pos + _Vec3(0, -1, 0)
for _ in xrange(_DROP_PLUS_1):
block_ = self._get_block(pos)
if block_ != _AIR:
if block_ != _LAVA:
rtn.append({
'func': '_move',
'args': (pos + _Vec3(0, 1, 0),)
})
break
                pos.y -= 1
        return rtn
def _surrounded(self):
"""Return whether or not the bot is surrounded by water."""
for dir_ in _adj_dirs():
if self._get_block(self._pos + dir_) != _WATER:
return False
return True
def _get_mine_actions(self):
"""Return a list of legal mining actions (that only involve mining
and not moving)."""
rtn = []
dont_mine = {_AIR, _WATER, _LAVA}
# Mine above.
pos_above = self._pos + _Vec3(0, 2, 0)
if self._get_block(pos_above) not in dont_mine:
rtn.append({
'func': '_mine',
'args': (pos_above,)
})
for dir_ in _adj_dirs():
pos = self._pos + dir_
for _ in xrange(2):
if self._get_block(pos) not in dont_mine:
rtn.append({
'func': '_mine',
'args': (pos,)
})
pos = pos + _Vec3(0, 1, 0)
return rtn
def _get_placement_actions(self, exclude=None):
"""Return a list of legal actions that only involve placing a block
from the inventory.
exclude is a block id. It is the block that should not be placed. If None,
any block can be placed."""
if not self._has_blocks_to_place(exclude=exclude):
return []
dirs = [_Vec3(0, 2, 0)]
for dir_ in _adj_dirs():
dirs.extend([dir_, dir_ + _Vec3(0, 1, 0)])
if self._get_block(self._pos + dir_) in [_AIR, _WATER]:
dirs.append(dir_ + _Vec3(0, -1, 0))
rtn = []
for dir_ in dirs:
pos = self._pos + dir_
if self._can_place(pos):
rtn.append({
'func': '_place',
'args': (pos,),
'kwargs': {'exclude': exclude}
})
return rtn
def _can_place(self, loc):
"""Return whether or not the bot can place a block at that location
independent of what it has in its inventory."""
non_blocks = [_AIR, _WATER, _LAVA]
player = [self._pos, self._pos + _Vec3(0, 1, 0)]
        for dir_ in _adj_dirs() + [_Vec3(0, 1, 0), _Vec3(0, -1, 0)]:
new_loc = loc + dir_
if new_loc not in player and self._get_block(new_loc) \
not in non_blocks:
return True
return False
def _has_blocks_to_place(self, exclude=None):
"""Return whether or not the bot can place a block from the
inventory. If exclude is None, any block can be placed."""
for block_ in self._inventory:
if block_ != exclude:
return True
return False
def _set_block(self, pos, block_):
"""Set a block. block_ is the block id."""
raise NotImplementedError
def _move(self, pos):
"""Move there only."""
self._pos = deepcopy(pos)
class _ImaginaryBot(_GenericBot):
"""A bot used for finding paths that doesn't actually change blocks
in the world."""
def __init__(self, pos, inventory=None):
"""Create a new bot."""
_GenericBot.__init__(self, pos, inventory)
self._changes = {} # Changes to the world
def _set_block(self, pos, block_):
"""Set a block. block_ is the block id."""
        self._changes[deepcopy(pos)] = block_
def _get_block(self, pos):
"""Get the block at the position."""
if pos in self._changes:
return self._changes[pos]
else:
return _get_mc().getBlock(pos)
def get_block(self, pos):
"""The public version."""
return self._get_block(pos)
def __hash__(self):
"""Return the hash."""
return hash(frozenset([self._pos] + \
_key_vals(self._inventory) + \
_key_vals(self._changes)
))
class Bot(_GenericBot):
"""The real bot.
All vector arguments are Vec3s."""
_BOT_BLOCK = block.IRON_BLOCK.id
def __init__(self):
"""Create a bot next to the player."""
pos = _get_mc().player.getTilePos() + Vec3(2, 0, 0)
pos = _Vec3(pos.x, pos.y, pos.z)
_GenericBot.__init__(self, pos)
self._pos = pos
self._move(self._pos)
@staticmethod
def destroy_all():
"""Destroy all bots within a small distance (in case I forget to
destroy one)."""
player_loc = _player_loc()
minec = _get_mc()
rad = 10
for x in xrange(player_loc.x - rad, player_loc.x + rad):
for y in xrange(player_loc.y - rad, player_loc.y + rad):
for z in xrange(player_loc.z - rad, player_loc.z + rad):
if minec.getBlock(x, y, z) == Bot._BOT_BLOCK:
minec.setBlock(x, y, z, _AIR)
def destroy(self):
"""Set itself to air."""
self._set_block(self._pos, _AIR)
self._set_block(self._pos + _Vec3(0, 1, 0), _AIR)
def fetch(self, block_name):
"""Mine and return a block to the player."""
imag_bot = _ImaginaryBot(self._pos, self._inventory)
block_id = getattr(block, block_name).id
block_loc = self._get_block_loc(block_id)
mine_prob = _MineProblem(imag_bot, block_loc, block_id)
mine_actions = astar(mine_prob, _mine_heuristic)
self.take_actions(mine_actions, _DELAY)
imag_bot = _ImaginaryBot(self._pos, self._inventory)
player_loc = _player_loc()
return_prob = _ReturnProblem(imag_bot, block_id, player_loc)
return_actions = astar(return_prob, _return_heuristic)
imag_bot.take_actions(return_actions)
return_actions.append({
'func': '_place',
            'args': ((imag_bot.get_pos() + player_loc) / 2,),
            'kwargs': {'block_': block_id}
})
self.take_actions(return_actions, _DELAY)
def _get_block_loc(self, block_id):
"""Return the location of the block."""
find_prob = FindProblem(self._pos, block_id)
dirs = bfs(find_prob)
return self._pos + sum(dirs)
def _set_block(self, pos, block_):
"""Place an actual block in the world.
block is a block id."""
_get_mc().setBlock(pos, block_)
def _get_block(self, pos):
"""Get the block at the position."""
return _get_mc().getBlock(pos)
def _move(self, pos):
"""Move there, and set the appropriate blocks."""
self._set_block(self._pos, _AIR)
self._set_block(self._pos + _Vec3(0, 1, 0), _AIR)
self._set_block(pos, self._BOT_BLOCK)
self._set_block(pos + _Vec3(0, 1, 0), self._BOT_BLOCK)
self._pos = pos
class FindProblem(SearchProblem):
"""Problem for finding the location of a block in the world.
A state in this problem is a location.
"""
def __init__(self, start_loc, block_id):
"""Initialize."""
self._start_loc = deepcopy(start_loc)
self._block_id = block_id
def getStartState(self):
"""Return the starting location."""
return self._start_loc
    def isGoalState(self, state):
        """Return whether or not the location contains the target block."""
        return _get_mc().getBlock(state) == self._block_id
def getSuccessors(self, state):
"""Return the successors."""
rtn = []
for dir_ in _all_dirs():
successor = state + dir_
if successor.y <= _get_mc().getHeight(successor.x, successor.z) \
and _get_mc().getBlock(successor) != _BEDROCK:
rtn.append((successor, dir_, 1))
return rtn
class _MineProblem(SearchProblem):
"""The problem of finding the block and mining it (not returning
it)."""
def __init__(self, imag_bot, block_loc, block_id):
"""Initialize the problem with an _ImaginaryBot.
block_loc is a Vec3.
"""
self._bot = imag_bot
self._block_loc = deepcopy(block_loc)
self._block_id = block_id
def get_block_loc(self):
"""Return the block location."""
return deepcopy(self._block_loc)
def get_block_id(self):
"""Return the block it's trying to mine."""
return self._block_id
def getStartState(self):
"""Return the bot passed in."""
return self._bot
def isGoalState(self, state):
"""Return whether or not the bot has the block."""
return state.contains(self._block_id)
def getSuccessors(self, state):
"""Return the successors."""
rtn = []
for action in state.get_legal_actions():
successor = deepcopy(state)
successor.take_action(action)
rtn.append((successor, action, 1))
return rtn
class _ReturnProblem(SearchProblem):
"""The problem of returning to the player. This does not place the block
next to the player."""
def __init__(self, imag_bot, block_, player_loc):
"""Initialized the problem with an _ImaginaryBot.
block is a block id."""
self._bot = imag_bot
self._block = block_
self._player_loc = player_loc
def get_player_loc(self):
"""Return the player location."""
return deepcopy(self._player_loc)
def getStartState(self):
"""Return the bot passed in."""
return self._bot
def isGoalState(self, state):
"""Return whether or not the bot is next to the player."""
diff = state.get_pos() - self._player_loc
return diff.y == 0 and (diff.x == 0 or diff.z == 0) and \
abs(diff.x) + abs(diff.z) == 2 and \
state.get_block(self._player_loc + diff/2 + _Vec3(0, -1, 0)) not in \
(_AIR, _LAVA, _WATER)
def getSuccessors(self, state):
"""Return the successors."""
rtn = []
for action in state.get_legal_actions(self._block):
successor = deepcopy(state)
successor.take_action(action)
rtn.append((successor, action, 1))
return rtn
def _mine_heuristic(bot, problem):
"""Return the mining heuristic.
bot is an _ImaginaryBot.
"""
if bot.contains(problem.get_block_id()):
return 0
bot_pos = bot.get_pos()
dest_pos = problem.get_block_loc()
# If man == dy: return man + 1
# If man > dy: return man
# If man < dy: return dy?
man_dist = _manhattan((bot_pos.x, bot_pos.z), (dest_pos.x, dest_pos.z))
y_diff = bot_pos.y - dest_pos.y
if y_diff < 0:
y_diff += 1
if y_diff == 0:
return man_dist
# Transform so that it's only dropping
drop = _DROP if y_diff > 0 else 1
y_diff = abs(y_diff)
drops = _drops(y_diff, drop)
if man_dist > drops:
return man_dist
if man_dist == drops:
return man_dist + 1
if drop == 1:
return drops
if y_diff % drop == 1:
return drops
return drops + 1
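# Worked example (illustrative numbers, not from the original): with the bot at
# (0, 70, 0) and the target block at (5, 64, 3), man_dist = 8 and y_diff = 6, so
# drop = _DROP = 2 and drops = 3; since man_dist > drops, the heuristic returns 8.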
def _drops(dist, drop):
"""Return the number of times it takes to drop a distance dist. drop is the
length of one drop. Both are assumed positive."""
rtn = dist / drop
if dist % drop != 0:
rtn += 1
return rtn
def _return_heuristic(bot, problem):
"""Return the return heuristic.
bot is an _ImaginaryBot.
"""
bot_pos = bot.get_pos()
player_pos = problem.get_player_loc()
    bot_plane_pos = (bot_pos.x, bot_pos.z)
y_diff = bot_pos.y - player_pos.y
drop = _DROP if y_diff > 0 else 1
y_diff = abs(y_diff)
drops = _drops(y_diff, drop)
min_man = float('inf')
for dir_ in _adj_dirs():
loc = player_pos + 2 * dir_
man_dist = _manhattan(bot_plane_pos, (loc.x, loc.z))
if man_dist < min_man:
min_man = man_dist
if man_dist < drops:
return drops
return min_man
def _to_my_vec3(vec):
"""Return the _Vec3 alternative of the Vec3."""
return _Vec3(vec.x, vec.y, vec.z)
def _player_loc():
"""Return the player's location."""
return _to_my_vec3(_get_mc().player.getTilePos())
def _adj_dirs():
"""Return the adjacent directions."""
return [_Vec3(1, 0, 0), _Vec3(-1, 0, 0), _Vec3(0, 0, 1), _Vec3(0, 0, -1)]
def _all_dirs():
"""Return all adjacent directions."""
return _adj_dirs() + [_Vec3(0, 1, 0), _Vec3(0, -1, 0)]
def _manhattan(pos1, pos2):
"""Return the manhattan distance. pos1 and pos2 should be iterable."""
return sum(abs(val1 - val2) for val1, val2 in zip(pos1, pos2))
@singleton
def _get_mc():
"""Return the Minecraft instance."""
return minecraft.Minecraft.create()
def _key_vals(dict_):
"""Return a list of key-val tuples."""
return [(key, val) for key, val in dict_.iteritems()]
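# Minimal usage sketch (an assumption, not part of the original module): it expects a
# running Minecraft server that exposes the mcpi API (e.g. via RaspberryJuice).
if __name__ == '__main__':
    bot = Bot()
    try:
        bot.fetch('DIAMOND_ORE')  # mine a diamond ore block and carry it back to the player
    finally:
        bot.destroy()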
|
normal
|
{
"blob_id": "54f0ed5f705d5ada28721301f297b2b0058773ad",
"index": 2,
"step-1": "<mask token>\n\n\nclass _GenericBot:\n <mask token>\n\n def __init__(self, pos, inventory=None):\n \"\"\"Initialize with an empty inventory.\n\n inventory is a dictionary. If None, an empty one will be used.\"\"\"\n if inventory is None:\n self._inventory = {}\n else:\n self._inventory = deepcopy(inventory)\n self._pos = deepcopy(pos)\n\n def take_action(self, action):\n \"\"\"Take the action (acquired from _get_legal_actions).\"\"\"\n getattr(self, action['func'])(*action.get('args', ()), **action.get\n ('kwargs', {}))\n\n def take_actions(self, actions, seconds=None):\n \"\"\"Take these actions. If seconds is not None, sleep 'seconds' \n seconds.\n \"\"\"\n if not actions:\n return\n self.take_action(actions[0])\n for action in actions[1:]:\n if seconds is not None:\n sleep(seconds)\n self.take_action(action)\n\n def get_pos(self):\n \"\"\"Return the position.\"\"\"\n return deepcopy(self._pos)\n\n def get_legal_actions(self, block_=None):\n \"\"\"Return a list of legal actions.\n\n If block_ is None, return all legal actions. Otherwise, return all\n legal actions that don't involve placing the block.\"\"\"\n return self._get_move_actions(block_) + self._get_mine_actions(\n ) + self._get_placement_actions(block_)\n <mask token>\n <mask token>\n\n def _place(self, loc, exclude=None, block_=None):\n \"\"\"Place a block from the inventory only.\n\n If exclude is not None, place a block that is not 'exclude'.\n If block is not None, place that block only.\n \"\"\"\n if not self._inventory:\n raise Exception('Inventory empty')\n if block_ is None:\n for key in self._inventory:\n if key != exclude:\n block_ = key\n break\n else:\n raise Exception(\n 'You requested not to place %s, but it is the only block in the inventory.'\n % exclude)\n if block_ not in self._inventory:\n raise Exception('Block %s is not in the inventory' % block_)\n if self._inventory[block_] == 1:\n del self._inventory[block_]\n else:\n self._inventory[block_] -= 1\n self._set_block(loc, block_)\n <mask token>\n <mask token>\n <mask token>\n\n def _mine(self, loc):\n \"\"\"Mine the block.\"\"\"\n block_ = self._get_block(loc)\n self._add_to_inv(block_)\n self._set_block(loc, _AIR)\n\n def _get_move_actions(self, exclude=None):\n \"\"\"Return a list of legal movement actions.\n\n exclude is the block to exclude.\n \"\"\"\n rtn = []\n can_move_up = self._get_block(self._pos + _Vec3(0, 2, 0)) in {_AIR,\n _WATER}\n if can_move_up:\n if self._surrounded():\n rtn.append({'func': '_move', 'args': (self._pos + _Vec3(0, \n 1, 0),)})\n else:\n rtn.append({'func': '_move_up', 'args': (exclude,)})\n hidden_block = self._get_block(self._pos + _Vec3(0, -2, 0))\n if hidden_block == _WATER or hidden_block not in {_AIR, _LAVA}:\n rtn.append({'func': '_move_down'})\n for dir_ in _adj_dirs():\n rtn.extend(self._side_moves(dir_, can_move_up))\n return rtn\n\n def _side_moves(self, dir_, can_move_up):\n \"\"\"Return the list of side moves.\n\n dir_ is an adjacent direction.\n can_move_up is a boolean for whether or not the bot can move up.\n \"\"\"\n rtn = []\n base_pos = self._pos + dir_\n base_block = self._get_block(base_pos)\n empty_blocks = {_AIR, _WATER}\n if can_move_up and base_block not in {_AIR, _LAVA, _WATER}:\n for vert_dir in [_Vec3(0, 1, 0), _Vec3(0, 2, 0)]:\n if self._get_block(base_pos + vert_dir) not in empty_blocks:\n break\n else:\n rtn.append({'func': '_move', 'args': (base_pos + _Vec3(0, 1,\n 0),)})\n for vert_dir in [_Vec3(), _Vec3(0, 1, 0)]:\n if self._get_block(base_pos + vert_dir) not in empty_blocks:\n 
break\n else:\n pos = base_pos + _Vec3(0, -1, 0)\n for _ in xrange(_DROP_PLUS_1):\n block_ = self._get_block(pos)\n if block_ != _AIR:\n if block_ != _LAVA:\n rtn.append({'func': '_move', 'args': (pos + _Vec3(0,\n 1, 0),)})\n break\n pos.y -= 1\n <mask token>\n\n def _get_mine_actions(self):\n \"\"\"Return a list of legal mining actions (that only involve mining\n and not moving).\"\"\"\n rtn = []\n dont_mine = {_AIR, _WATER, _LAVA}\n pos_above = self._pos + _Vec3(0, 2, 0)\n if self._get_block(pos_above) not in dont_mine:\n rtn.append({'func': '_mine', 'args': (pos_above,)})\n for dir_ in _adj_dirs():\n pos = self._pos + dir_\n for _ in xrange(2):\n if self._get_block(pos) not in dont_mine:\n rtn.append({'func': '_mine', 'args': (pos,)})\n pos = pos + _Vec3(0, 1, 0)\n return rtn\n\n def _get_placement_actions(self, exclude=None):\n \"\"\"Return a list of legal actions that only involve placing a block\n from the inventory.\n\n exclude is a block id. It is the block that should not be placed. If None,\n any block can be placed.\"\"\"\n if not self._has_blocks_to_place(exclude=exclude):\n return []\n dirs = [_Vec3(0, 2, 0)]\n for dir_ in _adj_dirs():\n dirs.extend([dir_, dir_ + _Vec3(0, 1, 0)])\n if self._get_block(self._pos + dir_) in [_AIR, _WATER]:\n dirs.append(dir_ + _Vec3(0, -1, 0))\n rtn = []\n for dir_ in dirs:\n pos = self._pos + dir_\n if self._can_place(pos):\n rtn.append({'func': '_place', 'args': (pos,), 'kwargs': {\n 'exclude': exclude}})\n return rtn\n <mask token>\n\n def _has_blocks_to_place(self, exclude=None):\n \"\"\"Return whether or not the bot can place a block from the\n inventory. If exclude is None, any block can be placed.\"\"\"\n for block_ in self._inventory:\n if block_ != exclude:\n return True\n return False\n <mask token>\n <mask token>\n\n\nclass _ImaginaryBot(_GenericBot):\n \"\"\"A bot used for finding paths that doesn't actually change blocks\n in the world.\"\"\"\n\n def __init__(self, pos, inventory=None):\n \"\"\"Create a new bot.\"\"\"\n _GenericBot.__init__(self, pos, inventory)\n self._changes = {}\n\n def _set_block(self, pos, block_):\n \"\"\"Set a block. 
block_ is the block id.\"\"\"\n self._changes[deepcopy(pos)] = block\n\n def _get_block(self, pos):\n \"\"\"Get the block at the position.\"\"\"\n if pos in self._changes:\n return self._changes[pos]\n else:\n return _get_mc().getBlock(pos)\n\n def get_block(self, pos):\n \"\"\"The public version.\"\"\"\n return self._get_block(pos)\n\n def __hash__(self):\n \"\"\"Return the hash.\"\"\"\n return hash(frozenset([self._pos] + _key_vals(self._inventory) +\n _key_vals(self._changes)))\n\n\nclass Bot(_GenericBot):\n \"\"\"The real bot.\n\n All vector arguments are Vec3s.\"\"\"\n _BOT_BLOCK = block.IRON_BLOCK.id\n\n def __init__(self):\n \"\"\"Create a bot next to the player.\"\"\"\n pos = _get_mc().player.getTilePos() + Vec3(2, 0, 0)\n pos = _Vec3(pos.x, pos.y, pos.z)\n _GenericBot.__init__(self, pos)\n self._pos = pos\n self._move(self._pos)\n\n @staticmethod\n def destroy_all():\n \"\"\"Destroy all bots within a small distance (in case I forget to\n destroy one).\"\"\"\n player_loc = _player_loc()\n minec = _get_mc()\n rad = 10\n for x in xrange(player_loc.x - rad, player_loc.x + rad):\n for y in xrange(player_loc.y - rad, player_loc.y + rad):\n for z in xrange(player_loc.z - rad, player_loc.z + rad):\n if minec.getBlock(x, y, z) == Bot._BOT_BLOCK:\n minec.setBlock(x, y, z, _AIR)\n\n def destroy(self):\n \"\"\"Set itself to air.\"\"\"\n self._set_block(self._pos, _AIR)\n self._set_block(self._pos + _Vec3(0, 1, 0), _AIR)\n\n def fetch(self, block_name):\n \"\"\"Mine and return a block to the player.\"\"\"\n imag_bot = _ImaginaryBot(self._pos, self._inventory)\n block_id = getattr(block, block_name).id\n block_loc = self._get_block_loc(block_id)\n mine_prob = _MineProblem(imag_bot, block_loc, block_id)\n mine_actions = astar(mine_prob, _mine_heuristic)\n self.take_actions(mine_actions, _DELAY)\n imag_bot = _ImaginaryBot(self._pos, self._inventory)\n player_loc = _player_loc()\n return_prob = _ReturnProblem(imag_bot, block_id, player_loc)\n return_actions = astar(return_prob, _return_heuristic)\n imag_bot.take_actions(return_actions)\n return_actions.append({'func': '_place', 'args': (imag_bot.get_pos(\n ) + player_loc) / 2, 'kwargs': {'block': block_id}})\n self.take_actions(return_actions, _DELAY)\n\n def _get_block_loc(self, block_id):\n \"\"\"Return the location of the block.\"\"\"\n find_prob = FindProblem(self._pos, block_id)\n dirs = bfs(find_prob)\n return self._pos + sum(dirs)\n\n def _set_block(self, pos, block_):\n \"\"\"Place an actual block in the world.\n\n block is a block id.\"\"\"\n _get_mc().setBlock(pos, block_)\n\n def _get_block(self, pos):\n \"\"\"Get the block at the position.\"\"\"\n return _get_mc().getBlock(pos)\n\n def _move(self, pos):\n \"\"\"Move there, and set the appropriate blocks.\"\"\"\n self._set_block(self._pos, _AIR)\n self._set_block(self._pos + _Vec3(0, 1, 0), _AIR)\n self._set_block(pos, self._BOT_BLOCK)\n self._set_block(pos + _Vec3(0, 1, 0), self._BOT_BLOCK)\n self._pos = pos\n\n\nclass FindProblem(SearchProblem):\n \"\"\"Problem for finding the location of a block in the world.\n\n A state in this problem is a location.\n \"\"\"\n\n def __init__(self, start_loc, block_id):\n \"\"\"Initialize.\"\"\"\n self._start_loc = deepcopy(start_loc)\n self._block_id = block_id\n\n def getStartState(self):\n \"\"\"Return the starting location.\"\"\"\n return self._start_loc\n\n def isGoalState(self, state):\n return _get_mc().getBlock(state) == self._block_id\n\n def getSuccessors(self, state):\n \"\"\"Return the successors.\"\"\"\n rtn = []\n for dir_ in 
_all_dirs():\n successor = state + dir_\n if successor.y <= _get_mc().getHeight(successor.x, successor.z\n ) and _get_mc().getBlock(successor) != _BEDROCK:\n rtn.append((successor, dir_, 1))\n return rtn\n\n\nclass _MineProblem(SearchProblem):\n \"\"\"The problem of finding the block and mining it (not returning\n it).\"\"\"\n\n def __init__(self, imag_bot, block_loc, block_id):\n \"\"\"Initialize the problem with an _ImaginaryBot.\n\n block_loc is a Vec3.\n \"\"\"\n self._bot = imag_bot\n self._block_loc = deepcopy(block_loc)\n self._block_id = block_id\n\n def get_block_loc(self):\n \"\"\"Return the block location.\"\"\"\n return deepcopy(self._block_loc)\n\n def get_block_id(self):\n \"\"\"Return the block it's trying to mine.\"\"\"\n return self._block_id\n\n def getStartState(self):\n \"\"\"Return the bot passed in.\"\"\"\n return self._bot\n\n def isGoalState(self, state):\n \"\"\"Return whether or not the bot has the block.\"\"\"\n return state.contains(self._block_id)\n\n def getSuccessors(self, state):\n \"\"\"Return the successors.\"\"\"\n rtn = []\n for action in state.get_legal_actions():\n successor = deepcopy(state)\n successor.take_action(action)\n rtn.append((successor, action, 1))\n return rtn\n\n\nclass _ReturnProblem(SearchProblem):\n \"\"\"The problem of returning to the player. This does not place the block\n next to the player.\"\"\"\n\n def __init__(self, imag_bot, block_, player_loc):\n \"\"\"Initialized the problem with an _ImaginaryBot.\n\n block is a block id.\"\"\"\n self._bot = imag_bot\n self._block = block_\n self._player_loc = player_loc\n\n def get_player_loc(self):\n \"\"\"Return the player location.\"\"\"\n return deepcopy(self._player_loc)\n\n def getStartState(self):\n \"\"\"Return the bot passed in.\"\"\"\n return self._bot\n\n def isGoalState(self, state):\n \"\"\"Return whether or not the bot is next to the player.\"\"\"\n diff = state.get_pos() - self._player_loc\n return diff.y == 0 and (diff.x == 0 or diff.z == 0) and abs(diff.x\n ) + abs(diff.z) == 2 and state.get_block(self._player_loc + \n diff / 2 + _Vec3(0, -1, 0)) not in (_AIR, _LAVA, _WATER)\n\n def getSuccessors(self, state):\n \"\"\"Return the successors.\"\"\"\n rtn = []\n for action in state.get_legal_actions(self._block):\n successor = deepcopy(state)\n successor.take_action(action)\n rtn.append((successor, action, 1))\n return rtn\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass _GenericBot:\n <mask token>\n\n def __init__(self, pos, inventory=None):\n \"\"\"Initialize with an empty inventory.\n\n inventory is a dictionary. If None, an empty one will be used.\"\"\"\n if inventory is None:\n self._inventory = {}\n else:\n self._inventory = deepcopy(inventory)\n self._pos = deepcopy(pos)\n\n def take_action(self, action):\n \"\"\"Take the action (acquired from _get_legal_actions).\"\"\"\n getattr(self, action['func'])(*action.get('args', ()), **action.get\n ('kwargs', {}))\n\n def take_actions(self, actions, seconds=None):\n \"\"\"Take these actions. If seconds is not None, sleep 'seconds' \n seconds.\n \"\"\"\n if not actions:\n return\n self.take_action(actions[0])\n for action in actions[1:]:\n if seconds is not None:\n sleep(seconds)\n self.take_action(action)\n\n def get_pos(self):\n \"\"\"Return the position.\"\"\"\n return deepcopy(self._pos)\n\n def get_legal_actions(self, block_=None):\n \"\"\"Return a list of legal actions.\n\n If block_ is None, return all legal actions. Otherwise, return all\n legal actions that don't involve placing the block.\"\"\"\n return self._get_move_actions(block_) + self._get_mine_actions(\n ) + self._get_placement_actions(block_)\n <mask token>\n <mask token>\n\n def _place(self, loc, exclude=None, block_=None):\n \"\"\"Place a block from the inventory only.\n\n If exclude is not None, place a block that is not 'exclude'.\n If block is not None, place that block only.\n \"\"\"\n if not self._inventory:\n raise Exception('Inventory empty')\n if block_ is None:\n for key in self._inventory:\n if key != exclude:\n block_ = key\n break\n else:\n raise Exception(\n 'You requested not to place %s, but it is the only block in the inventory.'\n % exclude)\n if block_ not in self._inventory:\n raise Exception('Block %s is not in the inventory' % block_)\n if self._inventory[block_] == 1:\n del self._inventory[block_]\n else:\n self._inventory[block_] -= 1\n self._set_block(loc, block_)\n\n def _move_down(self):\n \"\"\"Move and mine the block below.\"\"\"\n new_pos = self._pos + _Vec3(0, -1, 0)\n block_ = self._get_block(new_pos)\n if block_ != _WATER:\n self._add_to_inv(block_)\n self._move(new_pos)\n <mask token>\n <mask token>\n\n def _mine(self, loc):\n \"\"\"Mine the block.\"\"\"\n block_ = self._get_block(loc)\n self._add_to_inv(block_)\n self._set_block(loc, _AIR)\n\n def _get_move_actions(self, exclude=None):\n \"\"\"Return a list of legal movement actions.\n\n exclude is the block to exclude.\n \"\"\"\n rtn = []\n can_move_up = self._get_block(self._pos + _Vec3(0, 2, 0)) in {_AIR,\n _WATER}\n if can_move_up:\n if self._surrounded():\n rtn.append({'func': '_move', 'args': (self._pos + _Vec3(0, \n 1, 0),)})\n else:\n rtn.append({'func': '_move_up', 'args': (exclude,)})\n hidden_block = self._get_block(self._pos + _Vec3(0, -2, 0))\n if hidden_block == _WATER or hidden_block not in {_AIR, _LAVA}:\n rtn.append({'func': '_move_down'})\n for dir_ in _adj_dirs():\n rtn.extend(self._side_moves(dir_, can_move_up))\n return rtn\n\n def _side_moves(self, dir_, can_move_up):\n \"\"\"Return the list of side moves.\n\n dir_ is an adjacent direction.\n can_move_up is a boolean for whether or not the bot can move up.\n \"\"\"\n rtn = []\n base_pos = self._pos + dir_\n base_block = self._get_block(base_pos)\n empty_blocks = {_AIR, _WATER}\n if can_move_up and base_block not in {_AIR, _LAVA, _WATER}:\n for vert_dir in [_Vec3(0, 1, 0), _Vec3(0, 2, 0)]:\n if self._get_block(base_pos + vert_dir) not in 
empty_blocks:\n break\n else:\n rtn.append({'func': '_move', 'args': (base_pos + _Vec3(0, 1,\n 0),)})\n for vert_dir in [_Vec3(), _Vec3(0, 1, 0)]:\n if self._get_block(base_pos + vert_dir) not in empty_blocks:\n break\n else:\n pos = base_pos + _Vec3(0, -1, 0)\n for _ in xrange(_DROP_PLUS_1):\n block_ = self._get_block(pos)\n if block_ != _AIR:\n if block_ != _LAVA:\n rtn.append({'func': '_move', 'args': (pos + _Vec3(0,\n 1, 0),)})\n break\n pos.y -= 1\n <mask token>\n\n def _get_mine_actions(self):\n \"\"\"Return a list of legal mining actions (that only involve mining\n and not moving).\"\"\"\n rtn = []\n dont_mine = {_AIR, _WATER, _LAVA}\n pos_above = self._pos + _Vec3(0, 2, 0)\n if self._get_block(pos_above) not in dont_mine:\n rtn.append({'func': '_mine', 'args': (pos_above,)})\n for dir_ in _adj_dirs():\n pos = self._pos + dir_\n for _ in xrange(2):\n if self._get_block(pos) not in dont_mine:\n rtn.append({'func': '_mine', 'args': (pos,)})\n pos = pos + _Vec3(0, 1, 0)\n return rtn\n\n def _get_placement_actions(self, exclude=None):\n \"\"\"Return a list of legal actions that only involve placing a block\n from the inventory.\n\n exclude is a block id. It is the block that should not be placed. If None,\n any block can be placed.\"\"\"\n if not self._has_blocks_to_place(exclude=exclude):\n return []\n dirs = [_Vec3(0, 2, 0)]\n for dir_ in _adj_dirs():\n dirs.extend([dir_, dir_ + _Vec3(0, 1, 0)])\n if self._get_block(self._pos + dir_) in [_AIR, _WATER]:\n dirs.append(dir_ + _Vec3(0, -1, 0))\n rtn = []\n for dir_ in dirs:\n pos = self._pos + dir_\n if self._can_place(pos):\n rtn.append({'func': '_place', 'args': (pos,), 'kwargs': {\n 'exclude': exclude}})\n return rtn\n <mask token>\n\n def _has_blocks_to_place(self, exclude=None):\n \"\"\"Return whether or not the bot can place a block from the\n inventory. If exclude is None, any block can be placed.\"\"\"\n for block_ in self._inventory:\n if block_ != exclude:\n return True\n return False\n <mask token>\n <mask token>\n\n\nclass _ImaginaryBot(_GenericBot):\n \"\"\"A bot used for finding paths that doesn't actually change blocks\n in the world.\"\"\"\n\n def __init__(self, pos, inventory=None):\n \"\"\"Create a new bot.\"\"\"\n _GenericBot.__init__(self, pos, inventory)\n self._changes = {}\n\n def _set_block(self, pos, block_):\n \"\"\"Set a block. 
block_ is the block id.\"\"\"\n self._changes[deepcopy(pos)] = block\n\n def _get_block(self, pos):\n \"\"\"Get the block at the position.\"\"\"\n if pos in self._changes:\n return self._changes[pos]\n else:\n return _get_mc().getBlock(pos)\n\n def get_block(self, pos):\n \"\"\"The public version.\"\"\"\n return self._get_block(pos)\n\n def __hash__(self):\n \"\"\"Return the hash.\"\"\"\n return hash(frozenset([self._pos] + _key_vals(self._inventory) +\n _key_vals(self._changes)))\n\n\nclass Bot(_GenericBot):\n \"\"\"The real bot.\n\n All vector arguments are Vec3s.\"\"\"\n _BOT_BLOCK = block.IRON_BLOCK.id\n\n def __init__(self):\n \"\"\"Create a bot next to the player.\"\"\"\n pos = _get_mc().player.getTilePos() + Vec3(2, 0, 0)\n pos = _Vec3(pos.x, pos.y, pos.z)\n _GenericBot.__init__(self, pos)\n self._pos = pos\n self._move(self._pos)\n\n @staticmethod\n def destroy_all():\n \"\"\"Destroy all bots within a small distance (in case I forget to\n destroy one).\"\"\"\n player_loc = _player_loc()\n minec = _get_mc()\n rad = 10\n for x in xrange(player_loc.x - rad, player_loc.x + rad):\n for y in xrange(player_loc.y - rad, player_loc.y + rad):\n for z in xrange(player_loc.z - rad, player_loc.z + rad):\n if minec.getBlock(x, y, z) == Bot._BOT_BLOCK:\n minec.setBlock(x, y, z, _AIR)\n\n def destroy(self):\n \"\"\"Set itself to air.\"\"\"\n self._set_block(self._pos, _AIR)\n self._set_block(self._pos + _Vec3(0, 1, 0), _AIR)\n\n def fetch(self, block_name):\n \"\"\"Mine and return a block to the player.\"\"\"\n imag_bot = _ImaginaryBot(self._pos, self._inventory)\n block_id = getattr(block, block_name).id\n block_loc = self._get_block_loc(block_id)\n mine_prob = _MineProblem(imag_bot, block_loc, block_id)\n mine_actions = astar(mine_prob, _mine_heuristic)\n self.take_actions(mine_actions, _DELAY)\n imag_bot = _ImaginaryBot(self._pos, self._inventory)\n player_loc = _player_loc()\n return_prob = _ReturnProblem(imag_bot, block_id, player_loc)\n return_actions = astar(return_prob, _return_heuristic)\n imag_bot.take_actions(return_actions)\n return_actions.append({'func': '_place', 'args': (imag_bot.get_pos(\n ) + player_loc) / 2, 'kwargs': {'block': block_id}})\n self.take_actions(return_actions, _DELAY)\n\n def _get_block_loc(self, block_id):\n \"\"\"Return the location of the block.\"\"\"\n find_prob = FindProblem(self._pos, block_id)\n dirs = bfs(find_prob)\n return self._pos + sum(dirs)\n\n def _set_block(self, pos, block_):\n \"\"\"Place an actual block in the world.\n\n block is a block id.\"\"\"\n _get_mc().setBlock(pos, block_)\n\n def _get_block(self, pos):\n \"\"\"Get the block at the position.\"\"\"\n return _get_mc().getBlock(pos)\n\n def _move(self, pos):\n \"\"\"Move there, and set the appropriate blocks.\"\"\"\n self._set_block(self._pos, _AIR)\n self._set_block(self._pos + _Vec3(0, 1, 0), _AIR)\n self._set_block(pos, self._BOT_BLOCK)\n self._set_block(pos + _Vec3(0, 1, 0), self._BOT_BLOCK)\n self._pos = pos\n\n\nclass FindProblem(SearchProblem):\n \"\"\"Problem for finding the location of a block in the world.\n\n A state in this problem is a location.\n \"\"\"\n\n def __init__(self, start_loc, block_id):\n \"\"\"Initialize.\"\"\"\n self._start_loc = deepcopy(start_loc)\n self._block_id = block_id\n\n def getStartState(self):\n \"\"\"Return the starting location.\"\"\"\n return self._start_loc\n\n def isGoalState(self, state):\n return _get_mc().getBlock(state) == self._block_id\n\n def getSuccessors(self, state):\n \"\"\"Return the successors.\"\"\"\n rtn = []\n for dir_ in 
_all_dirs():\n successor = state + dir_\n if successor.y <= _get_mc().getHeight(successor.x, successor.z\n ) and _get_mc().getBlock(successor) != _BEDROCK:\n rtn.append((successor, dir_, 1))\n return rtn\n\n\nclass _MineProblem(SearchProblem):\n \"\"\"The problem of finding the block and mining it (not returning\n it).\"\"\"\n\n def __init__(self, imag_bot, block_loc, block_id):\n \"\"\"Initialize the problem with an _ImaginaryBot.\n\n block_loc is a Vec3.\n \"\"\"\n self._bot = imag_bot\n self._block_loc = deepcopy(block_loc)\n self._block_id = block_id\n\n def get_block_loc(self):\n \"\"\"Return the block location.\"\"\"\n return deepcopy(self._block_loc)\n\n def get_block_id(self):\n \"\"\"Return the block it's trying to mine.\"\"\"\n return self._block_id\n\n def getStartState(self):\n \"\"\"Return the bot passed in.\"\"\"\n return self._bot\n\n def isGoalState(self, state):\n \"\"\"Return whether or not the bot has the block.\"\"\"\n return state.contains(self._block_id)\n\n def getSuccessors(self, state):\n \"\"\"Return the successors.\"\"\"\n rtn = []\n for action in state.get_legal_actions():\n successor = deepcopy(state)\n successor.take_action(action)\n rtn.append((successor, action, 1))\n return rtn\n\n\nclass _ReturnProblem(SearchProblem):\n \"\"\"The problem of returning to the player. This does not place the block\n next to the player.\"\"\"\n\n def __init__(self, imag_bot, block_, player_loc):\n \"\"\"Initialized the problem with an _ImaginaryBot.\n\n block is a block id.\"\"\"\n self._bot = imag_bot\n self._block = block_\n self._player_loc = player_loc\n\n def get_player_loc(self):\n \"\"\"Return the player location.\"\"\"\n return deepcopy(self._player_loc)\n\n def getStartState(self):\n \"\"\"Return the bot passed in.\"\"\"\n return self._bot\n\n def isGoalState(self, state):\n \"\"\"Return whether or not the bot is next to the player.\"\"\"\n diff = state.get_pos() - self._player_loc\n return diff.y == 0 and (diff.x == 0 or diff.z == 0) and abs(diff.x\n ) + abs(diff.z) == 2 and state.get_block(self._player_loc + \n diff / 2 + _Vec3(0, -1, 0)) not in (_AIR, _LAVA, _WATER)\n\n def getSuccessors(self, state):\n \"\"\"Return the successors.\"\"\"\n rtn = []\n for action in state.get_legal_actions(self._block):\n successor = deepcopy(state)\n successor.take_action(action)\n rtn.append((successor, action, 1))\n return rtn\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass _GenericBot:\n <mask token>\n\n def __init__(self, pos, inventory=None):\n \"\"\"Initialize with an empty inventory.\n\n inventory is a dictionary. If None, an empty one will be used.\"\"\"\n if inventory is None:\n self._inventory = {}\n else:\n self._inventory = deepcopy(inventory)\n self._pos = deepcopy(pos)\n\n def take_action(self, action):\n \"\"\"Take the action (acquired from _get_legal_actions).\"\"\"\n getattr(self, action['func'])(*action.get('args', ()), **action.get\n ('kwargs', {}))\n\n def take_actions(self, actions, seconds=None):\n \"\"\"Take these actions. If seconds is not None, sleep 'seconds' \n seconds.\n \"\"\"\n if not actions:\n return\n self.take_action(actions[0])\n for action in actions[1:]:\n if seconds is not None:\n sleep(seconds)\n self.take_action(action)\n\n def get_pos(self):\n \"\"\"Return the position.\"\"\"\n return deepcopy(self._pos)\n\n def get_legal_actions(self, block_=None):\n \"\"\"Return a list of legal actions.\n\n If block_ is None, return all legal actions. Otherwise, return all\n legal actions that don't involve placing the block.\"\"\"\n return self._get_move_actions(block_) + self._get_mine_actions(\n ) + self._get_placement_actions(block_)\n\n def contains(self, block_):\n \"\"\"Return whether or not the bot contains the block id.\"\"\"\n return block_ in self._inventory\n\n def _get_block(self, pos):\n \"\"\"Get the block at the position.\"\"\"\n raise NotImplementedError\n\n def _place(self, loc, exclude=None, block_=None):\n \"\"\"Place a block from the inventory only.\n\n If exclude is not None, place a block that is not 'exclude'.\n If block is not None, place that block only.\n \"\"\"\n if not self._inventory:\n raise Exception('Inventory empty')\n if block_ is None:\n for key in self._inventory:\n if key != exclude:\n block_ = key\n break\n else:\n raise Exception(\n 'You requested not to place %s, but it is the only block in the inventory.'\n % exclude)\n if block_ not in self._inventory:\n raise Exception('Block %s is not in the inventory' % block_)\n if self._inventory[block_] == 1:\n del self._inventory[block_]\n else:\n self._inventory[block_] -= 1\n self._set_block(loc, block_)\n\n def _move_down(self):\n \"\"\"Move and mine the block below.\"\"\"\n new_pos = self._pos + _Vec3(0, -1, 0)\n block_ = self._get_block(new_pos)\n if block_ != _WATER:\n self._add_to_inv(block_)\n self._move(new_pos)\n <mask token>\n\n def _move_up(self, exclude=None):\n \"\"\"Move and place a block below.\n\n If exclude is not None, place a block that is not 'exclude'.\n \"\"\"\n self._move(self._pos + _Vec3(0, 1, 0))\n self._place(self._pos + _Vec3(0, -1, 0), exclude)\n\n def _mine(self, loc):\n \"\"\"Mine the block.\"\"\"\n block_ = self._get_block(loc)\n self._add_to_inv(block_)\n self._set_block(loc, _AIR)\n\n def _get_move_actions(self, exclude=None):\n \"\"\"Return a list of legal movement actions.\n\n exclude is the block to exclude.\n \"\"\"\n rtn = []\n can_move_up = self._get_block(self._pos + _Vec3(0, 2, 0)) in {_AIR,\n _WATER}\n if can_move_up:\n if self._surrounded():\n rtn.append({'func': '_move', 'args': (self._pos + _Vec3(0, \n 1, 0),)})\n else:\n rtn.append({'func': '_move_up', 'args': (exclude,)})\n hidden_block = self._get_block(self._pos + _Vec3(0, -2, 0))\n if hidden_block == _WATER or hidden_block not in {_AIR, _LAVA}:\n rtn.append({'func': '_move_down'})\n for dir_ in _adj_dirs():\n rtn.extend(self._side_moves(dir_, can_move_up))\n return rtn\n\n def _side_moves(self, dir_, can_move_up):\n 
\"\"\"Return the list of side moves.\n\n dir_ is an adjacent direction.\n can_move_up is a boolean for whether or not the bot can move up.\n \"\"\"\n rtn = []\n base_pos = self._pos + dir_\n base_block = self._get_block(base_pos)\n empty_blocks = {_AIR, _WATER}\n if can_move_up and base_block not in {_AIR, _LAVA, _WATER}:\n for vert_dir in [_Vec3(0, 1, 0), _Vec3(0, 2, 0)]:\n if self._get_block(base_pos + vert_dir) not in empty_blocks:\n break\n else:\n rtn.append({'func': '_move', 'args': (base_pos + _Vec3(0, 1,\n 0),)})\n for vert_dir in [_Vec3(), _Vec3(0, 1, 0)]:\n if self._get_block(base_pos + vert_dir) not in empty_blocks:\n break\n else:\n pos = base_pos + _Vec3(0, -1, 0)\n for _ in xrange(_DROP_PLUS_1):\n block_ = self._get_block(pos)\n if block_ != _AIR:\n if block_ != _LAVA:\n rtn.append({'func': '_move', 'args': (pos + _Vec3(0,\n 1, 0),)})\n break\n pos.y -= 1\n\n def _surrounded(self):\n \"\"\"Return whether or not the bot is surrounded by water.\"\"\"\n for dir_ in _adj_dirs():\n if self._get_block(self._pos + dir_) != _WATER:\n return False\n return True\n\n def _get_mine_actions(self):\n \"\"\"Return a list of legal mining actions (that only involve mining\n and not moving).\"\"\"\n rtn = []\n dont_mine = {_AIR, _WATER, _LAVA}\n pos_above = self._pos + _Vec3(0, 2, 0)\n if self._get_block(pos_above) not in dont_mine:\n rtn.append({'func': '_mine', 'args': (pos_above,)})\n for dir_ in _adj_dirs():\n pos = self._pos + dir_\n for _ in xrange(2):\n if self._get_block(pos) not in dont_mine:\n rtn.append({'func': '_mine', 'args': (pos,)})\n pos = pos + _Vec3(0, 1, 0)\n return rtn\n\n def _get_placement_actions(self, exclude=None):\n \"\"\"Return a list of legal actions that only involve placing a block\n from the inventory.\n\n exclude is a block id. It is the block that should not be placed. If None,\n any block can be placed.\"\"\"\n if not self._has_blocks_to_place(exclude=exclude):\n return []\n dirs = [_Vec3(0, 2, 0)]\n for dir_ in _adj_dirs():\n dirs.extend([dir_, dir_ + _Vec3(0, 1, 0)])\n if self._get_block(self._pos + dir_) in [_AIR, _WATER]:\n dirs.append(dir_ + _Vec3(0, -1, 0))\n rtn = []\n for dir_ in dirs:\n pos = self._pos + dir_\n if self._can_place(pos):\n rtn.append({'func': '_place', 'args': (pos,), 'kwargs': {\n 'exclude': exclude}})\n return rtn\n\n def _can_place(self, loc):\n \"\"\"Return whether or not the bot can place a block at that location\n independent of what it has in its inventory.\"\"\"\n non_blocks = [_AIR, _WATER, _LAVA]\n player = [self._pos, self._pos + _Vec3(0, 1, 0)]\n for dir_ in (_adj_dirs + [_Vec3(0, 1, 0), _Vec3(0, -1, 0)]):\n new_loc = loc + dir_\n if new_loc not in player and self._get_block(new_loc\n ) not in non_blocks:\n return True\n return False\n\n def _has_blocks_to_place(self, exclude=None):\n \"\"\"Return whether or not the bot can place a block from the\n inventory. If exclude is None, any block can be placed.\"\"\"\n for block_ in self._inventory:\n if block_ != exclude:\n return True\n return False\n <mask token>\n <mask token>\n\n\nclass _ImaginaryBot(_GenericBot):\n \"\"\"A bot used for finding paths that doesn't actually change blocks\n in the world.\"\"\"\n\n def __init__(self, pos, inventory=None):\n \"\"\"Create a new bot.\"\"\"\n _GenericBot.__init__(self, pos, inventory)\n self._changes = {}\n\n def _set_block(self, pos, block_):\n \"\"\"Set a block. 
block_ is the block id.\"\"\"\n self._changes[deepcopy(pos)] = block\n\n def _get_block(self, pos):\n \"\"\"Get the block at the position.\"\"\"\n if pos in self._changes:\n return self._changes[pos]\n else:\n return _get_mc().getBlock(pos)\n\n def get_block(self, pos):\n \"\"\"The public version.\"\"\"\n return self._get_block(pos)\n\n def __hash__(self):\n \"\"\"Return the hash.\"\"\"\n return hash(frozenset([self._pos] + _key_vals(self._inventory) +\n _key_vals(self._changes)))\n\n\nclass Bot(_GenericBot):\n \"\"\"The real bot.\n\n All vector arguments are Vec3s.\"\"\"\n _BOT_BLOCK = block.IRON_BLOCK.id\n\n def __init__(self):\n \"\"\"Create a bot next to the player.\"\"\"\n pos = _get_mc().player.getTilePos() + Vec3(2, 0, 0)\n pos = _Vec3(pos.x, pos.y, pos.z)\n _GenericBot.__init__(self, pos)\n self._pos = pos\n self._move(self._pos)\n\n @staticmethod\n def destroy_all():\n \"\"\"Destroy all bots within a small distance (in case I forget to\n destroy one).\"\"\"\n player_loc = _player_loc()\n minec = _get_mc()\n rad = 10\n for x in xrange(player_loc.x - rad, player_loc.x + rad):\n for y in xrange(player_loc.y - rad, player_loc.y + rad):\n for z in xrange(player_loc.z - rad, player_loc.z + rad):\n if minec.getBlock(x, y, z) == Bot._BOT_BLOCK:\n minec.setBlock(x, y, z, _AIR)\n\n def destroy(self):\n \"\"\"Set itself to air.\"\"\"\n self._set_block(self._pos, _AIR)\n self._set_block(self._pos + _Vec3(0, 1, 0), _AIR)\n\n def fetch(self, block_name):\n \"\"\"Mine and return a block to the player.\"\"\"\n imag_bot = _ImaginaryBot(self._pos, self._inventory)\n block_id = getattr(block, block_name).id\n block_loc = self._get_block_loc(block_id)\n mine_prob = _MineProblem(imag_bot, block_loc, block_id)\n mine_actions = astar(mine_prob, _mine_heuristic)\n self.take_actions(mine_actions, _DELAY)\n imag_bot = _ImaginaryBot(self._pos, self._inventory)\n player_loc = _player_loc()\n return_prob = _ReturnProblem(imag_bot, block_id, player_loc)\n return_actions = astar(return_prob, _return_heuristic)\n imag_bot.take_actions(return_actions)\n return_actions.append({'func': '_place', 'args': (imag_bot.get_pos(\n ) + player_loc) / 2, 'kwargs': {'block': block_id}})\n self.take_actions(return_actions, _DELAY)\n\n def _get_block_loc(self, block_id):\n \"\"\"Return the location of the block.\"\"\"\n find_prob = FindProblem(self._pos, block_id)\n dirs = bfs(find_prob)\n return self._pos + sum(dirs)\n\n def _set_block(self, pos, block_):\n \"\"\"Place an actual block in the world.\n\n block is a block id.\"\"\"\n _get_mc().setBlock(pos, block_)\n\n def _get_block(self, pos):\n \"\"\"Get the block at the position.\"\"\"\n return _get_mc().getBlock(pos)\n\n def _move(self, pos):\n \"\"\"Move there, and set the appropriate blocks.\"\"\"\n self._set_block(self._pos, _AIR)\n self._set_block(self._pos + _Vec3(0, 1, 0), _AIR)\n self._set_block(pos, self._BOT_BLOCK)\n self._set_block(pos + _Vec3(0, 1, 0), self._BOT_BLOCK)\n self._pos = pos\n\n\nclass FindProblem(SearchProblem):\n \"\"\"Problem for finding the location of a block in the world.\n\n A state in this problem is a location.\n \"\"\"\n\n def __init__(self, start_loc, block_id):\n \"\"\"Initialize.\"\"\"\n self._start_loc = deepcopy(start_loc)\n self._block_id = block_id\n\n def getStartState(self):\n \"\"\"Return the starting location.\"\"\"\n return self._start_loc\n\n def isGoalState(self, state):\n return _get_mc().getBlock(state) == self._block_id\n\n def getSuccessors(self, state):\n \"\"\"Return the successors.\"\"\"\n rtn = []\n for dir_ in 
_all_dirs():\n successor = state + dir_\n if successor.y <= _get_mc().getHeight(successor.x, successor.z\n ) and _get_mc().getBlock(successor) != _BEDROCK:\n rtn.append((successor, dir_, 1))\n return rtn\n\n\nclass _MineProblem(SearchProblem):\n \"\"\"The problem of finding the block and mining it (not returning\n it).\"\"\"\n\n def __init__(self, imag_bot, block_loc, block_id):\n \"\"\"Initialize the problem with an _ImaginaryBot.\n\n block_loc is a Vec3.\n \"\"\"\n self._bot = imag_bot\n self._block_loc = deepcopy(block_loc)\n self._block_id = block_id\n\n def get_block_loc(self):\n \"\"\"Return the block location.\"\"\"\n return deepcopy(self._block_loc)\n\n def get_block_id(self):\n \"\"\"Return the block it's trying to mine.\"\"\"\n return self._block_id\n\n def getStartState(self):\n \"\"\"Return the bot passed in.\"\"\"\n return self._bot\n\n def isGoalState(self, state):\n \"\"\"Return whether or not the bot has the block.\"\"\"\n return state.contains(self._block_id)\n\n def getSuccessors(self, state):\n \"\"\"Return the successors.\"\"\"\n rtn = []\n for action in state.get_legal_actions():\n successor = deepcopy(state)\n successor.take_action(action)\n rtn.append((successor, action, 1))\n return rtn\n\n\nclass _ReturnProblem(SearchProblem):\n \"\"\"The problem of returning to the player. This does not place the block\n next to the player.\"\"\"\n\n def __init__(self, imag_bot, block_, player_loc):\n \"\"\"Initialized the problem with an _ImaginaryBot.\n\n block is a block id.\"\"\"\n self._bot = imag_bot\n self._block = block_\n self._player_loc = player_loc\n\n def get_player_loc(self):\n \"\"\"Return the player location.\"\"\"\n return deepcopy(self._player_loc)\n\n def getStartState(self):\n \"\"\"Return the bot passed in.\"\"\"\n return self._bot\n\n def isGoalState(self, state):\n \"\"\"Return whether or not the bot is next to the player.\"\"\"\n diff = state.get_pos() - self._player_loc\n return diff.y == 0 and (diff.x == 0 or diff.z == 0) and abs(diff.x\n ) + abs(diff.z) == 2 and state.get_block(self._player_loc + \n diff / 2 + _Vec3(0, -1, 0)) not in (_AIR, _LAVA, _WATER)\n\n def getSuccessors(self, state):\n \"\"\"Return the successors.\"\"\"\n rtn = []\n for action in state.get_legal_actions(self._block):\n successor = deepcopy(state)\n successor.take_action(action)\n rtn.append((successor, action, 1))\n return rtn\n\n\n<mask token>\n",
"step-4": "<mask token>\n\n\nclass _GenericBot:\n <mask token>\n\n def __init__(self, pos, inventory=None):\n \"\"\"Initialize with an empty inventory.\n\n inventory is a dictionary. If None, an empty one will be used.\"\"\"\n if inventory is None:\n self._inventory = {}\n else:\n self._inventory = deepcopy(inventory)\n self._pos = deepcopy(pos)\n\n def take_action(self, action):\n \"\"\"Take the action (acquired from _get_legal_actions).\"\"\"\n getattr(self, action['func'])(*action.get('args', ()), **action.get\n ('kwargs', {}))\n\n def take_actions(self, actions, seconds=None):\n \"\"\"Take these actions. If seconds is not None, sleep 'seconds' \n seconds.\n \"\"\"\n if not actions:\n return\n self.take_action(actions[0])\n for action in actions[1:]:\n if seconds is not None:\n sleep(seconds)\n self.take_action(action)\n\n def get_pos(self):\n \"\"\"Return the position.\"\"\"\n return deepcopy(self._pos)\n\n def get_legal_actions(self, block_=None):\n \"\"\"Return a list of legal actions.\n\n If block_ is None, return all legal actions. Otherwise, return all\n legal actions that don't involve placing the block.\"\"\"\n return self._get_move_actions(block_) + self._get_mine_actions(\n ) + self._get_placement_actions(block_)\n\n def contains(self, block_):\n \"\"\"Return whether or not the bot contains the block id.\"\"\"\n return block_ in self._inventory\n\n def _get_block(self, pos):\n \"\"\"Get the block at the position.\"\"\"\n raise NotImplementedError\n\n def _place(self, loc, exclude=None, block_=None):\n \"\"\"Place a block from the inventory only.\n\n If exclude is not None, place a block that is not 'exclude'.\n If block is not None, place that block only.\n \"\"\"\n if not self._inventory:\n raise Exception('Inventory empty')\n if block_ is None:\n for key in self._inventory:\n if key != exclude:\n block_ = key\n break\n else:\n raise Exception(\n 'You requested not to place %s, but it is the only block in the inventory.'\n % exclude)\n if block_ not in self._inventory:\n raise Exception('Block %s is not in the inventory' % block_)\n if self._inventory[block_] == 1:\n del self._inventory[block_]\n else:\n self._inventory[block_] -= 1\n self._set_block(loc, block_)\n\n def _move_down(self):\n \"\"\"Move and mine the block below.\"\"\"\n new_pos = self._pos + _Vec3(0, -1, 0)\n block_ = self._get_block(new_pos)\n if block_ != _WATER:\n self._add_to_inv(block_)\n self._move(new_pos)\n <mask token>\n\n def _move_up(self, exclude=None):\n \"\"\"Move and place a block below.\n\n If exclude is not None, place a block that is not 'exclude'.\n \"\"\"\n self._move(self._pos + _Vec3(0, 1, 0))\n self._place(self._pos + _Vec3(0, -1, 0), exclude)\n\n def _mine(self, loc):\n \"\"\"Mine the block.\"\"\"\n block_ = self._get_block(loc)\n self._add_to_inv(block_)\n self._set_block(loc, _AIR)\n\n def _get_move_actions(self, exclude=None):\n \"\"\"Return a list of legal movement actions.\n\n exclude is the block to exclude.\n \"\"\"\n rtn = []\n can_move_up = self._get_block(self._pos + _Vec3(0, 2, 0)) in {_AIR,\n _WATER}\n if can_move_up:\n if self._surrounded():\n rtn.append({'func': '_move', 'args': (self._pos + _Vec3(0, \n 1, 0),)})\n else:\n rtn.append({'func': '_move_up', 'args': (exclude,)})\n hidden_block = self._get_block(self._pos + _Vec3(0, -2, 0))\n if hidden_block == _WATER or hidden_block not in {_AIR, _LAVA}:\n rtn.append({'func': '_move_down'})\n for dir_ in _adj_dirs():\n rtn.extend(self._side_moves(dir_, can_move_up))\n return rtn\n\n def _side_moves(self, dir_, can_move_up):\n 
\"\"\"Return the list of side moves.\n\n dir_ is an adjacent direction.\n can_move_up is a boolean for whether or not the bot can move up.\n \"\"\"\n rtn = []\n base_pos = self._pos + dir_\n base_block = self._get_block(base_pos)\n empty_blocks = {_AIR, _WATER}\n if can_move_up and base_block not in {_AIR, _LAVA, _WATER}:\n for vert_dir in [_Vec3(0, 1, 0), _Vec3(0, 2, 0)]:\n if self._get_block(base_pos + vert_dir) not in empty_blocks:\n break\n else:\n rtn.append({'func': '_move', 'args': (base_pos + _Vec3(0, 1,\n 0),)})\n for vert_dir in [_Vec3(), _Vec3(0, 1, 0)]:\n if self._get_block(base_pos + vert_dir) not in empty_blocks:\n break\n else:\n pos = base_pos + _Vec3(0, -1, 0)\n for _ in xrange(_DROP_PLUS_1):\n block_ = self._get_block(pos)\n if block_ != _AIR:\n if block_ != _LAVA:\n rtn.append({'func': '_move', 'args': (pos + _Vec3(0,\n 1, 0),)})\n break\n pos.y -= 1\n\n def _surrounded(self):\n \"\"\"Return whether or not the bot is surrounded by water.\"\"\"\n for dir_ in _adj_dirs():\n if self._get_block(self._pos + dir_) != _WATER:\n return False\n return True\n\n def _get_mine_actions(self):\n \"\"\"Return a list of legal mining actions (that only involve mining\n and not moving).\"\"\"\n rtn = []\n dont_mine = {_AIR, _WATER, _LAVA}\n pos_above = self._pos + _Vec3(0, 2, 0)\n if self._get_block(pos_above) not in dont_mine:\n rtn.append({'func': '_mine', 'args': (pos_above,)})\n for dir_ in _adj_dirs():\n pos = self._pos + dir_\n for _ in xrange(2):\n if self._get_block(pos) not in dont_mine:\n rtn.append({'func': '_mine', 'args': (pos,)})\n pos = pos + _Vec3(0, 1, 0)\n return rtn\n\n def _get_placement_actions(self, exclude=None):\n \"\"\"Return a list of legal actions that only involve placing a block\n from the inventory.\n\n exclude is a block id. It is the block that should not be placed. If None,\n any block can be placed.\"\"\"\n if not self._has_blocks_to_place(exclude=exclude):\n return []\n dirs = [_Vec3(0, 2, 0)]\n for dir_ in _adj_dirs():\n dirs.extend([dir_, dir_ + _Vec3(0, 1, 0)])\n if self._get_block(self._pos + dir_) in [_AIR, _WATER]:\n dirs.append(dir_ + _Vec3(0, -1, 0))\n rtn = []\n for dir_ in dirs:\n pos = self._pos + dir_\n if self._can_place(pos):\n rtn.append({'func': '_place', 'args': (pos,), 'kwargs': {\n 'exclude': exclude}})\n return rtn\n\n def _can_place(self, loc):\n \"\"\"Return whether or not the bot can place a block at that location\n independent of what it has in its inventory.\"\"\"\n non_blocks = [_AIR, _WATER, _LAVA]\n player = [self._pos, self._pos + _Vec3(0, 1, 0)]\n for dir_ in (_adj_dirs + [_Vec3(0, 1, 0), _Vec3(0, -1, 0)]):\n new_loc = loc + dir_\n if new_loc not in player and self._get_block(new_loc\n ) not in non_blocks:\n return True\n return False\n\n def _has_blocks_to_place(self, exclude=None):\n \"\"\"Return whether or not the bot can place a block from the\n inventory. If exclude is None, any block can be placed.\"\"\"\n for block_ in self._inventory:\n if block_ != exclude:\n return True\n return False\n\n def _set_block(self, pos, block_):\n \"\"\"Set a block. block_ is the block id.\"\"\"\n raise NotImplementedError\n\n def _move(self, pos):\n \"\"\"Move there only.\"\"\"\n self._pos = deepcopy(pos)\n\n\nclass _ImaginaryBot(_GenericBot):\n \"\"\"A bot used for finding paths that doesn't actually change blocks\n in the world.\"\"\"\n\n def __init__(self, pos, inventory=None):\n \"\"\"Create a new bot.\"\"\"\n _GenericBot.__init__(self, pos, inventory)\n self._changes = {}\n\n def _set_block(self, pos, block_):\n \"\"\"Set a block. 
block_ is the block id.\"\"\"\n self._changes[deepcopy(pos)] = block\n\n def _get_block(self, pos):\n \"\"\"Get the block at the position.\"\"\"\n if pos in self._changes:\n return self._changes[pos]\n else:\n return _get_mc().getBlock(pos)\n\n def get_block(self, pos):\n \"\"\"The public version.\"\"\"\n return self._get_block(pos)\n\n def __hash__(self):\n \"\"\"Return the hash.\"\"\"\n return hash(frozenset([self._pos] + _key_vals(self._inventory) +\n _key_vals(self._changes)))\n\n\nclass Bot(_GenericBot):\n \"\"\"The real bot.\n\n All vector arguments are Vec3s.\"\"\"\n _BOT_BLOCK = block.IRON_BLOCK.id\n\n def __init__(self):\n \"\"\"Create a bot next to the player.\"\"\"\n pos = _get_mc().player.getTilePos() + Vec3(2, 0, 0)\n pos = _Vec3(pos.x, pos.y, pos.z)\n _GenericBot.__init__(self, pos)\n self._pos = pos\n self._move(self._pos)\n\n @staticmethod\n def destroy_all():\n \"\"\"Destroy all bots within a small distance (in case I forget to\n destroy one).\"\"\"\n player_loc = _player_loc()\n minec = _get_mc()\n rad = 10\n for x in xrange(player_loc.x - rad, player_loc.x + rad):\n for y in xrange(player_loc.y - rad, player_loc.y + rad):\n for z in xrange(player_loc.z - rad, player_loc.z + rad):\n if minec.getBlock(x, y, z) == Bot._BOT_BLOCK:\n minec.setBlock(x, y, z, _AIR)\n\n def destroy(self):\n \"\"\"Set itself to air.\"\"\"\n self._set_block(self._pos, _AIR)\n self._set_block(self._pos + _Vec3(0, 1, 0), _AIR)\n\n def fetch(self, block_name):\n \"\"\"Mine and return a block to the player.\"\"\"\n imag_bot = _ImaginaryBot(self._pos, self._inventory)\n block_id = getattr(block, block_name).id\n block_loc = self._get_block_loc(block_id)\n mine_prob = _MineProblem(imag_bot, block_loc, block_id)\n mine_actions = astar(mine_prob, _mine_heuristic)\n self.take_actions(mine_actions, _DELAY)\n imag_bot = _ImaginaryBot(self._pos, self._inventory)\n player_loc = _player_loc()\n return_prob = _ReturnProblem(imag_bot, block_id, player_loc)\n return_actions = astar(return_prob, _return_heuristic)\n imag_bot.take_actions(return_actions)\n return_actions.append({'func': '_place', 'args': (imag_bot.get_pos(\n ) + player_loc) / 2, 'kwargs': {'block': block_id}})\n self.take_actions(return_actions, _DELAY)\n\n def _get_block_loc(self, block_id):\n \"\"\"Return the location of the block.\"\"\"\n find_prob = FindProblem(self._pos, block_id)\n dirs = bfs(find_prob)\n return self._pos + sum(dirs)\n\n def _set_block(self, pos, block_):\n \"\"\"Place an actual block in the world.\n\n block is a block id.\"\"\"\n _get_mc().setBlock(pos, block_)\n\n def _get_block(self, pos):\n \"\"\"Get the block at the position.\"\"\"\n return _get_mc().getBlock(pos)\n\n def _move(self, pos):\n \"\"\"Move there, and set the appropriate blocks.\"\"\"\n self._set_block(self._pos, _AIR)\n self._set_block(self._pos + _Vec3(0, 1, 0), _AIR)\n self._set_block(pos, self._BOT_BLOCK)\n self._set_block(pos + _Vec3(0, 1, 0), self._BOT_BLOCK)\n self._pos = pos\n\n\nclass FindProblem(SearchProblem):\n \"\"\"Problem for finding the location of a block in the world.\n\n A state in this problem is a location.\n \"\"\"\n\n def __init__(self, start_loc, block_id):\n \"\"\"Initialize.\"\"\"\n self._start_loc = deepcopy(start_loc)\n self._block_id = block_id\n\n def getStartState(self):\n \"\"\"Return the starting location.\"\"\"\n return self._start_loc\n\n def isGoalState(self, state):\n return _get_mc().getBlock(state) == self._block_id\n\n def getSuccessors(self, state):\n \"\"\"Return the successors.\"\"\"\n rtn = []\n for dir_ in 
_all_dirs():\n successor = state + dir_\n if successor.y <= _get_mc().getHeight(successor.x, successor.z\n ) and _get_mc().getBlock(successor) != _BEDROCK:\n rtn.append((successor, dir_, 1))\n return rtn\n\n\nclass _MineProblem(SearchProblem):\n \"\"\"The problem of finding the block and mining it (not returning\n it).\"\"\"\n\n def __init__(self, imag_bot, block_loc, block_id):\n \"\"\"Initialize the problem with an _ImaginaryBot.\n\n block_loc is a Vec3.\n \"\"\"\n self._bot = imag_bot\n self._block_loc = deepcopy(block_loc)\n self._block_id = block_id\n\n def get_block_loc(self):\n \"\"\"Return the block location.\"\"\"\n return deepcopy(self._block_loc)\n\n def get_block_id(self):\n \"\"\"Return the block it's trying to mine.\"\"\"\n return self._block_id\n\n def getStartState(self):\n \"\"\"Return the bot passed in.\"\"\"\n return self._bot\n\n def isGoalState(self, state):\n \"\"\"Return whether or not the bot has the block.\"\"\"\n return state.contains(self._block_id)\n\n def getSuccessors(self, state):\n \"\"\"Return the successors.\"\"\"\n rtn = []\n for action in state.get_legal_actions():\n successor = deepcopy(state)\n successor.take_action(action)\n rtn.append((successor, action, 1))\n return rtn\n\n\nclass _ReturnProblem(SearchProblem):\n \"\"\"The problem of returning to the player. This does not place the block\n next to the player.\"\"\"\n\n def __init__(self, imag_bot, block_, player_loc):\n \"\"\"Initialized the problem with an _ImaginaryBot.\n\n block is a block id.\"\"\"\n self._bot = imag_bot\n self._block = block_\n self._player_loc = player_loc\n\n def get_player_loc(self):\n \"\"\"Return the player location.\"\"\"\n return deepcopy(self._player_loc)\n\n def getStartState(self):\n \"\"\"Return the bot passed in.\"\"\"\n return self._bot\n\n def isGoalState(self, state):\n \"\"\"Return whether or not the bot is next to the player.\"\"\"\n diff = state.get_pos() - self._player_loc\n return diff.y == 0 and (diff.x == 0 or diff.z == 0) and abs(diff.x\n ) + abs(diff.z) == 2 and state.get_block(self._player_loc + \n diff / 2 + _Vec3(0, -1, 0)) not in (_AIR, _LAVA, _WATER)\n\n def getSuccessors(self, state):\n \"\"\"Return the successors.\"\"\"\n rtn = []\n for action in state.get_legal_actions(self._block):\n successor = deepcopy(state)\n successor.take_action(action)\n rtn.append((successor, action, 1))\n return rtn\n\n\n<mask token>\n",
"step-5": "\"\"\"Module for the bot\"\"\"\n\nfrom copy import deepcopy\nfrom time import sleep\n\nimport mcpi.minecraft as minecraft\nfrom mcpi.vec3 import Vec3\nimport mcpi.block as block\n\nfrom search import SearchProblem, astar, bfs\nfrom singleton import singleton\n\n_AIR = block.AIR.id\n_WATER = block.WATER.id\n_LAVA = block.LAVA.id\n_BEDROCK = block.BEDROCK.id\n\n_DROP = 2 # It can drop at most this many\n_DROP_PLUS_1 = _DROP + 1\n_DELAY = 1\n\n\nclass _Vec3(Vec3):\n \"\"\"A Vec3 that is hashable. Everything in this program should use this\n class.\"\"\"\n\n def __hash__(self):\n \"\"\"Return the hash.\"\"\"\n return hash((self.x, self.y, self.z))\n\n def clone(self):\n \"\"\"Return a clone.\"\"\"\n return _Vec3(self.x, self.y, self.z)\n\n\nclass _GenericBot:\n \"\"\"A generic bot.\"\"\"\n\n def __init__(self, pos, inventory=None):\n \"\"\"Initialize with an empty inventory.\n\n inventory is a dictionary. If None, an empty one will be used.\"\"\"\n if inventory is None:\n self._inventory = {}\n else:\n self._inventory = deepcopy(inventory)\n self._pos = deepcopy(pos)\n\n def take_action(self, action):\n \"\"\"Take the action (acquired from _get_legal_actions).\"\"\"\n getattr(self, action['func'])(\n *action.get('args', ()), \n **action.get('kwargs', {})\n )\n\n def take_actions(self, actions, seconds=None):\n \"\"\"Take these actions. If seconds is not None, sleep 'seconds' \n seconds.\n \"\"\"\n if not actions:\n return\n\n self.take_action(actions[0])\n for action in actions[1:]:\n if seconds is not None:\n sleep(seconds)\n self.take_action(action)\n\n def get_pos(self):\n \"\"\"Return the position.\"\"\"\n return deepcopy(self._pos)\n\n def get_legal_actions(self, block_=None):\n \"\"\"Return a list of legal actions.\n\n If block_ is None, return all legal actions. Otherwise, return all\n legal actions that don't involve placing the block.\"\"\"\n return self._get_move_actions(block_) + self._get_mine_actions() + \\\n self._get_placement_actions(block_)\n\n def contains(self, block_):\n \"\"\"Return whether or not the bot contains the block id.\"\"\"\n return block_ in self._inventory\n\n def _get_block(self, pos):\n \"\"\"Get the block at the position.\"\"\"\n raise NotImplementedError\n\n def _place(self, loc, exclude=None, block_=None):\n \"\"\"Place a block from the inventory only.\n\n If exclude is not None, place a block that is not 'exclude'.\n If block is not None, place that block only.\n \"\"\"\n if not self._inventory:\n raise Exception('Inventory empty')\n\n if block_ is None:\n for key in self._inventory:\n if key != exclude:\n block_ = key\n break\n else:\n raise Exception((\n 'You requested not to place %s, but it is the only '\n 'block in the inventory.' 
% exclude\n ))\n\n if block_ not in self._inventory:\n raise Exception('Block %s is not in the inventory' % block_)\n\n if self._inventory[block_] == 1:\n del self._inventory[block_]\n else:\n self._inventory[block_] -= 1\n\n self._set_block(loc, block_)\n \n\n def _move_down(self):\n \"\"\"Move and mine the block below.\"\"\"\n new_pos = self._pos + _Vec3(0, -1, 0)\n block_ = self._get_block(new_pos)\n if block_ != _WATER:\n self._add_to_inv(block_)\n self._move(new_pos)\n \n def _add_to_inv(self, block_):\n \"\"\"Add the block to the inventory.\"\"\"\n if block_ in self._inventory:\n self._inventory[block_] += 1\n else:\n self._inventory[block_] = 1\n\n def _move_up(self, exclude=None):\n \"\"\"Move and place a block below.\n\n If exclude is not None, place a block that is not 'exclude'.\n \"\"\"\n self._move(self._pos + _Vec3(0, 1, 0))\n self._place(self._pos + _Vec3(0, -1, 0), exclude)\n\n def _mine(self, loc):\n \"\"\"Mine the block.\"\"\"\n block_ = self._get_block(loc)\n self._add_to_inv(block_)\n self._set_block(loc, _AIR)\n\n def _get_move_actions(self, exclude=None):\n \"\"\"Return a list of legal movement actions.\n\n exclude is the block to exclude.\n \"\"\"\n rtn = []\n\n # Check for moving up\n can_move_up = self._get_block(self._pos + _Vec3(0, 2, 0)) in {_AIR, _WATER}\n if can_move_up:\n if self._surrounded():\n rtn.append({\n 'func': '_move',\n 'args': (self._pos + _Vec3(0, 1, 0),)\n })\n else:\n rtn.append({\n 'func': '_move_up',\n 'args': (exclude,)\n })\n\n # Check for moving down\n hidden_block = self._get_block(self._pos + _Vec3(0, -2, 0))\n if hidden_block == _WATER or hidden_block not in {_AIR, _LAVA}:\n rtn.append({'func': '_move_down'})\n\n # Check for side moves \n for dir_ in _adj_dirs():\n rtn.extend(self._side_moves(dir_, can_move_up))\n\n return rtn\n\n def _side_moves(self, dir_, can_move_up):\n \"\"\"Return the list of side moves.\n\n dir_ is an adjacent direction.\n can_move_up is a boolean for whether or not the bot can move up.\n \"\"\"\n rtn = []\n base_pos = self._pos + dir_\n base_block = self._get_block(base_pos)\n empty_blocks = {_AIR, _WATER}\n\n # Check if it can move up\n if can_move_up and base_block not in {_AIR, _LAVA, _WATER}:\n for vert_dir in [_Vec3(0, 1, 0), _Vec3(0, 2, 0)]:\n if self._get_block(base_pos + vert_dir) not in empty_blocks:\n break\n else:\n rtn.append({\n 'func': '_move',\n 'args': (base_pos + _Vec3(0, 1, 0),)\n })\n\n # Check if it can move in that direction\n for vert_dir in [_Vec3(), _Vec3(0, 1, 0)]:\n if self._get_block(base_pos + vert_dir) not in empty_blocks:\n break\n\n # Fall\n else:\n pos = base_pos + _Vec3(0, -1, 0)\n for _ in xrange(_DROP_PLUS_1):\n block_ = self._get_block(pos)\n if block_ != _AIR:\n if block_ != _LAVA:\n rtn.append({\n 'func': '_move',\n 'args': (pos + _Vec3(0, 1, 0),)\n })\n break\n pos.y -= 1 \n \n def _surrounded(self):\n \"\"\"Return whether or not the bot is surrounded by water.\"\"\"\n for dir_ in _adj_dirs():\n if self._get_block(self._pos + dir_) != _WATER:\n return False\n return True\n\n def _get_mine_actions(self):\n \"\"\"Return a list of legal mining actions (that only involve mining\n and not moving).\"\"\"\n rtn = []\n dont_mine = {_AIR, _WATER, _LAVA}\n # Mine above.\n pos_above = self._pos + _Vec3(0, 2, 0)\n if self._get_block(pos_above) not in dont_mine:\n rtn.append({\n 'func': '_mine',\n 'args': (pos_above,)\n })\n\n for dir_ in _adj_dirs():\n pos = self._pos + dir_\n for _ in xrange(2):\n if self._get_block(pos) not in dont_mine:\n rtn.append({\n 'func': '_mine',\n 'args': 
(pos,)\n })\n pos = pos + _Vec3(0, 1, 0)\n\n return rtn\n\n def _get_placement_actions(self, exclude=None):\n \"\"\"Return a list of legal actions that only involve placing a block\n from the inventory.\n\n exclude is a block id. It is the block that should not be placed. If None,\n any block can be placed.\"\"\"\n if not self._has_blocks_to_place(exclude=exclude):\n return []\n\n dirs = [_Vec3(0, 2, 0)]\n for dir_ in _adj_dirs():\n dirs.extend([dir_, dir_ + _Vec3(0, 1, 0)])\n if self._get_block(self._pos + dir_) in [_AIR, _WATER]:\n dirs.append(dir_ + _Vec3(0, -1, 0))\n\n rtn = []\n for dir_ in dirs:\n pos = self._pos + dir_\n if self._can_place(pos):\n rtn.append({\n 'func': '_place',\n 'args': (pos,),\n 'kwargs': {'exclude': exclude}\n })\n\n return rtn\n\n def _can_place(self, loc):\n \"\"\"Return whether or not the bot can place a block at that location\n independent of what it has in its inventory.\"\"\"\n non_blocks = [_AIR, _WATER, _LAVA]\n player = [self._pos, self._pos + _Vec3(0, 1, 0)]\n for dir_ in _adj_dirs + [_Vec3(0, 1, 0), _Vec3(0, -1, 0)]:\n new_loc = loc + dir_\n if new_loc not in player and self._get_block(new_loc) \\\n not in non_blocks:\n return True\n return False\n\n def _has_blocks_to_place(self, exclude=None):\n \"\"\"Return whether or not the bot can place a block from the\n inventory. If exclude is None, any block can be placed.\"\"\"\n for block_ in self._inventory:\n if block_ != exclude:\n return True\n return False\n\n def _set_block(self, pos, block_):\n \"\"\"Set a block. block_ is the block id.\"\"\"\n raise NotImplementedError\n\n def _move(self, pos):\n \"\"\"Move there only.\"\"\"\n self._pos = deepcopy(pos)\n\n\nclass _ImaginaryBot(_GenericBot):\n \"\"\"A bot used for finding paths that doesn't actually change blocks\n in the world.\"\"\"\n\n def __init__(self, pos, inventory=None):\n \"\"\"Create a new bot.\"\"\"\n _GenericBot.__init__(self, pos, inventory)\n self._changes = {} # Changes to the world\n\n def _set_block(self, pos, block_):\n \"\"\"Set a block. 
block_ is the block id.\"\"\"\n self._changes[deepcopy(pos)] = block\n\n def _get_block(self, pos):\n \"\"\"Get the block at the position.\"\"\"\n if pos in self._changes:\n return self._changes[pos]\n else:\n return _get_mc().getBlock(pos)\n\n def get_block(self, pos):\n \"\"\"The public version.\"\"\"\n return self._get_block(pos)\n\n def __hash__(self):\n \"\"\"Return the hash.\"\"\"\n return hash(frozenset([self._pos] + \\\n _key_vals(self._inventory) + \\\n _key_vals(self._changes)\n ))\n\n\nclass Bot(_GenericBot):\n \"\"\"The real bot.\n\n All vector arguments are Vec3s.\"\"\"\n\n _BOT_BLOCK = block.IRON_BLOCK.id\n\n def __init__(self):\n \"\"\"Create a bot next to the player.\"\"\"\n pos = _get_mc().player.getTilePos() + Vec3(2, 0, 0)\n pos = _Vec3(pos.x, pos.y, pos.z)\n _GenericBot.__init__(self, pos)\n self._pos = pos\n self._move(self._pos)\n\n @staticmethod\n def destroy_all():\n \"\"\"Destroy all bots within a small distance (in case I forget to\n destroy one).\"\"\"\n player_loc = _player_loc()\n minec = _get_mc()\n rad = 10\n for x in xrange(player_loc.x - rad, player_loc.x + rad):\n for y in xrange(player_loc.y - rad, player_loc.y + rad):\n for z in xrange(player_loc.z - rad, player_loc.z + rad):\n if minec.getBlock(x, y, z) == Bot._BOT_BLOCK:\n minec.setBlock(x, y, z, _AIR)\n\n def destroy(self):\n \"\"\"Set itself to air.\"\"\"\n self._set_block(self._pos, _AIR)\n self._set_block(self._pos + _Vec3(0, 1, 0), _AIR)\n\n def fetch(self, block_name):\n \"\"\"Mine and return a block to the player.\"\"\"\n imag_bot = _ImaginaryBot(self._pos, self._inventory)\n block_id = getattr(block, block_name).id\n block_loc = self._get_block_loc(block_id)\n mine_prob = _MineProblem(imag_bot, block_loc, block_id)\n mine_actions = astar(mine_prob, _mine_heuristic)\n self.take_actions(mine_actions, _DELAY)\n imag_bot = _ImaginaryBot(self._pos, self._inventory)\n player_loc = _player_loc()\n return_prob = _ReturnProblem(imag_bot, block_id, player_loc)\n return_actions = astar(return_prob, _return_heuristic)\n imag_bot.take_actions(return_actions)\n return_actions.append({\n 'func': '_place',\n 'args': (imag_bot.get_pos() + player_loc) / 2,\n 'kwargs': {'block': block_id}\n })\n self.take_actions(return_actions, _DELAY)\n\n def _get_block_loc(self, block_id):\n \"\"\"Return the location of the block.\"\"\"\n find_prob = FindProblem(self._pos, block_id)\n dirs = bfs(find_prob)\n return self._pos + sum(dirs)\n\n def _set_block(self, pos, block_):\n \"\"\"Place an actual block in the world.\n\n block is a block id.\"\"\"\n _get_mc().setBlock(pos, block_)\n\n def _get_block(self, pos):\n \"\"\"Get the block at the position.\"\"\"\n return _get_mc().getBlock(pos)\n\n def _move(self, pos):\n \"\"\"Move there, and set the appropriate blocks.\"\"\"\n self._set_block(self._pos, _AIR)\n self._set_block(self._pos + _Vec3(0, 1, 0), _AIR)\n self._set_block(pos, self._BOT_BLOCK)\n self._set_block(pos + _Vec3(0, 1, 0), self._BOT_BLOCK)\n self._pos = pos\n\n\nclass FindProblem(SearchProblem):\n \"\"\"Problem for finding the location of a block in the world.\n\n A state in this problem is a location.\n \"\"\"\n\n def __init__(self, start_loc, block_id):\n \"\"\"Initialize.\"\"\"\n self._start_loc = deepcopy(start_loc)\n self._block_id = block_id\n\n def getStartState(self):\n \"\"\"Return the starting location.\"\"\"\n return self._start_loc\n\n def isGoalState(self, state):\n return _get_mc().getBlock(state) == self._block_id\n\n def getSuccessors(self, state):\n \"\"\"Return the successors.\"\"\"\n rtn = []\n 
for dir_ in _all_dirs():\n successor = state + dir_\n if successor.y <= _get_mc().getHeight(successor.x, successor.z) \\\n and _get_mc().getBlock(successor) != _BEDROCK:\n rtn.append((successor, dir_, 1))\n return rtn\n\n\nclass _MineProblem(SearchProblem):\n \"\"\"The problem of finding the block and mining it (not returning\n it).\"\"\"\n\n def __init__(self, imag_bot, block_loc, block_id):\n \"\"\"Initialize the problem with an _ImaginaryBot.\n\n block_loc is a Vec3.\n \"\"\"\n self._bot = imag_bot\n self._block_loc = deepcopy(block_loc)\n self._block_id = block_id\n\n def get_block_loc(self):\n \"\"\"Return the block location.\"\"\"\n return deepcopy(self._block_loc)\n\n def get_block_id(self):\n \"\"\"Return the block it's trying to mine.\"\"\"\n return self._block_id\n\n def getStartState(self):\n \"\"\"Return the bot passed in.\"\"\"\n return self._bot\n\n def isGoalState(self, state):\n \"\"\"Return whether or not the bot has the block.\"\"\"\n return state.contains(self._block_id)\n\n def getSuccessors(self, state):\n \"\"\"Return the successors.\"\"\"\n rtn = []\n for action in state.get_legal_actions():\n successor = deepcopy(state)\n successor.take_action(action)\n rtn.append((successor, action, 1))\n return rtn\n\n\nclass _ReturnProblem(SearchProblem):\n \"\"\"The problem of returning to the player. This does not place the block\n next to the player.\"\"\"\n\n def __init__(self, imag_bot, block_, player_loc):\n \"\"\"Initialized the problem with an _ImaginaryBot.\n\n block is a block id.\"\"\"\n self._bot = imag_bot\n self._block = block_\n self._player_loc = player_loc\n\n def get_player_loc(self):\n \"\"\"Return the player location.\"\"\"\n return deepcopy(self._player_loc)\n\n def getStartState(self):\n \"\"\"Return the bot passed in.\"\"\"\n return self._bot\n\n def isGoalState(self, state):\n \"\"\"Return whether or not the bot is next to the player.\"\"\"\n diff = state.get_pos() - self._player_loc\n return diff.y == 0 and (diff.x == 0 or diff.z == 0) and \\\n abs(diff.x) + abs(diff.z) == 2 and \\\n state.get_block(self._player_loc + diff/2 + _Vec3(0, -1, 0)) not in \\\n (_AIR, _LAVA, _WATER)\n\n def getSuccessors(self, state):\n \"\"\"Return the successors.\"\"\"\n rtn = []\n for action in state.get_legal_actions(self._block):\n successor = deepcopy(state)\n successor.take_action(action)\n rtn.append((successor, action, 1))\n return rtn\n\n\ndef _mine_heuristic(bot, problem):\n \"\"\"Return the mining heuristic.\n\n bot is an _ImaginaryBot.\n \"\"\"\n if bot.contains(problem.get_block_id()):\n return 0\n\n bot_pos = bot.get_pos()\n dest_pos = problem.get_block_loc()\n\n # If man == dy: return man + 1\n # If man > dy: return man\n # If man < dy: return dy?\n man_dist = _manhattan((bot_pos.x, bot_pos.z), (dest_pos.x, dest_pos.z))\n y_diff = bot_pos.y - dest_pos.y\n if y_diff < 0:\n y_diff += 1\n\n if y_diff == 0:\n return man_dist\n\n # Transform so that it's only dropping\n drop = _DROP if y_diff > 0 else 1\n y_diff = abs(y_diff)\n\n drops = _drops(y_diff, drop)\n\n if man_dist > drops:\n return man_dist\n if man_dist == drops:\n return man_dist + 1\n if drop == 1:\n return drops\n if y_diff % drop == 1:\n return drops\n return drops + 1\n \n\ndef _drops(dist, drop):\n \"\"\"Return the number of times it takes to drop a distance dist. drop is the\n length of one drop. 
Both are assumed positive.\"\"\"\n rtn = dist / drop\n if dist % drop != 0:\n rtn += 1\n return rtn\n \n\ndef _return_heuristic(bot, problem):\n \"\"\"Return the return heuristic.\n\n bot is an _ImaginaryBot.\n \"\"\"\n bot_pos = bot.get_pos()\n player_pos = problem.get_player_loc()\n bot_plane_pos = (bot.x, bot.z)\n\n y_diff = bot_pos.y - player_pos.y\n\n drop = _DROP if y_diff > 0 else 1\n y_diff = abs(y_diff)\n drops = _drops(y_diff, drop)\n min_man = float('inf')\n for dir_ in _adj_dirs():\n loc = player_pos + 2 * dir_\n man_dist = _manhattan(bot_plane_pos, (loc.x, loc.z))\n if man_dist < min_man:\n min_man = man_dist\n if man_dist < drops:\n return drops\n return min_man\n\n\ndef _to_my_vec3(vec):\n \"\"\"Return the _Vec3 alternative of the Vec3.\"\"\"\n return _Vec3(vec.x, vec.y, vec.z)\n\n\ndef _player_loc():\n \"\"\"Return the player's location.\"\"\"\n return _to_my_vec3(_get_mc().player.getTilePos())\n\n\ndef _adj_dirs():\n \"\"\"Return the adjacent directions.\"\"\"\n return [_Vec3(1, 0, 0), _Vec3(-1, 0, 0), _Vec3(0, 0, 1), _Vec3(0, 0, -1)]\n\n\ndef _all_dirs():\n \"\"\"Return all adjacent directions.\"\"\"\n return _adj_dirs() + [_Vec3(0, 1, 0), _Vec3(0, -1, 0)]\n\n\ndef _manhattan(pos1, pos2):\n \"\"\"Return the manhattan distance. pos1 and pos2 should be iterable.\"\"\"\n return sum(abs(val1 - val2) for val1, val2 in zip(pos1, pos2))\n\n\n@singleton\ndef _get_mc():\n \"\"\"Return the Minecraft instance.\"\"\"\n return minecraft.Minecraft.create()\n\n\ndef _key_vals(dict_):\n \"\"\"Return a list of key-val tuples.\"\"\"\n return [(key, val) for key, val in dict_.iteritems()]\n\n",
"step-ids": [
52,
53,
58,
60,
79
]
}
|
[
52,
53,
58,
60,
79
] |
import urllib.request
import json
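#helpers below read individual fields from the local dataset.json file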
def kind():
data={}
with open("dataset.json", "r") as read_file:
data = json.load(read_file)
return data["kind"]
def items():
data={}
with open("dataset.json", "r") as read_file:
data = json.load(read_file)
return data["items"]
#Can add a bunch of other things after referring to data
|
normal
|
{
"blob_id": "630480e9458491a26ea9060bd36541a0d5805a11",
"index": 647,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef kind():\n data = {}\n with open('dataset.json', 'r') as read_file:\n data = json.load(read_file)\n return data['kind']\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef kind():\n data = {}\n with open('dataset.json', 'r') as read_file:\n data = json.load(read_file)\n return data['kind']\n\n\ndef items():\n data = {}\n with open('dataset.json', 'r') as read_file:\n data = json.load(read_file)\n return data['items']\n",
"step-4": "import urllib.request\nimport json\n\n\ndef kind():\n data = {}\n with open('dataset.json', 'r') as read_file:\n data = json.load(read_file)\n return data['kind']\n\n\ndef items():\n data = {}\n with open('dataset.json', 'r') as read_file:\n data = json.load(read_file)\n return data['items']\n",
"step-5": "import urllib.request\nimport json\n\ndef kind():\n data={}\n with open(\"dataset.json\", \"r\") as read_file:\n data = json.load(read_file)\n return data[\"kind\"]\n\ndef items():\n data={}\n with open(\"dataset.json\", \"r\") as read_file:\n data = json.load(read_file)\n return data[\"items\"]\n\n#Can add a bunch of other things after refering to data\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
class Thing3:
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_1|>
class Thing3:
def __init__(self):
self.letters = 'xyz'
<|reserved_special_token_0|>
<|reserved_special_token_1|>
class Thing3:
def __init__(self):
self.letters = 'xyz'
<|reserved_special_token_0|>
print(th.letters)
<|reserved_special_token_1|>
class Thing3:
def __init__(self):
self.letters = 'xyz'
th = Thing3()
print(th.letters)
<|reserved_special_token_1|>
class Thing3:
def __init__(self):
self.letters = 'xyz'
# print(Thing3.letters)
th = Thing3()
print(th.letters)
|
flexible
|
{
"blob_id": "22bf65a20f7398b82f528112d2ba50f1dccd465c",
"index": 6487,
"step-1": "class Thing3:\n <mask token>\n\n\n<mask token>\n",
"step-2": "class Thing3:\n\n def __init__(self):\n self.letters = 'xyz'\n\n\n<mask token>\n",
"step-3": "class Thing3:\n\n def __init__(self):\n self.letters = 'xyz'\n\n\n<mask token>\nprint(th.letters)\n",
"step-4": "class Thing3:\n\n def __init__(self):\n self.letters = 'xyz'\n\n\nth = Thing3()\nprint(th.letters)\n",
"step-5": "\nclass Thing3:\n def __init__(self):\n self.letters = 'xyz'\n\n# print(Thing3.letters)\nth = Thing3()\nprint(th.letters)",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
#!/usr/bin/env python
# coding: utf-8
# In[1]:
#multi layer perceptron with back propagation
import numpy as np
import theano
import matplotlib.pyplot as plt
# In[2]:
inputs=[[0,0],
[1,0],
[0,1],
[1,1]]
outputs=[1,0,0,1]
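#target outputs follow the XNOR truth table for the two inputs above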
# In[3]:
x=theano.tensor.matrix(name='x')
# In[4]:
#Hidden layer: each neuron takes 2 inputs, and there are 3 neurons in total (2 hidden + 1 output)
w1val=np.asarray([np.random.randn(),np.random.randn()])#weight of synapse
w1=theano.shared(w1val,name='w1')
w2val=np.asarray([np.random.randn(),np.random.randn()])#weight of synapse
w2=theano.shared(w2val,name='w2')
w3val=np.asarray([np.random.randn(),np.random.randn()])#weight of synapse
w3=theano.shared(w3val,name='w3')
# In[5]:
#Bias values, one per neuron (initialized near 1)
b1 = theano.shared(1.1,name='b1')
b2 = theano.shared(1.2,name='b2')
b3 = theano.shared(1.3,name='b3')
# In[6]:
#computation for every neuron
#hidden layer
a1sum=theano.tensor.dot(x,w1)+b1
a2sum=theano.tensor.dot(x,w2)+b2
a1=1/(1+theano.tensor.exp(-1*a1sum))
a2=1/(1+theano.tensor.exp(-1*a2sum))
#output layer neuron
#stack combines the two hidden layer outputs & feeds them to the output layer
x2 = theano.tensor.stack([a1,a2],axis=1)
# In[7]:
'''if we write
[[a11,a12,a21,a22],[a33,a34,a43,a44]]-> inputs
what stack will do is
[a11,a33],[a12,a34],[a21,a43],[a22,a44]'''
a3sum=theano.tensor.dot(x2,w3)+b3
a3=1/(1+theano.tensor.exp(-1*a3sum))
#final output
ahat=a3
#actual output
a=theano.tensor.vector(name='a')
# In[8]:
#cost function
cost=-(a*theano.tensor.log(ahat)+(1-a)*theano.tensor.log(1-ahat)).sum()#binary cross-entropy, defined for the sigmoid 1/(1+e^-z)
#GDA role
#for calculating gradient
dcostdw1 = theano.tensor.grad(cost,w1)
dcostdw2 = theano.tensor.grad(cost,w2)
dcostdw3 = theano.tensor.grad(cost,w3)
dcostdb1=theano.tensor.grad(cost,b1)
dcostdb2=theano.tensor.grad(cost,b2)
dcostdb3=theano.tensor.grad(cost,b3)
#apply GDA to update the weights
wn1=w1-0.02*dcostdw1
wn2=w2-0.02*dcostdw2
wn3=w3-0.02*dcostdw3
wb1=b1-0.02*dcostdb1
wb2=b2-0.02*dcostdb2
wb3=b3-0.02*dcostdb3
#theano function for training the algorithm
train=theano.function([x,a],[ahat,cost],updates=[(w1,wn1),(w2,wn2),(w3,wn3),(b1,wb1),(b2,wb2),(b3,wb3)])
cost1=[]
val1=[]
#training a model
for i in range(25000):
pval,costval=train(inputs,outputs)
print(costval)
val1.append(pval)
cost1.append(costval)
# In[9]:
print('the final outputs are:')
for i in range(len(inputs)):
print("the output of x1=%d | x2=%d is %.2f"%(inputs[i][0],inputs[i][1],pval[i]))
plt.plot(cost1,color='red')
plt.show()
# In[ ]:
# In[ ]:
|
normal
|
{
"blob_id": "adec7efceb038c0ecb23c256c23c2ea212752d64",
"index": 4010,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nfor i in range(25000):\n pval, costval = train(inputs, outputs)\n print(costval)\n val1.append(pval)\n cost1.append(costval)\nprint('the final outputs are:')\nfor i in range(len(inputs)):\n print('the output of x1=%d | x2=%d is %.2f' % (inputs[i][0], inputs[i][\n 1], pval[i]))\nplt.plot(cost1, color='red')\nplt.show()\n",
"step-3": "<mask token>\ninputs = [[0, 0], [1, 0], [0, 1], [1, 1]]\noutputs = [1, 0, 0, 1]\nx = theano.tensor.matrix(name='x')\nw1val = np.asarray([np.random.randn(), np.random.randn()])\nw1 = theano.shared(w1val, name='w1')\nw2val = np.asarray([np.random.randn(), np.random.randn()])\nw2 = theano.shared(w2val, name='w2')\nw3val = np.asarray([np.random.randn(), np.random.randn()])\nw3 = theano.shared(w3val, name='w3')\nb1 = theano.shared(1.1, name='b1')\nb2 = theano.shared(1.2, name='b2')\nb3 = theano.shared(1.3, name='b3')\na1sum = theano.tensor.dot(x, w1) + b1\na2sum = theano.tensor.dot(x, w2) + b2\na1 = 1 / (1 + theano.tensor.exp(-1 * a1sum))\na2 = 1 / (1 + theano.tensor.exp(-1 * a2sum))\nx2 = theano.tensor.stack([a1, a2], axis=1)\n<mask token>\na3sum = theano.tensor.dot(x2, w3) + b3\na3 = 1 / (1 + theano.tensor.exp(-1 * a3sum))\nahat = a3\na = theano.tensor.vector(name='a')\ncost = -(a * theano.tensor.log(ahat) + (1 - a) * theano.tensor.log(1 - ahat)\n ).sum()\ndcostdw1 = theano.tensor.grad(cost, w1)\ndcostdw2 = theano.tensor.grad(cost, w2)\ndcostdw3 = theano.tensor.grad(cost, w3)\ndcostdb1 = theano.tensor.grad(cost, b1)\ndcostdb2 = theano.tensor.grad(cost, b2)\ndcostdb3 = theano.tensor.grad(cost, b3)\nwn1 = w1 - 0.02 * dcostdw1\nwn2 = w2 - 0.02 * dcostdw2\nwn3 = w3 - 0.02 * dcostdw3\nwb1 = b1 - 0.02 * dcostdb1\nwb2 = b2 - 0.02 * dcostdb2\nwb3 = b3 - 0.02 * dcostdb3\ntrain = theano.function([x, a], [ahat, cost], updates=[(w1, wn1), (w2, wn2),\n (w3, wn3), (b1, wb1), (b2, wb2), (b3, wb3)])\ncost1 = []\nval1 = []\nfor i in range(25000):\n pval, costval = train(inputs, outputs)\n print(costval)\n val1.append(pval)\n cost1.append(costval)\nprint('the final outputs are:')\nfor i in range(len(inputs)):\n print('the output of x1=%d | x2=%d is %.2f' % (inputs[i][0], inputs[i][\n 1], pval[i]))\nplt.plot(cost1, color='red')\nplt.show()\n",
"step-4": "import numpy as np\nimport theano\nimport matplotlib.pyplot as plt\ninputs = [[0, 0], [1, 0], [0, 1], [1, 1]]\noutputs = [1, 0, 0, 1]\nx = theano.tensor.matrix(name='x')\nw1val = np.asarray([np.random.randn(), np.random.randn()])\nw1 = theano.shared(w1val, name='w1')\nw2val = np.asarray([np.random.randn(), np.random.randn()])\nw2 = theano.shared(w2val, name='w2')\nw3val = np.asarray([np.random.randn(), np.random.randn()])\nw3 = theano.shared(w3val, name='w3')\nb1 = theano.shared(1.1, name='b1')\nb2 = theano.shared(1.2, name='b2')\nb3 = theano.shared(1.3, name='b3')\na1sum = theano.tensor.dot(x, w1) + b1\na2sum = theano.tensor.dot(x, w2) + b2\na1 = 1 / (1 + theano.tensor.exp(-1 * a1sum))\na2 = 1 / (1 + theano.tensor.exp(-1 * a2sum))\nx2 = theano.tensor.stack([a1, a2], axis=1)\n<mask token>\na3sum = theano.tensor.dot(x2, w3) + b3\na3 = 1 / (1 + theano.tensor.exp(-1 * a3sum))\nahat = a3\na = theano.tensor.vector(name='a')\ncost = -(a * theano.tensor.log(ahat) + (1 - a) * theano.tensor.log(1 - ahat)\n ).sum()\ndcostdw1 = theano.tensor.grad(cost, w1)\ndcostdw2 = theano.tensor.grad(cost, w2)\ndcostdw3 = theano.tensor.grad(cost, w3)\ndcostdb1 = theano.tensor.grad(cost, b1)\ndcostdb2 = theano.tensor.grad(cost, b2)\ndcostdb3 = theano.tensor.grad(cost, b3)\nwn1 = w1 - 0.02 * dcostdw1\nwn2 = w2 - 0.02 * dcostdw2\nwn3 = w3 - 0.02 * dcostdw3\nwb1 = b1 - 0.02 * dcostdb1\nwb2 = b2 - 0.02 * dcostdb2\nwb3 = b3 - 0.02 * dcostdb3\ntrain = theano.function([x, a], [ahat, cost], updates=[(w1, wn1), (w2, wn2),\n (w3, wn3), (b1, wb1), (b2, wb2), (b3, wb3)])\ncost1 = []\nval1 = []\nfor i in range(25000):\n pval, costval = train(inputs, outputs)\n print(costval)\n val1.append(pval)\n cost1.append(costval)\nprint('the final outputs are:')\nfor i in range(len(inputs)):\n print('the output of x1=%d | x2=%d is %.2f' % (inputs[i][0], inputs[i][\n 1], pval[i]))\nplt.plot(cost1, color='red')\nplt.show()\n",
"step-5": "#!/usr/bin/env python\n# coding: utf-8\n\n# In[1]:\n\n\n#multi layer perceptron with back propogation\nimport numpy as np\nimport theano\nimport matplotlib.pyplot as plt\n\n\n# In[2]:\n\n\ninputs=[[0,0],\n [1,0],\n [0,1],\n [1,1]]\noutputs=[1,0,0,1]\n\n\n# In[3]:\n\n\nx=theano.tensor.matrix(name='x')\n\n\n# In[4]:\n\n\n#Hidden layer as inputs from every neuron are 2 and we have 3 neuron\nw1val=np.asarray([np.random.randn(),np.random.randn()])#weight of synapse\nw1=theano.shared(w1val,name='w1')\nw2val=np.asarray([np.random.randn(),np.random.randn()])#weight of synapse\nw2=theano.shared(w2val,name='w2')\nw3val=np.asarray([np.random.randn(),np.random.randn()])#weight of synapse\nw3=theano.shared(w3val,name='w3')\n\n\n# In[5]:\n\n\n#Bias value is 1\nb1 = theano.shared(1.1,name='b1')\nb2 = theano.shared(1.2,name='b2')\nb3 = theano.shared(1.3,name='b3')\n\n\n# In[6]:\n\n\n#computation foe every neuron\n#hidden layer\na1sum=theano.tensor.dot(x,w1)+b1\na2sum=theano.tensor.dot(x,w2)+b2\n\na1=1/(1+theano.tensor.exp(-1*a1sum))\na2=1/(1+theano.tensor.exp(-1*a2sum))\n\n#output layer neuron\n#stack is combining two hiding layer values & feeding to the output layer\nx2 = theano.tensor.stack([a1,a2],axis=1)\n\n\n# In[7]:\n\n\n'''if we write\n[[a11,a12,a21,a22],[a33,a34,a43,a44]]-> inputs\nwhat stack will do is\n[a11,a33],[a12,a34],[a21,a43],[a22,a44]'''\n\na3sum=theano.tensor.dot(x2,w3)+b3\na3=1/(1+theano.tensor.exp(-1*a3sum))\n\n#final output\nahat=a3\n\n#actual output\na=theano.tensor.vector(name='a')\n\n\n# In[8]:\n\n\n#cost function\ncost=-(a*theano.tensor.log(ahat)+(1-a)*theano.tensor.log(1-ahat)).sum()#it is defined for 1/1+eraise to -z\n#GDA role\n#for calculating gradient\n\ndcostdw1 = theano.tensor.grad(cost,w1)\ndcostdw2 = theano.tensor.grad(cost,w2)\ndcostdw3 = theano.tensor.grad(cost,w3)\n\ndcostdb1=theano.tensor.grad(cost,b1)\ndcostdb2=theano.tensor.grad(cost,b2)\ndcostdb3=theano.tensor.grad(cost,b3)\n\n#apply GDA to update the weights\nwn1=w1-0.02*dcostdw1\nwn2=w2-0.02*dcostdw2\nwn3=w3-0.02*dcostdw3\n\nwb1=b1-0.02*dcostdb1\nwb2=b2-0.02*dcostdb2\nwb3=b3-0.02*dcostdb3\n#theano function for training the algorithm\ntrain=theano.function([x,a],[ahat,cost],updates=[(w1,wn1),(w2,wn2),(w3,wn3),(b1,wb1),(b2,wb2),(b3,wb3)])\n\ncost1=[]\nval1=[]\n\n#training a model\nfor i in range(25000):\n pval,costval=train(inputs,outputs)\n print(costval)\n val1.append(pval)\n cost1.append(costval)\n\n\n# In[9]:\n\n\nprint('the final outputs are:')\nfor i in range(len(inputs)):\n print(\"the output of x1=%d | x2=%d is %.2f\"%(inputs[i][0],inputs[i][1],pval[i]))\nplt.plot(cost1,color='red')\nplt.show()\n\n\n# In[ ]:\n\n\n\n\n\n# In[ ]:\n\n\n\n\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
class LoginView(Resource):
def __init__(self):
self.db = UsersModel()
self.user_db = IncidentsModel()
def post(self):
data = request.get_json()
username = data['username']
password = data['password']
auth = self.db.authenticate(username, password)
return auth
class UserView(Resource):
def __init__(self):
self.db = UsersModel()
def get(self, id):
access_token = Validations().get_access_token()
if not access_token:
return jsonify({'Message': 'Token needed. Please login'})
else:
res = self.db.get_single_user(id)
return make_response(jsonify({'Response': res}), 201)
def delete(self, id):
access_token = Validations().get_access_token()
user = self.db.check_user_id(id)
if not access_token:
return jsonify({'Message': 'Token needed. Please login'})
elif not user:
return jsonify({'Message': 'User ID does not exist'})
else:
self.db.delete_user(id)
return {'Message': 'User Deleted'}
def put(self, id):
access_token = Validations().get_access_token()
user = self.db.check_user_id(id)
if not access_token:
return jsonify({'Message': 'Token needed. Please login'})
elif not user:
return jsonify({'Message': 'User ID does not exist'})
if access_token:
data = request.get_json()
resp = Validations().validate_user_inputs(data)
if resp == str(resp):
return make_response(jsonify({'Message': resp}), 201)
else:
self.db.update_user(id, resp)
return make_response(jsonify({'Message':
'User Details Updated'}), 201)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class UsersView(Resource):
<|reserved_special_token_0|>
def post(self):
data = request.get_json()
resp = Validations().validate_user_inputs(data)
username = data['username']
user = self.db.register_users(username)
if len(user) != 0:
return make_response(jsonify({'Message':
'Username already exists'}), 202)
elif resp == str(resp):
return make_response(jsonify({'Message': resp}), 201)
else:
self.db.save(resp)
return make_response(jsonify({'Message':
'User Registered. Please login'}), 201)
<|reserved_special_token_0|>
class LoginView(Resource):
def __init__(self):
self.db = UsersModel()
self.user_db = IncidentsModel()
def post(self):
data = request.get_json()
username = data['username']
password = data['password']
auth = self.db.authenticate(username, password)
return auth
class UserView(Resource):
def __init__(self):
self.db = UsersModel()
def get(self, id):
access_token = Validations().get_access_token()
if not access_token:
return jsonify({'Message': 'Token needed. Please login'})
else:
res = self.db.get_single_user(id)
return make_response(jsonify({'Response': res}), 201)
def delete(self, id):
access_token = Validations().get_access_token()
user = self.db.check_user_id(id)
if not access_token:
return jsonify({'Message': 'Token needed. Please login'})
elif not user:
return jsonify({'Message': 'User ID does not exist'})
else:
self.db.delete_user(id)
return {'Message': 'User Deleted'}
def put(self, id):
access_token = Validations().get_access_token()
user = self.db.check_user_id(id)
if not access_token:
return jsonify({'Message': 'Token needed. Please login'})
elif not user:
return jsonify({'Message': 'User ID does not exist'})
if access_token:
data = request.get_json()
resp = Validations().validate_user_inputs(data)
if resp == str(resp):
return make_response(jsonify({'Message': resp}), 201)
else:
self.db.update_user(id, resp)
return make_response(jsonify({'Message':
'User Details Updated'}), 201)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class UsersView(Resource):
def __init__(self):
self.db = UsersModel()
def post(self):
data = request.get_json()
resp = Validations().validate_user_inputs(data)
username = data['username']
user = self.db.register_users(username)
if len(user) != 0:
return make_response(jsonify({'Message':
'Username already exists'}), 202)
elif resp == str(resp):
return make_response(jsonify({'Message': resp}), 201)
else:
self.db.save(resp)
return make_response(jsonify({'Message':
'User Registered. Please login'}), 201)
<|reserved_special_token_0|>
class LoginView(Resource):
def __init__(self):
self.db = UsersModel()
self.user_db = IncidentsModel()
def post(self):
data = request.get_json()
username = data['username']
password = data['password']
auth = self.db.authenticate(username, password)
return auth
class UserView(Resource):
def __init__(self):
self.db = UsersModel()
def get(self, id):
access_token = Validations().get_access_token()
if not access_token:
return jsonify({'Message': 'Token needed. Please login'})
else:
res = self.db.get_single_user(id)
return make_response(jsonify({'Response': res}), 201)
def delete(self, id):
access_token = Validations().get_access_token()
user = self.db.check_user_id(id)
if not access_token:
return jsonify({'Message': 'Token needed. Please login'})
elif not user:
return jsonify({'Message': 'User ID does not exist'})
else:
self.db.delete_user(id)
return {'Message': 'User Deleted'}
def put(self, id):
access_token = Validations().get_access_token()
user = self.db.check_user_id(id)
if not access_token:
return jsonify({'Message': 'Token needed. Please login'})
elif not user:
return jsonify({'Message': 'User ID does not exist'})
if access_token:
data = request.get_json()
resp = Validations().validate_user_inputs(data)
if resp == str(resp):
return make_response(jsonify({'Message': resp}), 201)
else:
self.db.update_user(id, resp)
return make_response(jsonify({'Message':
'User Details Updated'}), 201)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class UsersView(Resource):
def __init__(self):
self.db = UsersModel()
def post(self):
data = request.get_json()
resp = Validations().validate_user_inputs(data)
username = data['username']
user = self.db.register_users(username)
if len(user) != 0:
return make_response(jsonify({'Message':
'Username already exists'}), 202)
elif resp == str(resp):
return make_response(jsonify({'Message': resp}), 201)
else:
self.db.save(resp)
return make_response(jsonify({'Message':
'User Registered. Please login'}), 201)
def get(self):
access_token = Validations().get_access_token()
if not access_token:
return jsonify({'Message': 'Token needed. Please login'})
else:
users = self.db.get_users()
return make_response(jsonify({'Users': users, 'Message':
'All Users'}), 200)
class LoginView(Resource):
def __init__(self):
self.db = UsersModel()
self.user_db = IncidentsModel()
def post(self):
data = request.get_json()
username = data['username']
password = data['password']
auth = self.db.authenticate(username, password)
return auth
class UserView(Resource):
def __init__(self):
self.db = UsersModel()
def get(self, id):
access_token = Validations().get_access_token()
if not access_token:
return jsonify({'Message': 'Token needed. Please login'})
else:
res = self.db.get_single_user(id)
return make_response(jsonify({'Response': res}), 201)
def delete(self, id):
access_token = Validations().get_access_token()
user = self.db.check_user_id(id)
if not access_token:
return jsonify({'Message': 'Token needed. Please login'})
elif not user:
return jsonify({'Message': 'User ID does not exist'})
else:
self.db.delete_user(id)
return {'Message': 'User Deleted'}
def put(self, id):
access_token = Validations().get_access_token()
user = self.db.check_user_id(id)
if not access_token:
return jsonify({'Message': 'Token needed. Please login'})
elif not user:
return jsonify({'Message': 'User ID does not exist'})
if access_token:
data = request.get_json()
resp = Validations().validate_user_inputs(data)
if resp == str(resp):
return make_response(jsonify({'Message': resp}), 201)
else:
self.db.update_user(id, resp)
return make_response(jsonify({'Message':
'User Details Updated'}), 201)
<|reserved_special_token_1|>
from flask_restful import Resource
from flask import jsonify, make_response, request
from ..models.Users import UsersModel
from ..models.Incidents import IncidentsModel
from app.api.validations.validations import Validations
class UsersView(Resource):
def __init__(self):
self.db = UsersModel()
def post(self):
data = request.get_json()
resp = Validations().validate_user_inputs(data)
username = data['username']
user = self.db.register_users(username)
if len(user) != 0:
return make_response(jsonify({
'Message': 'Username already exists'
}), 202)
elif resp == str(resp):
return make_response(jsonify({
"Message": resp
}), 201)
else:
self.db.save(resp)
return make_response(jsonify({
"Message": "User Registered. Please login"
}), 201)
def get(self):
access_token = Validations().get_access_token()
if not access_token:
return jsonify({"Message": "Token needed. Please login"})
else:
users = self.db.get_users()
return make_response(jsonify({
"Users": users,
"Message": "All Users"
}), 200)
class LoginView(Resource):
def __init__(self):
self.db = UsersModel()
self.user_db = IncidentsModel()
def post(self):
data = request.get_json()
username = data['username']
password = data['password']
auth = self.db.authenticate(username, password)
return auth
class UserView(Resource):
def __init__(self):
self.db = UsersModel()
def get(self, id):
access_token = Validations().get_access_token()
if not access_token:
return jsonify({"Message": "Token needed. Please login"})
else:
res = self.db.get_single_user(id)
return make_response(jsonify({
'Response': res
}), 201)
def delete(self, id):
access_token = Validations().get_access_token()
user = self.db.check_user_id(id)
if not access_token:
return jsonify({"Message": "Token needed. Please login"})
elif not user:
return jsonify({"Message": "User ID does not exist"})
else:
self.db.delete_user(id)
return {
"Message": "User Deleted"
}
def put(self, id):
access_token = Validations().get_access_token()
user = self.db.check_user_id(id)
if not access_token:
return jsonify({"Message": "Token needed. Please login"})
elif not user:
return jsonify({"Message": "User ID does not exist"})
if access_token:
data = request.get_json()
resp = Validations().validate_user_inputs(data)
if resp == str(resp):
return make_response(jsonify({
"Message": resp
}), 201)
else:
self.db.update_user(id, resp)
return make_response(jsonify({
'Message': 'User Details Updated'
}), 201)
|
flexible
|
{
"blob_id": "0188355f84054143bd4ff9da63f1128e9eb5b23b",
"index": 2244,
"step-1": "<mask token>\n\n\nclass LoginView(Resource):\n\n def __init__(self):\n self.db = UsersModel()\n self.user_db = IncidentsModel()\n\n def post(self):\n data = request.get_json()\n username = data['username']\n password = data['password']\n auth = self.db.authenticate(username, password)\n return auth\n\n\nclass UserView(Resource):\n\n def __init__(self):\n self.db = UsersModel()\n\n def get(self, id):\n access_token = Validations().get_access_token()\n if not access_token:\n return jsonify({'Message': 'Token needed. Please login'})\n else:\n res = self.db.get_single_user(id)\n return make_response(jsonify({'Response': res}), 201)\n\n def delete(self, id):\n access_token = Validations().get_access_token()\n user = self.db.check_user_id(id)\n if not access_token:\n return jsonify({'Message': 'Token needed. Please login'})\n elif not user:\n return jsonify({'Message': 'User ID does not exist'})\n else:\n self.db.delete_user(id)\n return {'Message': 'User Deleted'}\n\n def put(self, id):\n access_token = Validations().get_access_token()\n user = self.db.check_user_id(id)\n if not access_token:\n return jsonify({'Message': 'Token needed. Please login'})\n elif not user:\n return jsonify({'Message': 'User ID does not exist'})\n if access_token:\n data = request.get_json()\n resp = Validations().validate_user_inputs(data)\n if resp == str(resp):\n return make_response(jsonify({'Message': resp}), 201)\n else:\n self.db.update_user(id, resp)\n return make_response(jsonify({'Message':\n 'User Details Updated'}), 201)\n",
"step-2": "<mask token>\n\n\nclass UsersView(Resource):\n <mask token>\n\n def post(self):\n data = request.get_json()\n resp = Validations().validate_user_inputs(data)\n username = data['username']\n user = self.db.register_users(username)\n if len(user) != 0:\n return make_response(jsonify({'Message':\n 'Username already exists'}), 202)\n elif resp == str(resp):\n return make_response(jsonify({'Message': resp}), 201)\n else:\n self.db.save(resp)\n return make_response(jsonify({'Message':\n 'User Registered. Please login'}), 201)\n <mask token>\n\n\nclass LoginView(Resource):\n\n def __init__(self):\n self.db = UsersModel()\n self.user_db = IncidentsModel()\n\n def post(self):\n data = request.get_json()\n username = data['username']\n password = data['password']\n auth = self.db.authenticate(username, password)\n return auth\n\n\nclass UserView(Resource):\n\n def __init__(self):\n self.db = UsersModel()\n\n def get(self, id):\n access_token = Validations().get_access_token()\n if not access_token:\n return jsonify({'Message': 'Token needed. Please login'})\n else:\n res = self.db.get_single_user(id)\n return make_response(jsonify({'Response': res}), 201)\n\n def delete(self, id):\n access_token = Validations().get_access_token()\n user = self.db.check_user_id(id)\n if not access_token:\n return jsonify({'Message': 'Token needed. Please login'})\n elif not user:\n return jsonify({'Message': 'User ID does not exist'})\n else:\n self.db.delete_user(id)\n return {'Message': 'User Deleted'}\n\n def put(self, id):\n access_token = Validations().get_access_token()\n user = self.db.check_user_id(id)\n if not access_token:\n return jsonify({'Message': 'Token needed. Please login'})\n elif not user:\n return jsonify({'Message': 'User ID does not exist'})\n if access_token:\n data = request.get_json()\n resp = Validations().validate_user_inputs(data)\n if resp == str(resp):\n return make_response(jsonify({'Message': resp}), 201)\n else:\n self.db.update_user(id, resp)\n return make_response(jsonify({'Message':\n 'User Details Updated'}), 201)\n",
"step-3": "<mask token>\n\n\nclass UsersView(Resource):\n\n def __init__(self):\n self.db = UsersModel()\n\n def post(self):\n data = request.get_json()\n resp = Validations().validate_user_inputs(data)\n username = data['username']\n user = self.db.register_users(username)\n if len(user) != 0:\n return make_response(jsonify({'Message':\n 'Username already exists'}), 202)\n elif resp == str(resp):\n return make_response(jsonify({'Message': resp}), 201)\n else:\n self.db.save(resp)\n return make_response(jsonify({'Message':\n 'User Registered. Please login'}), 201)\n <mask token>\n\n\nclass LoginView(Resource):\n\n def __init__(self):\n self.db = UsersModel()\n self.user_db = IncidentsModel()\n\n def post(self):\n data = request.get_json()\n username = data['username']\n password = data['password']\n auth = self.db.authenticate(username, password)\n return auth\n\n\nclass UserView(Resource):\n\n def __init__(self):\n self.db = UsersModel()\n\n def get(self, id):\n access_token = Validations().get_access_token()\n if not access_token:\n return jsonify({'Message': 'Token needed. Please login'})\n else:\n res = self.db.get_single_user(id)\n return make_response(jsonify({'Response': res}), 201)\n\n def delete(self, id):\n access_token = Validations().get_access_token()\n user = self.db.check_user_id(id)\n if not access_token:\n return jsonify({'Message': 'Token needed. Please login'})\n elif not user:\n return jsonify({'Message': 'User ID does not exist'})\n else:\n self.db.delete_user(id)\n return {'Message': 'User Deleted'}\n\n def put(self, id):\n access_token = Validations().get_access_token()\n user = self.db.check_user_id(id)\n if not access_token:\n return jsonify({'Message': 'Token needed. Please login'})\n elif not user:\n return jsonify({'Message': 'User ID does not exist'})\n if access_token:\n data = request.get_json()\n resp = Validations().validate_user_inputs(data)\n if resp == str(resp):\n return make_response(jsonify({'Message': resp}), 201)\n else:\n self.db.update_user(id, resp)\n return make_response(jsonify({'Message':\n 'User Details Updated'}), 201)\n",
"step-4": "<mask token>\n\n\nclass UsersView(Resource):\n\n def __init__(self):\n self.db = UsersModel()\n\n def post(self):\n data = request.get_json()\n resp = Validations().validate_user_inputs(data)\n username = data['username']\n user = self.db.register_users(username)\n if len(user) != 0:\n return make_response(jsonify({'Message':\n 'Username already exists'}), 202)\n elif resp == str(resp):\n return make_response(jsonify({'Message': resp}), 201)\n else:\n self.db.save(resp)\n return make_response(jsonify({'Message':\n 'User Registered. Please login'}), 201)\n\n def get(self):\n access_token = Validations().get_access_token()\n if not access_token:\n return jsonify({'Message': 'Token needed. Please login'})\n else:\n users = self.db.get_users()\n return make_response(jsonify({'Users': users, 'Message':\n 'All Users'}), 200)\n\n\nclass LoginView(Resource):\n\n def __init__(self):\n self.db = UsersModel()\n self.user_db = IncidentsModel()\n\n def post(self):\n data = request.get_json()\n username = data['username']\n password = data['password']\n auth = self.db.authenticate(username, password)\n return auth\n\n\nclass UserView(Resource):\n\n def __init__(self):\n self.db = UsersModel()\n\n def get(self, id):\n access_token = Validations().get_access_token()\n if not access_token:\n return jsonify({'Message': 'Token needed. Please login'})\n else:\n res = self.db.get_single_user(id)\n return make_response(jsonify({'Response': res}), 201)\n\n def delete(self, id):\n access_token = Validations().get_access_token()\n user = self.db.check_user_id(id)\n if not access_token:\n return jsonify({'Message': 'Token needed. Please login'})\n elif not user:\n return jsonify({'Message': 'User ID does not exist'})\n else:\n self.db.delete_user(id)\n return {'Message': 'User Deleted'}\n\n def put(self, id):\n access_token = Validations().get_access_token()\n user = self.db.check_user_id(id)\n if not access_token:\n return jsonify({'Message': 'Token needed. Please login'})\n elif not user:\n return jsonify({'Message': 'User ID does not exist'})\n if access_token:\n data = request.get_json()\n resp = Validations().validate_user_inputs(data)\n if resp == str(resp):\n return make_response(jsonify({'Message': resp}), 201)\n else:\n self.db.update_user(id, resp)\n return make_response(jsonify({'Message':\n 'User Details Updated'}), 201)\n",
"step-5": "from flask_restful import Resource\nfrom flask import jsonify, make_response, request\n\nfrom ..models.Users import UsersModel\n\nfrom ..models.Incidents import IncidentsModel\n\nfrom app.api.validations.validations import Validations\n\n\nclass UsersView(Resource):\n def __init__(self):\n self.db = UsersModel()\n\n def post(self):\n data = request.get_json()\n resp = Validations().validate_user_inputs(data)\n username = data['username']\n user = self.db.register_users(username)\n if len(user) != 0:\n return make_response(jsonify({\n 'Message': 'Username already exists'\n }), 202)\n elif resp == str(resp):\n return make_response(jsonify({\n \"Message\": resp\n }), 201)\n else:\n self.db.save(resp)\n return make_response(jsonify({\n \"Message\": \"User Registered. Please login\"\n }), 201)\n\n def get(self):\n access_token = Validations().get_access_token()\n if not access_token:\n return jsonify({\"Message\": \"Token needed. Please login\"})\n else:\n users = self.db.get_users()\n return make_response(jsonify({\n \"Users\": users,\n \"Message\": \"All Users\"\n }), 200)\n\n\nclass LoginView(Resource):\n def __init__(self):\n self.db = UsersModel()\n self.user_db = IncidentsModel()\n\n def post(self):\n data = request.get_json()\n username = data['username']\n password = data['password']\n auth = self.db.authenticate(username, password)\n return auth\n\n\nclass UserView(Resource):\n def __init__(self):\n self.db = UsersModel()\n\n def get(self, id):\n access_token = Validations().get_access_token()\n if not access_token:\n return jsonify({\"Message\": \"Token needed. Please login\"})\n else:\n res = self.db.get_single_user(id)\n return make_response(jsonify({\n 'Response': res\n }), 201)\n\n def delete(self, id):\n access_token = Validations().get_access_token()\n user = self.db.check_user_id(id)\n if not access_token:\n return jsonify({\"Message\": \"Token needed. Please login\"})\n elif not user:\n return jsonify({\"Message\": \"User ID does not exist\"})\n else:\n self.db.delete_user(id)\n return {\n \"Message\": \"User Deleted\"\n }\n\n def put(self, id):\n access_token = Validations().get_access_token()\n user = self.db.check_user_id(id)\n if not access_token:\n return jsonify({\"Message\": \"Token needed. Please login\"})\n elif not user:\n return jsonify({\"Message\": \"User ID does not exist\"})\n if access_token:\n data = request.get_json()\n resp = Validations().validate_user_inputs(data)\n if resp == str(resp):\n return make_response(jsonify({\n \"Message\": resp\n }), 201)\n else:\n self.db.update_user(id, resp)\n return make_response(jsonify({\n 'Message': 'User Details Updated'\n }), 201)\n",
"step-ids": [
8,
10,
11,
12,
14
]
}
|
[
8,
10,
11,
12,
14
] |
# Generated by Django 2.1 on 2018-12-05 00:02
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('PleniApp', '0006_auto_20181203_1144'),
]
operations = [
migrations.CreateModel(
name='Comment',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('body', models.TextField()),
('date', models.DateTimeField(auto_now_add=True)),
],
),
migrations.CreateModel(
name='Reply',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('body', models.TextField()),
('date', models.DateTimeField(auto_now_add=True)),
('comment', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='PleniApp.Comment')),
],
),
migrations.CreateModel(
name='User',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('username', models.CharField(max_length=50)),
('password', models.CharField(max_length=50)),
('user_type', models.CharField(default='regular', max_length=20)),
],
),
migrations.AddField(
model_name='comment',
name='user',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='PleniApp.User'),
),
]
|
normal
|
{
"blob_id": "ccb6973910dba5897f6a12be23c74a35e848313b",
"index": 4005,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass Migration(migrations.Migration):\n <mask token>\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass Migration(migrations.Migration):\n dependencies = [('PleniApp', '0006_auto_20181203_1144')]\n operations = [migrations.CreateModel(name='Comment', fields=[('id',\n models.AutoField(auto_created=True, primary_key=True, serialize=\n False, verbose_name='ID')), ('body', models.TextField()), ('date',\n models.DateTimeField(auto_now_add=True))]), migrations.CreateModel(\n name='Reply', fields=[('id', models.AutoField(auto_created=True,\n primary_key=True, serialize=False, verbose_name='ID')), ('body',\n models.TextField()), ('date', models.DateTimeField(auto_now_add=\n True)), ('comment', models.ForeignKey(on_delete=django.db.models.\n deletion.CASCADE, to='PleniApp.Comment'))]), migrations.CreateModel\n (name='User', fields=[('id', models.AutoField(auto_created=True,\n primary_key=True, serialize=False, verbose_name='ID')), ('username',\n models.CharField(max_length=50)), ('password', models.CharField(\n max_length=50)), ('user_type', models.CharField(default='regular',\n max_length=20))]), migrations.AddField(model_name='comment', name=\n 'user', field=models.ForeignKey(on_delete=django.db.models.deletion\n .CASCADE, to='PleniApp.User'))]\n",
"step-4": "from django.db import migrations, models\nimport django.db.models.deletion\n\n\nclass Migration(migrations.Migration):\n dependencies = [('PleniApp', '0006_auto_20181203_1144')]\n operations = [migrations.CreateModel(name='Comment', fields=[('id',\n models.AutoField(auto_created=True, primary_key=True, serialize=\n False, verbose_name='ID')), ('body', models.TextField()), ('date',\n models.DateTimeField(auto_now_add=True))]), migrations.CreateModel(\n name='Reply', fields=[('id', models.AutoField(auto_created=True,\n primary_key=True, serialize=False, verbose_name='ID')), ('body',\n models.TextField()), ('date', models.DateTimeField(auto_now_add=\n True)), ('comment', models.ForeignKey(on_delete=django.db.models.\n deletion.CASCADE, to='PleniApp.Comment'))]), migrations.CreateModel\n (name='User', fields=[('id', models.AutoField(auto_created=True,\n primary_key=True, serialize=False, verbose_name='ID')), ('username',\n models.CharField(max_length=50)), ('password', models.CharField(\n max_length=50)), ('user_type', models.CharField(default='regular',\n max_length=20))]), migrations.AddField(model_name='comment', name=\n 'user', field=models.ForeignKey(on_delete=django.db.models.deletion\n .CASCADE, to='PleniApp.User'))]\n",
"step-5": "# Generated by Django 2.1 on 2018-12-05 00:02\n\nfrom django.db import migrations, models\nimport django.db.models.deletion\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('PleniApp', '0006_auto_20181203_1144'),\n ]\n\n operations = [\n migrations.CreateModel(\n name='Comment',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('body', models.TextField()),\n ('date', models.DateTimeField(auto_now_add=True)),\n ],\n ),\n migrations.CreateModel(\n name='Reply',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('body', models.TextField()),\n ('date', models.DateTimeField(auto_now_add=True)),\n ('comment', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='PleniApp.Comment')),\n ],\n ),\n migrations.CreateModel(\n name='User',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('username', models.CharField(max_length=50)),\n ('password', models.CharField(max_length=50)),\n ('user_type', models.CharField(default='regular', max_length=20)),\n ],\n ),\n migrations.AddField(\n model_name='comment',\n name='user',\n field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='PleniApp.User'),\n ),\n ]\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
import sys
import HTSeq
import re
import string
import glob
import os
import time
import difflib
import argparse
def parse_input():
parser = argparse.ArgumentParser(description="""
USAGE: python make_figs.py -f data_file
""")
# If the -b option is used, tRNAs with no tails are not counted.
# This speeds up the removal of duplicates for large datasets
#parser.add_option("-b", "--blanks", action="store_false", dest="includeBlankTails", default=True)
parser.add_argument("-f", "--data_file", action="store",
dest="data_file",
help="Filename of data.")
args = parser.parse_args()
return args
def write_most_common_tails(inserts, base_filename, control=False):
for exp in inserts:
with open("%s_%s" % (base_filename,
os.path.basename(exp).rstrip('.inserts').rstrip(
'.fastq')),
'w') as f:
if(not control):
lines = inserts[exp].write_table_of_most_common_tails(control)
if(control):
lines = inserts[exp].write_table_of_most_common_tails(
control, get_pvalues=True)
f.write(lines)
def parse_data_file(filename):
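    # Parse the tab-delimited tail report into a dict keyed by dataset name, with per-field value lists.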
data = {}
print "Opening %s with file size %i..." % (
filename, os.path.getsize(filename))
with open(filename, 'r') as f:
dataset = ""
for li in f:
#print li
s = li.strip('\n').split('\t')
m = re.match(r'number tails in ([^:]+):.*', li)
if(m is not None):
dataset = m.group(1)
dataset = os.path.basename(dataset)
cur_dataset = dataset
data[dataset] = {'n_tails': s[1:]}
continue
m = re.match(r'([AGCTN]):.*', s[0])
if(m is not None):
data[dataset][m.group(1)] = s[1:]
continue
m = re.match(r'tail length:.*', li)
if(m is not None):
data[dataset]['tail_len'] = s[1:]
continue
m = re.match(r'.*Number of unique.*', li)
if(m is not None):
data[dataset]['n_unique'] = s[1:]
continue
return data
def check_data_agreement(data):
for exp in data:
max_range = min(len(data[exp]['n_tails']),
len(data[exp]['tail_len']),
len(data[exp]['n_unique']))
n_tails = 0
for index in range(1, max_range-1):
try:
n_tails += float(data[exp]['n_tails'][index])
except:
print "Error at %s, %i" % (exp, index)
print "%s: total tails=%f" % (exp, n_tails)
def write_for_R(data, src_path):
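    # Write one tab-separated table per experiment (tail length, tail counts, unique tails, base composition) for plotting in R.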
src_path = os.path.dirname(os.path.realpath(__file__))
files_for_R = list()
check_data_agreement(data)
for exp in data:
with open("%s/figs/%s.forR" % (
src_path, exp.rstrip('.fastq.inserts')
), 'w') as f:
li = "tail_len\tn_tails\tn_unique\tA\tC\tT\tG\n"
max_range = min(len(data[exp]['n_tails']),
len(data[exp]['tail_len']),
len(data[exp]['n_unique']))
for index in range(0, max_range):
li += "%s\t%s\t%s\t%s\t%s\t%s\t%s\n" % (
data[exp]['tail_len'][index],
data[exp]['n_tails'][index],
data[exp]['n_unique'][index],
data[exp]['A'][index],
data[exp]['C'][index],
data[exp]['T'][index],
data[exp]['G'][index])
f.write(li)
files_for_R.append("%s/figs/%s.forR" % (
src_path, exp.rstrip('.fastq.inserts')))
return files_for_R
def r_script_for_barplot(files_for_R, src_path):
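    # Build a temporary R script for each table and run it with "R CMD BATCH" to produce EPS barplots and line plots.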
for filename in files_for_R:
li = """
f = read.table("%s", head=T)""" % filename
li += """
bases = as.data.frame(cbind(f$A, f$C, f$T, f$G))
m = as.matrix(bases)
outfname = "%s/figs/barplot_%s.eps"
""" % (src_path, os.path.basename(filename))
li += r'''
library(RColorBrewer)
my_cols <- brewer.pal(4, "RdBu")
setEPS(width=5,height=3); postscript(outfname)
barplot(t(m), xlab = 'Tail length',
ylab = 'Percent base composition',
legend=c('A','C','T','G'), col=my_cols)
dev.off()
'''
li += """
outfname = "%s/figs/plot_%s.eps"
""" % (src_path, os.path.basename(filename))
li += r'''
library(RColorBrewer)
my_cols <- brewer.pal(4, "RdBu")
setEPS(width=5,height=10); postscript(outfname)
par(mfrow=c(3,1))
plot(f$n_tails, x=f$tail_len, type='l', xlab='Tail length',
ylab='Number of tails')
plot(f$n_unique, x=f$tail_len, type='l', xlab='Tail length',
ylab='Number of unique tails')
barplot(t(m), xlab = 'Tail length',
ylab = 'Percent base composition',
legend=c('A','C','T','G'), col=my_cols)
dev.off()
'''
with open('tmp.r', 'w') as f:
f.write(li)
cmdl = """R CMD BATCH tmp.r"""
os.system(cmdl)
def make_figs(data_filename, src_path):
print "In make_figs. Processing file %s" % data_filename
data = parse_data_file(data_filename)
if(not os.path.exists(src_path + "/figs")):
print "making %s/figs" % src_path
os.system("mkdir %s/figs" % src_path)
files_for_R = write_for_R(data, src_path)
r_script_for_barplot(files_for_R, src_path)
if __name__ == '__main__':
src_path = os.path.dirname(os.path.realpath(__file__))
args = parse_input()
data = parse_data_file(args.data_file)
if(not os.path.exists(src_path + '/figs')):
os.system('mkdir ' + src_path + '/figs')
    files_for_R = write_for_R(data, src_path)
    r_script_for_barplot(files_for_R, src_path)
|
normal
|
{
"blob_id": "05f5931a53c9916f151f42910575f9c5533bfceb",
"index": 9921,
"step-1": "import sys\nimport HTSeq\nimport re\nimport string\nimport glob\nimport os\nimport time\nimport difflib\nimport argparse\n\n\ndef parse_input():\n parser = argparse.ArgumentParser(description=\"\"\"\n USAGE: python make_figs.py -f data_file\n \"\"\")\n\n # If the -b option is used, tRNAs with no tails are not counted.\n # This speeds up the removal of duplicates for large datasets\n #parser.add_option(\"-b\", \"--blanks\", action=\"store_false\", dest=\"includeBlankTails\", default=True)\n\n parser.add_argument(\"-f\", \"--data_file\", action=\"store\",\n dest=\"data_file\",\n help=\"Filename of data.\")\n args = parser.parse_args()\n return args\n\n\ndef write_most_common_tails(inserts, base_filename, control=False):\n for exp in inserts:\n with open(\"%s_%s\" % (base_filename,\n os.path.basename(exp).rstrip('.inserts').rstrip(\n '.fastq')),\n 'w') as f:\n if(not control):\n lines = inserts[exp].write_table_of_most_common_tails(control)\n if(control):\n lines = inserts[exp].write_table_of_most_common_tails(\n control, get_pvalues=True)\n f.write(lines)\n\n\ndef parse_data_file(filename):\n data = {}\n print \"Opening %s with file size %i...\" % (\n filename, os.path.getsize(filename))\n with open(filename, 'r') as f:\n dataset = \"\"\n for li in f:\n #print li\n s = li.strip('\\n').split('\\t')\n m = re.match(r'number tails in ([^:]+):.*', li)\n if(m is not None):\n dataset = m.group(1)\n dataset = os.path.basename(dataset)\n cur_dataset = dataset\n data[dataset] = {'n_tails': s[1:]}\n continue\n m = re.match(r'([AGCTN]):.*', s[0])\n if(m is not None):\n data[dataset][m.group(1)] = s[1:]\n continue\n m = re.match(r'tail length:.*', li)\n if(m is not None):\n data[dataset]['tail_len'] = s[1:]\n continue\n m = re.match(r'.*Number of unique.*', li)\n if(m is not None):\n data[dataset]['n_unique'] = s[1:]\n continue\n return data\n \n\ndef check_data_agreement(data):\n for exp in data:\n max_range = min(len(data[exp]['n_tails']),\n len(data[exp]['tail_len']),\n len(data[exp]['n_unique']))\n n_tails = 0\n for index in range(1, max_range-1):\n try:\n n_tails += float(data[exp]['n_tails'][index])\n except:\n print \"Error at %s, %i\" % (exp, index)\n print \"%s: total tails=%f\" % (exp, n_tails)\n \n\ndef write_for_R(data, src_path):\n src_path = os.path.dirname(os.path.realpath(__file__))\n files_for_R = list()\n check_data_agreement(data)\n for exp in data:\n with open(\"%s/figs/%s.forR\" % (\n src_path, exp.rstrip('.fastq.inserts')\n ), 'w') as f:\n li = \"tail_len\\tn_tails\\tn_unique\\tA\\tC\\tT\\tG\\n\"\n max_range = min(len(data[exp]['n_tails']),\n len(data[exp]['tail_len']),\n len(data[exp]['n_unique']))\n for index in range(0, max_range):\n li += \"%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\n\" % (\n data[exp]['tail_len'][index],\n data[exp]['n_tails'][index],\n data[exp]['n_unique'][index],\n data[exp]['A'][index],\n data[exp]['C'][index],\n data[exp]['T'][index],\n data[exp]['G'][index])\n f.write(li)\n files_for_R.append(\"%s/figs/%s.forR\" % (\n src_path, exp.rstrip('.fastq.inserts')))\n return files_for_R\n\n\ndef r_script_for_barplot(files_for_R, src_path):\n for filename in files_for_R:\n li = \"\"\"\n f = read.table(\"%s\", head=T)\"\"\" % filename\n li += \"\"\"\n bases = as.data.frame(cbind(f$A, f$C, f$T, f$G))\n m = as.matrix(bases)\n outfname = \"%s/figs/barplot_%s.eps\"\n \"\"\" % (src_path, os.path.basename(filename))\n li += r'''\n library(RColorBrewer)\n my_cols <- brewer.pal(4, \"RdBu\")\n setEPS(width=5,height=3); postscript(outfname)\n barplot(t(m), xlab = 'Tail 
length',\n ylab = 'Percent base composition',\n legend=c('A','C','T','G'), col=my_cols)\n dev.off()\n '''\n li += \"\"\"\n outfname = \"%s/figs/plot_%s.eps\"\n\"\"\" % (src_path, os.path.basename(filename))\n li += r'''\n library(RColorBrewer)\n my_cols <- brewer.pal(4, \"RdBu\")\n setEPS(width=5,height=10); postscript(outfname)\n par(mfrow=c(3,1))\n plot(f$n_tails, x=f$tail_len, type='l', xlab='Tail length',\n ylab='Number of tails')\n plot(f$n_unique, x=f$tail_len, type='l', xlab='Tail length',\n ylab='Number of unique tails')\n barplot(t(m), xlab = 'Tail length',\n ylab = 'Percent base composition',\n legend=c('A','C','T','G'), col=my_cols)\n dev.off()\n '''\n with open('tmp.r', 'w') as f:\n f.write(li)\n cmdl = \"\"\"R CMD BATCH tmp.r\"\"\"\n os.system(cmdl)\n\n\ndef make_figs(data_filename, src_path):\n print \"In make_figs. Processing file %s\" % data_filename\n data = parse_data_file(data_filename)\n if(not os.path.exists(src_path + \"/figs\")):\n print \"making %s/figs\" % src_path\n os.system(\"mkdir %s/figs\" % src_path)\n files_for_R = write_for_R(data, src_path)\n r_script_for_barplot(files_for_R, src_path)\n\n \nif __name__ == '__main__':\n src_path = os.path.dirname(os.path.realpath(__file__))\n args = parse_input()\n data = parse_data_file(args.data_file)\n if(not os.path.exists(src_path + '/figs')):\n os.system('mkdir ' + src_path + '/figs')\n files_for_R = write_for_R(data)\n r_script_for_barplot(files_for_R)\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
"""
"""
import os
import json
import csv
cutoff = float(input("Tolerance (decimal)? "))
docpath = "C:/Users/RackS/Documents/"
out = open("isosegmenter_scoring_error"+str(cutoff*100)+".csv", 'w', encoding='UTF-8')
summary = open("isosegmenter_score_summary_error"+str(cutoff*100)+".txt", 'w', encoding='UTF-8')
out.write("SEQUENCE_ID,TYPE,DOMAINS,TP,FP,FN,Sens,PPV,Jaccard\n")
tp_eq = 0
fp_eq = 0
fn_eq = 0
for file in os.listdir(docpath+"isoSegmenter100"):
if file.endswith(".csv") and "E" in file:
predict_data = csv.DictReader(open(docpath+"isoSegmenter100/"+file, 'r', encoding='UTF-8'))
seqid = file.replace(".csv", "")
with open(docpath+"ground_truth100/"+seqid+".json", 'r', encoding='UTF-8') as json_file:
truth_data = json.load(json_file)
true_boundaries = []
tp_seq = 0
fp_seq = 0
fn_seq = 0
for i in range(0, int(truth_data['tot_length']) + 1, int(truth_data['domain_length'])):
true_boundaries.append(i)
for pred_domain in predict_data:
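            # A predicted domain counts as a true positive only if both its start and end lie within the tolerance of one true domain's boundaries.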
matched = False
for i in range(0, len(true_boundaries) - 1):
startdiff = int(pred_domain['Start']) - true_boundaries[i]
enddiff = int(pred_domain['End']) - true_boundaries[i+1]
tolerance = cutoff*(true_boundaries[i+1] - true_boundaries[i])
if abs(startdiff) <= tolerance:
if abs(enddiff) <= tolerance:
tp_seq += 1
matched = True
print(seqid)
print("START MATCH: " + str(true_boundaries[i]) + ", " + pred_domain['Start'])
print("END MATCH: " + str(true_boundaries[i+1]) + ", " + pred_domain['End'])
print("DIFFERENCES: " + str(startdiff) + ", " + str(enddiff) + ", TOLERANCE = " + str(tolerance))
print()
break
if not matched:
fp_seq += 1
fn_seq = int(truth_data['domains']) - tp_seq
tp_eq += tp_seq
fp_eq += fp_seq
fn_eq += fn_seq
sensitivity = round(tp_seq/(tp_seq + fn_seq), 5)
ppv = round(tp_seq/(tp_seq+fp_seq), 5)
jaccard = round(tp_seq/(tp_seq + fp_seq + fn_seq), 5)
out.write(seqid+",E,"+str(truth_data['domains'])+","+str(tp_seq)+","+str(fp_seq)+","+str(fn_seq)+","+str(sensitivity)+","+str(ppv)+","+str(jaccard)+"\n")
summary.write("EQUAL-LENGTH STATISTICS\n")
summary.write("TP equal domain: " + str(tp_eq) + "\n")
summary.write("FP equal domain: " + str(fp_eq) + "\n")
summary.write("FN equal domain: " + str(fn_eq) + "\n")
summary.write("Sensitivity: " + str(round(tp_eq/(tp_eq + fn_eq),5)) + "\n")
summary.write("Precision(PPV): " + str(round(tp_eq/(tp_eq + fp_eq),5)) + "\n")
summary.write("Jaccard Index: " + str(round(tp_eq/(tp_eq + fp_eq + fn_eq),5)) + "\n\n")
tp_var = 0
fp_var = 0
fn_var = 0
for file in os.listdir(docpath+"isoSegmenter100"):
if file.endswith(".csv") and "V" in file:
predict_data = csv.DictReader(open(docpath+"isoSegmenter100/"+file, 'r', encoding='UTF-8'))
seqid = file.replace(".csv", "")
with open(docpath+"ground_truth100/"+seqid+".json", 'r', encoding='UTF-8') as json_file:
truth_data = json.load(json_file)
true_boundaries = [1]
tp_seq = 0
fp_seq = 0
fn_seq = 0
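        # Variable-length domains: accumulate the true boundaries from each domain's recorded length.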
for i in range(1, int(truth_data['domains']) + 1):
b_next = true_boundaries[i-1] + int(truth_data['length_'+str(i)])
true_boundaries.append(b_next)
for pred_domain in predict_data:
matched = False
for i in range(0, len(true_boundaries) - 1):
startdiff = int(pred_domain['Start']) - true_boundaries[i]
enddiff = int(pred_domain['End']) - true_boundaries[i+1]
tolerance = cutoff*(true_boundaries[i+1] - true_boundaries[i])
if abs(startdiff) <= tolerance:
if abs(enddiff) <= tolerance:
tp_seq += 1
matched = True
print(seqid)
print("START MATCH: " + str(true_boundaries[i]) + ", " + pred_domain['Start'])
print("END MATCH: " + str(true_boundaries[i+1]) + ", " + pred_domain['End'])
print("DIFFERENCES: " + str(startdiff) + ", " + str(enddiff) + ", TOLERANCE = " + str(tolerance))
print()
break
if not matched:
fp_seq += 1
fn_seq = int(truth_data['domains']) - tp_seq
tp_var += tp_seq
fp_var += fp_seq
fn_var += fn_seq
sensitivity = round(tp_seq/(tp_seq + fn_seq), 5)
ppv = round(tp_seq/(tp_seq+fp_seq), 5)
jaccard = round(tp_seq/(tp_seq + fp_seq + fn_seq), 5)
out.write(seqid+",V,"+str(truth_data['domains'])+","+str(tp_seq)+","+str(fp_seq)+","+str(fn_seq)+","+str(sensitivity)+","+str(ppv)+","+str(jaccard)+"\n")
summary.write("VARIABLE-LENGTH STATISTICS\n")
summary.write("TP equal domain: " + str(tp_var) + "\n")
summary.write("FP equal domain: " + str(fp_var) + "\n")
summary.write("FN equal domain: " + str(fn_var) + "\n")
summary.write("Sensitivity: " + str(round(tp_var/(tp_var + fn_var),5)) + "\n")
summary.write("Precision(PPV): " + str(round(tp_var/(tp_var + fp_var),5)) + "\n")
summary.write("Jaccard Index: " + str(round(tp_var/(tp_var + fp_var + fn_var),5)) + "\n\n")
summary.write("OVERALL STATISTICS\n")
summary.write("TP: " + str(tp_var + tp_eq) + "\n")
summary.write("FP: " + str(fp_var + fp_eq) + "\n")
summary.write("FN: " + str(fn_var + fn_eq) + "\n")
summary.write("Sensitivity: " + str(round((tp_var + tp_eq)/(tp_var + fn_var + tp_eq + fn_eq),5)) + "\n")
summary.write("Precision(PPV): " + str(round((tp_var + tp_eq)/(tp_var + fp_var + tp_eq + fp_eq),5)) + "\n")
summary.write("Jaccard Index: " + str(round((tp_var + tp_eq)/(tp_var + fp_var + fn_var + tp_eq + fp_eq + fn_eq),5)) + "\n")
|
normal
|
{
"blob_id": "af2aa236f6bfc582093faf868a374be1ebdfabf2",
"index": 1235,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nout.write('SEQUENCE_ID,TYPE,DOMAINS,TP,FP,FN,Sens,PPV,Jaccard\\n')\n<mask token>\nfor file in os.listdir(docpath + 'isoSegmenter100'):\n if file.endswith('.csv') and 'E' in file:\n predict_data = csv.DictReader(open(docpath + 'isoSegmenter100/' +\n file, 'r', encoding='UTF-8'))\n seqid = file.replace('.csv', '')\n with open(docpath + 'ground_truth100/' + seqid + '.json', 'r',\n encoding='UTF-8') as json_file:\n truth_data = json.load(json_file)\n true_boundaries = []\n tp_seq = 0\n fp_seq = 0\n fn_seq = 0\n for i in range(0, int(truth_data['tot_length']) + 1, int(truth_data\n ['domain_length'])):\n true_boundaries.append(i)\n for pred_domain in predict_data:\n matched = False\n for i in range(0, len(true_boundaries) - 1):\n startdiff = int(pred_domain['Start']) - true_boundaries[i]\n enddiff = int(pred_domain['End']) - true_boundaries[i + 1]\n tolerance = cutoff * (true_boundaries[i + 1] -\n true_boundaries[i])\n if abs(startdiff) <= tolerance:\n if abs(enddiff) <= tolerance:\n tp_seq += 1\n matched = True\n print(seqid)\n print('START MATCH: ' + str(true_boundaries[i]) +\n ', ' + pred_domain['Start'])\n print('END MATCH: ' + str(true_boundaries[i + 1]) +\n ', ' + pred_domain['End'])\n print('DIFFERENCES: ' + str(startdiff) + ', ' + str\n (enddiff) + ', TOLERANCE = ' + str(tolerance))\n print()\n break\n if not matched:\n fp_seq += 1\n fn_seq = int(truth_data['domains']) - tp_seq\n tp_eq += tp_seq\n fp_eq += fp_seq\n fn_eq += fn_seq\n sensitivity = round(tp_seq / (tp_seq + fn_seq), 5)\n ppv = round(tp_seq / (tp_seq + fp_seq), 5)\n jaccard = round(tp_seq / (tp_seq + fp_seq + fn_seq), 5)\n out.write(seqid + ',E,' + str(truth_data['domains']) + ',' + str(\n tp_seq) + ',' + str(fp_seq) + ',' + str(fn_seq) + ',' + str(\n sensitivity) + ',' + str(ppv) + ',' + str(jaccard) + '\\n')\nsummary.write('EQUAL-LENGTH STATISTICS\\n')\nsummary.write('TP equal domain: ' + str(tp_eq) + '\\n')\nsummary.write('FP equal domain: ' + str(fp_eq) + '\\n')\nsummary.write('FN equal domain: ' + str(fn_eq) + '\\n')\nsummary.write('Sensitivity: ' + str(round(tp_eq / (tp_eq + fn_eq), 5)) + '\\n')\nsummary.write('Precision(PPV): ' + str(round(tp_eq / (tp_eq + fp_eq), 5)) +\n '\\n')\nsummary.write('Jaccard Index: ' + str(round(tp_eq / (tp_eq + fp_eq + fn_eq),\n 5)) + '\\n\\n')\n<mask token>\nfor file in os.listdir(docpath + 'isoSegmenter100'):\n if file.endswith('.csv') and 'V' in file:\n predict_data = csv.DictReader(open(docpath + 'isoSegmenter100/' +\n file, 'r', encoding='UTF-8'))\n seqid = file.replace('.csv', '')\n with open(docpath + 'ground_truth100/' + seqid + '.json', 'r',\n encoding='UTF-8') as json_file:\n truth_data = json.load(json_file)\n true_boundaries = [1]\n tp_seq = 0\n fp_seq = 0\n fn_seq = 0\n for i in range(1, int(truth_data['domains']) + 1):\n b_next = true_boundaries[i - 1] + int(truth_data['length_' +\n str(i)])\n true_boundaries.append(b_next)\n for pred_domain in predict_data:\n matched = False\n for i in range(0, len(true_boundaries) - 1):\n startdiff = int(pred_domain['Start']) - true_boundaries[i]\n enddiff = int(pred_domain['End']) - true_boundaries[i + 1]\n tolerance = cutoff * (true_boundaries[i + 1] -\n true_boundaries[i])\n if abs(startdiff) <= tolerance:\n if abs(enddiff) <= tolerance:\n tp_seq += 1\n matched = True\n print(seqid)\n print('START MATCH: ' + str(true_boundaries[i]) +\n ', ' + pred_domain['Start'])\n print('END MATCH: ' + str(true_boundaries[i + 1]) +\n ', ' + pred_domain['End'])\n print('DIFFERENCES: ' + str(startdiff) + ', ' + str\n (enddiff) + 
', TOLERANCE = ' + str(tolerance))\n print()\n break\n if not matched:\n fp_seq += 1\n fn_seq = int(truth_data['domains']) - tp_seq\n tp_var += tp_seq\n fp_var += fp_seq\n fn_var += fn_seq\n sensitivity = round(tp_seq / (tp_seq + fn_seq), 5)\n ppv = round(tp_seq / (tp_seq + fp_seq), 5)\n jaccard = round(tp_seq / (tp_seq + fp_seq + fn_seq), 5)\n out.write(seqid + ',V,' + str(truth_data['domains']) + ',' + str(\n tp_seq) + ',' + str(fp_seq) + ',' + str(fn_seq) + ',' + str(\n sensitivity) + ',' + str(ppv) + ',' + str(jaccard) + '\\n')\nsummary.write('VARIABLE-LENGTH STATISTICS\\n')\nsummary.write('TP equal domain: ' + str(tp_var) + '\\n')\nsummary.write('FP equal domain: ' + str(fp_var) + '\\n')\nsummary.write('FN equal domain: ' + str(fn_var) + '\\n')\nsummary.write('Sensitivity: ' + str(round(tp_var / (tp_var + fn_var), 5)) +\n '\\n')\nsummary.write('Precision(PPV): ' + str(round(tp_var / (tp_var + fp_var), 5)\n ) + '\\n')\nsummary.write('Jaccard Index: ' + str(round(tp_var / (tp_var + fp_var +\n fn_var), 5)) + '\\n\\n')\nsummary.write('OVERALL STATISTICS\\n')\nsummary.write('TP: ' + str(tp_var + tp_eq) + '\\n')\nsummary.write('FP: ' + str(fp_var + fp_eq) + '\\n')\nsummary.write('FN: ' + str(fn_var + fn_eq) + '\\n')\nsummary.write('Sensitivity: ' + str(round((tp_var + tp_eq) / (tp_var +\n fn_var + tp_eq + fn_eq), 5)) + '\\n')\nsummary.write('Precision(PPV): ' + str(round((tp_var + tp_eq) / (tp_var +\n fp_var + tp_eq + fp_eq), 5)) + '\\n')\nsummary.write('Jaccard Index: ' + str(round((tp_var + tp_eq) / (tp_var +\n fp_var + fn_var + tp_eq + fp_eq + fn_eq), 5)) + '\\n')\n",
"step-3": "<mask token>\ncutoff = float(input('Tolerance (decimal)? '))\ndocpath = 'C:/Users/RackS/Documents/'\nout = open('isosegmenter_scoring_error' + str(cutoff * 100) + '.csv', 'w',\n encoding='UTF-8')\nsummary = open('isosegmenter_score_summary_error' + str(cutoff * 100) +\n '.txt', 'w', encoding='UTF-8')\nout.write('SEQUENCE_ID,TYPE,DOMAINS,TP,FP,FN,Sens,PPV,Jaccard\\n')\ntp_eq = 0\nfp_eq = 0\nfn_eq = 0\nfor file in os.listdir(docpath + 'isoSegmenter100'):\n if file.endswith('.csv') and 'E' in file:\n predict_data = csv.DictReader(open(docpath + 'isoSegmenter100/' +\n file, 'r', encoding='UTF-8'))\n seqid = file.replace('.csv', '')\n with open(docpath + 'ground_truth100/' + seqid + '.json', 'r',\n encoding='UTF-8') as json_file:\n truth_data = json.load(json_file)\n true_boundaries = []\n tp_seq = 0\n fp_seq = 0\n fn_seq = 0\n for i in range(0, int(truth_data['tot_length']) + 1, int(truth_data\n ['domain_length'])):\n true_boundaries.append(i)\n for pred_domain in predict_data:\n matched = False\n for i in range(0, len(true_boundaries) - 1):\n startdiff = int(pred_domain['Start']) - true_boundaries[i]\n enddiff = int(pred_domain['End']) - true_boundaries[i + 1]\n tolerance = cutoff * (true_boundaries[i + 1] -\n true_boundaries[i])\n if abs(startdiff) <= tolerance:\n if abs(enddiff) <= tolerance:\n tp_seq += 1\n matched = True\n print(seqid)\n print('START MATCH: ' + str(true_boundaries[i]) +\n ', ' + pred_domain['Start'])\n print('END MATCH: ' + str(true_boundaries[i + 1]) +\n ', ' + pred_domain['End'])\n print('DIFFERENCES: ' + str(startdiff) + ', ' + str\n (enddiff) + ', TOLERANCE = ' + str(tolerance))\n print()\n break\n if not matched:\n fp_seq += 1\n fn_seq = int(truth_data['domains']) - tp_seq\n tp_eq += tp_seq\n fp_eq += fp_seq\n fn_eq += fn_seq\n sensitivity = round(tp_seq / (tp_seq + fn_seq), 5)\n ppv = round(tp_seq / (tp_seq + fp_seq), 5)\n jaccard = round(tp_seq / (tp_seq + fp_seq + fn_seq), 5)\n out.write(seqid + ',E,' + str(truth_data['domains']) + ',' + str(\n tp_seq) + ',' + str(fp_seq) + ',' + str(fn_seq) + ',' + str(\n sensitivity) + ',' + str(ppv) + ',' + str(jaccard) + '\\n')\nsummary.write('EQUAL-LENGTH STATISTICS\\n')\nsummary.write('TP equal domain: ' + str(tp_eq) + '\\n')\nsummary.write('FP equal domain: ' + str(fp_eq) + '\\n')\nsummary.write('FN equal domain: ' + str(fn_eq) + '\\n')\nsummary.write('Sensitivity: ' + str(round(tp_eq / (tp_eq + fn_eq), 5)) + '\\n')\nsummary.write('Precision(PPV): ' + str(round(tp_eq / (tp_eq + fp_eq), 5)) +\n '\\n')\nsummary.write('Jaccard Index: ' + str(round(tp_eq / (tp_eq + fp_eq + fn_eq),\n 5)) + '\\n\\n')\ntp_var = 0\nfp_var = 0\nfn_var = 0\nfor file in os.listdir(docpath + 'isoSegmenter100'):\n if file.endswith('.csv') and 'V' in file:\n predict_data = csv.DictReader(open(docpath + 'isoSegmenter100/' +\n file, 'r', encoding='UTF-8'))\n seqid = file.replace('.csv', '')\n with open(docpath + 'ground_truth100/' + seqid + '.json', 'r',\n encoding='UTF-8') as json_file:\n truth_data = json.load(json_file)\n true_boundaries = [1]\n tp_seq = 0\n fp_seq = 0\n fn_seq = 0\n for i in range(1, int(truth_data['domains']) + 1):\n b_next = true_boundaries[i - 1] + int(truth_data['length_' +\n str(i)])\n true_boundaries.append(b_next)\n for pred_domain in predict_data:\n matched = False\n for i in range(0, len(true_boundaries) - 1):\n startdiff = int(pred_domain['Start']) - true_boundaries[i]\n enddiff = int(pred_domain['End']) - true_boundaries[i + 1]\n tolerance = cutoff * (true_boundaries[i + 1] -\n true_boundaries[i])\n if 
abs(startdiff) <= tolerance:\n if abs(enddiff) <= tolerance:\n tp_seq += 1\n matched = True\n print(seqid)\n print('START MATCH: ' + str(true_boundaries[i]) +\n ', ' + pred_domain['Start'])\n print('END MATCH: ' + str(true_boundaries[i + 1]) +\n ', ' + pred_domain['End'])\n print('DIFFERENCES: ' + str(startdiff) + ', ' + str\n (enddiff) + ', TOLERANCE = ' + str(tolerance))\n print()\n break\n if not matched:\n fp_seq += 1\n fn_seq = int(truth_data['domains']) - tp_seq\n tp_var += tp_seq\n fp_var += fp_seq\n fn_var += fn_seq\n sensitivity = round(tp_seq / (tp_seq + fn_seq), 5)\n ppv = round(tp_seq / (tp_seq + fp_seq), 5)\n jaccard = round(tp_seq / (tp_seq + fp_seq + fn_seq), 5)\n out.write(seqid + ',V,' + str(truth_data['domains']) + ',' + str(\n tp_seq) + ',' + str(fp_seq) + ',' + str(fn_seq) + ',' + str(\n sensitivity) + ',' + str(ppv) + ',' + str(jaccard) + '\\n')\nsummary.write('VARIABLE-LENGTH STATISTICS\\n')\nsummary.write('TP equal domain: ' + str(tp_var) + '\\n')\nsummary.write('FP equal domain: ' + str(fp_var) + '\\n')\nsummary.write('FN equal domain: ' + str(fn_var) + '\\n')\nsummary.write('Sensitivity: ' + str(round(tp_var / (tp_var + fn_var), 5)) +\n '\\n')\nsummary.write('Precision(PPV): ' + str(round(tp_var / (tp_var + fp_var), 5)\n ) + '\\n')\nsummary.write('Jaccard Index: ' + str(round(tp_var / (tp_var + fp_var +\n fn_var), 5)) + '\\n\\n')\nsummary.write('OVERALL STATISTICS\\n')\nsummary.write('TP: ' + str(tp_var + tp_eq) + '\\n')\nsummary.write('FP: ' + str(fp_var + fp_eq) + '\\n')\nsummary.write('FN: ' + str(fn_var + fn_eq) + '\\n')\nsummary.write('Sensitivity: ' + str(round((tp_var + tp_eq) / (tp_var +\n fn_var + tp_eq + fn_eq), 5)) + '\\n')\nsummary.write('Precision(PPV): ' + str(round((tp_var + tp_eq) / (tp_var +\n fp_var + tp_eq + fp_eq), 5)) + '\\n')\nsummary.write('Jaccard Index: ' + str(round((tp_var + tp_eq) / (tp_var +\n fp_var + fn_var + tp_eq + fp_eq + fn_eq), 5)) + '\\n')\n",
"step-4": "<mask token>\nimport os\nimport json\nimport csv\ncutoff = float(input('Tolerance (decimal)? '))\ndocpath = 'C:/Users/RackS/Documents/'\nout = open('isosegmenter_scoring_error' + str(cutoff * 100) + '.csv', 'w',\n encoding='UTF-8')\nsummary = open('isosegmenter_score_summary_error' + str(cutoff * 100) +\n '.txt', 'w', encoding='UTF-8')\nout.write('SEQUENCE_ID,TYPE,DOMAINS,TP,FP,FN,Sens,PPV,Jaccard\\n')\ntp_eq = 0\nfp_eq = 0\nfn_eq = 0\nfor file in os.listdir(docpath + 'isoSegmenter100'):\n if file.endswith('.csv') and 'E' in file:\n predict_data = csv.DictReader(open(docpath + 'isoSegmenter100/' +\n file, 'r', encoding='UTF-8'))\n seqid = file.replace('.csv', '')\n with open(docpath + 'ground_truth100/' + seqid + '.json', 'r',\n encoding='UTF-8') as json_file:\n truth_data = json.load(json_file)\n true_boundaries = []\n tp_seq = 0\n fp_seq = 0\n fn_seq = 0\n for i in range(0, int(truth_data['tot_length']) + 1, int(truth_data\n ['domain_length'])):\n true_boundaries.append(i)\n for pred_domain in predict_data:\n matched = False\n for i in range(0, len(true_boundaries) - 1):\n startdiff = int(pred_domain['Start']) - true_boundaries[i]\n enddiff = int(pred_domain['End']) - true_boundaries[i + 1]\n tolerance = cutoff * (true_boundaries[i + 1] -\n true_boundaries[i])\n if abs(startdiff) <= tolerance:\n if abs(enddiff) <= tolerance:\n tp_seq += 1\n matched = True\n print(seqid)\n print('START MATCH: ' + str(true_boundaries[i]) +\n ', ' + pred_domain['Start'])\n print('END MATCH: ' + str(true_boundaries[i + 1]) +\n ', ' + pred_domain['End'])\n print('DIFFERENCES: ' + str(startdiff) + ', ' + str\n (enddiff) + ', TOLERANCE = ' + str(tolerance))\n print()\n break\n if not matched:\n fp_seq += 1\n fn_seq = int(truth_data['domains']) - tp_seq\n tp_eq += tp_seq\n fp_eq += fp_seq\n fn_eq += fn_seq\n sensitivity = round(tp_seq / (tp_seq + fn_seq), 5)\n ppv = round(tp_seq / (tp_seq + fp_seq), 5)\n jaccard = round(tp_seq / (tp_seq + fp_seq + fn_seq), 5)\n out.write(seqid + ',E,' + str(truth_data['domains']) + ',' + str(\n tp_seq) + ',' + str(fp_seq) + ',' + str(fn_seq) + ',' + str(\n sensitivity) + ',' + str(ppv) + ',' + str(jaccard) + '\\n')\nsummary.write('EQUAL-LENGTH STATISTICS\\n')\nsummary.write('TP equal domain: ' + str(tp_eq) + '\\n')\nsummary.write('FP equal domain: ' + str(fp_eq) + '\\n')\nsummary.write('FN equal domain: ' + str(fn_eq) + '\\n')\nsummary.write('Sensitivity: ' + str(round(tp_eq / (tp_eq + fn_eq), 5)) + '\\n')\nsummary.write('Precision(PPV): ' + str(round(tp_eq / (tp_eq + fp_eq), 5)) +\n '\\n')\nsummary.write('Jaccard Index: ' + str(round(tp_eq / (tp_eq + fp_eq + fn_eq),\n 5)) + '\\n\\n')\ntp_var = 0\nfp_var = 0\nfn_var = 0\nfor file in os.listdir(docpath + 'isoSegmenter100'):\n if file.endswith('.csv') and 'V' in file:\n predict_data = csv.DictReader(open(docpath + 'isoSegmenter100/' +\n file, 'r', encoding='UTF-8'))\n seqid = file.replace('.csv', '')\n with open(docpath + 'ground_truth100/' + seqid + '.json', 'r',\n encoding='UTF-8') as json_file:\n truth_data = json.load(json_file)\n true_boundaries = [1]\n tp_seq = 0\n fp_seq = 0\n fn_seq = 0\n for i in range(1, int(truth_data['domains']) + 1):\n b_next = true_boundaries[i - 1] + int(truth_data['length_' +\n str(i)])\n true_boundaries.append(b_next)\n for pred_domain in predict_data:\n matched = False\n for i in range(0, len(true_boundaries) - 1):\n startdiff = int(pred_domain['Start']) - true_boundaries[i]\n enddiff = int(pred_domain['End']) - true_boundaries[i + 1]\n tolerance = cutoff * (true_boundaries[i + 1] -\n 
true_boundaries[i])\n if abs(startdiff) <= tolerance:\n if abs(enddiff) <= tolerance:\n tp_seq += 1\n matched = True\n print(seqid)\n print('START MATCH: ' + str(true_boundaries[i]) +\n ', ' + pred_domain['Start'])\n print('END MATCH: ' + str(true_boundaries[i + 1]) +\n ', ' + pred_domain['End'])\n print('DIFFERENCES: ' + str(startdiff) + ', ' + str\n (enddiff) + ', TOLERANCE = ' + str(tolerance))\n print()\n break\n if not matched:\n fp_seq += 1\n fn_seq = int(truth_data['domains']) - tp_seq\n tp_var += tp_seq\n fp_var += fp_seq\n fn_var += fn_seq\n sensitivity = round(tp_seq / (tp_seq + fn_seq), 5)\n ppv = round(tp_seq / (tp_seq + fp_seq), 5)\n jaccard = round(tp_seq / (tp_seq + fp_seq + fn_seq), 5)\n out.write(seqid + ',V,' + str(truth_data['domains']) + ',' + str(\n tp_seq) + ',' + str(fp_seq) + ',' + str(fn_seq) + ',' + str(\n sensitivity) + ',' + str(ppv) + ',' + str(jaccard) + '\\n')\nsummary.write('VARIABLE-LENGTH STATISTICS\\n')\nsummary.write('TP equal domain: ' + str(tp_var) + '\\n')\nsummary.write('FP equal domain: ' + str(fp_var) + '\\n')\nsummary.write('FN equal domain: ' + str(fn_var) + '\\n')\nsummary.write('Sensitivity: ' + str(round(tp_var / (tp_var + fn_var), 5)) +\n '\\n')\nsummary.write('Precision(PPV): ' + str(round(tp_var / (tp_var + fp_var), 5)\n ) + '\\n')\nsummary.write('Jaccard Index: ' + str(round(tp_var / (tp_var + fp_var +\n fn_var), 5)) + '\\n\\n')\nsummary.write('OVERALL STATISTICS\\n')\nsummary.write('TP: ' + str(tp_var + tp_eq) + '\\n')\nsummary.write('FP: ' + str(fp_var + fp_eq) + '\\n')\nsummary.write('FN: ' + str(fn_var + fn_eq) + '\\n')\nsummary.write('Sensitivity: ' + str(round((tp_var + tp_eq) / (tp_var +\n fn_var + tp_eq + fn_eq), 5)) + '\\n')\nsummary.write('Precision(PPV): ' + str(round((tp_var + tp_eq) / (tp_var +\n fp_var + tp_eq + fp_eq), 5)) + '\\n')\nsummary.write('Jaccard Index: ' + str(round((tp_var + tp_eq) / (tp_var +\n fp_var + fn_var + tp_eq + fp_eq + fn_eq), 5)) + '\\n')\n",
"step-5": "\"\"\"\n\"\"\"\nimport os\nimport json\nimport csv\n\ncutoff = float(input(\"Tolerance (decimal)? \"))\ndocpath = \"C:/Users/RackS/Documents/\"\nout = open(\"isosegmenter_scoring_error\"+str(cutoff*100)+\".csv\", 'w', encoding='UTF-8')\nsummary = open(\"isosegmenter_score_summary_error\"+str(cutoff*100)+\".txt\", 'w', encoding='UTF-8')\nout.write(\"SEQUENCE_ID,TYPE,DOMAINS,TP,FP,FN,Sens,PPV,Jaccard\\n\")\n\ntp_eq = 0\nfp_eq = 0\nfn_eq = 0\n\nfor file in os.listdir(docpath+\"isoSegmenter100\"):\n if file.endswith(\".csv\") and \"E\" in file:\n predict_data = csv.DictReader(open(docpath+\"isoSegmenter100/\"+file, 'r', encoding='UTF-8'))\n seqid = file.replace(\".csv\", \"\")\n with open(docpath+\"ground_truth100/\"+seqid+\".json\", 'r', encoding='UTF-8') as json_file:\n truth_data = json.load(json_file)\n\n true_boundaries = []\n tp_seq = 0\n fp_seq = 0\n fn_seq = 0\n for i in range(0, int(truth_data['tot_length']) + 1, int(truth_data['domain_length'])):\n true_boundaries.append(i)\n\n for pred_domain in predict_data:\n matched = False\n for i in range(0, len(true_boundaries) - 1):\n startdiff = int(pred_domain['Start']) - true_boundaries[i]\n enddiff = int(pred_domain['End']) - true_boundaries[i+1]\n tolerance = cutoff*(true_boundaries[i+1] - true_boundaries[i])\n if abs(startdiff) <= tolerance:\n if abs(enddiff) <= tolerance:\n tp_seq += 1\n matched = True\n print(seqid)\n print(\"START MATCH: \" + str(true_boundaries[i]) + \", \" + pred_domain['Start'])\n print(\"END MATCH: \" + str(true_boundaries[i+1]) + \", \" + pred_domain['End'])\n print(\"DIFFERENCES: \" + str(startdiff) + \", \" + str(enddiff) + \", TOLERANCE = \" + str(tolerance))\n print()\n break\n if not matched:\n fp_seq += 1\n\n fn_seq = int(truth_data['domains']) - tp_seq\n tp_eq += tp_seq\n fp_eq += fp_seq\n fn_eq += fn_seq\n sensitivity = round(tp_seq/(tp_seq + fn_seq), 5)\n ppv = round(tp_seq/(tp_seq+fp_seq), 5)\n jaccard = round(tp_seq/(tp_seq + fp_seq + fn_seq), 5)\n out.write(seqid+\",E,\"+str(truth_data['domains'])+\",\"+str(tp_seq)+\",\"+str(fp_seq)+\",\"+str(fn_seq)+\",\"+str(sensitivity)+\",\"+str(ppv)+\",\"+str(jaccard)+\"\\n\")\n\nsummary.write(\"EQUAL-LENGTH STATISTICS\\n\")\nsummary.write(\"TP equal domain: \" + str(tp_eq) + \"\\n\")\nsummary.write(\"FP equal domain: \" + str(fp_eq) + \"\\n\")\nsummary.write(\"FN equal domain: \" + str(fn_eq) + \"\\n\")\nsummary.write(\"Sensitivity: \" + str(round(tp_eq/(tp_eq + fn_eq),5)) + \"\\n\")\nsummary.write(\"Precision(PPV): \" + str(round(tp_eq/(tp_eq + fp_eq),5)) + \"\\n\")\nsummary.write(\"Jaccard Index: \" + str(round(tp_eq/(tp_eq + fp_eq + fn_eq),5)) + \"\\n\\n\")\n\ntp_var = 0\nfp_var = 0\nfn_var = 0\nfor file in os.listdir(docpath+\"isoSegmenter100\"):\n if file.endswith(\".csv\") and \"V\" in file:\n predict_data = csv.DictReader(open(docpath+\"isoSegmenter100/\"+file, 'r', encoding='UTF-8'))\n seqid = file.replace(\".csv\", \"\")\n with open(docpath+\"ground_truth100/\"+seqid+\".json\", 'r', encoding='UTF-8') as json_file:\n truth_data = json.load(json_file)\n\n true_boundaries = [1]\n tp_seq = 0\n fp_seq = 0\n fn_seq = 0\n for i in range(1, int(truth_data['domains']) + 1):\n b_next = true_boundaries[i-1] + int(truth_data['length_'+str(i)])\n true_boundaries.append(b_next)\n\n for pred_domain in predict_data:\n matched = False\n for i in range(0, len(true_boundaries) - 1):\n startdiff = int(pred_domain['Start']) - true_boundaries[i]\n enddiff = int(pred_domain['End']) - true_boundaries[i+1]\n tolerance = cutoff*(true_boundaries[i+1] - 
true_boundaries[i])\n if abs(startdiff) <= tolerance:\n if abs(enddiff) <= tolerance:\n tp_seq += 1\n matched = True\n print(seqid)\n print(\"START MATCH: \" + str(true_boundaries[i]) + \", \" + pred_domain['Start'])\n print(\"END MATCH: \" + str(true_boundaries[i+1]) + \", \" + pred_domain['End'])\n print(\"DIFFERENCES: \" + str(startdiff) + \", \" + str(enddiff) + \", TOLERANCE = \" + str(tolerance))\n print()\n break\n if not matched:\n fp_seq += 1\n\n fn_seq = int(truth_data['domains']) - tp_seq\n tp_var += tp_seq\n fp_var += fp_seq\n fn_var += fn_seq\n sensitivity = round(tp_seq/(tp_seq + fn_seq), 5)\n ppv = round(tp_seq/(tp_seq+fp_seq), 5)\n jaccard = round(tp_seq/(tp_seq + fp_seq + fn_seq), 5)\n out.write(seqid+\",V,\"+str(truth_data['domains'])+\",\"+str(tp_seq)+\",\"+str(fp_seq)+\",\"+str(fn_seq)+\",\"+str(sensitivity)+\",\"+str(ppv)+\",\"+str(jaccard)+\"\\n\")\n\nsummary.write(\"VARIABLE-LENGTH STATISTICS\\n\")\nsummary.write(\"TP equal domain: \" + str(tp_var) + \"\\n\")\nsummary.write(\"FP equal domain: \" + str(fp_var) + \"\\n\")\nsummary.write(\"FN equal domain: \" + str(fn_var) + \"\\n\")\nsummary.write(\"Sensitivity: \" + str(round(tp_var/(tp_var + fn_var),5)) + \"\\n\")\nsummary.write(\"Precision(PPV): \" + str(round(tp_var/(tp_var + fp_var),5)) + \"\\n\")\nsummary.write(\"Jaccard Index: \" + str(round(tp_var/(tp_var + fp_var + fn_var),5)) + \"\\n\\n\")\n \n\nsummary.write(\"OVERALL STATISTICS\\n\")\nsummary.write(\"TP: \" + str(tp_var + tp_eq) + \"\\n\")\nsummary.write(\"FP: \" + str(fp_var + fp_eq) + \"\\n\")\nsummary.write(\"FN: \" + str(fn_var + fn_eq) + \"\\n\")\nsummary.write(\"Sensitivity: \" + str(round((tp_var + tp_eq)/(tp_var + fn_var + tp_eq + fn_eq),5)) + \"\\n\")\nsummary.write(\"Precision(PPV): \" + str(round((tp_var + tp_eq)/(tp_var + fp_var + tp_eq + fp_eq),5)) + \"\\n\")\nsummary.write(\"Jaccard Index: \" + str(round((tp_var + tp_eq)/(tp_var + fp_var + fn_var + tp_eq + fp_eq + fn_eq),5)) + \"\\n\")",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
from robocorp_ls_core.python_ls import PythonLanguageServer
from robocorp_ls_core.basic import overrides
from robocorp_ls_core.robotframework_log import get_logger
from typing import Optional, List, Dict
from robocorp_ls_core.protocols import IConfig, IMonitor, ITestInfoTypedDict, IWorkspace
from functools import partial
from robocorp_ls_core.jsonrpc.endpoint import require_monitor
from robocorp_ls_core.lsp import (
SymbolInformationTypedDict,
FoldingRangeTypedDict,
HoverTypedDict,
TextDocumentTypedDict,
CodeLensTypedDict,
DocumentSymbolTypedDict,
PositionTypedDict,
)
from robotframework_ls.impl.protocols import IKeywordFound
from robocorp_ls_core.watchdog_wrapper import IFSObserver
import itertools
log = get_logger(__name__)
class RobotFrameworkServerApi(PythonLanguageServer):
"""
    This is a custom server. It uses the same message format as the language
    server, but with custom messages (i.e.: this is not the language server
    itself, but an API to access the bits we need from robotframework in a
    separate process).
"""
def __init__(
self,
read_from,
write_to,
libspec_manager=None,
observer: Optional[IFSObserver] = None,
):
from robotframework_ls.impl.libspec_manager import LibspecManager
if libspec_manager is None:
try:
libspec_manager = LibspecManager(observer=observer)
except:
log.exception("Unable to properly initialize the LibspecManager.")
raise
self.libspec_manager = libspec_manager
PythonLanguageServer.__init__(self, read_from, write_to)
self._version = None
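        # Simple monotonically increasing counter: each call to self._next_time()
        # returns the next integer (0, 1, 2, ...).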
self._next_time = partial(next, itertools.count(0))
@overrides(PythonLanguageServer._create_config)
def _create_config(self) -> IConfig:
from robotframework_ls.robot_config import RobotConfig
return RobotConfig()
def m_version(self):
if self._version is not None:
return self._version
try:
import robot # noqa
except:
log.exception("Unable to import 'robot'.")
version = "NO_ROBOT"
else:
try:
from robot import get_version
version = get_version(naked=True)
except:
log.exception("Unable to get version.")
version = "N/A" # Too old?
self._version = version
return self._version
def _check_min_version(self, min_version):
from robocorp_ls_core.basic import check_min_version
version = self.m_version()
return check_min_version(version, min_version)
@overrides(PythonLanguageServer.m_workspace__did_change_configuration)
def m_workspace__did_change_configuration(self, **kwargs):
PythonLanguageServer.m_workspace__did_change_configuration(self, **kwargs)
self.libspec_manager.config = self.config
@overrides(PythonLanguageServer.lint)
def lint(self, *args, **kwargs):
pass # No-op for this server.
@overrides(PythonLanguageServer.cancel_lint)
def cancel_lint(self, *args, **kwargs):
pass # No-op for this server.
@overrides(PythonLanguageServer._obtain_fs_observer)
def _obtain_fs_observer(self) -> IFSObserver:
return self.libspec_manager.fs_observer
@overrides(PythonLanguageServer._create_workspace)
def _create_workspace(
self, root_uri: str, fs_observer: IFSObserver, workspace_folders
) -> IWorkspace:
from robotframework_ls.impl.robot_workspace import RobotWorkspace
return RobotWorkspace(
root_uri,
fs_observer,
workspace_folders,
libspec_manager=self.libspec_manager,
)
def m_lint(self, doc_uri):
if not self._check_min_version((3, 2)):
from robocorp_ls_core.lsp import Error
msg = (
"robotframework version (%s) too old for linting.\n"
"Please install a newer version and restart the language server."
% (self.m_version(),)
)
log.info(msg)
return [Error(msg, (0, 0), (1, 0)).to_lsp_diagnostic()]
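        # Defer the actual work: return a callable wrapped by require_monitor so the
        # endpoint can run it later with an IMonitor (which allows cancellation).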
func = partial(self._threaded_lint, doc_uri)
func = require_monitor(func)
return func
def _threaded_lint(self, doc_uri, monitor: IMonitor):
from robocorp_ls_core.jsonrpc.exceptions import JsonRpcRequestCancelled
from robotframework_ls.impl.robot_lsp_constants import (
OPTION_ROBOT_LINT_ROBOCOP_ENABLED,
)
from robocorp_ls_core import uris
from robocorp_ls_core.lsp import Error
try:
from robotframework_ls.impl.ast_utils import collect_errors
from robotframework_ls.impl import code_analysis
import os.path
log.debug("Lint: starting (in thread).")
completion_context = self._create_completion_context(doc_uri, 0, 0, monitor)
if completion_context is None:
return []
config = completion_context.config
robocop_enabled = config is None or config.get_setting(
OPTION_ROBOT_LINT_ROBOCOP_ENABLED, bool, False
)
ast = completion_context.get_ast()
source = completion_context.doc.source
monitor.check_cancelled()
errors = collect_errors(ast)
log.debug("Collected AST errors (in thread): %s", len(errors))
monitor.check_cancelled()
analysis_errors = code_analysis.collect_analysis_errors(completion_context)
monitor.check_cancelled()
log.debug("Collected analysis errors (in thread): %s", len(analysis_errors))
errors.extend(analysis_errors)
lsp_diagnostics = [error.to_lsp_diagnostic() for error in errors]
try:
if robocop_enabled:
from robocorp_ls_core.robocop_wrapper import (
collect_robocop_diagnostics,
)
workspace = completion_context.workspace
if workspace is not None:
project_root = workspace.root_path
else:
project_root = os.path.abspath(".")
monitor.check_cancelled()
lsp_diagnostics.extend(
collect_robocop_diagnostics(
project_root, ast, uris.to_fs_path(doc_uri), source
)
)
except Exception as e:
log.exception(
"Error collecting Robocop errors (possibly an unsupported Robocop version is installed)."
)
lsp_diagnostics.append(
Error(
f"Error collecting Robocop errors: {e}", (0, 0), (1, 0)
).to_lsp_diagnostic()
)
return lsp_diagnostics
except JsonRpcRequestCancelled:
raise JsonRpcRequestCancelled("Lint cancelled (inside lint)")
except Exception as e:
log.exception("Error collecting errors.")
ret = [
Error(
f"Error collecting Robocop errors: {e}", (0, 0), (1, 0)
).to_lsp_diagnostic()
]
return ret
def m_complete_all(self, doc_uri, line, col):
func = partial(self._threaded_complete_all, doc_uri, line, col)
func = require_monitor(func)
return func
def _threaded_complete_all(self, doc_uri, line, col, monitor: IMonitor):
completion_context = self._create_completion_context(
doc_uri, line, col, monitor
)
if completion_context is None:
return []
return self._complete_from_completion_context(completion_context)
def _complete_from_completion_context(self, completion_context):
from robotframework_ls.impl import section_name_completions
from robotframework_ls.impl import keyword_completions
from robotframework_ls.impl import variable_completions
from robotframework_ls.impl import dictionary_completions
from robotframework_ls.impl import filesystem_section_completions
from robotframework_ls.impl import keyword_parameter_completions
from robotframework_ls.impl import auto_import_completions
from robotframework_ls.impl.collect_keywords import (
collect_keyword_name_to_keyword_found,
)
from robotframework_ls.impl import ast_utils
ret = section_name_completions.complete(completion_context)
if not ret:
ret.extend(filesystem_section_completions.complete(completion_context))
if not ret:
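            # When the cursor is on a keyword name, offer keyword completions plus
            # auto-import completions built from all known keywords.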
token_info = completion_context.get_current_token()
if token_info is not None:
token = ast_utils.get_keyword_name_token(
token_info.node, token_info.token
)
if token is not None:
keyword_name_to_keyword_found: Dict[
str, List[IKeywordFound]
] = collect_keyword_name_to_keyword_found(completion_context)
ret.extend(keyword_completions.complete(completion_context))
ret.extend(
auto_import_completions.complete(
completion_context, keyword_name_to_keyword_found
)
)
return ret
if not ret:
ret.extend(variable_completions.complete(completion_context))
if not ret:
ret.extend(dictionary_completions.complete(completion_context))
if not ret:
ret.extend(keyword_parameter_completions.complete(completion_context))
return ret
def m_section_name_complete(self, doc_uri, line, col):
from robotframework_ls.impl import section_name_completions
completion_context = self._create_completion_context(doc_uri, line, col, None)
if completion_context is None:
return []
return section_name_completions.complete(completion_context)
def m_keyword_complete(self, doc_uri, line, col):
from robotframework_ls.impl import keyword_completions
completion_context = self._create_completion_context(doc_uri, line, col, None)
if completion_context is None:
return []
return keyword_completions.complete(completion_context)
def m_find_definition(self, doc_uri, line, col):
func = partial(self._threaded_find_definition, doc_uri, line, col)
func = require_monitor(func)
return func
def _threaded_find_definition(self, doc_uri, line, col, monitor) -> Optional[list]:
from robotframework_ls.impl.find_definition import find_definition
import os.path
from robocorp_ls_core.lsp import Location, Range
from robocorp_ls_core import uris
completion_context = self._create_completion_context(
doc_uri, line, col, monitor
)
if completion_context is None:
return None
definitions = find_definition(completion_context)
ret = []
for definition in definitions:
if not definition.source:
log.info("Found definition with empty source (%s).", definition)
continue
if not os.path.exists(definition.source):
log.info(
"Found definition: %s (but source does not exist).", definition
)
continue
lineno = definition.lineno
if lineno is None or lineno < 0:
lineno = 0
end_lineno = definition.end_lineno
if end_lineno is None or end_lineno < 0:
end_lineno = 0
col_offset = definition.col_offset
end_col_offset = definition.end_col_offset
ret.append(
Location(
uris.from_fs_path(definition.source),
Range((lineno, col_offset), (end_lineno, end_col_offset)),
).to_dict()
)
return ret
def m_code_format(self, text_document, options):
func = partial(self._threaded_code_format, text_document, options)
func = require_monitor(func)
return func
def _threaded_code_format(self, text_document, options, monitor: IMonitor):
from robotframework_ls.impl.formatting import create_text_edit_from_diff
from robocorp_ls_core.lsp import TextDocumentItem
import os.path
from robotframework_ls.impl.robot_lsp_constants import (
OPTION_ROBOT_CODE_FORMATTER,
)
from robotframework_ls.impl.robot_lsp_constants import (
OPTION_ROBOT_CODE_FORMATTER_ROBOTIDY,
)
from robotframework_ls.impl.robot_lsp_constants import (
OPTION_ROBOT_CODE_FORMATTER_BUILTIN_TIDY,
)
text_document_item = TextDocumentItem(**text_document)
text = text_document_item.text
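        # If the client did not send the document text, fall back to the source of
        # the document in the workspace.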
if not text:
completion_context = self._create_completion_context(
text_document_item.uri, 0, 0, monitor
)
if completion_context is None:
return []
text = completion_context.doc.source
if not text:
return []
if options is None:
options = {}
tab_size = options.get("tabSize", 4)
# Default for now is the builtin. This will probably be changed in the future.
formatter = self._config.get_setting(
OPTION_ROBOT_CODE_FORMATTER, str, OPTION_ROBOT_CODE_FORMATTER_BUILTIN_TIDY
)
if formatter not in (
OPTION_ROBOT_CODE_FORMATTER_ROBOTIDY,
OPTION_ROBOT_CODE_FORMATTER_BUILTIN_TIDY,
):
log.critical(
f"Code formatter invalid: {formatter}. Please select one of: {OPTION_ROBOT_CODE_FORMATTER_ROBOTIDY}, {OPTION_ROBOT_CODE_FORMATTER_BUILTIN_TIDY}."
)
return []
if formatter == OPTION_ROBOT_CODE_FORMATTER_BUILTIN_TIDY:
from robotframework_ls.impl.formatting import robot_source_format
new_contents = robot_source_format(text, space_count=tab_size)
else:
if not self._check_min_version((4, 0)):
log.critical(
f"To use the robotidy formatter, at least Robot Framework 4 is needed. Found: {self.m_version()}"
)
return []
from robocorp_ls_core.robotidy_wrapper import robot_tidy_source_format
ast = completion_context.get_ast()
path = completion_context.doc.path
dirname = "."
try:
os.stat(path)
except:
# It doesn't exist
ws = self._workspace
if ws is not None:
dirname = ws.root_path
else:
dirname = os.path.dirname(path)
new_contents = robot_tidy_source_format(ast, dirname)
if new_contents is None or new_contents == text:
return []
return [x.to_dict() for x in create_text_edit_from_diff(text, new_contents)]
def _create_completion_context(
self, doc_uri, line, col, monitor: Optional[IMonitor]
):
from robotframework_ls.impl.completion_context import CompletionContext
if not self._check_min_version((3, 2)):
log.info("robotframework version too old.")
return None
workspace = self.workspace
if not workspace:
log.info("Workspace still not initialized.")
return None
document = workspace.get_document(doc_uri, accept_from_file=True)
if document is None:
log.info("Unable to get document for uri: %s.", doc_uri)
return None
return CompletionContext(
document,
line,
col,
workspace=workspace,
config=self.config,
monitor=monitor,
)
def m_signature_help(self, doc_uri: str, line: int, col: int):
func = partial(self._threaded_signature_help, doc_uri, line, col)
func = require_monitor(func)
return func
def _threaded_signature_help(
self, doc_uri: str, line: int, col: int, monitor: IMonitor
) -> Optional[dict]:
from robotframework_ls.impl.signature_help import signature_help
completion_context = self._create_completion_context(
doc_uri, line, col, monitor
)
if completion_context is None:
return None
return signature_help(completion_context)
def m_folding_range(self, doc_uri: str):
func = partial(self._threaded_folding_range, doc_uri)
func = require_monitor(func)
return func
def _threaded_folding_range(
self, doc_uri: str, monitor: IMonitor
) -> List[FoldingRangeTypedDict]:
from robotframework_ls.impl.folding_range import folding_range
completion_context = self._create_completion_context(doc_uri, 0, 0, monitor)
if completion_context is None:
return []
return folding_range(completion_context)
def m_code_lens(self, doc_uri: str):
func = partial(self._threaded_code_lens, doc_uri)
func = require_monitor(func)
return func
def _threaded_code_lens(
self, doc_uri: str, monitor: IMonitor
) -> List[CodeLensTypedDict]:
from robotframework_ls.impl.code_lens import code_lens
completion_context = self._create_completion_context(doc_uri, 0, 0, monitor)
if completion_context is None:
return []
return code_lens(completion_context)
def m_resolve_code_lens(self, **code_lens: CodeLensTypedDict):
func = partial(self._threaded_resolve_code_lens, code_lens)
func = require_monitor(func)
return func
def _threaded_resolve_code_lens(
self, code_lens: CodeLensTypedDict, monitor: IMonitor
) -> CodeLensTypedDict:
from robotframework_ls.impl.code_lens import code_lens_resolve
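        # The lens carries the uri of its originating document in its 'data' payload.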
data = code_lens.get("data")
if not isinstance(data, dict):
return code_lens
doc_uri = data.get("uri")
completion_context = self._create_completion_context(doc_uri, 0, 0, monitor)
if completion_context is None:
return code_lens
return code_lens_resolve(completion_context, code_lens)
def m_document_symbol(self, doc_uri: str):
func = partial(self._threaded_document_symbol, doc_uri)
func = require_monitor(func)
return func
def _threaded_document_symbol(
self, doc_uri: str, monitor: IMonitor
) -> List[DocumentSymbolTypedDict]:
from robotframework_ls.impl.document_symbol import document_symbol
completion_context = self._create_completion_context(doc_uri, 0, 0, monitor)
if completion_context is None:
return []
return document_symbol(completion_context)
def m_list_tests(self, doc_uri: str):
func = partial(self._threaded_list_tests, doc_uri)
func = require_monitor(func)
return func
def _threaded_list_tests(
self, doc_uri: str, monitor: IMonitor
) -> List[ITestInfoTypedDict]:
from robotframework_ls.impl.code_lens import list_tests
completion_context = self._create_completion_context(doc_uri, 0, 0, monitor)
if completion_context is None:
return []
return list_tests(completion_context)
def m_hover(self, doc_uri: str, line: int, col: int):
func = partial(self._threaded_hover, doc_uri, line, col)
func = require_monitor(func)
return func
def _threaded_hover(
self, doc_uri: str, line, col, monitor: IMonitor
) -> Optional[HoverTypedDict]:
from robotframework_ls.impl.hover import hover
completion_context = self._create_completion_context(
doc_uri, line, col, monitor
)
if completion_context is None:
return None
return hover(completion_context)
def m_workspace_symbols(self, query: Optional[str] = None):
func = partial(self._threaded_workspace_symbols, query)
func = require_monitor(func)
return func
def _threaded_workspace_symbols(
self, query: Optional[str], monitor: IMonitor
) -> Optional[List[SymbolInformationTypedDict]]:
from robotframework_ls.impl.workspace_symbols import workspace_symbols
from robotframework_ls.impl.completion_context import BaseContext
from robotframework_ls.impl.protocols import IRobotWorkspace
from typing import cast
workspace = self._workspace
if not workspace:
return []
robot_workspace = cast(IRobotWorkspace, workspace)
return workspace_symbols(
query,
BaseContext(workspace=robot_workspace, config=self.config, monitor=monitor),
)
def m_text_document__semantic_tokens__range(self, textDocument=None, range=None):
raise RuntimeError("Not currently implemented!")
def m_text_document__semantic_tokens__full(self, textDocument=None):
func = partial(self.threaded_semantic_tokens_full, textDocument=textDocument)
func = require_monitor(func)
return func
def threaded_semantic_tokens_full(
self, textDocument: TextDocumentTypedDict, monitor: Optional[IMonitor] = None
):
from robotframework_ls.impl.semantic_tokens import semantic_tokens_full
doc_uri = textDocument["uri"]
context = self._create_completion_context(doc_uri, -1, -1, monitor)
if context is None:
return {"resultId": None, "data": []}
return {"resultId": None, "data": semantic_tokens_full(context)}
def m_monaco_completions_from_code_full(
self,
prefix: str = "",
full_code: str = "",
position=PositionTypedDict,
uri: str = "",
indent: str = "",
):
func = partial(
self.threaded_monaco_completions_from_code_full,
prefix=prefix,
full_code=full_code,
position=position,
uri=uri,
indent=indent,
)
func = require_monitor(func)
return func
def threaded_monaco_completions_from_code_full(
self,
prefix: str,
full_code: str,
position: PositionTypedDict,
uri: str,
indent: str,
monitor: Optional[IMonitor] = None,
):
from robotframework_ls.impl.robot_workspace import RobotDocument
from robotframework_ls.impl.completion_context import CompletionContext
from robocorp_ls_core.workspace import Document
from robotframework_ls.impl import section_completions
from robotframework_ls.impl import snippets_completions
from robotframework_ls.server_api.monaco_conversions import (
convert_to_monaco_completion,
)
from robotframework_ls.impl.completion_context import CompletionType
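        # The incoming position is relative to the code typed after `prefix`; translate
        # it into a position in full_code by offsetting with the prefix's last line and
        # the indent width.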
d = Document(uri, prefix)
last_line, _last_col = d.get_last_line_col()
line = last_line + position["line"]
col = position["character"]
col += len(indent)
document = RobotDocument(uri, full_code)
completion_context = CompletionContext(
document,
line,
col,
config=self.config,
monitor=monitor,
workspace=self.workspace,
)
completion_context.type = CompletionType.shell
completions = self._complete_from_completion_context(completion_context)
completions.extend(section_completions.complete(completion_context))
completions.extend(snippets_completions.complete(completion_context))
return {
"suggestions": [
convert_to_monaco_completion(
c, line_delta=last_line, col_delta=len(indent), uri=uri
)
for c in completions
]
}
def m_semantic_tokens_from_code_full(
self, prefix: str = "", full_code: str = "", indent: str = ""
):
func = partial(
self.threaded_semantic_tokens_from_code_full,
prefix=prefix,
full_code=full_code,
indent=indent,
)
func = require_monitor(func)
return func
def threaded_semantic_tokens_from_code_full(
self,
prefix: str,
full_code: str,
indent: str,
monitor: Optional[IMonitor] = None,
):
from robotframework_ls.impl.semantic_tokens import semantic_tokens_full_from_ast
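        # Compute semantic tokens for full_code, then drop the tokens belonging to
        # `prefix` and compensate for the `indent` prepended to the remaining lines.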
try:
from robotframework_ls.impl.robot_workspace import RobotDocument
doc = RobotDocument("")
doc.source = full_code
ast = doc.get_ast()
data = semantic_tokens_full_from_ast(ast, monitor)
if not prefix:
return {"resultId": None, "data": data}
# We have to exclude the prefix from the coloring...
# debug info...
# import io
# from robotframework_ls.impl.semantic_tokens import decode_semantic_tokens
# stream = io.StringIO()
# decode_semantic_tokens(data, doc, stream)
# found = stream.getvalue()
prefix_doc = RobotDocument("")
prefix_doc.source = prefix
last_line, last_col = prefix_doc.get_last_line_col()
# Now we have the data from the full code, but we need to remove whatever
# we have in the prefix from the result...
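            # (Semantic token data is a flat list of 5-int groups:
            #  line delta, start col delta, token length, token type, token modifiers.)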
ints_iter = iter(data)
line = 0
col = 0
new_data = []
indent_len = len(indent)
while True:
try:
line_delta = next(ints_iter)
except StopIteration:
break
col_delta = next(ints_iter)
token_len = next(ints_iter)
token_type = next(ints_iter)
token_modifier = next(ints_iter)
line += line_delta
if line_delta == 0:
col += col_delta
else:
col = col_delta
if line >= last_line:
new_data.append(line - last_line)
new_data.append(col_delta - indent_len)
new_data.append(token_len)
new_data.append(token_type)
new_data.append(token_modifier)
                    # Ok, now account for the indent added to full_code: subtract
                    # indent_len from the column delta whenever a new line starts.
while True:
try:
line_delta = next(ints_iter)
except StopIteration:
break
col_delta = next(ints_iter)
token_len = next(ints_iter)
token_type = next(ints_iter)
token_modifier = next(ints_iter)
new_data.append(line_delta)
if line_delta > 0:
new_data.append(col_delta - indent_len)
else:
new_data.append(col_delta)
new_data.append(token_len)
new_data.append(token_type)
new_data.append(token_modifier)
break
# Approach changed so that we always have a new line
# i.e.:
# \n<indent><code>
#
# so, the condition below no longer applies.
# elif line == last_line and col >= last_col:
# new_data.append(0)
# new_data.append(col - last_col)
# new_data.append(token_len)
# new_data.append(token_type)
# new_data.append(token_modifier)
# new_data.extend(ints_iter)
# break
# debug info...
# temp_stream = io.StringIO()
# temp_doc = RobotDocument("")
# temp_doc.source = full_code[len(prefix) :]
# decode_semantic_tokens(new_data, temp_doc, temp_stream)
# temp_found = temp_stream.getvalue()
return {"resultId": None, "data": new_data}
except:
log.exception("Error computing semantic tokens from code.")
return {"resultId": None, "data": []}
def m_shutdown(self, **_kwargs):
PythonLanguageServer.m_shutdown(self, **_kwargs)
self.libspec_manager.dispose()
def m_exit(self, **_kwargs):
PythonLanguageServer.m_exit(self, **_kwargs)
self.libspec_manager.dispose()
|
normal
|
{
"blob_id": "18b43ea8696e2e54f4c1cbbece4cde1fd3130145",
"index": 194,
"step-1": "<mask token>\n\n\nclass RobotFrameworkServerApi(PythonLanguageServer):\n <mask token>\n\n def __init__(self, read_from, write_to, libspec_manager=None, observer:\n Optional[IFSObserver]=None):\n from robotframework_ls.impl.libspec_manager import LibspecManager\n if libspec_manager is None:\n try:\n libspec_manager = LibspecManager(observer=observer)\n except:\n log.exception(\n 'Unable to properly initialize the LibspecManager.')\n raise\n self.libspec_manager = libspec_manager\n PythonLanguageServer.__init__(self, read_from, write_to)\n self._version = None\n self._next_time = partial(next, itertools.count(0))\n <mask token>\n <mask token>\n\n def _check_min_version(self, min_version):\n from robocorp_ls_core.basic import check_min_version\n version = self.m_version()\n return check_min_version(version, min_version)\n\n @overrides(PythonLanguageServer.m_workspace__did_change_configuration)\n def m_workspace__did_change_configuration(self, **kwargs):\n PythonLanguageServer.m_workspace__did_change_configuration(self, **\n kwargs)\n self.libspec_manager.config = self.config\n\n @overrides(PythonLanguageServer.lint)\n def lint(self, *args, **kwargs):\n pass\n <mask token>\n <mask token>\n\n @overrides(PythonLanguageServer._create_workspace)\n def _create_workspace(self, root_uri: str, fs_observer: IFSObserver,\n workspace_folders) ->IWorkspace:\n from robotframework_ls.impl.robot_workspace import RobotWorkspace\n return RobotWorkspace(root_uri, fs_observer, workspace_folders,\n libspec_manager=self.libspec_manager)\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def _complete_from_completion_context(self, completion_context):\n from robotframework_ls.impl import section_name_completions\n from robotframework_ls.impl import keyword_completions\n from robotframework_ls.impl import variable_completions\n from robotframework_ls.impl import dictionary_completions\n from robotframework_ls.impl import filesystem_section_completions\n from robotframework_ls.impl import keyword_parameter_completions\n from robotframework_ls.impl import auto_import_completions\n from robotframework_ls.impl.collect_keywords import collect_keyword_name_to_keyword_found\n from robotframework_ls.impl import ast_utils\n ret = section_name_completions.complete(completion_context)\n if not ret:\n ret.extend(filesystem_section_completions.complete(\n completion_context))\n if not ret:\n token_info = completion_context.get_current_token()\n if token_info is not None:\n token = ast_utils.get_keyword_name_token(token_info.node,\n token_info.token)\n if token is not None:\n keyword_name_to_keyword_found: Dict[str, List[\n IKeywordFound]\n ] = collect_keyword_name_to_keyword_found(\n completion_context)\n ret.extend(keyword_completions.complete(completion_context)\n )\n ret.extend(auto_import_completions.complete(\n completion_context, keyword_name_to_keyword_found))\n return ret\n if not ret:\n ret.extend(variable_completions.complete(completion_context))\n if not ret:\n ret.extend(dictionary_completions.complete(completion_context))\n if not ret:\n ret.extend(keyword_parameter_completions.complete(\n completion_context))\n return ret\n <mask token>\n <mask token>\n\n def m_find_definition(self, doc_uri, line, col):\n func = partial(self._threaded_find_definition, doc_uri, line, col)\n func = require_monitor(func)\n return func\n\n def _threaded_find_definition(self, doc_uri, line, col, monitor\n ) ->Optional[list]:\n from robotframework_ls.impl.find_definition import find_definition\n import os.path\n 
from robocorp_ls_core.lsp import Location, Range\n from robocorp_ls_core import uris\n completion_context = self._create_completion_context(doc_uri, line,\n col, monitor)\n if completion_context is None:\n return None\n definitions = find_definition(completion_context)\n ret = []\n for definition in definitions:\n if not definition.source:\n log.info('Found definition with empty source (%s).', definition\n )\n continue\n if not os.path.exists(definition.source):\n log.info('Found definition: %s (but source does not exist).',\n definition)\n continue\n lineno = definition.lineno\n if lineno is None or lineno < 0:\n lineno = 0\n end_lineno = definition.end_lineno\n if end_lineno is None or end_lineno < 0:\n end_lineno = 0\n col_offset = definition.col_offset\n end_col_offset = definition.end_col_offset\n ret.append(Location(uris.from_fs_path(definition.source), Range\n ((lineno, col_offset), (end_lineno, end_col_offset))).to_dict()\n )\n return ret\n <mask token>\n\n def _threaded_code_format(self, text_document, options, monitor: IMonitor):\n from robotframework_ls.impl.formatting import create_text_edit_from_diff\n from robocorp_ls_core.lsp import TextDocumentItem\n import os.path\n from robotframework_ls.impl.robot_lsp_constants import OPTION_ROBOT_CODE_FORMATTER\n from robotframework_ls.impl.robot_lsp_constants import OPTION_ROBOT_CODE_FORMATTER_ROBOTIDY\n from robotframework_ls.impl.robot_lsp_constants import OPTION_ROBOT_CODE_FORMATTER_BUILTIN_TIDY\n text_document_item = TextDocumentItem(**text_document)\n text = text_document_item.text\n if not text:\n completion_context = self._create_completion_context(\n text_document_item.uri, 0, 0, monitor)\n if completion_context is None:\n return []\n text = completion_context.doc.source\n if not text:\n return []\n if options is None:\n options = {}\n tab_size = options.get('tabSize', 4)\n formatter = self._config.get_setting(OPTION_ROBOT_CODE_FORMATTER,\n str, OPTION_ROBOT_CODE_FORMATTER_BUILTIN_TIDY)\n if formatter not in (OPTION_ROBOT_CODE_FORMATTER_ROBOTIDY,\n OPTION_ROBOT_CODE_FORMATTER_BUILTIN_TIDY):\n log.critical(\n f'Code formatter invalid: {formatter}. Please select one of: {OPTION_ROBOT_CODE_FORMATTER_ROBOTIDY}, {OPTION_ROBOT_CODE_FORMATTER_BUILTIN_TIDY}.'\n )\n return []\n if formatter == OPTION_ROBOT_CODE_FORMATTER_BUILTIN_TIDY:\n from robotframework_ls.impl.formatting import robot_source_format\n new_contents = robot_source_format(text, space_count=tab_size)\n else:\n if not self._check_min_version((4, 0)):\n log.critical(\n f'To use the robotidy formatter, at least Robot Framework 4 is needed. 
Found: {self.m_version()}'\n )\n return []\n from robocorp_ls_core.robotidy_wrapper import robot_tidy_source_format\n ast = completion_context.get_ast()\n path = completion_context.doc.path\n dirname = '.'\n try:\n os.stat(path)\n except:\n ws = self._workspace\n if ws is not None:\n dirname = ws.root_path\n else:\n dirname = os.path.dirname(path)\n new_contents = robot_tidy_source_format(ast, dirname)\n if new_contents is None or new_contents == text:\n return []\n return [x.to_dict() for x in create_text_edit_from_diff(text,\n new_contents)]\n <mask token>\n <mask token>\n <mask token>\n\n def m_folding_range(self, doc_uri: str):\n func = partial(self._threaded_folding_range, doc_uri)\n func = require_monitor(func)\n return func\n\n def _threaded_folding_range(self, doc_uri: str, monitor: IMonitor) ->List[\n FoldingRangeTypedDict]:\n from robotframework_ls.impl.folding_range import folding_range\n completion_context = self._create_completion_context(doc_uri, 0, 0,\n monitor)\n if completion_context is None:\n return []\n return folding_range(completion_context)\n <mask token>\n\n def _threaded_code_lens(self, doc_uri: str, monitor: IMonitor) ->List[\n CodeLensTypedDict]:\n from robotframework_ls.impl.code_lens import code_lens\n completion_context = self._create_completion_context(doc_uri, 0, 0,\n monitor)\n if completion_context is None:\n return []\n return code_lens(completion_context)\n <mask token>\n <mask token>\n <mask token>\n\n def _threaded_document_symbol(self, doc_uri: str, monitor: IMonitor\n ) ->List[DocumentSymbolTypedDict]:\n from robotframework_ls.impl.document_symbol import document_symbol\n completion_context = self._create_completion_context(doc_uri, 0, 0,\n monitor)\n if completion_context is None:\n return []\n return document_symbol(completion_context)\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def m_workspace_symbols(self, query: Optional[str]=None):\n func = partial(self._threaded_workspace_symbols, query)\n func = require_monitor(func)\n return func\n\n def _threaded_workspace_symbols(self, query: Optional[str], monitor:\n IMonitor) ->Optional[List[SymbolInformationTypedDict]]:\n from robotframework_ls.impl.workspace_symbols import workspace_symbols\n from robotframework_ls.impl.completion_context import BaseContext\n from robotframework_ls.impl.protocols import IRobotWorkspace\n from typing import cast\n workspace = self._workspace\n if not workspace:\n return []\n robot_workspace = cast(IRobotWorkspace, workspace)\n return workspace_symbols(query, BaseContext(workspace=\n robot_workspace, config=self.config, monitor=monitor))\n <mask token>\n <mask token>\n\n def threaded_semantic_tokens_full(self, textDocument:\n TextDocumentTypedDict, monitor: Optional[IMonitor]=None):\n from robotframework_ls.impl.semantic_tokens import semantic_tokens_full\n doc_uri = textDocument['uri']\n context = self._create_completion_context(doc_uri, -1, -1, monitor)\n if context is None:\n return {'resultId': None, 'data': []}\n return {'resultId': None, 'data': semantic_tokens_full(context)}\n\n def m_monaco_completions_from_code_full(self, prefix: str='', full_code:\n str='', position=PositionTypedDict, uri: str='', indent: str=''):\n func = partial(self.threaded_monaco_completions_from_code_full,\n prefix=prefix, full_code=full_code, position=position, uri=uri,\n indent=indent)\n func = require_monitor(func)\n return func\n\n def threaded_monaco_completions_from_code_full(self, prefix: str,\n full_code: str, position: PositionTypedDict, uri: str, indent: 
str,\n monitor: Optional[IMonitor]=None):\n from robotframework_ls.impl.robot_workspace import RobotDocument\n from robotframework_ls.impl.completion_context import CompletionContext\n from robocorp_ls_core.workspace import Document\n from robotframework_ls.impl import section_completions\n from robotframework_ls.impl import snippets_completions\n from robotframework_ls.server_api.monaco_conversions import convert_to_monaco_completion\n from robotframework_ls.impl.completion_context import CompletionType\n d = Document(uri, prefix)\n last_line, _last_col = d.get_last_line_col()\n line = last_line + position['line']\n col = position['character']\n col += len(indent)\n document = RobotDocument(uri, full_code)\n completion_context = CompletionContext(document, line, col, config=\n self.config, monitor=monitor, workspace=self.workspace)\n completion_context.type = CompletionType.shell\n completions = self._complete_from_completion_context(completion_context\n )\n completions.extend(section_completions.complete(completion_context))\n completions.extend(snippets_completions.complete(completion_context))\n return {'suggestions': [convert_to_monaco_completion(c, line_delta=\n last_line, col_delta=len(indent), uri=uri) for c in completions]}\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass RobotFrameworkServerApi(PythonLanguageServer):\n <mask token>\n\n def __init__(self, read_from, write_to, libspec_manager=None, observer:\n Optional[IFSObserver]=None):\n from robotframework_ls.impl.libspec_manager import LibspecManager\n if libspec_manager is None:\n try:\n libspec_manager = LibspecManager(observer=observer)\n except:\n log.exception(\n 'Unable to properly initialize the LibspecManager.')\n raise\n self.libspec_manager = libspec_manager\n PythonLanguageServer.__init__(self, read_from, write_to)\n self._version = None\n self._next_time = partial(next, itertools.count(0))\n <mask token>\n <mask token>\n\n def _check_min_version(self, min_version):\n from robocorp_ls_core.basic import check_min_version\n version = self.m_version()\n return check_min_version(version, min_version)\n\n @overrides(PythonLanguageServer.m_workspace__did_change_configuration)\n def m_workspace__did_change_configuration(self, **kwargs):\n PythonLanguageServer.m_workspace__did_change_configuration(self, **\n kwargs)\n self.libspec_manager.config = self.config\n\n @overrides(PythonLanguageServer.lint)\n def lint(self, *args, **kwargs):\n pass\n\n @overrides(PythonLanguageServer.cancel_lint)\n def cancel_lint(self, *args, **kwargs):\n pass\n <mask token>\n\n @overrides(PythonLanguageServer._create_workspace)\n def _create_workspace(self, root_uri: str, fs_observer: IFSObserver,\n workspace_folders) ->IWorkspace:\n from robotframework_ls.impl.robot_workspace import RobotWorkspace\n return RobotWorkspace(root_uri, fs_observer, workspace_folders,\n libspec_manager=self.libspec_manager)\n\n def m_lint(self, doc_uri):\n if not self._check_min_version((3, 2)):\n from robocorp_ls_core.lsp import Error\n msg = (\n \"\"\"robotframework version (%s) too old for linting.\nPlease install a newer version and restart the language server.\"\"\"\n % (self.m_version(),))\n log.info(msg)\n return [Error(msg, (0, 0), (1, 0)).to_lsp_diagnostic()]\n func = partial(self._threaded_lint, doc_uri)\n func = require_monitor(func)\n return func\n <mask token>\n\n def m_complete_all(self, doc_uri, line, col):\n func = partial(self._threaded_complete_all, doc_uri, line, col)\n func = require_monitor(func)\n return func\n\n def _threaded_complete_all(self, doc_uri, line, col, monitor: IMonitor):\n completion_context = self._create_completion_context(doc_uri, line,\n col, monitor)\n if completion_context is None:\n return []\n return self._complete_from_completion_context(completion_context)\n\n def _complete_from_completion_context(self, completion_context):\n from robotframework_ls.impl import section_name_completions\n from robotframework_ls.impl import keyword_completions\n from robotframework_ls.impl import variable_completions\n from robotframework_ls.impl import dictionary_completions\n from robotframework_ls.impl import filesystem_section_completions\n from robotframework_ls.impl import keyword_parameter_completions\n from robotframework_ls.impl import auto_import_completions\n from robotframework_ls.impl.collect_keywords import collect_keyword_name_to_keyword_found\n from robotframework_ls.impl import ast_utils\n ret = section_name_completions.complete(completion_context)\n if not ret:\n ret.extend(filesystem_section_completions.complete(\n completion_context))\n if not ret:\n token_info = completion_context.get_current_token()\n if token_info is not None:\n token = ast_utils.get_keyword_name_token(token_info.node,\n token_info.token)\n if token is not None:\n keyword_name_to_keyword_found: 
Dict[str, List[\n IKeywordFound]\n ] = collect_keyword_name_to_keyword_found(\n completion_context)\n ret.extend(keyword_completions.complete(completion_context)\n )\n ret.extend(auto_import_completions.complete(\n completion_context, keyword_name_to_keyword_found))\n return ret\n if not ret:\n ret.extend(variable_completions.complete(completion_context))\n if not ret:\n ret.extend(dictionary_completions.complete(completion_context))\n if not ret:\n ret.extend(keyword_parameter_completions.complete(\n completion_context))\n return ret\n\n def m_section_name_complete(self, doc_uri, line, col):\n from robotframework_ls.impl import section_name_completions\n completion_context = self._create_completion_context(doc_uri, line,\n col, None)\n if completion_context is None:\n return []\n return section_name_completions.complete(completion_context)\n <mask token>\n\n def m_find_definition(self, doc_uri, line, col):\n func = partial(self._threaded_find_definition, doc_uri, line, col)\n func = require_monitor(func)\n return func\n\n def _threaded_find_definition(self, doc_uri, line, col, monitor\n ) ->Optional[list]:\n from robotframework_ls.impl.find_definition import find_definition\n import os.path\n from robocorp_ls_core.lsp import Location, Range\n from robocorp_ls_core import uris\n completion_context = self._create_completion_context(doc_uri, line,\n col, monitor)\n if completion_context is None:\n return None\n definitions = find_definition(completion_context)\n ret = []\n for definition in definitions:\n if not definition.source:\n log.info('Found definition with empty source (%s).', definition\n )\n continue\n if not os.path.exists(definition.source):\n log.info('Found definition: %s (but source does not exist).',\n definition)\n continue\n lineno = definition.lineno\n if lineno is None or lineno < 0:\n lineno = 0\n end_lineno = definition.end_lineno\n if end_lineno is None or end_lineno < 0:\n end_lineno = 0\n col_offset = definition.col_offset\n end_col_offset = definition.end_col_offset\n ret.append(Location(uris.from_fs_path(definition.source), Range\n ((lineno, col_offset), (end_lineno, end_col_offset))).to_dict()\n )\n return ret\n <mask token>\n\n def _threaded_code_format(self, text_document, options, monitor: IMonitor):\n from robotframework_ls.impl.formatting import create_text_edit_from_diff\n from robocorp_ls_core.lsp import TextDocumentItem\n import os.path\n from robotframework_ls.impl.robot_lsp_constants import OPTION_ROBOT_CODE_FORMATTER\n from robotframework_ls.impl.robot_lsp_constants import OPTION_ROBOT_CODE_FORMATTER_ROBOTIDY\n from robotframework_ls.impl.robot_lsp_constants import OPTION_ROBOT_CODE_FORMATTER_BUILTIN_TIDY\n text_document_item = TextDocumentItem(**text_document)\n text = text_document_item.text\n if not text:\n completion_context = self._create_completion_context(\n text_document_item.uri, 0, 0, monitor)\n if completion_context is None:\n return []\n text = completion_context.doc.source\n if not text:\n return []\n if options is None:\n options = {}\n tab_size = options.get('tabSize', 4)\n formatter = self._config.get_setting(OPTION_ROBOT_CODE_FORMATTER,\n str, OPTION_ROBOT_CODE_FORMATTER_BUILTIN_TIDY)\n if formatter not in (OPTION_ROBOT_CODE_FORMATTER_ROBOTIDY,\n OPTION_ROBOT_CODE_FORMATTER_BUILTIN_TIDY):\n log.critical(\n f'Code formatter invalid: {formatter}. 
Please select one of: {OPTION_ROBOT_CODE_FORMATTER_ROBOTIDY}, {OPTION_ROBOT_CODE_FORMATTER_BUILTIN_TIDY}.'\n )\n return []\n if formatter == OPTION_ROBOT_CODE_FORMATTER_BUILTIN_TIDY:\n from robotframework_ls.impl.formatting import robot_source_format\n new_contents = robot_source_format(text, space_count=tab_size)\n else:\n if not self._check_min_version((4, 0)):\n log.critical(\n f'To use the robotidy formatter, at least Robot Framework 4 is needed. Found: {self.m_version()}'\n )\n return []\n from robocorp_ls_core.robotidy_wrapper import robot_tidy_source_format\n ast = completion_context.get_ast()\n path = completion_context.doc.path\n dirname = '.'\n try:\n os.stat(path)\n except:\n ws = self._workspace\n if ws is not None:\n dirname = ws.root_path\n else:\n dirname = os.path.dirname(path)\n new_contents = robot_tidy_source_format(ast, dirname)\n if new_contents is None or new_contents == text:\n return []\n return [x.to_dict() for x in create_text_edit_from_diff(text,\n new_contents)]\n <mask token>\n <mask token>\n\n def _threaded_signature_help(self, doc_uri: str, line: int, col: int,\n monitor: IMonitor) ->Optional[dict]:\n from robotframework_ls.impl.signature_help import signature_help\n completion_context = self._create_completion_context(doc_uri, line,\n col, monitor)\n if completion_context is None:\n return None\n return signature_help(completion_context)\n\n def m_folding_range(self, doc_uri: str):\n func = partial(self._threaded_folding_range, doc_uri)\n func = require_monitor(func)\n return func\n\n def _threaded_folding_range(self, doc_uri: str, monitor: IMonitor) ->List[\n FoldingRangeTypedDict]:\n from robotframework_ls.impl.folding_range import folding_range\n completion_context = self._create_completion_context(doc_uri, 0, 0,\n monitor)\n if completion_context is None:\n return []\n return folding_range(completion_context)\n\n def m_code_lens(self, doc_uri: str):\n func = partial(self._threaded_code_lens, doc_uri)\n func = require_monitor(func)\n return func\n\n def _threaded_code_lens(self, doc_uri: str, monitor: IMonitor) ->List[\n CodeLensTypedDict]:\n from robotframework_ls.impl.code_lens import code_lens\n completion_context = self._create_completion_context(doc_uri, 0, 0,\n monitor)\n if completion_context is None:\n return []\n return code_lens(completion_context)\n\n def m_resolve_code_lens(self, **code_lens: CodeLensTypedDict):\n func = partial(self._threaded_resolve_code_lens, code_lens)\n func = require_monitor(func)\n return func\n <mask token>\n\n def m_document_symbol(self, doc_uri: str):\n func = partial(self._threaded_document_symbol, doc_uri)\n func = require_monitor(func)\n return func\n\n def _threaded_document_symbol(self, doc_uri: str, monitor: IMonitor\n ) ->List[DocumentSymbolTypedDict]:\n from robotframework_ls.impl.document_symbol import document_symbol\n completion_context = self._create_completion_context(doc_uri, 0, 0,\n monitor)\n if completion_context is None:\n return []\n return document_symbol(completion_context)\n <mask token>\n\n def _threaded_list_tests(self, doc_uri: str, monitor: IMonitor) ->List[\n ITestInfoTypedDict]:\n from robotframework_ls.impl.code_lens import list_tests\n completion_context = self._create_completion_context(doc_uri, 0, 0,\n monitor)\n if completion_context is None:\n return []\n return list_tests(completion_context)\n <mask token>\n\n def _threaded_hover(self, doc_uri: str, line, col, monitor: IMonitor\n ) ->Optional[HoverTypedDict]:\n from robotframework_ls.impl.hover import hover\n completion_context = 
self._create_completion_context(doc_uri, line,\n col, monitor)\n if completion_context is None:\n return None\n return hover(completion_context)\n\n def m_workspace_symbols(self, query: Optional[str]=None):\n func = partial(self._threaded_workspace_symbols, query)\n func = require_monitor(func)\n return func\n\n def _threaded_workspace_symbols(self, query: Optional[str], monitor:\n IMonitor) ->Optional[List[SymbolInformationTypedDict]]:\n from robotframework_ls.impl.workspace_symbols import workspace_symbols\n from robotframework_ls.impl.completion_context import BaseContext\n from robotframework_ls.impl.protocols import IRobotWorkspace\n from typing import cast\n workspace = self._workspace\n if not workspace:\n return []\n robot_workspace = cast(IRobotWorkspace, workspace)\n return workspace_symbols(query, BaseContext(workspace=\n robot_workspace, config=self.config, monitor=monitor))\n <mask token>\n\n def m_text_document__semantic_tokens__full(self, textDocument=None):\n func = partial(self.threaded_semantic_tokens_full, textDocument=\n textDocument)\n func = require_monitor(func)\n return func\n\n def threaded_semantic_tokens_full(self, textDocument:\n TextDocumentTypedDict, monitor: Optional[IMonitor]=None):\n from robotframework_ls.impl.semantic_tokens import semantic_tokens_full\n doc_uri = textDocument['uri']\n context = self._create_completion_context(doc_uri, -1, -1, monitor)\n if context is None:\n return {'resultId': None, 'data': []}\n return {'resultId': None, 'data': semantic_tokens_full(context)}\n\n def m_monaco_completions_from_code_full(self, prefix: str='', full_code:\n str='', position=PositionTypedDict, uri: str='', indent: str=''):\n func = partial(self.threaded_monaco_completions_from_code_full,\n prefix=prefix, full_code=full_code, position=position, uri=uri,\n indent=indent)\n func = require_monitor(func)\n return func\n\n def threaded_monaco_completions_from_code_full(self, prefix: str,\n full_code: str, position: PositionTypedDict, uri: str, indent: str,\n monitor: Optional[IMonitor]=None):\n from robotframework_ls.impl.robot_workspace import RobotDocument\n from robotframework_ls.impl.completion_context import CompletionContext\n from robocorp_ls_core.workspace import Document\n from robotframework_ls.impl import section_completions\n from robotframework_ls.impl import snippets_completions\n from robotframework_ls.server_api.monaco_conversions import convert_to_monaco_completion\n from robotframework_ls.impl.completion_context import CompletionType\n d = Document(uri, prefix)\n last_line, _last_col = d.get_last_line_col()\n line = last_line + position['line']\n col = position['character']\n col += len(indent)\n document = RobotDocument(uri, full_code)\n completion_context = CompletionContext(document, line, col, config=\n self.config, monitor=monitor, workspace=self.workspace)\n completion_context.type = CompletionType.shell\n completions = self._complete_from_completion_context(completion_context\n )\n completions.extend(section_completions.complete(completion_context))\n completions.extend(snippets_completions.complete(completion_context))\n return {'suggestions': [convert_to_monaco_completion(c, line_delta=\n last_line, col_delta=len(indent), uri=uri) for c in completions]}\n <mask token>\n\n def threaded_semantic_tokens_from_code_full(self, prefix: str,\n full_code: str, indent: str, monitor: Optional[IMonitor]=None):\n from robotframework_ls.impl.semantic_tokens import semantic_tokens_full_from_ast\n try:\n from robotframework_ls.impl.robot_workspace 
import RobotDocument\n doc = RobotDocument('')\n doc.source = full_code\n ast = doc.get_ast()\n data = semantic_tokens_full_from_ast(ast, monitor)\n if not prefix:\n return {'resultId': None, 'data': data}\n prefix_doc = RobotDocument('')\n prefix_doc.source = prefix\n last_line, last_col = prefix_doc.get_last_line_col()\n ints_iter = iter(data)\n line = 0\n col = 0\n new_data = []\n indent_len = len(indent)\n while True:\n try:\n line_delta = next(ints_iter)\n except StopIteration:\n break\n col_delta = next(ints_iter)\n token_len = next(ints_iter)\n token_type = next(ints_iter)\n token_modifier = next(ints_iter)\n line += line_delta\n if line_delta == 0:\n col += col_delta\n else:\n col = col_delta\n if line >= last_line:\n new_data.append(line - last_line)\n new_data.append(col_delta - indent_len)\n new_data.append(token_len)\n new_data.append(token_type)\n new_data.append(token_modifier)\n while True:\n try:\n line_delta = next(ints_iter)\n except StopIteration:\n break\n col_delta = next(ints_iter)\n token_len = next(ints_iter)\n token_type = next(ints_iter)\n token_modifier = next(ints_iter)\n new_data.append(line_delta)\n if line_delta > 0:\n new_data.append(col_delta - indent_len)\n else:\n new_data.append(col_delta)\n new_data.append(token_len)\n new_data.append(token_type)\n new_data.append(token_modifier)\n break\n return {'resultId': None, 'data': new_data}\n except:\n log.exception('Error computing semantic tokens from code.')\n return {'resultId': None, 'data': []}\n <mask token>\n\n def m_exit(self, **_kwargs):\n PythonLanguageServer.m_exit(self, **_kwargs)\n self.libspec_manager.dispose()\n",
"step-3": "<mask token>\n\n\nclass RobotFrameworkServerApi(PythonLanguageServer):\n <mask token>\n\n def __init__(self, read_from, write_to, libspec_manager=None, observer:\n Optional[IFSObserver]=None):\n from robotframework_ls.impl.libspec_manager import LibspecManager\n if libspec_manager is None:\n try:\n libspec_manager = LibspecManager(observer=observer)\n except:\n log.exception(\n 'Unable to properly initialize the LibspecManager.')\n raise\n self.libspec_manager = libspec_manager\n PythonLanguageServer.__init__(self, read_from, write_to)\n self._version = None\n self._next_time = partial(next, itertools.count(0))\n <mask token>\n <mask token>\n\n def _check_min_version(self, min_version):\n from robocorp_ls_core.basic import check_min_version\n version = self.m_version()\n return check_min_version(version, min_version)\n\n @overrides(PythonLanguageServer.m_workspace__did_change_configuration)\n def m_workspace__did_change_configuration(self, **kwargs):\n PythonLanguageServer.m_workspace__did_change_configuration(self, **\n kwargs)\n self.libspec_manager.config = self.config\n\n @overrides(PythonLanguageServer.lint)\n def lint(self, *args, **kwargs):\n pass\n\n @overrides(PythonLanguageServer.cancel_lint)\n def cancel_lint(self, *args, **kwargs):\n pass\n <mask token>\n\n @overrides(PythonLanguageServer._create_workspace)\n def _create_workspace(self, root_uri: str, fs_observer: IFSObserver,\n workspace_folders) ->IWorkspace:\n from robotframework_ls.impl.robot_workspace import RobotWorkspace\n return RobotWorkspace(root_uri, fs_observer, workspace_folders,\n libspec_manager=self.libspec_manager)\n\n def m_lint(self, doc_uri):\n if not self._check_min_version((3, 2)):\n from robocorp_ls_core.lsp import Error\n msg = (\n \"\"\"robotframework version (%s) too old for linting.\nPlease install a newer version and restart the language server.\"\"\"\n % (self.m_version(),))\n log.info(msg)\n return [Error(msg, (0, 0), (1, 0)).to_lsp_diagnostic()]\n func = partial(self._threaded_lint, doc_uri)\n func = require_monitor(func)\n return func\n <mask token>\n\n def m_complete_all(self, doc_uri, line, col):\n func = partial(self._threaded_complete_all, doc_uri, line, col)\n func = require_monitor(func)\n return func\n\n def _threaded_complete_all(self, doc_uri, line, col, monitor: IMonitor):\n completion_context = self._create_completion_context(doc_uri, line,\n col, monitor)\n if completion_context is None:\n return []\n return self._complete_from_completion_context(completion_context)\n\n def _complete_from_completion_context(self, completion_context):\n from robotframework_ls.impl import section_name_completions\n from robotframework_ls.impl import keyword_completions\n from robotframework_ls.impl import variable_completions\n from robotframework_ls.impl import dictionary_completions\n from robotframework_ls.impl import filesystem_section_completions\n from robotframework_ls.impl import keyword_parameter_completions\n from robotframework_ls.impl import auto_import_completions\n from robotframework_ls.impl.collect_keywords import collect_keyword_name_to_keyword_found\n from robotframework_ls.impl import ast_utils\n ret = section_name_completions.complete(completion_context)\n if not ret:\n ret.extend(filesystem_section_completions.complete(\n completion_context))\n if not ret:\n token_info = completion_context.get_current_token()\n if token_info is not None:\n token = ast_utils.get_keyword_name_token(token_info.node,\n token_info.token)\n if token is not None:\n keyword_name_to_keyword_found: 
Dict[str, List[\n IKeywordFound]\n ] = collect_keyword_name_to_keyword_found(\n completion_context)\n ret.extend(keyword_completions.complete(completion_context)\n )\n ret.extend(auto_import_completions.complete(\n completion_context, keyword_name_to_keyword_found))\n return ret\n if not ret:\n ret.extend(variable_completions.complete(completion_context))\n if not ret:\n ret.extend(dictionary_completions.complete(completion_context))\n if not ret:\n ret.extend(keyword_parameter_completions.complete(\n completion_context))\n return ret\n\n def m_section_name_complete(self, doc_uri, line, col):\n from robotframework_ls.impl import section_name_completions\n completion_context = self._create_completion_context(doc_uri, line,\n col, None)\n if completion_context is None:\n return []\n return section_name_completions.complete(completion_context)\n <mask token>\n\n def m_find_definition(self, doc_uri, line, col):\n func = partial(self._threaded_find_definition, doc_uri, line, col)\n func = require_monitor(func)\n return func\n\n def _threaded_find_definition(self, doc_uri, line, col, monitor\n ) ->Optional[list]:\n from robotframework_ls.impl.find_definition import find_definition\n import os.path\n from robocorp_ls_core.lsp import Location, Range\n from robocorp_ls_core import uris\n completion_context = self._create_completion_context(doc_uri, line,\n col, monitor)\n if completion_context is None:\n return None\n definitions = find_definition(completion_context)\n ret = []\n for definition in definitions:\n if not definition.source:\n log.info('Found definition with empty source (%s).', definition\n )\n continue\n if not os.path.exists(definition.source):\n log.info('Found definition: %s (but source does not exist).',\n definition)\n continue\n lineno = definition.lineno\n if lineno is None or lineno < 0:\n lineno = 0\n end_lineno = definition.end_lineno\n if end_lineno is None or end_lineno < 0:\n end_lineno = 0\n col_offset = definition.col_offset\n end_col_offset = definition.end_col_offset\n ret.append(Location(uris.from_fs_path(definition.source), Range\n ((lineno, col_offset), (end_lineno, end_col_offset))).to_dict()\n )\n return ret\n <mask token>\n\n def _threaded_code_format(self, text_document, options, monitor: IMonitor):\n from robotframework_ls.impl.formatting import create_text_edit_from_diff\n from robocorp_ls_core.lsp import TextDocumentItem\n import os.path\n from robotframework_ls.impl.robot_lsp_constants import OPTION_ROBOT_CODE_FORMATTER\n from robotframework_ls.impl.robot_lsp_constants import OPTION_ROBOT_CODE_FORMATTER_ROBOTIDY\n from robotframework_ls.impl.robot_lsp_constants import OPTION_ROBOT_CODE_FORMATTER_BUILTIN_TIDY\n text_document_item = TextDocumentItem(**text_document)\n text = text_document_item.text\n if not text:\n completion_context = self._create_completion_context(\n text_document_item.uri, 0, 0, monitor)\n if completion_context is None:\n return []\n text = completion_context.doc.source\n if not text:\n return []\n if options is None:\n options = {}\n tab_size = options.get('tabSize', 4)\n formatter = self._config.get_setting(OPTION_ROBOT_CODE_FORMATTER,\n str, OPTION_ROBOT_CODE_FORMATTER_BUILTIN_TIDY)\n if formatter not in (OPTION_ROBOT_CODE_FORMATTER_ROBOTIDY,\n OPTION_ROBOT_CODE_FORMATTER_BUILTIN_TIDY):\n log.critical(\n f'Code formatter invalid: {formatter}. 
Please select one of: {OPTION_ROBOT_CODE_FORMATTER_ROBOTIDY}, {OPTION_ROBOT_CODE_FORMATTER_BUILTIN_TIDY}.'\n )\n return []\n if formatter == OPTION_ROBOT_CODE_FORMATTER_BUILTIN_TIDY:\n from robotframework_ls.impl.formatting import robot_source_format\n new_contents = robot_source_format(text, space_count=tab_size)\n else:\n if not self._check_min_version((4, 0)):\n log.critical(\n f'To use the robotidy formatter, at least Robot Framework 4 is needed. Found: {self.m_version()}'\n )\n return []\n from robocorp_ls_core.robotidy_wrapper import robot_tidy_source_format\n ast = completion_context.get_ast()\n path = completion_context.doc.path\n dirname = '.'\n try:\n os.stat(path)\n except:\n ws = self._workspace\n if ws is not None:\n dirname = ws.root_path\n else:\n dirname = os.path.dirname(path)\n new_contents = robot_tidy_source_format(ast, dirname)\n if new_contents is None or new_contents == text:\n return []\n return [x.to_dict() for x in create_text_edit_from_diff(text,\n new_contents)]\n <mask token>\n <mask token>\n\n def _threaded_signature_help(self, doc_uri: str, line: int, col: int,\n monitor: IMonitor) ->Optional[dict]:\n from robotframework_ls.impl.signature_help import signature_help\n completion_context = self._create_completion_context(doc_uri, line,\n col, monitor)\n if completion_context is None:\n return None\n return signature_help(completion_context)\n\n def m_folding_range(self, doc_uri: str):\n func = partial(self._threaded_folding_range, doc_uri)\n func = require_monitor(func)\n return func\n\n def _threaded_folding_range(self, doc_uri: str, monitor: IMonitor) ->List[\n FoldingRangeTypedDict]:\n from robotframework_ls.impl.folding_range import folding_range\n completion_context = self._create_completion_context(doc_uri, 0, 0,\n monitor)\n if completion_context is None:\n return []\n return folding_range(completion_context)\n\n def m_code_lens(self, doc_uri: str):\n func = partial(self._threaded_code_lens, doc_uri)\n func = require_monitor(func)\n return func\n\n def _threaded_code_lens(self, doc_uri: str, monitor: IMonitor) ->List[\n CodeLensTypedDict]:\n from robotframework_ls.impl.code_lens import code_lens\n completion_context = self._create_completion_context(doc_uri, 0, 0,\n monitor)\n if completion_context is None:\n return []\n return code_lens(completion_context)\n\n def m_resolve_code_lens(self, **code_lens: CodeLensTypedDict):\n func = partial(self._threaded_resolve_code_lens, code_lens)\n func = require_monitor(func)\n return func\n\n def _threaded_resolve_code_lens(self, code_lens: CodeLensTypedDict,\n monitor: IMonitor) ->CodeLensTypedDict:\n from robotframework_ls.impl.code_lens import code_lens_resolve\n data = code_lens.get('data')\n if not isinstance(data, dict):\n return code_lens\n doc_uri = data.get('uri')\n completion_context = self._create_completion_context(doc_uri, 0, 0,\n monitor)\n if completion_context is None:\n return code_lens\n return code_lens_resolve(completion_context, code_lens)\n\n def m_document_symbol(self, doc_uri: str):\n func = partial(self._threaded_document_symbol, doc_uri)\n func = require_monitor(func)\n return func\n\n def _threaded_document_symbol(self, doc_uri: str, monitor: IMonitor\n ) ->List[DocumentSymbolTypedDict]:\n from robotframework_ls.impl.document_symbol import document_symbol\n completion_context = self._create_completion_context(doc_uri, 0, 0,\n monitor)\n if completion_context is None:\n return []\n return document_symbol(completion_context)\n <mask token>\n\n def _threaded_list_tests(self, doc_uri: str, 
monitor: IMonitor) ->List[\n ITestInfoTypedDict]:\n from robotframework_ls.impl.code_lens import list_tests\n completion_context = self._create_completion_context(doc_uri, 0, 0,\n monitor)\n if completion_context is None:\n return []\n return list_tests(completion_context)\n <mask token>\n\n def _threaded_hover(self, doc_uri: str, line, col, monitor: IMonitor\n ) ->Optional[HoverTypedDict]:\n from robotframework_ls.impl.hover import hover\n completion_context = self._create_completion_context(doc_uri, line,\n col, monitor)\n if completion_context is None:\n return None\n return hover(completion_context)\n\n def m_workspace_symbols(self, query: Optional[str]=None):\n func = partial(self._threaded_workspace_symbols, query)\n func = require_monitor(func)\n return func\n\n def _threaded_workspace_symbols(self, query: Optional[str], monitor:\n IMonitor) ->Optional[List[SymbolInformationTypedDict]]:\n from robotframework_ls.impl.workspace_symbols import workspace_symbols\n from robotframework_ls.impl.completion_context import BaseContext\n from robotframework_ls.impl.protocols import IRobotWorkspace\n from typing import cast\n workspace = self._workspace\n if not workspace:\n return []\n robot_workspace = cast(IRobotWorkspace, workspace)\n return workspace_symbols(query, BaseContext(workspace=\n robot_workspace, config=self.config, monitor=monitor))\n <mask token>\n\n def m_text_document__semantic_tokens__full(self, textDocument=None):\n func = partial(self.threaded_semantic_tokens_full, textDocument=\n textDocument)\n func = require_monitor(func)\n return func\n\n def threaded_semantic_tokens_full(self, textDocument:\n TextDocumentTypedDict, monitor: Optional[IMonitor]=None):\n from robotframework_ls.impl.semantic_tokens import semantic_tokens_full\n doc_uri = textDocument['uri']\n context = self._create_completion_context(doc_uri, -1, -1, monitor)\n if context is None:\n return {'resultId': None, 'data': []}\n return {'resultId': None, 'data': semantic_tokens_full(context)}\n\n def m_monaco_completions_from_code_full(self, prefix: str='', full_code:\n str='', position=PositionTypedDict, uri: str='', indent: str=''):\n func = partial(self.threaded_monaco_completions_from_code_full,\n prefix=prefix, full_code=full_code, position=position, uri=uri,\n indent=indent)\n func = require_monitor(func)\n return func\n\n def threaded_monaco_completions_from_code_full(self, prefix: str,\n full_code: str, position: PositionTypedDict, uri: str, indent: str,\n monitor: Optional[IMonitor]=None):\n from robotframework_ls.impl.robot_workspace import RobotDocument\n from robotframework_ls.impl.completion_context import CompletionContext\n from robocorp_ls_core.workspace import Document\n from robotframework_ls.impl import section_completions\n from robotframework_ls.impl import snippets_completions\n from robotframework_ls.server_api.monaco_conversions import convert_to_monaco_completion\n from robotframework_ls.impl.completion_context import CompletionType\n d = Document(uri, prefix)\n last_line, _last_col = d.get_last_line_col()\n line = last_line + position['line']\n col = position['character']\n col += len(indent)\n document = RobotDocument(uri, full_code)\n completion_context = CompletionContext(document, line, col, config=\n self.config, monitor=monitor, workspace=self.workspace)\n completion_context.type = CompletionType.shell\n completions = self._complete_from_completion_context(completion_context\n )\n completions.extend(section_completions.complete(completion_context))\n 
completions.extend(snippets_completions.complete(completion_context))\n return {'suggestions': [convert_to_monaco_completion(c, line_delta=\n last_line, col_delta=len(indent), uri=uri) for c in completions]}\n <mask token>\n\n def threaded_semantic_tokens_from_code_full(self, prefix: str,\n full_code: str, indent: str, monitor: Optional[IMonitor]=None):\n from robotframework_ls.impl.semantic_tokens import semantic_tokens_full_from_ast\n try:\n from robotframework_ls.impl.robot_workspace import RobotDocument\n doc = RobotDocument('')\n doc.source = full_code\n ast = doc.get_ast()\n data = semantic_tokens_full_from_ast(ast, monitor)\n if not prefix:\n return {'resultId': None, 'data': data}\n prefix_doc = RobotDocument('')\n prefix_doc.source = prefix\n last_line, last_col = prefix_doc.get_last_line_col()\n ints_iter = iter(data)\n line = 0\n col = 0\n new_data = []\n indent_len = len(indent)\n while True:\n try:\n line_delta = next(ints_iter)\n except StopIteration:\n break\n col_delta = next(ints_iter)\n token_len = next(ints_iter)\n token_type = next(ints_iter)\n token_modifier = next(ints_iter)\n line += line_delta\n if line_delta == 0:\n col += col_delta\n else:\n col = col_delta\n if line >= last_line:\n new_data.append(line - last_line)\n new_data.append(col_delta - indent_len)\n new_data.append(token_len)\n new_data.append(token_type)\n new_data.append(token_modifier)\n while True:\n try:\n line_delta = next(ints_iter)\n except StopIteration:\n break\n col_delta = next(ints_iter)\n token_len = next(ints_iter)\n token_type = next(ints_iter)\n token_modifier = next(ints_iter)\n new_data.append(line_delta)\n if line_delta > 0:\n new_data.append(col_delta - indent_len)\n else:\n new_data.append(col_delta)\n new_data.append(token_len)\n new_data.append(token_type)\n new_data.append(token_modifier)\n break\n return {'resultId': None, 'data': new_data}\n except:\n log.exception('Error computing semantic tokens from code.')\n return {'resultId': None, 'data': []}\n\n def m_shutdown(self, **_kwargs):\n PythonLanguageServer.m_shutdown(self, **_kwargs)\n self.libspec_manager.dispose()\n\n def m_exit(self, **_kwargs):\n PythonLanguageServer.m_exit(self, **_kwargs)\n self.libspec_manager.dispose()\n",
"step-4": "<mask token>\n\n\nclass RobotFrameworkServerApi(PythonLanguageServer):\n <mask token>\n\n def __init__(self, read_from, write_to, libspec_manager=None, observer:\n Optional[IFSObserver]=None):\n from robotframework_ls.impl.libspec_manager import LibspecManager\n if libspec_manager is None:\n try:\n libspec_manager = LibspecManager(observer=observer)\n except:\n log.exception(\n 'Unable to properly initialize the LibspecManager.')\n raise\n self.libspec_manager = libspec_manager\n PythonLanguageServer.__init__(self, read_from, write_to)\n self._version = None\n self._next_time = partial(next, itertools.count(0))\n\n @overrides(PythonLanguageServer._create_config)\n def _create_config(self) ->IConfig:\n from robotframework_ls.robot_config import RobotConfig\n return RobotConfig()\n\n def m_version(self):\n if self._version is not None:\n return self._version\n try:\n import robot\n except:\n log.exception(\"Unable to import 'robot'.\")\n version = 'NO_ROBOT'\n else:\n try:\n from robot import get_version\n version = get_version(naked=True)\n except:\n log.exception('Unable to get version.')\n version = 'N/A'\n self._version = version\n return self._version\n\n def _check_min_version(self, min_version):\n from robocorp_ls_core.basic import check_min_version\n version = self.m_version()\n return check_min_version(version, min_version)\n\n @overrides(PythonLanguageServer.m_workspace__did_change_configuration)\n def m_workspace__did_change_configuration(self, **kwargs):\n PythonLanguageServer.m_workspace__did_change_configuration(self, **\n kwargs)\n self.libspec_manager.config = self.config\n\n @overrides(PythonLanguageServer.lint)\n def lint(self, *args, **kwargs):\n pass\n\n @overrides(PythonLanguageServer.cancel_lint)\n def cancel_lint(self, *args, **kwargs):\n pass\n <mask token>\n\n @overrides(PythonLanguageServer._create_workspace)\n def _create_workspace(self, root_uri: str, fs_observer: IFSObserver,\n workspace_folders) ->IWorkspace:\n from robotframework_ls.impl.robot_workspace import RobotWorkspace\n return RobotWorkspace(root_uri, fs_observer, workspace_folders,\n libspec_manager=self.libspec_manager)\n\n def m_lint(self, doc_uri):\n if not self._check_min_version((3, 2)):\n from robocorp_ls_core.lsp import Error\n msg = (\n \"\"\"robotframework version (%s) too old for linting.\nPlease install a newer version and restart the language server.\"\"\"\n % (self.m_version(),))\n log.info(msg)\n return [Error(msg, (0, 0), (1, 0)).to_lsp_diagnostic()]\n func = partial(self._threaded_lint, doc_uri)\n func = require_monitor(func)\n return func\n <mask token>\n\n def m_complete_all(self, doc_uri, line, col):\n func = partial(self._threaded_complete_all, doc_uri, line, col)\n func = require_monitor(func)\n return func\n\n def _threaded_complete_all(self, doc_uri, line, col, monitor: IMonitor):\n completion_context = self._create_completion_context(doc_uri, line,\n col, monitor)\n if completion_context is None:\n return []\n return self._complete_from_completion_context(completion_context)\n\n def _complete_from_completion_context(self, completion_context):\n from robotframework_ls.impl import section_name_completions\n from robotframework_ls.impl import keyword_completions\n from robotframework_ls.impl import variable_completions\n from robotframework_ls.impl import dictionary_completions\n from robotframework_ls.impl import filesystem_section_completions\n from robotframework_ls.impl import keyword_parameter_completions\n from robotframework_ls.impl import auto_import_completions\n 
from robotframework_ls.impl.collect_keywords import collect_keyword_name_to_keyword_found\n from robotframework_ls.impl import ast_utils\n ret = section_name_completions.complete(completion_context)\n if not ret:\n ret.extend(filesystem_section_completions.complete(\n completion_context))\n if not ret:\n token_info = completion_context.get_current_token()\n if token_info is not None:\n token = ast_utils.get_keyword_name_token(token_info.node,\n token_info.token)\n if token is not None:\n keyword_name_to_keyword_found: Dict[str, List[\n IKeywordFound]\n ] = collect_keyword_name_to_keyword_found(\n completion_context)\n ret.extend(keyword_completions.complete(completion_context)\n )\n ret.extend(auto_import_completions.complete(\n completion_context, keyword_name_to_keyword_found))\n return ret\n if not ret:\n ret.extend(variable_completions.complete(completion_context))\n if not ret:\n ret.extend(dictionary_completions.complete(completion_context))\n if not ret:\n ret.extend(keyword_parameter_completions.complete(\n completion_context))\n return ret\n\n def m_section_name_complete(self, doc_uri, line, col):\n from robotframework_ls.impl import section_name_completions\n completion_context = self._create_completion_context(doc_uri, line,\n col, None)\n if completion_context is None:\n return []\n return section_name_completions.complete(completion_context)\n\n def m_keyword_complete(self, doc_uri, line, col):\n from robotframework_ls.impl import keyword_completions\n completion_context = self._create_completion_context(doc_uri, line,\n col, None)\n if completion_context is None:\n return []\n return keyword_completions.complete(completion_context)\n\n def m_find_definition(self, doc_uri, line, col):\n func = partial(self._threaded_find_definition, doc_uri, line, col)\n func = require_monitor(func)\n return func\n\n def _threaded_find_definition(self, doc_uri, line, col, monitor\n ) ->Optional[list]:\n from robotframework_ls.impl.find_definition import find_definition\n import os.path\n from robocorp_ls_core.lsp import Location, Range\n from robocorp_ls_core import uris\n completion_context = self._create_completion_context(doc_uri, line,\n col, monitor)\n if completion_context is None:\n return None\n definitions = find_definition(completion_context)\n ret = []\n for definition in definitions:\n if not definition.source:\n log.info('Found definition with empty source (%s).', definition\n )\n continue\n if not os.path.exists(definition.source):\n log.info('Found definition: %s (but source does not exist).',\n definition)\n continue\n lineno = definition.lineno\n if lineno is None or lineno < 0:\n lineno = 0\n end_lineno = definition.end_lineno\n if end_lineno is None or end_lineno < 0:\n end_lineno = 0\n col_offset = definition.col_offset\n end_col_offset = definition.end_col_offset\n ret.append(Location(uris.from_fs_path(definition.source), Range\n ((lineno, col_offset), (end_lineno, end_col_offset))).to_dict()\n )\n return ret\n\n def m_code_format(self, text_document, options):\n func = partial(self._threaded_code_format, text_document, options)\n func = require_monitor(func)\n return func\n\n def _threaded_code_format(self, text_document, options, monitor: IMonitor):\n from robotframework_ls.impl.formatting import create_text_edit_from_diff\n from robocorp_ls_core.lsp import TextDocumentItem\n import os.path\n from robotframework_ls.impl.robot_lsp_constants import OPTION_ROBOT_CODE_FORMATTER\n from robotframework_ls.impl.robot_lsp_constants import OPTION_ROBOT_CODE_FORMATTER_ROBOTIDY\n 
from robotframework_ls.impl.robot_lsp_constants import OPTION_ROBOT_CODE_FORMATTER_BUILTIN_TIDY\n text_document_item = TextDocumentItem(**text_document)\n text = text_document_item.text\n if not text:\n completion_context = self._create_completion_context(\n text_document_item.uri, 0, 0, monitor)\n if completion_context is None:\n return []\n text = completion_context.doc.source\n if not text:\n return []\n if options is None:\n options = {}\n tab_size = options.get('tabSize', 4)\n formatter = self._config.get_setting(OPTION_ROBOT_CODE_FORMATTER,\n str, OPTION_ROBOT_CODE_FORMATTER_BUILTIN_TIDY)\n if formatter not in (OPTION_ROBOT_CODE_FORMATTER_ROBOTIDY,\n OPTION_ROBOT_CODE_FORMATTER_BUILTIN_TIDY):\n log.critical(\n f'Code formatter invalid: {formatter}. Please select one of: {OPTION_ROBOT_CODE_FORMATTER_ROBOTIDY}, {OPTION_ROBOT_CODE_FORMATTER_BUILTIN_TIDY}.'\n )\n return []\n if formatter == OPTION_ROBOT_CODE_FORMATTER_BUILTIN_TIDY:\n from robotframework_ls.impl.formatting import robot_source_format\n new_contents = robot_source_format(text, space_count=tab_size)\n else:\n if not self._check_min_version((4, 0)):\n log.critical(\n f'To use the robotidy formatter, at least Robot Framework 4 is needed. Found: {self.m_version()}'\n )\n return []\n from robocorp_ls_core.robotidy_wrapper import robot_tidy_source_format\n ast = completion_context.get_ast()\n path = completion_context.doc.path\n dirname = '.'\n try:\n os.stat(path)\n except:\n ws = self._workspace\n if ws is not None:\n dirname = ws.root_path\n else:\n dirname = os.path.dirname(path)\n new_contents = robot_tidy_source_format(ast, dirname)\n if new_contents is None or new_contents == text:\n return []\n return [x.to_dict() for x in create_text_edit_from_diff(text,\n new_contents)]\n <mask token>\n\n def m_signature_help(self, doc_uri: str, line: int, col: int):\n func = partial(self._threaded_signature_help, doc_uri, line, col)\n func = require_monitor(func)\n return func\n\n def _threaded_signature_help(self, doc_uri: str, line: int, col: int,\n monitor: IMonitor) ->Optional[dict]:\n from robotframework_ls.impl.signature_help import signature_help\n completion_context = self._create_completion_context(doc_uri, line,\n col, monitor)\n if completion_context is None:\n return None\n return signature_help(completion_context)\n\n def m_folding_range(self, doc_uri: str):\n func = partial(self._threaded_folding_range, doc_uri)\n func = require_monitor(func)\n return func\n\n def _threaded_folding_range(self, doc_uri: str, monitor: IMonitor) ->List[\n FoldingRangeTypedDict]:\n from robotframework_ls.impl.folding_range import folding_range\n completion_context = self._create_completion_context(doc_uri, 0, 0,\n monitor)\n if completion_context is None:\n return []\n return folding_range(completion_context)\n\n def m_code_lens(self, doc_uri: str):\n func = partial(self._threaded_code_lens, doc_uri)\n func = require_monitor(func)\n return func\n\n def _threaded_code_lens(self, doc_uri: str, monitor: IMonitor) ->List[\n CodeLensTypedDict]:\n from robotframework_ls.impl.code_lens import code_lens\n completion_context = self._create_completion_context(doc_uri, 0, 0,\n monitor)\n if completion_context is None:\n return []\n return code_lens(completion_context)\n\n def m_resolve_code_lens(self, **code_lens: CodeLensTypedDict):\n func = partial(self._threaded_resolve_code_lens, code_lens)\n func = require_monitor(func)\n return func\n\n def _threaded_resolve_code_lens(self, code_lens: CodeLensTypedDict,\n monitor: IMonitor) ->CodeLensTypedDict:\n 
from robotframework_ls.impl.code_lens import code_lens_resolve\n data = code_lens.get('data')\n if not isinstance(data, dict):\n return code_lens\n doc_uri = data.get('uri')\n completion_context = self._create_completion_context(doc_uri, 0, 0,\n monitor)\n if completion_context is None:\n return code_lens\n return code_lens_resolve(completion_context, code_lens)\n\n def m_document_symbol(self, doc_uri: str):\n func = partial(self._threaded_document_symbol, doc_uri)\n func = require_monitor(func)\n return func\n\n def _threaded_document_symbol(self, doc_uri: str, monitor: IMonitor\n ) ->List[DocumentSymbolTypedDict]:\n from robotframework_ls.impl.document_symbol import document_symbol\n completion_context = self._create_completion_context(doc_uri, 0, 0,\n monitor)\n if completion_context is None:\n return []\n return document_symbol(completion_context)\n\n def m_list_tests(self, doc_uri: str):\n func = partial(self._threaded_list_tests, doc_uri)\n func = require_monitor(func)\n return func\n\n def _threaded_list_tests(self, doc_uri: str, monitor: IMonitor) ->List[\n ITestInfoTypedDict]:\n from robotframework_ls.impl.code_lens import list_tests\n completion_context = self._create_completion_context(doc_uri, 0, 0,\n monitor)\n if completion_context is None:\n return []\n return list_tests(completion_context)\n\n def m_hover(self, doc_uri: str, line: int, col: int):\n func = partial(self._threaded_hover, doc_uri, line, col)\n func = require_monitor(func)\n return func\n\n def _threaded_hover(self, doc_uri: str, line, col, monitor: IMonitor\n ) ->Optional[HoverTypedDict]:\n from robotframework_ls.impl.hover import hover\n completion_context = self._create_completion_context(doc_uri, line,\n col, monitor)\n if completion_context is None:\n return None\n return hover(completion_context)\n\n def m_workspace_symbols(self, query: Optional[str]=None):\n func = partial(self._threaded_workspace_symbols, query)\n func = require_monitor(func)\n return func\n\n def _threaded_workspace_symbols(self, query: Optional[str], monitor:\n IMonitor) ->Optional[List[SymbolInformationTypedDict]]:\n from robotframework_ls.impl.workspace_symbols import workspace_symbols\n from robotframework_ls.impl.completion_context import BaseContext\n from robotframework_ls.impl.protocols import IRobotWorkspace\n from typing import cast\n workspace = self._workspace\n if not workspace:\n return []\n robot_workspace = cast(IRobotWorkspace, workspace)\n return workspace_symbols(query, BaseContext(workspace=\n robot_workspace, config=self.config, monitor=monitor))\n\n def m_text_document__semantic_tokens__range(self, textDocument=None,\n range=None):\n raise RuntimeError('Not currently implemented!')\n\n def m_text_document__semantic_tokens__full(self, textDocument=None):\n func = partial(self.threaded_semantic_tokens_full, textDocument=\n textDocument)\n func = require_monitor(func)\n return func\n\n def threaded_semantic_tokens_full(self, textDocument:\n TextDocumentTypedDict, monitor: Optional[IMonitor]=None):\n from robotframework_ls.impl.semantic_tokens import semantic_tokens_full\n doc_uri = textDocument['uri']\n context = self._create_completion_context(doc_uri, -1, -1, monitor)\n if context is None:\n return {'resultId': None, 'data': []}\n return {'resultId': None, 'data': semantic_tokens_full(context)}\n\n def m_monaco_completions_from_code_full(self, prefix: str='', full_code:\n str='', position=PositionTypedDict, uri: str='', indent: str=''):\n func = partial(self.threaded_monaco_completions_from_code_full,\n 
prefix=prefix, full_code=full_code, position=position, uri=uri,\n indent=indent)\n func = require_monitor(func)\n return func\n\n def threaded_monaco_completions_from_code_full(self, prefix: str,\n full_code: str, position: PositionTypedDict, uri: str, indent: str,\n monitor: Optional[IMonitor]=None):\n from robotframework_ls.impl.robot_workspace import RobotDocument\n from robotframework_ls.impl.completion_context import CompletionContext\n from robocorp_ls_core.workspace import Document\n from robotframework_ls.impl import section_completions\n from robotframework_ls.impl import snippets_completions\n from robotframework_ls.server_api.monaco_conversions import convert_to_monaco_completion\n from robotframework_ls.impl.completion_context import CompletionType\n d = Document(uri, prefix)\n last_line, _last_col = d.get_last_line_col()\n line = last_line + position['line']\n col = position['character']\n col += len(indent)\n document = RobotDocument(uri, full_code)\n completion_context = CompletionContext(document, line, col, config=\n self.config, monitor=monitor, workspace=self.workspace)\n completion_context.type = CompletionType.shell\n completions = self._complete_from_completion_context(completion_context\n )\n completions.extend(section_completions.complete(completion_context))\n completions.extend(snippets_completions.complete(completion_context))\n return {'suggestions': [convert_to_monaco_completion(c, line_delta=\n last_line, col_delta=len(indent), uri=uri) for c in completions]}\n\n def m_semantic_tokens_from_code_full(self, prefix: str='', full_code:\n str='', indent: str=''):\n func = partial(self.threaded_semantic_tokens_from_code_full, prefix\n =prefix, full_code=full_code, indent=indent)\n func = require_monitor(func)\n return func\n\n def threaded_semantic_tokens_from_code_full(self, prefix: str,\n full_code: str, indent: str, monitor: Optional[IMonitor]=None):\n from robotframework_ls.impl.semantic_tokens import semantic_tokens_full_from_ast\n try:\n from robotframework_ls.impl.robot_workspace import RobotDocument\n doc = RobotDocument('')\n doc.source = full_code\n ast = doc.get_ast()\n data = semantic_tokens_full_from_ast(ast, monitor)\n if not prefix:\n return {'resultId': None, 'data': data}\n prefix_doc = RobotDocument('')\n prefix_doc.source = prefix\n last_line, last_col = prefix_doc.get_last_line_col()\n ints_iter = iter(data)\n line = 0\n col = 0\n new_data = []\n indent_len = len(indent)\n while True:\n try:\n line_delta = next(ints_iter)\n except StopIteration:\n break\n col_delta = next(ints_iter)\n token_len = next(ints_iter)\n token_type = next(ints_iter)\n token_modifier = next(ints_iter)\n line += line_delta\n if line_delta == 0:\n col += col_delta\n else:\n col = col_delta\n if line >= last_line:\n new_data.append(line - last_line)\n new_data.append(col_delta - indent_len)\n new_data.append(token_len)\n new_data.append(token_type)\n new_data.append(token_modifier)\n while True:\n try:\n line_delta = next(ints_iter)\n except StopIteration:\n break\n col_delta = next(ints_iter)\n token_len = next(ints_iter)\n token_type = next(ints_iter)\n token_modifier = next(ints_iter)\n new_data.append(line_delta)\n if line_delta > 0:\n new_data.append(col_delta - indent_len)\n else:\n new_data.append(col_delta)\n new_data.append(token_len)\n new_data.append(token_type)\n new_data.append(token_modifier)\n break\n return {'resultId': None, 'data': new_data}\n except:\n log.exception('Error computing semantic tokens from code.')\n return {'resultId': None, 'data': []}\n\n 
def m_shutdown(self, **_kwargs):\n PythonLanguageServer.m_shutdown(self, **_kwargs)\n self.libspec_manager.dispose()\n\n def m_exit(self, **_kwargs):\n PythonLanguageServer.m_exit(self, **_kwargs)\n self.libspec_manager.dispose()\n",
"step-5": "from robocorp_ls_core.python_ls import PythonLanguageServer\nfrom robocorp_ls_core.basic import overrides\nfrom robocorp_ls_core.robotframework_log import get_logger\nfrom typing import Optional, List, Dict\nfrom robocorp_ls_core.protocols import IConfig, IMonitor, ITestInfoTypedDict, IWorkspace\nfrom functools import partial\nfrom robocorp_ls_core.jsonrpc.endpoint import require_monitor\nfrom robocorp_ls_core.lsp import (\n SymbolInformationTypedDict,\n FoldingRangeTypedDict,\n HoverTypedDict,\n TextDocumentTypedDict,\n CodeLensTypedDict,\n DocumentSymbolTypedDict,\n PositionTypedDict,\n)\nfrom robotframework_ls.impl.protocols import IKeywordFound\nfrom robocorp_ls_core.watchdog_wrapper import IFSObserver\nimport itertools\n\n\nlog = get_logger(__name__)\n\n\nclass RobotFrameworkServerApi(PythonLanguageServer):\n \"\"\"\n This is a custom server. It uses the same message-format used in the language\n server but with custom messages (i.e.: this is not the language server, but\n an API to use the bits we need from robotframework in a separate process).\n \"\"\"\n\n def __init__(\n self,\n read_from,\n write_to,\n libspec_manager=None,\n observer: Optional[IFSObserver] = None,\n ):\n from robotframework_ls.impl.libspec_manager import LibspecManager\n\n if libspec_manager is None:\n try:\n libspec_manager = LibspecManager(observer=observer)\n except:\n log.exception(\"Unable to properly initialize the LibspecManager.\")\n raise\n\n self.libspec_manager = libspec_manager\n PythonLanguageServer.__init__(self, read_from, write_to)\n self._version = None\n self._next_time = partial(next, itertools.count(0))\n\n @overrides(PythonLanguageServer._create_config)\n def _create_config(self) -> IConfig:\n from robotframework_ls.robot_config import RobotConfig\n\n return RobotConfig()\n\n def m_version(self):\n if self._version is not None:\n return self._version\n try:\n import robot # noqa\n except:\n log.exception(\"Unable to import 'robot'.\")\n version = \"NO_ROBOT\"\n else:\n try:\n from robot import get_version\n\n version = get_version(naked=True)\n except:\n log.exception(\"Unable to get version.\")\n version = \"N/A\" # Too old?\n self._version = version\n return self._version\n\n def _check_min_version(self, min_version):\n from robocorp_ls_core.basic import check_min_version\n\n version = self.m_version()\n return check_min_version(version, min_version)\n\n @overrides(PythonLanguageServer.m_workspace__did_change_configuration)\n def m_workspace__did_change_configuration(self, **kwargs):\n PythonLanguageServer.m_workspace__did_change_configuration(self, **kwargs)\n self.libspec_manager.config = self.config\n\n @overrides(PythonLanguageServer.lint)\n def lint(self, *args, **kwargs):\n pass # No-op for this server.\n\n @overrides(PythonLanguageServer.cancel_lint)\n def cancel_lint(self, *args, **kwargs):\n pass # No-op for this server.\n\n @overrides(PythonLanguageServer._obtain_fs_observer)\n def _obtain_fs_observer(self) -> IFSObserver:\n return self.libspec_manager.fs_observer\n\n @overrides(PythonLanguageServer._create_workspace)\n def _create_workspace(\n self, root_uri: str, fs_observer: IFSObserver, workspace_folders\n ) -> IWorkspace:\n from robotframework_ls.impl.robot_workspace import RobotWorkspace\n\n return RobotWorkspace(\n root_uri,\n fs_observer,\n workspace_folders,\n libspec_manager=self.libspec_manager,\n )\n\n def m_lint(self, doc_uri):\n if not self._check_min_version((3, 2)):\n from robocorp_ls_core.lsp import Error\n\n msg = (\n \"robotframework version (%s) too 
old for linting.\\n\"\n \"Please install a newer version and restart the language server.\"\n % (self.m_version(),)\n )\n log.info(msg)\n return [Error(msg, (0, 0), (1, 0)).to_lsp_diagnostic()]\n\n func = partial(self._threaded_lint, doc_uri)\n func = require_monitor(func)\n return func\n\n def _threaded_lint(self, doc_uri, monitor: IMonitor):\n from robocorp_ls_core.jsonrpc.exceptions import JsonRpcRequestCancelled\n from robotframework_ls.impl.robot_lsp_constants import (\n OPTION_ROBOT_LINT_ROBOCOP_ENABLED,\n )\n from robocorp_ls_core import uris\n from robocorp_ls_core.lsp import Error\n\n try:\n from robotframework_ls.impl.ast_utils import collect_errors\n from robotframework_ls.impl import code_analysis\n import os.path\n\n log.debug(\"Lint: starting (in thread).\")\n\n completion_context = self._create_completion_context(doc_uri, 0, 0, monitor)\n if completion_context is None:\n return []\n\n config = completion_context.config\n robocop_enabled = config is None or config.get_setting(\n OPTION_ROBOT_LINT_ROBOCOP_ENABLED, bool, False\n )\n\n ast = completion_context.get_ast()\n source = completion_context.doc.source\n monitor.check_cancelled()\n errors = collect_errors(ast)\n log.debug(\"Collected AST errors (in thread): %s\", len(errors))\n monitor.check_cancelled()\n analysis_errors = code_analysis.collect_analysis_errors(completion_context)\n monitor.check_cancelled()\n log.debug(\"Collected analysis errors (in thread): %s\", len(analysis_errors))\n errors.extend(analysis_errors)\n\n lsp_diagnostics = [error.to_lsp_diagnostic() for error in errors]\n\n try:\n if robocop_enabled:\n from robocorp_ls_core.robocop_wrapper import (\n collect_robocop_diagnostics,\n )\n\n workspace = completion_context.workspace\n if workspace is not None:\n project_root = workspace.root_path\n else:\n project_root = os.path.abspath(\".\")\n\n monitor.check_cancelled()\n lsp_diagnostics.extend(\n collect_robocop_diagnostics(\n project_root, ast, uris.to_fs_path(doc_uri), source\n )\n )\n except Exception as e:\n log.exception(\n \"Error collecting Robocop errors (possibly an unsupported Robocop version is installed).\"\n )\n lsp_diagnostics.append(\n Error(\n f\"Error collecting Robocop errors: {e}\", (0, 0), (1, 0)\n ).to_lsp_diagnostic()\n )\n\n return lsp_diagnostics\n except JsonRpcRequestCancelled:\n raise JsonRpcRequestCancelled(\"Lint cancelled (inside lint)\")\n except Exception as e:\n log.exception(\"Error collecting errors.\")\n ret = [\n Error(\n f\"Error collecting Robocop errors: {e}\", (0, 0), (1, 0)\n ).to_lsp_diagnostic()\n ]\n return ret\n\n def m_complete_all(self, doc_uri, line, col):\n func = partial(self._threaded_complete_all, doc_uri, line, col)\n func = require_monitor(func)\n return func\n\n def _threaded_complete_all(self, doc_uri, line, col, monitor: IMonitor):\n completion_context = self._create_completion_context(\n doc_uri, line, col, monitor\n )\n if completion_context is None:\n return []\n\n return self._complete_from_completion_context(completion_context)\n\n def _complete_from_completion_context(self, completion_context):\n from robotframework_ls.impl import section_name_completions\n from robotframework_ls.impl import keyword_completions\n from robotframework_ls.impl import variable_completions\n from robotframework_ls.impl import dictionary_completions\n from robotframework_ls.impl import filesystem_section_completions\n from robotframework_ls.impl import keyword_parameter_completions\n from robotframework_ls.impl import auto_import_completions\n from 
robotframework_ls.impl.collect_keywords import (\n collect_keyword_name_to_keyword_found,\n )\n from robotframework_ls.impl import ast_utils\n\n ret = section_name_completions.complete(completion_context)\n if not ret:\n ret.extend(filesystem_section_completions.complete(completion_context))\n\n if not ret:\n token_info = completion_context.get_current_token()\n if token_info is not None:\n token = ast_utils.get_keyword_name_token(\n token_info.node, token_info.token\n )\n if token is not None:\n keyword_name_to_keyword_found: Dict[\n str, List[IKeywordFound]\n ] = collect_keyword_name_to_keyword_found(completion_context)\n ret.extend(keyword_completions.complete(completion_context))\n ret.extend(\n auto_import_completions.complete(\n completion_context, keyword_name_to_keyword_found\n )\n )\n return ret\n\n if not ret:\n ret.extend(variable_completions.complete(completion_context))\n\n if not ret:\n ret.extend(dictionary_completions.complete(completion_context))\n\n if not ret:\n ret.extend(keyword_parameter_completions.complete(completion_context))\n\n return ret\n\n def m_section_name_complete(self, doc_uri, line, col):\n from robotframework_ls.impl import section_name_completions\n\n completion_context = self._create_completion_context(doc_uri, line, col, None)\n if completion_context is None:\n return []\n\n return section_name_completions.complete(completion_context)\n\n def m_keyword_complete(self, doc_uri, line, col):\n from robotframework_ls.impl import keyword_completions\n\n completion_context = self._create_completion_context(doc_uri, line, col, None)\n if completion_context is None:\n return []\n return keyword_completions.complete(completion_context)\n\n def m_find_definition(self, doc_uri, line, col):\n func = partial(self._threaded_find_definition, doc_uri, line, col)\n func = require_monitor(func)\n return func\n\n def _threaded_find_definition(self, doc_uri, line, col, monitor) -> Optional[list]:\n from robotframework_ls.impl.find_definition import find_definition\n import os.path\n from robocorp_ls_core.lsp import Location, Range\n from robocorp_ls_core import uris\n\n completion_context = self._create_completion_context(\n doc_uri, line, col, monitor\n )\n if completion_context is None:\n return None\n definitions = find_definition(completion_context)\n ret = []\n for definition in definitions:\n if not definition.source:\n log.info(\"Found definition with empty source (%s).\", definition)\n continue\n\n if not os.path.exists(definition.source):\n log.info(\n \"Found definition: %s (but source does not exist).\", definition\n )\n continue\n\n lineno = definition.lineno\n if lineno is None or lineno < 0:\n lineno = 0\n\n end_lineno = definition.end_lineno\n if end_lineno is None or end_lineno < 0:\n end_lineno = 0\n\n col_offset = definition.col_offset\n end_col_offset = definition.end_col_offset\n\n ret.append(\n Location(\n uris.from_fs_path(definition.source),\n Range((lineno, col_offset), (end_lineno, end_col_offset)),\n ).to_dict()\n )\n return ret\n\n def m_code_format(self, text_document, options):\n func = partial(self._threaded_code_format, text_document, options)\n func = require_monitor(func)\n return func\n\n def _threaded_code_format(self, text_document, options, monitor: IMonitor):\n from robotframework_ls.impl.formatting import create_text_edit_from_diff\n from robocorp_ls_core.lsp import TextDocumentItem\n import os.path\n from robotframework_ls.impl.robot_lsp_constants import (\n OPTION_ROBOT_CODE_FORMATTER,\n )\n from 
robotframework_ls.impl.robot_lsp_constants import (\n OPTION_ROBOT_CODE_FORMATTER_ROBOTIDY,\n )\n from robotframework_ls.impl.robot_lsp_constants import (\n OPTION_ROBOT_CODE_FORMATTER_BUILTIN_TIDY,\n )\n\n text_document_item = TextDocumentItem(**text_document)\n text = text_document_item.text\n if not text:\n completion_context = self._create_completion_context(\n text_document_item.uri, 0, 0, monitor\n )\n if completion_context is None:\n return []\n text = completion_context.doc.source\n\n if not text:\n return []\n\n if options is None:\n options = {}\n tab_size = options.get(\"tabSize\", 4)\n\n # Default for now is the builtin. This will probably be changed in the future.\n formatter = self._config.get_setting(\n OPTION_ROBOT_CODE_FORMATTER, str, OPTION_ROBOT_CODE_FORMATTER_BUILTIN_TIDY\n )\n if formatter not in (\n OPTION_ROBOT_CODE_FORMATTER_ROBOTIDY,\n OPTION_ROBOT_CODE_FORMATTER_BUILTIN_TIDY,\n ):\n log.critical(\n f\"Code formatter invalid: {formatter}. Please select one of: {OPTION_ROBOT_CODE_FORMATTER_ROBOTIDY}, {OPTION_ROBOT_CODE_FORMATTER_BUILTIN_TIDY}.\"\n )\n return []\n\n if formatter == OPTION_ROBOT_CODE_FORMATTER_BUILTIN_TIDY:\n from robotframework_ls.impl.formatting import robot_source_format\n\n new_contents = robot_source_format(text, space_count=tab_size)\n\n else:\n if not self._check_min_version((4, 0)):\n log.critical(\n f\"To use the robotidy formatter, at least Robot Framework 4 is needed. Found: {self.m_version()}\"\n )\n return []\n\n from robocorp_ls_core.robotidy_wrapper import robot_tidy_source_format\n\n ast = completion_context.get_ast()\n path = completion_context.doc.path\n dirname = \".\"\n try:\n os.stat(path)\n except:\n # It doesn't exist\n ws = self._workspace\n if ws is not None:\n dirname = ws.root_path\n else:\n dirname = os.path.dirname(path)\n\n new_contents = robot_tidy_source_format(ast, dirname)\n\n if new_contents is None or new_contents == text:\n return []\n return [x.to_dict() for x in create_text_edit_from_diff(text, new_contents)]\n\n def _create_completion_context(\n self, doc_uri, line, col, monitor: Optional[IMonitor]\n ):\n from robotframework_ls.impl.completion_context import CompletionContext\n\n if not self._check_min_version((3, 2)):\n log.info(\"robotframework version too old.\")\n return None\n workspace = self.workspace\n if not workspace:\n log.info(\"Workspace still not initialized.\")\n return None\n\n document = workspace.get_document(doc_uri, accept_from_file=True)\n if document is None:\n log.info(\"Unable to get document for uri: %s.\", doc_uri)\n return None\n return CompletionContext(\n document,\n line,\n col,\n workspace=workspace,\n config=self.config,\n monitor=monitor,\n )\n\n def m_signature_help(self, doc_uri: str, line: int, col: int):\n func = partial(self._threaded_signature_help, doc_uri, line, col)\n func = require_monitor(func)\n return func\n\n def _threaded_signature_help(\n self, doc_uri: str, line: int, col: int, monitor: IMonitor\n ) -> Optional[dict]:\n from robotframework_ls.impl.signature_help import signature_help\n\n completion_context = self._create_completion_context(\n doc_uri, line, col, monitor\n )\n if completion_context is None:\n return None\n\n return signature_help(completion_context)\n\n def m_folding_range(self, doc_uri: str):\n func = partial(self._threaded_folding_range, doc_uri)\n func = require_monitor(func)\n return func\n\n def _threaded_folding_range(\n self, doc_uri: str, monitor: IMonitor\n ) -> List[FoldingRangeTypedDict]:\n from robotframework_ls.impl.folding_range 
import folding_range\n\n completion_context = self._create_completion_context(doc_uri, 0, 0, monitor)\n if completion_context is None:\n return []\n\n return folding_range(completion_context)\n\n def m_code_lens(self, doc_uri: str):\n func = partial(self._threaded_code_lens, doc_uri)\n func = require_monitor(func)\n return func\n\n def _threaded_code_lens(\n self, doc_uri: str, monitor: IMonitor\n ) -> List[CodeLensTypedDict]:\n from robotframework_ls.impl.code_lens import code_lens\n\n completion_context = self._create_completion_context(doc_uri, 0, 0, monitor)\n if completion_context is None:\n return []\n\n return code_lens(completion_context)\n\n def m_resolve_code_lens(self, **code_lens: CodeLensTypedDict):\n func = partial(self._threaded_resolve_code_lens, code_lens)\n func = require_monitor(func)\n return func\n\n def _threaded_resolve_code_lens(\n self, code_lens: CodeLensTypedDict, monitor: IMonitor\n ) -> CodeLensTypedDict:\n from robotframework_ls.impl.code_lens import code_lens_resolve\n\n data = code_lens.get(\"data\")\n if not isinstance(data, dict):\n return code_lens\n\n doc_uri = data.get(\"uri\")\n completion_context = self._create_completion_context(doc_uri, 0, 0, monitor)\n if completion_context is None:\n return code_lens\n\n return code_lens_resolve(completion_context, code_lens)\n\n def m_document_symbol(self, doc_uri: str):\n func = partial(self._threaded_document_symbol, doc_uri)\n func = require_monitor(func)\n return func\n\n def _threaded_document_symbol(\n self, doc_uri: str, monitor: IMonitor\n ) -> List[DocumentSymbolTypedDict]:\n from robotframework_ls.impl.document_symbol import document_symbol\n\n completion_context = self._create_completion_context(doc_uri, 0, 0, monitor)\n if completion_context is None:\n return []\n\n return document_symbol(completion_context)\n\n def m_list_tests(self, doc_uri: str):\n func = partial(self._threaded_list_tests, doc_uri)\n func = require_monitor(func)\n return func\n\n def _threaded_list_tests(\n self, doc_uri: str, monitor: IMonitor\n ) -> List[ITestInfoTypedDict]:\n from robotframework_ls.impl.code_lens import list_tests\n\n completion_context = self._create_completion_context(doc_uri, 0, 0, monitor)\n if completion_context is None:\n return []\n\n return list_tests(completion_context)\n\n def m_hover(self, doc_uri: str, line: int, col: int):\n func = partial(self._threaded_hover, doc_uri, line, col)\n func = require_monitor(func)\n return func\n\n def _threaded_hover(\n self, doc_uri: str, line, col, monitor: IMonitor\n ) -> Optional[HoverTypedDict]:\n from robotframework_ls.impl.hover import hover\n\n completion_context = self._create_completion_context(\n doc_uri, line, col, monitor\n )\n if completion_context is None:\n return None\n\n return hover(completion_context)\n\n def m_workspace_symbols(self, query: Optional[str] = None):\n func = partial(self._threaded_workspace_symbols, query)\n func = require_monitor(func)\n return func\n\n def _threaded_workspace_symbols(\n self, query: Optional[str], monitor: IMonitor\n ) -> Optional[List[SymbolInformationTypedDict]]:\n from robotframework_ls.impl.workspace_symbols import workspace_symbols\n from robotframework_ls.impl.completion_context import BaseContext\n from robotframework_ls.impl.protocols import IRobotWorkspace\n from typing import cast\n\n workspace = self._workspace\n if not workspace:\n return []\n\n robot_workspace = cast(IRobotWorkspace, workspace)\n\n return workspace_symbols(\n query,\n BaseContext(workspace=robot_workspace, config=self.config, 
monitor=monitor),\n )\n\n def m_text_document__semantic_tokens__range(self, textDocument=None, range=None):\n raise RuntimeError(\"Not currently implemented!\")\n\n def m_text_document__semantic_tokens__full(self, textDocument=None):\n func = partial(self.threaded_semantic_tokens_full, textDocument=textDocument)\n func = require_monitor(func)\n return func\n\n def threaded_semantic_tokens_full(\n self, textDocument: TextDocumentTypedDict, monitor: Optional[IMonitor] = None\n ):\n from robotframework_ls.impl.semantic_tokens import semantic_tokens_full\n\n doc_uri = textDocument[\"uri\"]\n context = self._create_completion_context(doc_uri, -1, -1, monitor)\n if context is None:\n return {\"resultId\": None, \"data\": []}\n return {\"resultId\": None, \"data\": semantic_tokens_full(context)}\n\n def m_monaco_completions_from_code_full(\n self,\n prefix: str = \"\",\n full_code: str = \"\",\n position=PositionTypedDict,\n uri: str = \"\",\n indent: str = \"\",\n ):\n func = partial(\n self.threaded_monaco_completions_from_code_full,\n prefix=prefix,\n full_code=full_code,\n position=position,\n uri=uri,\n indent=indent,\n )\n func = require_monitor(func)\n return func\n\n def threaded_monaco_completions_from_code_full(\n self,\n prefix: str,\n full_code: str,\n position: PositionTypedDict,\n uri: str,\n indent: str,\n monitor: Optional[IMonitor] = None,\n ):\n from robotframework_ls.impl.robot_workspace import RobotDocument\n from robotframework_ls.impl.completion_context import CompletionContext\n from robocorp_ls_core.workspace import Document\n from robotframework_ls.impl import section_completions\n from robotframework_ls.impl import snippets_completions\n from robotframework_ls.server_api.monaco_conversions import (\n convert_to_monaco_completion,\n )\n from robotframework_ls.impl.completion_context import CompletionType\n\n d = Document(uri, prefix)\n last_line, _last_col = d.get_last_line_col()\n line = last_line + position[\"line\"]\n\n col = position[\"character\"]\n col += len(indent)\n\n document = RobotDocument(uri, full_code)\n completion_context = CompletionContext(\n document,\n line,\n col,\n config=self.config,\n monitor=monitor,\n workspace=self.workspace,\n )\n completion_context.type = CompletionType.shell\n completions = self._complete_from_completion_context(completion_context)\n completions.extend(section_completions.complete(completion_context))\n completions.extend(snippets_completions.complete(completion_context))\n\n return {\n \"suggestions\": [\n convert_to_monaco_completion(\n c, line_delta=last_line, col_delta=len(indent), uri=uri\n )\n for c in completions\n ]\n }\n\n def m_semantic_tokens_from_code_full(\n self, prefix: str = \"\", full_code: str = \"\", indent: str = \"\"\n ):\n func = partial(\n self.threaded_semantic_tokens_from_code_full,\n prefix=prefix,\n full_code=full_code,\n indent=indent,\n )\n func = require_monitor(func)\n return func\n\n def threaded_semantic_tokens_from_code_full(\n self,\n prefix: str,\n full_code: str,\n indent: str,\n monitor: Optional[IMonitor] = None,\n ):\n from robotframework_ls.impl.semantic_tokens import semantic_tokens_full_from_ast\n\n try:\n from robotframework_ls.impl.robot_workspace import RobotDocument\n\n doc = RobotDocument(\"\")\n doc.source = full_code\n ast = doc.get_ast()\n data = semantic_tokens_full_from_ast(ast, monitor)\n if not prefix:\n return {\"resultId\": None, \"data\": data}\n\n # We have to exclude the prefix from the coloring...\n\n # debug info...\n # import io\n # from 
robotframework_ls.impl.semantic_tokens import decode_semantic_tokens\n # stream = io.StringIO()\n # decode_semantic_tokens(data, doc, stream)\n # found = stream.getvalue()\n\n prefix_doc = RobotDocument(\"\")\n prefix_doc.source = prefix\n last_line, last_col = prefix_doc.get_last_line_col()\n\n # Now we have the data from the full code, but we need to remove whatever\n # we have in the prefix from the result...\n ints_iter = iter(data)\n line = 0\n col = 0\n new_data = []\n indent_len = len(indent)\n while True:\n try:\n line_delta = next(ints_iter)\n except StopIteration:\n break\n col_delta = next(ints_iter)\n token_len = next(ints_iter)\n token_type = next(ints_iter)\n token_modifier = next(ints_iter)\n line += line_delta\n if line_delta == 0:\n col += col_delta\n else:\n col = col_delta\n\n if line >= last_line:\n new_data.append(line - last_line)\n new_data.append(col_delta - indent_len)\n new_data.append(token_len)\n new_data.append(token_type)\n new_data.append(token_modifier)\n\n # Ok, now, we have to add the indent_len to all the\n # next lines\n while True:\n try:\n line_delta = next(ints_iter)\n except StopIteration:\n break\n col_delta = next(ints_iter)\n token_len = next(ints_iter)\n token_type = next(ints_iter)\n token_modifier = next(ints_iter)\n\n new_data.append(line_delta)\n if line_delta > 0:\n new_data.append(col_delta - indent_len)\n else:\n new_data.append(col_delta)\n new_data.append(token_len)\n new_data.append(token_type)\n new_data.append(token_modifier)\n\n break\n\n # Approach changed so that we always have a new line\n # i.e.:\n # \\n<indent><code>\n #\n # so, the condition below no longer applies.\n # elif line == last_line and col >= last_col:\n # new_data.append(0)\n # new_data.append(col - last_col)\n # new_data.append(token_len)\n # new_data.append(token_type)\n # new_data.append(token_modifier)\n # new_data.extend(ints_iter)\n # break\n\n # debug info...\n # temp_stream = io.StringIO()\n # temp_doc = RobotDocument(\"\")\n # temp_doc.source = full_code[len(prefix) :]\n # decode_semantic_tokens(new_data, temp_doc, temp_stream)\n # temp_found = temp_stream.getvalue()\n\n return {\"resultId\": None, \"data\": new_data}\n except:\n log.exception(\"Error computing semantic tokens from code.\")\n return {\"resultId\": None, \"data\": []}\n\n def m_shutdown(self, **_kwargs):\n PythonLanguageServer.m_shutdown(self, **_kwargs)\n self.libspec_manager.dispose()\n\n def m_exit(self, **_kwargs):\n PythonLanguageServer.m_exit(self, **_kwargs)\n self.libspec_manager.dispose()\n",
"step-ids": [
19,
33,
35,
44,
51
]
}
|
[
19,
33,
35,
44,
51
] |
<mask token>
class DataTypesTestCase(unittest.TestCase):
<mask token>
<mask token>
def test_is_rain_a_float(self):
rain = dfClean.iloc[4908, 2]
self.assertTrue(isinstance(rain, float))
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
class DateTimeFormatTestCase(unittest.TestCase):
def test_does_month_have_two_digits(self):
i = 0
booln = True
while i < len(dfClean):
if dfClean.iloc[i, 7][2] != '/':
booln = False
i += 1
self.assertTrue(booln)
def test_does_day_have_two_digits(self):
i = 0
booln = True
while i < len(dfClean):
if dfClean.iloc[i, 7][5] != '/':
booln = False
i += 1
self.assertTrue(booln)
def test_does_year_have_four_digits(self):
i = 0
booln = True
while i < len(dfClean):
if dfClean.iloc[i, 7][6:8] != '20':
booln = False
i += 1
self.assertTrue(booln)
def test_does_hour_have_two_digits(self):
i = 0
booln = True
while i < len(dfClean):
if len(dfClean.iloc[i, 7]) != 16:
booln = False
i += 1
self.assertTrue(booln)
class AppendColumnsTestCase(unittest.TestCase):
def test_is_month_column_appending_correctly(self):
i = 0
booln = True
while i < len(dfClean):
if int(dfClean.iloc[i, 9]) != int(dfClean.iloc[i, 7][:2]):
booln = False
i += 1
self.assertTrue(booln)
def test_is_day_column_apending_correctly(self):
i = 0
booln = True
while i < len(dfClean):
if int(dfClean.iloc[i, 10]) != int(dfClean.iloc[i, 7][3:5]):
booln = False
i += 1
self.assertTrue(booln)
def test_is_year_column_apending_correctly(self):
i = 0
booln = True
while i < len(dfClean):
if int(dfClean.iloc[i, 11]) != int(dfClean.iloc[i, 7][6:10]):
booln = False
i += 1
self.assertTrue(booln)
def test_is_hour_column_apending_correctly(self):
i = 0
booln = True
while i < len(dfClean):
if int(dfClean.iloc[i, 12]) != int(dfClean.iloc[i, 7][11:13]):
booln = False
i += 1
self.assertTrue(booln)
class HolidayTestCase(unittest.TestCase):
def test_are_all_hours_correct_holiday(self):
i = 0
booln = True
hol = 'None'
while i < len(dfClean):
if dfClean.iloc[i, 12] == 0:
hol = dfClean.iloc[i, 0]
elif dfClean.iloc[i, 0] != hol:
booln = False
i += 1
self.assertTrue(booln)
class UniqueDataPointsTestCase(unittest.TestCase):
def test_are_all_datetimes_unique(self):
i = 1
booln = True
while i < len(dfClean):
if dfClean.iloc[i, 7] == dfClean.iloc[i - 1, 7]:
booln = False
i += 1
self.assertTrue(booln)
class TemperatureConversionTestCase(unittest.TestCase):
def test_is_temp_converting_from_kelvin_to_F(self):
i = 1
booln = True
while i < len(dfClean):
if (dfClean.iloc[i, 1] > 120) | (dfClean.iloc[i, 1] < -50):
booln = False
i += 1
self.assertTrue(booln)
<mask token>
<|reserved_special_token_1|>
<mask token>
class DataTypesTestCase(unittest.TestCase):
<mask token>
<mask token>
def test_is_rain_a_float(self):
rain = dfClean.iloc[4908, 2]
self.assertTrue(isinstance(rain, float))
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
def test_is_hour_an_int(self):
hour = dfClean.iloc[4908, 12]
self.assertEqual(str(type(hour)), "<class 'numpy.int64'>")
class DateTimeFormatTestCase(unittest.TestCase):
def test_does_month_have_two_digits(self):
i = 0
booln = True
while i < len(dfClean):
if dfClean.iloc[i, 7][2] != '/':
booln = False
i += 1
self.assertTrue(booln)
def test_does_day_have_two_digits(self):
i = 0
booln = True
while i < len(dfClean):
if dfClean.iloc[i, 7][5] != '/':
booln = False
i += 1
self.assertTrue(booln)
def test_does_year_have_four_digits(self):
i = 0
booln = True
while i < len(dfClean):
if dfClean.iloc[i, 7][6:8] != '20':
booln = False
i += 1
self.assertTrue(booln)
def test_does_hour_have_two_digits(self):
i = 0
booln = True
while i < len(dfClean):
if len(dfClean.iloc[i, 7]) != 16:
booln = False
i += 1
self.assertTrue(booln)
class AppendColumnsTestCase(unittest.TestCase):
def test_is_month_column_appending_correctly(self):
i = 0
booln = True
while i < len(dfClean):
if int(dfClean.iloc[i, 9]) != int(dfClean.iloc[i, 7][:2]):
booln = False
i += 1
self.assertTrue(booln)
def test_is_day_column_apending_correctly(self):
i = 0
booln = True
while i < len(dfClean):
if int(dfClean.iloc[i, 10]) != int(dfClean.iloc[i, 7][3:5]):
booln = False
i += 1
self.assertTrue(booln)
def test_is_year_column_apending_correctly(self):
i = 0
booln = True
while i < len(dfClean):
if int(dfClean.iloc[i, 11]) != int(dfClean.iloc[i, 7][6:10]):
booln = False
i += 1
self.assertTrue(booln)
def test_is_hour_column_apending_correctly(self):
i = 0
booln = True
while i < len(dfClean):
if int(dfClean.iloc[i, 12]) != int(dfClean.iloc[i, 7][11:13]):
booln = False
i += 1
self.assertTrue(booln)
class HolidayTestCase(unittest.TestCase):
def test_are_all_hours_correct_holiday(self):
i = 0
booln = True
hol = 'None'
while i < len(dfClean):
if dfClean.iloc[i, 12] == 0:
hol = dfClean.iloc[i, 0]
elif dfClean.iloc[i, 0] != hol:
booln = False
i += 1
self.assertTrue(booln)
class UniqueDataPointsTestCase(unittest.TestCase):
def test_are_all_datetimes_unique(self):
i = 1
booln = True
while i < len(dfClean):
if dfClean.iloc[i, 7] == dfClean.iloc[i - 1, 7]:
booln = False
i += 1
self.assertTrue(booln)
class TemperatureConversionTestCase(unittest.TestCase):
def test_is_temp_converting_from_kelvin_to_F(self):
i = 1
booln = True
while i < len(dfClean):
if (dfClean.iloc[i, 1] > 120) | (dfClean.iloc[i, 1] < -50):
booln = False
i += 1
self.assertTrue(booln)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class DataTypesTestCase(unittest.TestCase):
<|reserved_special_token_0|>
def test_is_temperature_a_float(self):
temp = dfClean.iloc[4908, 1]
self.assertTrue(isinstance(temp, float))
def test_is_rain_a_float(self):
rain = dfClean.iloc[4908, 2]
self.assertTrue(isinstance(rain, float))
def test_is_snow_a_float(self):
snow = dfClean.iloc[4908, 3]
self.assertTrue(isinstance(snow, float))
def test_is_clouds_an_int(self):
clouds = dfClean.iloc[4908, 4]
self.assertEqual(str(type(clouds)), "<class 'numpy.int64'>")
def test_is_weather_main_a_string(self):
weather = dfClean.iloc[4908, 5]
self.assertTrue(isinstance(weather, str))
def test_is_weather_descrip_a_string(self):
weather = dfClean.iloc[4908, 6]
self.assertTrue(isinstance(weather, str))
def test_is_date_time_a_string(self):
dateTime = dfClean.iloc[4908, 7]
self.assertTrue(isinstance(dateTime, str))
def test_is_traffic_an_int(self):
traffic = dfClean.iloc[4908, 8]
self.assertEqual(str(type(traffic)), "<class 'numpy.int64'>")
def test_is_month_an_int(self):
month = dfClean.iloc[4908, 9]
self.assertEqual(str(type(month)), "<class 'numpy.int64'>")
def test_is_day_an_int(self):
day = dfClean.iloc[4908, 10]
self.assertEqual(str(type(day)), "<class 'numpy.int64'>")
def test_is_year_an_int(self):
year = dfClean.iloc[4908, 11]
self.assertEqual(str(type(year)), "<class 'numpy.int64'>")
def test_is_hour_an_int(self):
hour = dfClean.iloc[4908, 12]
self.assertEqual(str(type(hour)), "<class 'numpy.int64'>")
class DateTimeFormatTestCase(unittest.TestCase):
def test_does_month_have_two_digits(self):
i = 0
booln = True
while i < len(dfClean):
if dfClean.iloc[i, 7][2] != '/':
booln = False
i += 1
self.assertTrue(booln)
def test_does_day_have_two_digits(self):
i = 0
booln = True
while i < len(dfClean):
if dfClean.iloc[i, 7][5] != '/':
booln = False
i += 1
self.assertTrue(booln)
def test_does_year_have_four_digits(self):
i = 0
booln = True
while i < len(dfClean):
if dfClean.iloc[i, 7][6:8] != '20':
booln = False
i += 1
self.assertTrue(booln)
def test_does_hour_have_two_digits(self):
i = 0
booln = True
while i < len(dfClean):
if len(dfClean.iloc[i, 7]) != 16:
booln = False
i += 1
self.assertTrue(booln)
class AppendColumnsTestCase(unittest.TestCase):
def test_is_month_column_appending_correctly(self):
i = 0
booln = True
while i < len(dfClean):
if int(dfClean.iloc[i, 9]) != int(dfClean.iloc[i, 7][:2]):
booln = False
i += 1
self.assertTrue(booln)
def test_is_day_column_apending_correctly(self):
i = 0
booln = True
while i < len(dfClean):
if int(dfClean.iloc[i, 10]) != int(dfClean.iloc[i, 7][3:5]):
booln = False
i += 1
self.assertTrue(booln)
def test_is_year_column_apending_correctly(self):
i = 0
booln = True
while i < len(dfClean):
if int(dfClean.iloc[i, 11]) != int(dfClean.iloc[i, 7][6:10]):
booln = False
i += 1
self.assertTrue(booln)
def test_is_hour_column_apending_correctly(self):
i = 0
booln = True
while i < len(dfClean):
if int(dfClean.iloc[i, 12]) != int(dfClean.iloc[i, 7][11:13]):
booln = False
i += 1
self.assertTrue(booln)
class HolidayTestCase(unittest.TestCase):
def test_are_all_hours_correct_holiday(self):
i = 0
booln = True
hol = 'None'
while i < len(dfClean):
if dfClean.iloc[i, 12] == 0:
hol = dfClean.iloc[i, 0]
elif dfClean.iloc[i, 0] != hol:
booln = False
i += 1
self.assertTrue(booln)
class UniqueDataPointsTestCase(unittest.TestCase):
def test_are_all_datetimes_unique(self):
i = 1
booln = True
while i < len(dfClean):
if dfClean.iloc[i, 7] == dfClean.iloc[i - 1, 7]:
booln = False
i += 1
self.assertTrue(booln)
class TemperatureConversionTestCase(unittest.TestCase):
def test_is_temp_converting_from_kelvin_to_F(self):
i = 1
booln = True
while i < len(dfClean):
if (dfClean.iloc[i, 1] > 120) | (dfClean.iloc[i, 1] < -50):
booln = False
i += 1
self.assertTrue(booln)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class DataTypesTestCase(unittest.TestCase):
def test_is_holiday_a_string(self):
holiday = dfClean.iloc[4908, 0]
self.assertTrue(isinstance(holiday, str))
def test_is_temperature_a_float(self):
temp = dfClean.iloc[4908, 1]
self.assertTrue(isinstance(temp, float))
def test_is_rain_a_float(self):
rain = dfClean.iloc[4908, 2]
self.assertTrue(isinstance(rain, float))
def test_is_snow_a_float(self):
snow = dfClean.iloc[4908, 3]
self.assertTrue(isinstance(snow, float))
def test_is_clouds_an_int(self):
clouds = dfClean.iloc[4908, 4]
self.assertEqual(str(type(clouds)), "<class 'numpy.int64'>")
def test_is_weather_main_a_string(self):
weather = dfClean.iloc[4908, 5]
self.assertTrue(isinstance(weather, str))
def test_is_weather_descrip_a_string(self):
weather = dfClean.iloc[4908, 6]
self.assertTrue(isinstance(weather, str))
def test_is_date_time_a_string(self):
dateTime = dfClean.iloc[4908, 7]
self.assertTrue(isinstance(dateTime, str))
def test_is_traffic_an_int(self):
traffic = dfClean.iloc[4908, 8]
self.assertEqual(str(type(traffic)), "<class 'numpy.int64'>")
def test_is_month_an_int(self):
month = dfClean.iloc[4908, 9]
self.assertEqual(str(type(month)), "<class 'numpy.int64'>")
def test_is_day_an_int(self):
day = dfClean.iloc[4908, 10]
self.assertEqual(str(type(day)), "<class 'numpy.int64'>")
def test_is_year_an_int(self):
year = dfClean.iloc[4908, 11]
self.assertEqual(str(type(year)), "<class 'numpy.int64'>")
def test_is_hour_an_int(self):
hour = dfClean.iloc[4908, 12]
self.assertEqual(str(type(hour)), "<class 'numpy.int64'>")
class DateTimeFormatTestCase(unittest.TestCase):
def test_does_month_have_two_digits(self):
i = 0
booln = True
while i < len(dfClean):
if dfClean.iloc[i, 7][2] != '/':
booln = False
i += 1
self.assertTrue(booln)
def test_does_day_have_two_digits(self):
i = 0
booln = True
while i < len(dfClean):
if dfClean.iloc[i, 7][5] != '/':
booln = False
i += 1
self.assertTrue(booln)
def test_does_year_have_four_digits(self):
i = 0
booln = True
while i < len(dfClean):
if dfClean.iloc[i, 7][6:8] != '20':
booln = False
i += 1
self.assertTrue(booln)
def test_does_hour_have_two_digits(self):
i = 0
booln = True
while i < len(dfClean):
if len(dfClean.iloc[i, 7]) != 16:
booln = False
i += 1
self.assertTrue(booln)
class AppendColumnsTestCase(unittest.TestCase):
def test_is_month_column_appending_correctly(self):
i = 0
booln = True
while i < len(dfClean):
if int(dfClean.iloc[i, 9]) != int(dfClean.iloc[i, 7][:2]):
booln = False
i += 1
self.assertTrue(booln)
def test_is_day_column_apending_correctly(self):
i = 0
booln = True
while i < len(dfClean):
if int(dfClean.iloc[i, 10]) != int(dfClean.iloc[i, 7][3:5]):
booln = False
i += 1
self.assertTrue(booln)
def test_is_year_column_apending_correctly(self):
i = 0
booln = True
while i < len(dfClean):
if int(dfClean.iloc[i, 11]) != int(dfClean.iloc[i, 7][6:10]):
booln = False
i += 1
self.assertTrue(booln)
def test_is_hour_column_apending_correctly(self):
i = 0
booln = True
while i < len(dfClean):
if int(dfClean.iloc[i, 12]) != int(dfClean.iloc[i, 7][11:13]):
booln = False
i += 1
self.assertTrue(booln)
class HolidayTestCase(unittest.TestCase):
def test_are_all_hours_correct_holiday(self):
i = 0
booln = True
hol = 'None'
while i < len(dfClean):
if dfClean.iloc[i, 12] == 0:
hol = dfClean.iloc[i, 0]
elif dfClean.iloc[i, 0] != hol:
booln = False
i += 1
self.assertTrue(booln)
class UniqueDataPointsTestCase(unittest.TestCase):
def test_are_all_datetimes_unique(self):
i = 1
booln = True
while i < len(dfClean):
if dfClean.iloc[i, 7] == dfClean.iloc[i - 1, 7]:
booln = False
i += 1
self.assertTrue(booln)
class TemperatureConversionTestCase(unittest.TestCase):
def test_is_temp_converting_from_kelvin_to_F(self):
i = 1
booln = True
while i < len(dfClean):
if (dfClean.iloc[i, 1] > 120) | (dfClean.iloc[i, 1] < -50):
booln = False
i += 1
self.assertTrue(booln)
if __name__ == '__main__':
unittest.main()
<|reserved_special_token_1|>
# CS 5010 Project
# Team Metro
# Test the data cleaning
import unittest
from cleaning_data import dfClean # import the dataframe we created after cleaning the data
class DataTypesTestCase(unittest.TestCase):
# we will test that each column has the correct data type
    # note that there is a strange occurrence seen below when converting to a pandas dataframe
def test_is_holiday_a_string(self):
holiday = dfClean.iloc[4908,0]
self.assertTrue(isinstance(holiday, str))
def test_is_temperature_a_float(self):
temp = dfClean.iloc[4908,1]
self.assertTrue(isinstance(temp, float))
def test_is_rain_a_float(self):
rain = dfClean.iloc[4908,2]
self.assertTrue(isinstance(rain, float))
def test_is_snow_a_float(self):
snow = dfClean.iloc[4908,3]
self.assertTrue(isinstance(snow, float))
def test_is_clouds_an_int(self):
clouds = dfClean.iloc[4908,4]
self.assertEqual(str(type(clouds)), "<class 'numpy.int64'>")
# pandas converts all of the ints in the list to numpy.int64
# could not figure out how to avoid this
def test_is_weather_main_a_string(self):
weather = dfClean.iloc[4908,5]
self.assertTrue(isinstance(weather, str))
def test_is_weather_descrip_a_string(self):
weather = dfClean.iloc[4908,6]
self.assertTrue(isinstance(weather, str))
def test_is_date_time_a_string(self):
dateTime = dfClean.iloc[4908,7]
self.assertTrue(isinstance(dateTime, str))
def test_is_traffic_an_int(self):
traffic = dfClean.iloc[4908,8]
self.assertEqual(str(type(traffic)), "<class 'numpy.int64'>")
def test_is_month_an_int(self):
month = dfClean.iloc[4908,9]
self.assertEqual(str(type(month)), "<class 'numpy.int64'>")
def test_is_day_an_int(self):
day = dfClean.iloc[4908,10]
self.assertEqual(str(type(day)), "<class 'numpy.int64'>")
def test_is_year_an_int(self):
year = dfClean.iloc[4908,11]
self.assertEqual(str(type(year)), "<class 'numpy.int64'>")
def test_is_hour_an_int(self):
hour = dfClean.iloc[4908,12]
self.assertEqual(str(type(hour)), "<class 'numpy.int64'>")
class DateTimeFormatTestCase(unittest.TestCase):
def test_does_month_have_two_digits(self):
i = 0
booln = True
while i < len(dfClean):
if dfClean.iloc[i,7][2] != "/":
booln = False
i += 1
self.assertTrue(booln)
# make sure that every data point has a two digit month
# in cleaning, 0 should have been added to make it two digits
def test_does_day_have_two_digits(self):
i = 0
booln = True
while i < len(dfClean):
if dfClean.iloc[i,7][5] != "/":
booln = False
i += 1
self.assertTrue(booln)
        # all days in the date/time string should have two digits after cleaning
def test_does_year_have_four_digits(self):
i = 0
booln = True
while i < len(dfClean):
if dfClean.iloc[i,7][6:8] != "20":
booln = False
i += 1
self.assertTrue(booln)
# all years should be in the form 20xx in the date/time string
def test_does_hour_have_two_digits(self):
i = 0
booln = True # since we already tested all of the other cleaning items on the date/time string
while i < len(dfClean): # we can check the hour by checking the length of the whole string
            if len(dfClean.iloc[i,7]) != 16: # every entry in the column should have the form "mm/dd/yyyy hh:00"
booln = False
i += 1
self.assertTrue(booln)
# in cleaning, 0 should have been added to make a one digit hour (0-9) two digits (00-09)
# without the other tests this would be a way to check all in one test but would not
# tell us what part of the cleaning on the date/time string did not work correctly
class AppendColumnsTestCase(unittest.TestCase):
# we will check that each of the four new columns (month, day, year, and hour)
    # were appended correctly to the dataset
def test_is_month_column_appending_correctly(self):
i = 0
booln = True
while i < len(dfClean):
if int(dfClean.iloc[i,9]) != int(dfClean.iloc[i,7][:2]):
booln = False
i += 1
self.assertTrue(booln)
# we check that the month in the month column matches that in the original date/time column
def test_is_day_column_apending_correctly(self):
i = 0
booln = True
while i < len(dfClean):
if int(dfClean.iloc[i,10]) != int(dfClean.iloc[i,7][3:5]):
booln = False
i += 1
self.assertTrue(booln)
# we check that the day in the day column matches that in the original date/time column
def test_is_year_column_apending_correctly(self):
i = 0
booln = True
while i < len(dfClean):
if int(dfClean.iloc[i,11]) != int(dfClean.iloc[i,7][6:10]):
booln = False
i += 1
self.assertTrue(booln)
# we check that the year in the year column matches that in the original date/time column
def test_is_hour_column_apending_correctly(self):
i = 0
booln = True
while i < len(dfClean):
if int(dfClean.iloc[i,12]) != int(dfClean.iloc[i,7][11:13]):
booln = False
i += 1
self.assertTrue(booln)
# we check that the hour in the hour column matches that in the original date/time column
class HolidayTestCase(unittest.TestCase):
# we test that every hour of the same day has a consistent holiday
def test_are_all_hours_correct_holiday(self):
i = 0
booln = True
hol = "None"
while i < len(dfClean):
if dfClean.iloc[i,12] == 0:
hol = dfClean.iloc[i,0]
else:
if dfClean.iloc[i,0] != hol:
booln = False
i += 1
self.assertTrue(booln)
class UniqueDataPointsTestCase(unittest.TestCase):
# this test ensures that no two data points have the exact same date and hour
def test_are_all_datetimes_unique(self):
i = 1
booln = True
while i < len(dfClean):
if dfClean.iloc[i,7] == dfClean.iloc[i-1,7]:
booln = False
i += 1
self.assertTrue(booln)
class TemperatureConversionTestCase(unittest.TestCase):
# we test that the temperature was converted to Fahrenheit
# note that since we overrode the original temperature, we simply check for
    # outliers that would make sense as Kelvin values but not as Fahrenheit values.
    # This is how we discovered that some missing temperatures had been input as 0 Kelvin,
    # because they converted to roughly -460 Fahrenheit
def test_is_temp_converting_from_kelvin_to_F(self):
i = 1
booln = True
while i < len(dfClean):
if (dfClean.iloc[i,1] > 120) | (dfClean.iloc[i,1] < -50):
booln = False
i += 1
self.assertTrue(booln)
if __name__ == '__main__':
unittest.main()
|
flexible
|
{
"blob_id": "9d0727970c760a9a8123c5c07359ba5c538cea3c",
"index": 5926,
"step-1": "<mask token>\n\n\nclass DataTypesTestCase(unittest.TestCase):\n <mask token>\n <mask token>\n\n def test_is_rain_a_float(self):\n rain = dfClean.iloc[4908, 2]\n self.assertTrue(isinstance(rain, float))\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n\nclass DateTimeFormatTestCase(unittest.TestCase):\n\n def test_does_month_have_two_digits(self):\n i = 0\n booln = True\n while i < len(dfClean):\n if dfClean.iloc[i, 7][2] != '/':\n booln = False\n i += 1\n self.assertTrue(booln)\n\n def test_does_day_have_two_digits(self):\n i = 0\n booln = True\n while i < len(dfClean):\n if dfClean.iloc[i, 7][5] != '/':\n booln = False\n i += 1\n self.assertTrue(booln)\n\n def test_does_year_have_four_digits(self):\n i = 0\n booln = True\n while i < len(dfClean):\n if dfClean.iloc[i, 7][6:8] != '20':\n booln = False\n i += 1\n self.assertTrue(booln)\n\n def test_does_hour_have_two_digits(self):\n i = 0\n booln = True\n while i < len(dfClean):\n if len(dfClean.iloc[i, 7]) != 16:\n booln = False\n i += 1\n self.assertTrue(booln)\n\n\nclass AppendColumnsTestCase(unittest.TestCase):\n\n def test_is_month_column_appending_correctly(self):\n i = 0\n booln = True\n while i < len(dfClean):\n if int(dfClean.iloc[i, 9]) != int(dfClean.iloc[i, 7][:2]):\n booln = False\n i += 1\n self.assertTrue(booln)\n\n def test_is_day_column_apending_correctly(self):\n i = 0\n booln = True\n while i < len(dfClean):\n if int(dfClean.iloc[i, 10]) != int(dfClean.iloc[i, 7][3:5]):\n booln = False\n i += 1\n self.assertTrue(booln)\n\n def test_is_year_column_apending_correctly(self):\n i = 0\n booln = True\n while i < len(dfClean):\n if int(dfClean.iloc[i, 11]) != int(dfClean.iloc[i, 7][6:10]):\n booln = False\n i += 1\n self.assertTrue(booln)\n\n def test_is_hour_column_apending_correctly(self):\n i = 0\n booln = True\n while i < len(dfClean):\n if int(dfClean.iloc[i, 12]) != int(dfClean.iloc[i, 7][11:13]):\n booln = False\n i += 1\n self.assertTrue(booln)\n\n\nclass HolidayTestCase(unittest.TestCase):\n\n def test_are_all_hours_correct_holiday(self):\n i = 0\n booln = True\n hol = 'None'\n while i < len(dfClean):\n if dfClean.iloc[i, 12] == 0:\n hol = dfClean.iloc[i, 0]\n elif dfClean.iloc[i, 0] != hol:\n booln = False\n i += 1\n self.assertTrue(booln)\n\n\nclass UniqueDataPointsTestCase(unittest.TestCase):\n\n def test_are_all_datetimes_unique(self):\n i = 1\n booln = True\n while i < len(dfClean):\n if dfClean.iloc[i, 7] == dfClean.iloc[i - 1, 7]:\n booln = False\n i += 1\n self.assertTrue(booln)\n\n\nclass TemperatureConversionTestCase(unittest.TestCase):\n\n def test_is_temp_converting_from_kelvin_to_F(self):\n i = 1\n booln = True\n while i < len(dfClean):\n if (dfClean.iloc[i, 1] > 120) | (dfClean.iloc[i, 1] < -50):\n booln = False\n i += 1\n self.assertTrue(booln)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass DataTypesTestCase(unittest.TestCase):\n <mask token>\n <mask token>\n\n def test_is_rain_a_float(self):\n rain = dfClean.iloc[4908, 2]\n self.assertTrue(isinstance(rain, float))\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def test_is_hour_an_int(self):\n hour = dfClean.iloc[4908, 12]\n self.assertEqual(str(type(hour)), \"<class 'numpy.int64'>\")\n\n\nclass DateTimeFormatTestCase(unittest.TestCase):\n\n def test_does_month_have_two_digits(self):\n i = 0\n booln = True\n while i < len(dfClean):\n if dfClean.iloc[i, 7][2] != '/':\n booln = False\n i += 1\n self.assertTrue(booln)\n\n def test_does_day_have_two_digits(self):\n i = 0\n booln = True\n while i < len(dfClean):\n if dfClean.iloc[i, 7][5] != '/':\n booln = False\n i += 1\n self.assertTrue(booln)\n\n def test_does_year_have_four_digits(self):\n i = 0\n booln = True\n while i < len(dfClean):\n if dfClean.iloc[i, 7][6:8] != '20':\n booln = False\n i += 1\n self.assertTrue(booln)\n\n def test_does_hour_have_two_digits(self):\n i = 0\n booln = True\n while i < len(dfClean):\n if len(dfClean.iloc[i, 7]) != 16:\n booln = False\n i += 1\n self.assertTrue(booln)\n\n\nclass AppendColumnsTestCase(unittest.TestCase):\n\n def test_is_month_column_appending_correctly(self):\n i = 0\n booln = True\n while i < len(dfClean):\n if int(dfClean.iloc[i, 9]) != int(dfClean.iloc[i, 7][:2]):\n booln = False\n i += 1\n self.assertTrue(booln)\n\n def test_is_day_column_apending_correctly(self):\n i = 0\n booln = True\n while i < len(dfClean):\n if int(dfClean.iloc[i, 10]) != int(dfClean.iloc[i, 7][3:5]):\n booln = False\n i += 1\n self.assertTrue(booln)\n\n def test_is_year_column_apending_correctly(self):\n i = 0\n booln = True\n while i < len(dfClean):\n if int(dfClean.iloc[i, 11]) != int(dfClean.iloc[i, 7][6:10]):\n booln = False\n i += 1\n self.assertTrue(booln)\n\n def test_is_hour_column_apending_correctly(self):\n i = 0\n booln = True\n while i < len(dfClean):\n if int(dfClean.iloc[i, 12]) != int(dfClean.iloc[i, 7][11:13]):\n booln = False\n i += 1\n self.assertTrue(booln)\n\n\nclass HolidayTestCase(unittest.TestCase):\n\n def test_are_all_hours_correct_holiday(self):\n i = 0\n booln = True\n hol = 'None'\n while i < len(dfClean):\n if dfClean.iloc[i, 12] == 0:\n hol = dfClean.iloc[i, 0]\n elif dfClean.iloc[i, 0] != hol:\n booln = False\n i += 1\n self.assertTrue(booln)\n\n\nclass UniqueDataPointsTestCase(unittest.TestCase):\n\n def test_are_all_datetimes_unique(self):\n i = 1\n booln = True\n while i < len(dfClean):\n if dfClean.iloc[i, 7] == dfClean.iloc[i - 1, 7]:\n booln = False\n i += 1\n self.assertTrue(booln)\n\n\nclass TemperatureConversionTestCase(unittest.TestCase):\n\n def test_is_temp_converting_from_kelvin_to_F(self):\n i = 1\n booln = True\n while i < len(dfClean):\n if (dfClean.iloc[i, 1] > 120) | (dfClean.iloc[i, 1] < -50):\n booln = False\n i += 1\n self.assertTrue(booln)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass DataTypesTestCase(unittest.TestCase):\n <mask token>\n\n def test_is_temperature_a_float(self):\n temp = dfClean.iloc[4908, 1]\n self.assertTrue(isinstance(temp, float))\n\n def test_is_rain_a_float(self):\n rain = dfClean.iloc[4908, 2]\n self.assertTrue(isinstance(rain, float))\n\n def test_is_snow_a_float(self):\n snow = dfClean.iloc[4908, 3]\n self.assertTrue(isinstance(snow, float))\n\n def test_is_clouds_an_int(self):\n clouds = dfClean.iloc[4908, 4]\n self.assertEqual(str(type(clouds)), \"<class 'numpy.int64'>\")\n\n def test_is_weather_main_a_string(self):\n weather = dfClean.iloc[4908, 5]\n self.assertTrue(isinstance(weather, str))\n\n def test_is_weather_descrip_a_string(self):\n weather = dfClean.iloc[4908, 6]\n self.assertTrue(isinstance(weather, str))\n\n def test_is_date_time_a_string(self):\n dateTime = dfClean.iloc[4908, 7]\n self.assertTrue(isinstance(dateTime, str))\n\n def test_is_traffic_an_int(self):\n traffic = dfClean.iloc[4908, 8]\n self.assertEqual(str(type(traffic)), \"<class 'numpy.int64'>\")\n\n def test_is_month_an_int(self):\n month = dfClean.iloc[4908, 9]\n self.assertEqual(str(type(month)), \"<class 'numpy.int64'>\")\n\n def test_is_day_an_int(self):\n day = dfClean.iloc[4908, 10]\n self.assertEqual(str(type(day)), \"<class 'numpy.int64'>\")\n\n def test_is_year_an_int(self):\n year = dfClean.iloc[4908, 11]\n self.assertEqual(str(type(year)), \"<class 'numpy.int64'>\")\n\n def test_is_hour_an_int(self):\n hour = dfClean.iloc[4908, 12]\n self.assertEqual(str(type(hour)), \"<class 'numpy.int64'>\")\n\n\nclass DateTimeFormatTestCase(unittest.TestCase):\n\n def test_does_month_have_two_digits(self):\n i = 0\n booln = True\n while i < len(dfClean):\n if dfClean.iloc[i, 7][2] != '/':\n booln = False\n i += 1\n self.assertTrue(booln)\n\n def test_does_day_have_two_digits(self):\n i = 0\n booln = True\n while i < len(dfClean):\n if dfClean.iloc[i, 7][5] != '/':\n booln = False\n i += 1\n self.assertTrue(booln)\n\n def test_does_year_have_four_digits(self):\n i = 0\n booln = True\n while i < len(dfClean):\n if dfClean.iloc[i, 7][6:8] != '20':\n booln = False\n i += 1\n self.assertTrue(booln)\n\n def test_does_hour_have_two_digits(self):\n i = 0\n booln = True\n while i < len(dfClean):\n if len(dfClean.iloc[i, 7]) != 16:\n booln = False\n i += 1\n self.assertTrue(booln)\n\n\nclass AppendColumnsTestCase(unittest.TestCase):\n\n def test_is_month_column_appending_correctly(self):\n i = 0\n booln = True\n while i < len(dfClean):\n if int(dfClean.iloc[i, 9]) != int(dfClean.iloc[i, 7][:2]):\n booln = False\n i += 1\n self.assertTrue(booln)\n\n def test_is_day_column_apending_correctly(self):\n i = 0\n booln = True\n while i < len(dfClean):\n if int(dfClean.iloc[i, 10]) != int(dfClean.iloc[i, 7][3:5]):\n booln = False\n i += 1\n self.assertTrue(booln)\n\n def test_is_year_column_apending_correctly(self):\n i = 0\n booln = True\n while i < len(dfClean):\n if int(dfClean.iloc[i, 11]) != int(dfClean.iloc[i, 7][6:10]):\n booln = False\n i += 1\n self.assertTrue(booln)\n\n def test_is_hour_column_apending_correctly(self):\n i = 0\n booln = True\n while i < len(dfClean):\n if int(dfClean.iloc[i, 12]) != int(dfClean.iloc[i, 7][11:13]):\n booln = False\n i += 1\n self.assertTrue(booln)\n\n\nclass HolidayTestCase(unittest.TestCase):\n\n def test_are_all_hours_correct_holiday(self):\n i = 0\n booln = True\n hol = 'None'\n while i < len(dfClean):\n if dfClean.iloc[i, 12] == 0:\n hol = dfClean.iloc[i, 0]\n elif dfClean.iloc[i, 0] != hol:\n booln = 
False\n i += 1\n self.assertTrue(booln)\n\n\nclass UniqueDataPointsTestCase(unittest.TestCase):\n\n def test_are_all_datetimes_unique(self):\n i = 1\n booln = True\n while i < len(dfClean):\n if dfClean.iloc[i, 7] == dfClean.iloc[i - 1, 7]:\n booln = False\n i += 1\n self.assertTrue(booln)\n\n\nclass TemperatureConversionTestCase(unittest.TestCase):\n\n def test_is_temp_converting_from_kelvin_to_F(self):\n i = 1\n booln = True\n while i < len(dfClean):\n if (dfClean.iloc[i, 1] > 120) | (dfClean.iloc[i, 1] < -50):\n booln = False\n i += 1\n self.assertTrue(booln)\n\n\n<mask token>\n",
"step-4": "<mask token>\n\n\nclass DataTypesTestCase(unittest.TestCase):\n\n def test_is_holiday_a_string(self):\n holiday = dfClean.iloc[4908, 0]\n self.assertTrue(isinstance(holiday, str))\n\n def test_is_temperature_a_float(self):\n temp = dfClean.iloc[4908, 1]\n self.assertTrue(isinstance(temp, float))\n\n def test_is_rain_a_float(self):\n rain = dfClean.iloc[4908, 2]\n self.assertTrue(isinstance(rain, float))\n\n def test_is_snow_a_float(self):\n snow = dfClean.iloc[4908, 3]\n self.assertTrue(isinstance(snow, float))\n\n def test_is_clouds_an_int(self):\n clouds = dfClean.iloc[4908, 4]\n self.assertEqual(str(type(clouds)), \"<class 'numpy.int64'>\")\n\n def test_is_weather_main_a_string(self):\n weather = dfClean.iloc[4908, 5]\n self.assertTrue(isinstance(weather, str))\n\n def test_is_weather_descrip_a_string(self):\n weather = dfClean.iloc[4908, 6]\n self.assertTrue(isinstance(weather, str))\n\n def test_is_date_time_a_string(self):\n dateTime = dfClean.iloc[4908, 7]\n self.assertTrue(isinstance(dateTime, str))\n\n def test_is_traffic_an_int(self):\n traffic = dfClean.iloc[4908, 8]\n self.assertEqual(str(type(traffic)), \"<class 'numpy.int64'>\")\n\n def test_is_month_an_int(self):\n month = dfClean.iloc[4908, 9]\n self.assertEqual(str(type(month)), \"<class 'numpy.int64'>\")\n\n def test_is_day_an_int(self):\n day = dfClean.iloc[4908, 10]\n self.assertEqual(str(type(day)), \"<class 'numpy.int64'>\")\n\n def test_is_year_an_int(self):\n year = dfClean.iloc[4908, 11]\n self.assertEqual(str(type(year)), \"<class 'numpy.int64'>\")\n\n def test_is_hour_an_int(self):\n hour = dfClean.iloc[4908, 12]\n self.assertEqual(str(type(hour)), \"<class 'numpy.int64'>\")\n\n\nclass DateTimeFormatTestCase(unittest.TestCase):\n\n def test_does_month_have_two_digits(self):\n i = 0\n booln = True\n while i < len(dfClean):\n if dfClean.iloc[i, 7][2] != '/':\n booln = False\n i += 1\n self.assertTrue(booln)\n\n def test_does_day_have_two_digits(self):\n i = 0\n booln = True\n while i < len(dfClean):\n if dfClean.iloc[i, 7][5] != '/':\n booln = False\n i += 1\n self.assertTrue(booln)\n\n def test_does_year_have_four_digits(self):\n i = 0\n booln = True\n while i < len(dfClean):\n if dfClean.iloc[i, 7][6:8] != '20':\n booln = False\n i += 1\n self.assertTrue(booln)\n\n def test_does_hour_have_two_digits(self):\n i = 0\n booln = True\n while i < len(dfClean):\n if len(dfClean.iloc[i, 7]) != 16:\n booln = False\n i += 1\n self.assertTrue(booln)\n\n\nclass AppendColumnsTestCase(unittest.TestCase):\n\n def test_is_month_column_appending_correctly(self):\n i = 0\n booln = True\n while i < len(dfClean):\n if int(dfClean.iloc[i, 9]) != int(dfClean.iloc[i, 7][:2]):\n booln = False\n i += 1\n self.assertTrue(booln)\n\n def test_is_day_column_apending_correctly(self):\n i = 0\n booln = True\n while i < len(dfClean):\n if int(dfClean.iloc[i, 10]) != int(dfClean.iloc[i, 7][3:5]):\n booln = False\n i += 1\n self.assertTrue(booln)\n\n def test_is_year_column_apending_correctly(self):\n i = 0\n booln = True\n while i < len(dfClean):\n if int(dfClean.iloc[i, 11]) != int(dfClean.iloc[i, 7][6:10]):\n booln = False\n i += 1\n self.assertTrue(booln)\n\n def test_is_hour_column_apending_correctly(self):\n i = 0\n booln = True\n while i < len(dfClean):\n if int(dfClean.iloc[i, 12]) != int(dfClean.iloc[i, 7][11:13]):\n booln = False\n i += 1\n self.assertTrue(booln)\n\n\nclass HolidayTestCase(unittest.TestCase):\n\n def test_are_all_hours_correct_holiday(self):\n i = 0\n booln = True\n hol = 'None'\n while i < len(dfClean):\n if 
dfClean.iloc[i, 12] == 0:\n hol = dfClean.iloc[i, 0]\n elif dfClean.iloc[i, 0] != hol:\n booln = False\n i += 1\n self.assertTrue(booln)\n\n\nclass UniqueDataPointsTestCase(unittest.TestCase):\n\n def test_are_all_datetimes_unique(self):\n i = 1\n booln = True\n while i < len(dfClean):\n if dfClean.iloc[i, 7] == dfClean.iloc[i - 1, 7]:\n booln = False\n i += 1\n self.assertTrue(booln)\n\n\nclass TemperatureConversionTestCase(unittest.TestCase):\n\n def test_is_temp_converting_from_kelvin_to_F(self):\n i = 1\n booln = True\n while i < len(dfClean):\n if (dfClean.iloc[i, 1] > 120) | (dfClean.iloc[i, 1] < -50):\n booln = False\n i += 1\n self.assertTrue(booln)\n\n\nif __name__ == '__main__':\n unittest.main()\n",
"step-5": "# CS 5010 Project \n\n# Team Metro\n\n# Test the data cleaning\n\nimport unittest\nfrom cleaning_data import dfClean # import the dataframe we created after cleaning the data\n\n\nclass DataTypesTestCase(unittest.TestCase):\n\n # we will test that each column has the correct data type\n # note that there is a strange occurence seen below when converting to a pandas dataframe\n\n def test_is_holiday_a_string(self):\n holiday = dfClean.iloc[4908,0]\n self.assertTrue(isinstance(holiday, str))\n \n def test_is_temperature_a_float(self):\n temp = dfClean.iloc[4908,1]\n self.assertTrue(isinstance(temp, float))\n \n def test_is_rain_a_float(self):\n rain = dfClean.iloc[4908,2]\n self.assertTrue(isinstance(rain, float))\n\n def test_is_snow_a_float(self):\n snow = dfClean.iloc[4908,3]\n self.assertTrue(isinstance(snow, float))\n\n def test_is_clouds_an_int(self):\n clouds = dfClean.iloc[4908,4]\n self.assertEqual(str(type(clouds)), \"<class 'numpy.int64'>\")\n # pandas converts all of the ints in the list to numpy.int64 \n # could not figure out how to avoid this\n\n def test_is_weather_main_a_string(self):\n weather = dfClean.iloc[4908,5]\n self.assertTrue(isinstance(weather, str))\n \n def test_is_weather_descrip_a_string(self):\n weather = dfClean.iloc[4908,6]\n self.assertTrue(isinstance(weather, str))\n\n def test_is_date_time_a_string(self):\n dateTime = dfClean.iloc[4908,7]\n self.assertTrue(isinstance(dateTime, str))\n\n def test_is_traffic_an_int(self):\n traffic = dfClean.iloc[4908,8]\n self.assertEqual(str(type(traffic)), \"<class 'numpy.int64'>\")\n\n def test_is_month_an_int(self):\n month = dfClean.iloc[4908,9]\n self.assertEqual(str(type(month)), \"<class 'numpy.int64'>\")\n\n def test_is_day_an_int(self):\n day = dfClean.iloc[4908,10]\n self.assertEqual(str(type(day)), \"<class 'numpy.int64'>\")\n\n def test_is_year_an_int(self):\n year = dfClean.iloc[4908,11]\n self.assertEqual(str(type(year)), \"<class 'numpy.int64'>\")\n \n def test_is_hour_an_int(self):\n hour = dfClean.iloc[4908,12]\n self.assertEqual(str(type(hour)), \"<class 'numpy.int64'>\")\n\n \n\n\nclass DateTimeFormatTestCase(unittest.TestCase):\n def test_does_month_have_two_digits(self):\n i = 0 \n booln = True\n while i < len(dfClean):\n if dfClean.iloc[i,7][2] != \"/\":\n booln = False\n i += 1\n self.assertTrue(booln)\n # make sure that every data point has a two digit month\n # in cleaning, 0 should have been added to make it two digits\n \n def test_does_day_have_two_digits(self):\n i = 0 \n booln = True\n while i < len(dfClean):\n if dfClean.iloc[i,7][5] != \"/\":\n booln = False\n i += 1\n self.assertTrue(booln)\n # all months in the date/time string should have two digits after cleaning\n\n def test_does_year_have_four_digits(self):\n i = 0 \n booln = True\n while i < len(dfClean):\n if dfClean.iloc[i,7][6:8] != \"20\":\n booln = False\n i += 1\n self.assertTrue(booln)\n # all years should be in the form 20xx in the date/time string\n \n def test_does_hour_have_two_digits(self):\n i = 0\n booln = True # since we already tested all of the other cleaning items on the date/time string\n while i < len(dfClean): # we can check the hour by checking the length of the whole string\n if len(dfClean.iloc[i,7]) != 16: # all in column should have the form \"mm/dd/yyyy hh:00\"\n booln = False\n i += 1\n self.assertTrue(booln) \n # in cleaning, 0 should have been added to make a one digit hour (0-9) two digits (00-09)\n # without the other tests this would be a way to check all in one test but would not\n # tell 
us what part of the cleaning on the date/time string did not work correctly\n\n\nclass AppendColumnsTestCase(unittest.TestCase):\n # we will check that each of the four new columns (month, day, year, and hour)\n # appended correctly to the dataset\n def test_is_month_column_appending_correctly(self):\n i = 0\n booln = True\n while i < len(dfClean):\n if int(dfClean.iloc[i,9]) != int(dfClean.iloc[i,7][:2]):\n booln = False\n i += 1\n self.assertTrue(booln)\n # we check that the month in the month column matches that in the original date/time column\n \n def test_is_day_column_apending_correctly(self):\n i = 0\n booln = True\n while i < len(dfClean):\n if int(dfClean.iloc[i,10]) != int(dfClean.iloc[i,7][3:5]):\n booln = False\n i += 1\n self.assertTrue(booln)\n # we check that the day in the day column matches that in the original date/time column\n\n def test_is_year_column_apending_correctly(self):\n i = 0\n booln = True\n while i < len(dfClean):\n if int(dfClean.iloc[i,11]) != int(dfClean.iloc[i,7][6:10]):\n booln = False\n i += 1\n self.assertTrue(booln)\n # we check that the year in the year column matches that in the original date/time column\n\n\n def test_is_hour_column_apending_correctly(self):\n i = 0\n booln = True\n while i < len(dfClean):\n if int(dfClean.iloc[i,12]) != int(dfClean.iloc[i,7][11:13]):\n booln = False\n i += 1\n self.assertTrue(booln)\n # we check that the hour in the hour column matches that in the original date/time column\n \n\nclass HolidayTestCase(unittest.TestCase):\n # we test that every hour of the same day has a consistent holiday\n def test_are_all_hours_correct_holiday(self):\n i = 0\n booln = True\n hol = \"None\"\n while i < len(dfClean):\n if dfClean.iloc[i,12] == 0:\n hol = dfClean.iloc[i,0]\n else:\n if dfClean.iloc[i,0] != hol:\n booln = False\n i += 1\n self.assertTrue(booln)\n\n\nclass UniqueDataPointsTestCase(unittest.TestCase):\n # this test ensures that no two data points have the exact same date and hour\n def test_are_all_datetimes_unique(self):\n i = 1\n booln = True\n while i < len(dfClean):\n if dfClean.iloc[i,7] == dfClean.iloc[i-1,7]:\n booln = False\n i += 1\n self.assertTrue(booln)\n \n\nclass TemperatureConversionTestCase(unittest.TestCase):\n # we test that the temperature was converted to Fahrenheit\n # note that since we overrode the original temperature, we simply check for \n # outlier that would make sense as Kelvin values but not Fahrenheit values\n # This how we discovered there were some missing temperatures input as 0 Kelvin\n # because they converted to -450 Fahrenheit\n def test_is_temp_converting_from_kelvin_to_F(self):\n i = 1\n booln = True\n while i < len(dfClean):\n if (dfClean.iloc[i,1] > 120) | (dfClean.iloc[i,1] < -50):\n booln = False\n i += 1\n self.assertTrue(booln)\n\nif __name__ == '__main__': \n unittest.main() ",
"step-ids": [
18,
19,
29,
31,
33
]
}
|
[
18,
19,
29,
31,
33
] |
# Scissors, rock, paper (gawi-bawi-bo) game
# random is used to pick the computer's move at random from its move list
import random
# The computer's list of possible moves (scissors, rock, paper)
list_b = ["가위", "바위", "보"]
# Counters for the player's wins and losses
person_win_count = 0
person_lose_count = 0
while person_win_count < 4 or person_lose_count < 4:
    # Read the player's move (scissors, rock, or paper)
player = input("가위, 바위, 보 중 어떤 것을 낼래요? ")
if player != "가위" and player != "바위" and player != "보":
player = input("다시 입력해 주세요.(예: 가위, 바위, 보)")
    # Randomly draw the computer's move
computer = random.choice(list_b)
print("컴퓨터:", computer)
    # Compare the player's and the computer's moves and update the win/loss counters
if player == computer:
print("비겼습니다.")
elif player == "가위":
if computer == "바위":
person_lose_count = person_lose_count + 1
print("컴퓨터가 이겼습니다.")
if computer == "보":
person_win_count = person_win_count + 1
print("당신이 이겼습니다.")
elif player == "바위":
if computer == "가위":
person_win_count = person_win_count + 1
print("당신이 이겼습니다.")
if computer == "보":
person_lose_count = person_lose_count + 1
print("컴퓨터가 이겼습니다.")
elif player == "보":
if computer == "바위":
person_win_count = person_win_count + 1
print("당신이 이겼습니다.")
if computer == "가위":
person_lose_count = person_lose_count + 1
print("컴퓨터가 이겼습니다.")
    # Check whether either side has reached three wins, report the final result, and end the game
if person_win_count == 3:
print("당신이 3번을 이겼습니다.^^; 가위바위보 게임을 종료합니다.")
break
elif person_lose_count == 3:
print("당신이 3번을 졌습니다.-_-; 가위바위보 게임을 종료합니다.")
break
|
normal
|
{
"blob_id": "93d4c6b6aef827d6746afc684c32a9cf1d0229e4",
"index": 717,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nwhile person_win_count < 4 or person_lose_count < 4:\n player = input('가위, 바위, 보 중 어떤 것을 낼래요? ')\n if player != '가위' and player != '바위' and player != '보':\n player = input('다시 입력해 주세요.(예: 가위, 바위, 보)')\n computer = random.choice(list_b)\n print('컴퓨터:', computer)\n if player == computer:\n print('비겼습니다.')\n elif player == '가위':\n if computer == '바위':\n person_lose_count = person_lose_count + 1\n print('컴퓨터가 이겼습니다.')\n if computer == '보':\n person_win_count = person_win_count + 1\n print('당신이 이겼습니다.')\n elif player == '바위':\n if computer == '가위':\n person_win_count = person_win_count + 1\n print('당신이 이겼습니다.')\n if computer == '보':\n person_lose_count = person_lose_count + 1\n print('컴퓨터가 이겼습니다.')\n elif player == '보':\n if computer == '바위':\n person_win_count = person_win_count + 1\n print('당신이 이겼습니다.')\n if computer == '가위':\n person_lose_count = person_lose_count + 1\n print('컴퓨터가 이겼습니다.')\n if person_win_count == 3:\n print('당신이 3번을 이겼습니다.^^; 가위바위보 게임을 종료합니다.')\n break\n elif person_lose_count == 3:\n print('당신이 3번을 졌습니다.-_-; 가위바위보 게임을 종료합니다.')\n break\n",
"step-3": "<mask token>\nlist_b = ['가위', '바위', '보']\nperson_win_count = 0\nperson_lose_count = 0\nwhile person_win_count < 4 or person_lose_count < 4:\n player = input('가위, 바위, 보 중 어떤 것을 낼래요? ')\n if player != '가위' and player != '바위' and player != '보':\n player = input('다시 입력해 주세요.(예: 가위, 바위, 보)')\n computer = random.choice(list_b)\n print('컴퓨터:', computer)\n if player == computer:\n print('비겼습니다.')\n elif player == '가위':\n if computer == '바위':\n person_lose_count = person_lose_count + 1\n print('컴퓨터가 이겼습니다.')\n if computer == '보':\n person_win_count = person_win_count + 1\n print('당신이 이겼습니다.')\n elif player == '바위':\n if computer == '가위':\n person_win_count = person_win_count + 1\n print('당신이 이겼습니다.')\n if computer == '보':\n person_lose_count = person_lose_count + 1\n print('컴퓨터가 이겼습니다.')\n elif player == '보':\n if computer == '바위':\n person_win_count = person_win_count + 1\n print('당신이 이겼습니다.')\n if computer == '가위':\n person_lose_count = person_lose_count + 1\n print('컴퓨터가 이겼습니다.')\n if person_win_count == 3:\n print('당신이 3번을 이겼습니다.^^; 가위바위보 게임을 종료합니다.')\n break\n elif person_lose_count == 3:\n print('당신이 3번을 졌습니다.-_-; 가위바위보 게임을 종료합니다.')\n break\n",
"step-4": "import random\nlist_b = ['가위', '바위', '보']\nperson_win_count = 0\nperson_lose_count = 0\nwhile person_win_count < 4 or person_lose_count < 4:\n player = input('가위, 바위, 보 중 어떤 것을 낼래요? ')\n if player != '가위' and player != '바위' and player != '보':\n player = input('다시 입력해 주세요.(예: 가위, 바위, 보)')\n computer = random.choice(list_b)\n print('컴퓨터:', computer)\n if player == computer:\n print('비겼습니다.')\n elif player == '가위':\n if computer == '바위':\n person_lose_count = person_lose_count + 1\n print('컴퓨터가 이겼습니다.')\n if computer == '보':\n person_win_count = person_win_count + 1\n print('당신이 이겼습니다.')\n elif player == '바위':\n if computer == '가위':\n person_win_count = person_win_count + 1\n print('당신이 이겼습니다.')\n if computer == '보':\n person_lose_count = person_lose_count + 1\n print('컴퓨터가 이겼습니다.')\n elif player == '보':\n if computer == '바위':\n person_win_count = person_win_count + 1\n print('당신이 이겼습니다.')\n if computer == '가위':\n person_lose_count = person_lose_count + 1\n print('컴퓨터가 이겼습니다.')\n if person_win_count == 3:\n print('당신이 3번을 이겼습니다.^^; 가위바위보 게임을 종료합니다.')\n break\n elif person_lose_count == 3:\n print('당신이 3번을 졌습니다.-_-; 가위바위보 게임을 종료합니다.')\n break\n",
"step-5": "# 가위, 바위, 보 게임\n\n\n# 컴퓨터 가위, 바위, 보 리스트에서 랜덤하게 뽑기 위해 random 함수 호출\nimport random\n\n# 컴퓨터 가위, 바위, 보 리스트\nlist_b = [\"가위\", \"바위\", \"보\"]\n\n# 이긴횟수, 진 횟수 카운팅 하기 위한 변수\nperson_win_count = 0\nperson_lose_count = 0\n\nwhile person_win_count < 4 or person_lose_count < 4:\n # 가위, 바위, 보 입력 받기\n player = input(\"가위, 바위, 보 중 어떤 것을 낼래요? \")\n if player != \"가위\" and player != \"바위\" and player != \"보\":\n player = input(\"다시 입력해 주세요.(예: 가위, 바위, 보)\")\n\n # 컴퓨터 가위, 바위, 보 임의 추출\n computer = random.choice(list_b)\n print(\"컴퓨터:\", computer)\n\n # 사람과 컴퓨터간 가위, 바위, 보 비교 및 카운팅\n if player == computer:\n print(\"비겼습니다.\")\n elif player == \"가위\":\n if computer == \"바위\":\n person_lose_count = person_lose_count + 1\n print(\"컴퓨터가 이겼습니다.\")\n if computer == \"보\":\n person_win_count = person_win_count + 1\n print(\"당신이 이겼습니다.\")\n\n elif player == \"바위\":\n if computer == \"가위\":\n person_win_count = person_win_count + 1\n print(\"당신이 이겼습니다.\")\n if computer == \"보\":\n person_lose_count = person_lose_count + 1\n print(\"컴퓨터가 이겼습니다.\")\n\n elif player == \"보\":\n if computer == \"바위\":\n person_win_count = person_win_count + 1\n print(\"당신이 이겼습니다.\")\n if computer == \"가위\":\n person_lose_count = person_lose_count + 1\n print(\"컴퓨터가 이겼습니다.\")\n\n # 3번 이겼는지, 3번 졌는지 조건비교, 최종결과, 게임종료\n if person_win_count == 3:\n print(\"당신이 3번을 이겼습니다.^^; 가위바위보 게임을 종료합니다.\")\n break\n elif person_lose_count == 3:\n print(\"당신이 3번을 졌습니다.-_-; 가위바위보 게임을 종료합니다.\")\n break\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
"""
This is the interface that allows for creating nested lists.
You should not implement it, or speculate about its implementation
class NestedInteger(object):
def isInteger(self):
# @return {boolean} True if this NestedInteger holds a single integer,
# rather than a nested list.
def getInteger(self):
# @return {int} the single integer that this NestedInteger holds,
# if it holds a single integer
# Return None if this NestedInteger holds a nested list
def getList(self):
# @return {NestedInteger[]} the nested list that this NestedInteger holds,
# if it holds a nested list
# Return None if this NestedInteger holds a single integer
"""
# Version 1: DFS Recursive
class Solution(object):
# @param {NestedInteger[]} nestedList a list of NestedInteger Object
# @return {int} an integer
def depthSum(self, nestedList):
return self.dfs(nestedList, 1)
def dfs(self, nestedList, depth):
sum = 0
for item in nestedList:
if item.isInteger():
sum += item.getInteger() * depth
else:
sum += self.dfs(item.getList(), depth + 1)
return sum
# Version 2: BFS, Non-Recursive
class Solution(object):
# @param {NestedInteger[]} nestedList a list of NestedInteger Object
# @return {int} an integer
def depthSum(self, nestedList):
if len(nestedList) == 0:
return 0
from queue import Queue
q = Queue()
sum = 0
depth = 1
for item in nestedList:
q.put(item)
while not q.empty():
for _ in range(q.qsize()):
item = q.get()
if item.isInteger():
sum += item.getInteger() * depth
else:
for next in item.getList():
q.put(next)
depth += 1
return sum
|
normal
|
{
"blob_id": "bb81027ed5311e625591d98193997e5c7b533b70",
"index": 4945,
"step-1": "<mask token>\n\n\nclass Solution(object):\n\n def depthSum(self, nestedList):\n if len(nestedList) == 0:\n return 0\n from queue import Queue\n q = Queue()\n sum = 0\n depth = 1\n for item in nestedList:\n q.put(item)\n while not q.empty():\n for _ in range(q.qsize()):\n item = q.get()\n if item.isInteger():\n sum += item.getInteger() * depth\n else:\n for next in item.getList():\n q.put(next)\n depth += 1\n return sum\n",
"step-2": "<mask token>\n\n\nclass Solution(object):\n <mask token>\n <mask token>\n\n\nclass Solution(object):\n\n def depthSum(self, nestedList):\n if len(nestedList) == 0:\n return 0\n from queue import Queue\n q = Queue()\n sum = 0\n depth = 1\n for item in nestedList:\n q.put(item)\n while not q.empty():\n for _ in range(q.qsize()):\n item = q.get()\n if item.isInteger():\n sum += item.getInteger() * depth\n else:\n for next in item.getList():\n q.put(next)\n depth += 1\n return sum\n",
"step-3": "<mask token>\n\n\nclass Solution(object):\n\n def depthSum(self, nestedList):\n return self.dfs(nestedList, 1)\n <mask token>\n\n\nclass Solution(object):\n\n def depthSum(self, nestedList):\n if len(nestedList) == 0:\n return 0\n from queue import Queue\n q = Queue()\n sum = 0\n depth = 1\n for item in nestedList:\n q.put(item)\n while not q.empty():\n for _ in range(q.qsize()):\n item = q.get()\n if item.isInteger():\n sum += item.getInteger() * depth\n else:\n for next in item.getList():\n q.put(next)\n depth += 1\n return sum\n",
"step-4": "<mask token>\n\n\nclass Solution(object):\n\n def depthSum(self, nestedList):\n return self.dfs(nestedList, 1)\n\n def dfs(self, nestedList, depth):\n sum = 0\n for item in nestedList:\n if item.isInteger():\n sum += item.getInteger() * depth\n else:\n sum += self.dfs(item.getList(), depth + 1)\n return sum\n\n\nclass Solution(object):\n\n def depthSum(self, nestedList):\n if len(nestedList) == 0:\n return 0\n from queue import Queue\n q = Queue()\n sum = 0\n depth = 1\n for item in nestedList:\n q.put(item)\n while not q.empty():\n for _ in range(q.qsize()):\n item = q.get()\n if item.isInteger():\n sum += item.getInteger() * depth\n else:\n for next in item.getList():\n q.put(next)\n depth += 1\n return sum\n",
"step-5": "\"\"\"\nThis is the interface that allows for creating nested lists.\nYou should not implement it, or speculate about its implementation\n\nclass NestedInteger(object):\n def isInteger(self):\n # @return {boolean} True if this NestedInteger holds a single integer,\n # rather than a nested list.\n\n def getInteger(self):\n # @return {int} the single integer that this NestedInteger holds,\n # if it holds a single integer\n # Return None if this NestedInteger holds a nested list\n\n def getList(self):\n # @return {NestedInteger[]} the nested list that this NestedInteger holds,\n # if it holds a nested list\n # Return None if this NestedInteger holds a single integer\n\"\"\"\n\n\n# Version 1: DFS Recursive\nclass Solution(object):\n # @param {NestedInteger[]} nestedList a list of NestedInteger Object\n # @return {int} an integer\n def depthSum(self, nestedList):\n return self.dfs(nestedList, 1)\n\n def dfs(self, nestedList, depth):\n sum = 0\n for item in nestedList:\n if item.isInteger():\n sum += item.getInteger() * depth\n else:\n sum += self.dfs(item.getList(), depth + 1)\n\n return sum\n\n\n\n\n# Version 2: BFS, Non-Recursive\nclass Solution(object):\n # @param {NestedInteger[]} nestedList a list of NestedInteger Object\n # @return {int} an integer\n def depthSum(self, nestedList):\n if len(nestedList) == 0:\n return 0\n\n from queue import Queue\n q = Queue()\n sum = 0\n depth = 1\n\n for item in nestedList:\n q.put(item)\n\n while not q.empty():\n for _ in range(q.qsize()):\n item = q.get()\n if item.isInteger():\n sum += item.getInteger() * depth\n else:\n for next in item.getList():\n q.put(next)\n depth += 1\n\n return sum",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
import pytest
from homeworks.homework6.oop_2 import (
DeadLineError,
Homework,
HomeworkResult,
Student,
Teacher,
)
def test_creating_objects():
teacher = Teacher("Daniil", "Shadrin")
student = Student("Roman", "Petrov")
homework = teacher.create_homework("Learn OOP", 1)
homework_result = student.do_homework(homework, "I have done this hw")
assert isinstance(teacher, Teacher)
assert isinstance(student, Student)
assert isinstance(homework, Homework)
assert isinstance(homework_result, HomeworkResult)
def test_do_homework_exception():
teacher = Teacher("Daniil", "Shadrin")
student = Student("Lev", "Sokolov")
homework = teacher.create_homework("Learn OOP", 0)
with pytest.raises(DeadLineError, match=r"You are late"):
student.do_homework(homework, "I have done this hw")
def test_creating_and_resetting_homework_results_by_teacher():
teacher = Teacher("Daniil", "Shadrin")
student = Student("Roman", "Petrov")
homework_1 = teacher.create_homework("Learn OOP", 1)
homework_1_result = student.do_homework(homework_1, "I have done this hw")
assert teacher.check_homework(homework_1_result) is True
assert homework_1_result in teacher.homework_done[homework_1]
homework_2 = teacher.create_homework("homework 2", 1)
homework_2_result = student.do_homework(homework_2, "zero")
assert teacher.check_homework(homework_2_result) is False
assert teacher.homework_done.get(homework_2) is None
homework_3 = teacher.create_homework("homework 3", 1)
homework_3_result = student.do_homework(homework_3, "I have done this hw")
assert teacher.check_homework(homework_3_result) is True
assert homework_3_result in teacher.homework_done.get(homework_3)
assert len(teacher.homework_done) == 2
Teacher.reset_results(homework_3)
assert len(teacher.homework_done) == 1
Teacher.reset_results()
assert len(teacher.homework_done) == 0
|
normal
|
{
"blob_id": "8f971ee3b98691a887ee0632afd613bbf4f19aa0",
"index": 3505,
"step-1": "<mask token>\n\n\ndef test_creating_objects():\n teacher = Teacher('Daniil', 'Shadrin')\n student = Student('Roman', 'Petrov')\n homework = teacher.create_homework('Learn OOP', 1)\n homework_result = student.do_homework(homework, 'I have done this hw')\n assert isinstance(teacher, Teacher)\n assert isinstance(student, Student)\n assert isinstance(homework, Homework)\n assert isinstance(homework_result, HomeworkResult)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef test_creating_objects():\n teacher = Teacher('Daniil', 'Shadrin')\n student = Student('Roman', 'Petrov')\n homework = teacher.create_homework('Learn OOP', 1)\n homework_result = student.do_homework(homework, 'I have done this hw')\n assert isinstance(teacher, Teacher)\n assert isinstance(student, Student)\n assert isinstance(homework, Homework)\n assert isinstance(homework_result, HomeworkResult)\n\n\n<mask token>\n\n\ndef test_creating_and_resetting_homework_results_by_teacher():\n teacher = Teacher('Daniil', 'Shadrin')\n student = Student('Roman', 'Petrov')\n homework_1 = teacher.create_homework('Learn OOP', 1)\n homework_1_result = student.do_homework(homework_1, 'I have done this hw')\n assert teacher.check_homework(homework_1_result) is True\n assert homework_1_result in teacher.homework_done[homework_1]\n homework_2 = teacher.create_homework('homework 2', 1)\n homework_2_result = student.do_homework(homework_2, 'zero')\n assert teacher.check_homework(homework_2_result) is False\n assert teacher.homework_done.get(homework_2) is None\n homework_3 = teacher.create_homework('homework 3', 1)\n homework_3_result = student.do_homework(homework_3, 'I have done this hw')\n assert teacher.check_homework(homework_3_result) is True\n assert homework_3_result in teacher.homework_done.get(homework_3)\n assert len(teacher.homework_done) == 2\n Teacher.reset_results(homework_3)\n assert len(teacher.homework_done) == 1\n Teacher.reset_results()\n assert len(teacher.homework_done) == 0\n",
"step-3": "<mask token>\n\n\ndef test_creating_objects():\n teacher = Teacher('Daniil', 'Shadrin')\n student = Student('Roman', 'Petrov')\n homework = teacher.create_homework('Learn OOP', 1)\n homework_result = student.do_homework(homework, 'I have done this hw')\n assert isinstance(teacher, Teacher)\n assert isinstance(student, Student)\n assert isinstance(homework, Homework)\n assert isinstance(homework_result, HomeworkResult)\n\n\ndef test_do_homework_exception():\n teacher = Teacher('Daniil', 'Shadrin')\n student = Student('Lev', 'Sokolov')\n homework = teacher.create_homework('Learn OOP', 0)\n with pytest.raises(DeadLineError, match='You are late'):\n student.do_homework(homework, 'I have done this hw')\n\n\ndef test_creating_and_resetting_homework_results_by_teacher():\n teacher = Teacher('Daniil', 'Shadrin')\n student = Student('Roman', 'Petrov')\n homework_1 = teacher.create_homework('Learn OOP', 1)\n homework_1_result = student.do_homework(homework_1, 'I have done this hw')\n assert teacher.check_homework(homework_1_result) is True\n assert homework_1_result in teacher.homework_done[homework_1]\n homework_2 = teacher.create_homework('homework 2', 1)\n homework_2_result = student.do_homework(homework_2, 'zero')\n assert teacher.check_homework(homework_2_result) is False\n assert teacher.homework_done.get(homework_2) is None\n homework_3 = teacher.create_homework('homework 3', 1)\n homework_3_result = student.do_homework(homework_3, 'I have done this hw')\n assert teacher.check_homework(homework_3_result) is True\n assert homework_3_result in teacher.homework_done.get(homework_3)\n assert len(teacher.homework_done) == 2\n Teacher.reset_results(homework_3)\n assert len(teacher.homework_done) == 1\n Teacher.reset_results()\n assert len(teacher.homework_done) == 0\n",
"step-4": "import pytest\nfrom homeworks.homework6.oop_2 import DeadLineError, Homework, HomeworkResult, Student, Teacher\n\n\ndef test_creating_objects():\n teacher = Teacher('Daniil', 'Shadrin')\n student = Student('Roman', 'Petrov')\n homework = teacher.create_homework('Learn OOP', 1)\n homework_result = student.do_homework(homework, 'I have done this hw')\n assert isinstance(teacher, Teacher)\n assert isinstance(student, Student)\n assert isinstance(homework, Homework)\n assert isinstance(homework_result, HomeworkResult)\n\n\ndef test_do_homework_exception():\n teacher = Teacher('Daniil', 'Shadrin')\n student = Student('Lev', 'Sokolov')\n homework = teacher.create_homework('Learn OOP', 0)\n with pytest.raises(DeadLineError, match='You are late'):\n student.do_homework(homework, 'I have done this hw')\n\n\ndef test_creating_and_resetting_homework_results_by_teacher():\n teacher = Teacher('Daniil', 'Shadrin')\n student = Student('Roman', 'Petrov')\n homework_1 = teacher.create_homework('Learn OOP', 1)\n homework_1_result = student.do_homework(homework_1, 'I have done this hw')\n assert teacher.check_homework(homework_1_result) is True\n assert homework_1_result in teacher.homework_done[homework_1]\n homework_2 = teacher.create_homework('homework 2', 1)\n homework_2_result = student.do_homework(homework_2, 'zero')\n assert teacher.check_homework(homework_2_result) is False\n assert teacher.homework_done.get(homework_2) is None\n homework_3 = teacher.create_homework('homework 3', 1)\n homework_3_result = student.do_homework(homework_3, 'I have done this hw')\n assert teacher.check_homework(homework_3_result) is True\n assert homework_3_result in teacher.homework_done.get(homework_3)\n assert len(teacher.homework_done) == 2\n Teacher.reset_results(homework_3)\n assert len(teacher.homework_done) == 1\n Teacher.reset_results()\n assert len(teacher.homework_done) == 0\n",
"step-5": "import pytest\n\nfrom homeworks.homework6.oop_2 import (\n DeadLineError,\n Homework,\n HomeworkResult,\n Student,\n Teacher,\n)\n\n\ndef test_creating_objects():\n teacher = Teacher(\"Daniil\", \"Shadrin\")\n student = Student(\"Roman\", \"Petrov\")\n homework = teacher.create_homework(\"Learn OOP\", 1)\n homework_result = student.do_homework(homework, \"I have done this hw\")\n assert isinstance(teacher, Teacher)\n assert isinstance(student, Student)\n assert isinstance(homework, Homework)\n assert isinstance(homework_result, HomeworkResult)\n\n\ndef test_do_homework_exception():\n teacher = Teacher(\"Daniil\", \"Shadrin\")\n student = Student(\"Lev\", \"Sokolov\")\n homework = teacher.create_homework(\"Learn OOP\", 0)\n with pytest.raises(DeadLineError, match=r\"You are late\"):\n student.do_homework(homework, \"I have done this hw\")\n\n\ndef test_creating_and_resetting_homework_results_by_teacher():\n teacher = Teacher(\"Daniil\", \"Shadrin\")\n student = Student(\"Roman\", \"Petrov\")\n homework_1 = teacher.create_homework(\"Learn OOP\", 1)\n homework_1_result = student.do_homework(homework_1, \"I have done this hw\")\n assert teacher.check_homework(homework_1_result) is True\n assert homework_1_result in teacher.homework_done[homework_1]\n\n homework_2 = teacher.create_homework(\"homework 2\", 1)\n homework_2_result = student.do_homework(homework_2, \"zero\")\n assert teacher.check_homework(homework_2_result) is False\n assert teacher.homework_done.get(homework_2) is None\n\n homework_3 = teacher.create_homework(\"homework 3\", 1)\n homework_3_result = student.do_homework(homework_3, \"I have done this hw\")\n assert teacher.check_homework(homework_3_result) is True\n assert homework_3_result in teacher.homework_done.get(homework_3)\n\n assert len(teacher.homework_done) == 2\n Teacher.reset_results(homework_3)\n assert len(teacher.homework_done) == 1\n Teacher.reset_results()\n assert len(teacher.homework_done) == 0\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
<|reserved_special_token_0|>
def kombinacije_trgovin_f(mnozica_izdelkov_v_kosarici, seznam_trgovin,
trgovine_z_izdelki):
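    # Enumerate every subset of stores via 0/1 masks (itertools.product) and keep only
    # those subsets whose combined product range covers the whole shopping basket.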
generator_kombinacij = (set(itertools.compress(seznam_trgovin, el)) for
el in itertools.product(*([[0, 1]] * len(seznam_trgovin))))
kombinacije = []
for mnozica_trgovin in generator_kombinacij:
izdelki_kombinacije = set()
for trgovina in mnozica_trgovin:
for izdelek in trgovine_z_izdelki[trgovina]:
izdelki_kombinacije.add(izdelek)
if mnozica_izdelkov_v_kosarici.issubset(izdelki_kombinacije):
kombinacije.append(mnozica_trgovin)
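    # Drop any kept combination that is a strict superset of another one; the pass is
    # run twice, presumably because removing items while iterating can skip entries.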
for kombinacija in kombinacije:
for kombinacija2 in kombinacije:
if kombinacija.issubset(kombinacija2
) and kombinacija != kombinacija2:
kombinacije.remove(kombinacija2)
elif kombinacija2.issubset(kombinacija
) and kombinacija != kombinacija2:
kombinacije.remove(kombinacija)
for kombinacija in kombinacije:
for kombinacija2 in kombinacije:
if kombinacija.issubset(kombinacija2
) and kombinacija != kombinacija2:
kombinacije.remove(kombinacija2)
elif kombinacija2.issubset(kombinacija
) and kombinacija != kombinacija2:
kombinacije.remove(kombinacija)
return kombinacije
return None
def razdalja(vozlisce1, vozlisce2):
return math.sqrt((vozlisce2[1] - vozlisce1[1]) ** 2 + (vozlisce2[0] -
vozlisce1[0]) ** 2)
<|reserved_special_token_0|>
def doloci_pot(dom, seznam_izdelkov, seznam_trgovin,
seznam_izdelkov_v_kosarici, trgovine_z_izdelki):
vozlisca = []
dolzine = []
trgovine = []
for kombinacija in kombinacije_trgovin_f(set(seznam_izdelkov_v_kosarici
), seznam_trgovin, trgovine_z_izdelki):
par = doloci_trgovine(dom, slovar_koordinat, seznam_izdelkov,
kombinacija)
dolzine.append(par[1])
vozlisca.append(par[0])
trgovine.append(kombinacija)
if dolzine == []:
return None
i = numpy.argmin(dolzine)
v = vozlisca[i]
v.append(dom)
obiskane_trgovine = trgovine[i]
return v, obiskane_trgovine
def razporeditev(obiskane_trgovine, izdelki, slovar):
izdelki2 = izdelki.copy()
razporeditev = []
for trgovina in obiskane_trgovine:
sez = []
for izdelek in izdelki:
if {izdelek}.issubset(slovar[trgovina]):
izd = podatki.id_izdelka_v_opis()[izdelek - 1]
sez.append(izd)
izdelki2.remove(izdelek)
razporeditev.append([trgovina, sez])
return razporeditev
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def kombinacije_trgovin_f(mnozica_izdelkov_v_kosarici, seznam_trgovin,
trgovine_z_izdelki):
generator_kombinacij = (set(itertools.compress(seznam_trgovin, el)) for
el in itertools.product(*([[0, 1]] * len(seznam_trgovin))))
kombinacije = []
for mnozica_trgovin in generator_kombinacij:
izdelki_kombinacije = set()
for trgovina in mnozica_trgovin:
for izdelek in trgovine_z_izdelki[trgovina]:
izdelki_kombinacije.add(izdelek)
if mnozica_izdelkov_v_kosarici.issubset(izdelki_kombinacije):
kombinacije.append(mnozica_trgovin)
for kombinacija in kombinacije:
for kombinacija2 in kombinacije:
if kombinacija.issubset(kombinacija2
) and kombinacija != kombinacija2:
kombinacije.remove(kombinacija2)
elif kombinacija2.issubset(kombinacija
) and kombinacija != kombinacija2:
kombinacije.remove(kombinacija)
for kombinacija in kombinacije:
for kombinacija2 in kombinacije:
if kombinacija.issubset(kombinacija2
) and kombinacija != kombinacija2:
kombinacije.remove(kombinacija2)
elif kombinacija2.issubset(kombinacija
) and kombinacija != kombinacija2:
kombinacije.remove(kombinacija)
return kombinacije
return None
def razdalja(vozlisce1, vozlisce2):
return math.sqrt((vozlisce2[1] - vozlisce1[1]) ** 2 + (vozlisce2[0] -
vozlisce1[0]) ** 2)
def doloci_trgovine(dom, slovar_koordinat, seznam_izdelkov, kombinacija):
skupine = []
poti = []
for trgovina in kombinacija:
skupine.append(podatki.lokacije(slovar_koordinat, trgovina))
for i in skupine[0]:
dolzina = razdalja(dom, i)
if len(kombinacija) > 1:
for j in skupine[1]:
dolzina += razdalja(i, j)
if len(kombinacija) > 2:
for k in skupine[2]:
dolzina += razdalja(j, k)
if len(kombinacija) > 3:
for m in skupine[3]:
dolzina += razdalja(k, m)
if len(kombinacija) > 4:
for n in skupine[4]:
dolzina += razdalja(m, n)
dolzina += razdalja(n, dom)
poti.append([[dom, i, j, k, m, n], dolzina]
)
dolzina = 0
else:
dolzina += razdalja(m, dom)
poti.append([[dom, i, j, k, m], dolzina])
dolzina = 0
else:
dolzina += razdalja(k, dom)
poti.append([[dom, i, j, k], dolzina])
dolzina = 0
else:
dolzina += razdalja(j, dom)
poti.append([[dom, i, j], dolzina])
dolzina = 0
else:
dolzina *= 2
poti.append([[dom, i], dolzina])
dolzina = 0
dolzine = [el[1] for el in poti]
if dolzine == []:
print('Nakupa ni mogoče opraviti.')
return None
mini = numpy.argmin(dolzine)
return poti[mini]
return dolzina, sez_vozlisc
def doloci_pot(dom, seznam_izdelkov, seznam_trgovin,
seznam_izdelkov_v_kosarici, trgovine_z_izdelki):
vozlisca = []
dolzine = []
trgovine = []
for kombinacija in kombinacije_trgovin_f(set(seznam_izdelkov_v_kosarici
), seznam_trgovin, trgovine_z_izdelki):
par = doloci_trgovine(dom, slovar_koordinat, seznam_izdelkov,
kombinacija)
dolzine.append(par[1])
vozlisca.append(par[0])
trgovine.append(kombinacija)
if dolzine == []:
return None
i = numpy.argmin(dolzine)
v = vozlisca[i]
v.append(dom)
obiskane_trgovine = trgovine[i]
return v, obiskane_trgovine
def razporeditev(obiskane_trgovine, izdelki, slovar):
izdelki2 = izdelki.copy()
razporeditev = []
for trgovina in obiskane_trgovine:
sez = []
for izdelek in izdelki:
if {izdelek}.issubset(slovar[trgovina]):
izd = podatki.id_izdelka_v_opis()[izdelek - 1]
sez.append(izd)
izdelki2.remove(izdelek)
razporeditev.append([trgovina, sez])
return razporeditev
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def kombinacije_trgovin_f(mnozica_izdelkov_v_kosarici, seznam_trgovin,
trgovine_z_izdelki):
generator_kombinacij = (set(itertools.compress(seznam_trgovin, el)) for
el in itertools.product(*([[0, 1]] * len(seznam_trgovin))))
kombinacije = []
for mnozica_trgovin in generator_kombinacij:
izdelki_kombinacije = set()
for trgovina in mnozica_trgovin:
for izdelek in trgovine_z_izdelki[trgovina]:
izdelki_kombinacije.add(izdelek)
if mnozica_izdelkov_v_kosarici.issubset(izdelki_kombinacije):
kombinacije.append(mnozica_trgovin)
for kombinacija in kombinacije:
for kombinacija2 in kombinacije:
if kombinacija.issubset(kombinacija2
) and kombinacija != kombinacija2:
kombinacije.remove(kombinacija2)
elif kombinacija2.issubset(kombinacija
) and kombinacija != kombinacija2:
kombinacije.remove(kombinacija)
for kombinacija in kombinacije:
for kombinacija2 in kombinacije:
if kombinacija.issubset(kombinacija2
) and kombinacija != kombinacija2:
kombinacije.remove(kombinacija2)
elif kombinacija2.issubset(kombinacija
) and kombinacija != kombinacija2:
kombinacije.remove(kombinacija)
return kombinacije
return None
def razdalja(vozlisce1, vozlisce2):
return math.sqrt((vozlisce2[1] - vozlisce1[1]) ** 2 + (vozlisce2[0] -
vozlisce1[0]) ** 2)
def doloci_trgovine(dom, slovar_koordinat, seznam_izdelkov, kombinacija):
skupine = []
poti = []
for trgovina in kombinacija:
skupine.append(podatki.lokacije(slovar_koordinat, trgovina))
for i in skupine[0]:
dolzina = razdalja(dom, i)
if len(kombinacija) > 1:
for j in skupine[1]:
dolzina += razdalja(i, j)
if len(kombinacija) > 2:
for k in skupine[2]:
dolzina += razdalja(j, k)
if len(kombinacija) > 3:
for m in skupine[3]:
dolzina += razdalja(k, m)
if len(kombinacija) > 4:
for n in skupine[4]:
dolzina += razdalja(m, n)
dolzina += razdalja(n, dom)
poti.append([[dom, i, j, k, m, n], dolzina]
)
dolzina = 0
else:
dolzina += razdalja(m, dom)
poti.append([[dom, i, j, k, m], dolzina])
dolzina = 0
else:
dolzina += razdalja(k, dom)
poti.append([[dom, i, j, k], dolzina])
dolzina = 0
else:
dolzina += razdalja(j, dom)
poti.append([[dom, i, j], dolzina])
dolzina = 0
else:
dolzina *= 2
poti.append([[dom, i], dolzina])
dolzina = 0
dolzine = [el[1] for el in poti]
if dolzine == []:
print('Nakupa ni mogoče opraviti.')
return None
mini = numpy.argmin(dolzine)
return poti[mini]
return dolzina, sez_vozlisc
def doloci_pot(dom, seznam_izdelkov, seznam_trgovin,
seznam_izdelkov_v_kosarici, trgovine_z_izdelki):
vozlisca = []
dolzine = []
trgovine = []
for kombinacija in kombinacije_trgovin_f(set(seznam_izdelkov_v_kosarici
), seznam_trgovin, trgovine_z_izdelki):
par = doloci_trgovine(dom, slovar_koordinat, seznam_izdelkov,
kombinacija)
dolzine.append(par[1])
vozlisca.append(par[0])
trgovine.append(kombinacija)
if dolzine == []:
return None
i = numpy.argmin(dolzine)
v = vozlisca[i]
v.append(dom)
obiskane_trgovine = trgovine[i]
return v, obiskane_trgovine
def razporeditev(obiskane_trgovine, izdelki, slovar):
izdelki2 = izdelki.copy()
razporeditev = []
for trgovina in obiskane_trgovine:
sez = []
for izdelek in izdelki:
if {izdelek}.issubset(slovar[trgovina]):
izd = podatki.id_izdelka_v_opis()[izdelek - 1]
sez.append(izd)
izdelki2.remove(izdelek)
razporeditev.append([trgovina, sez])
return razporeditev
baza.commit()
<|reserved_special_token_0|>
<|reserved_special_token_1|>
import itertools
import numpy
import math
import psycopg2
import podatki
baza = podatki.baza
dom = podatki.preberi_lokacijo()
seznam_trgovin = ['spar', 'mercator', 'tus', 'hofer', 'lidl']
id_in_opis = podatki.id_izdelka_v_opis()
seznam_izdelkov = [el[0] for el in id_in_opis]
mnozica_izdelkov = set(seznam_izdelkov)
trgovine_z_izdelki = podatki.trgovine_z_izdelki_f()
seznam_izdelkov_v_kosarici = [el[3] for el in podatki.kosarica]
<|reserved_special_token_0|>
def kombinacije_trgovin_f(mnozica_izdelkov_v_kosarici, seznam_trgovin,
trgovine_z_izdelki):
generator_kombinacij = (set(itertools.compress(seznam_trgovin, el)) for
el in itertools.product(*([[0, 1]] * len(seznam_trgovin))))
kombinacije = []
for mnozica_trgovin in generator_kombinacij:
izdelki_kombinacije = set()
for trgovina in mnozica_trgovin:
for izdelek in trgovine_z_izdelki[trgovina]:
izdelki_kombinacije.add(izdelek)
if mnozica_izdelkov_v_kosarici.issubset(izdelki_kombinacije):
kombinacije.append(mnozica_trgovin)
for kombinacija in kombinacije:
for kombinacija2 in kombinacije:
if kombinacija.issubset(kombinacija2
) and kombinacija != kombinacija2:
kombinacije.remove(kombinacija2)
elif kombinacija2.issubset(kombinacija
) and kombinacija != kombinacija2:
kombinacije.remove(kombinacija)
for kombinacija in kombinacije:
for kombinacija2 in kombinacije:
if kombinacija.issubset(kombinacija2
) and kombinacija != kombinacija2:
kombinacije.remove(kombinacija2)
elif kombinacija2.issubset(kombinacija
) and kombinacija != kombinacija2:
kombinacije.remove(kombinacija)
return kombinacije
return None
def razdalja(vozlisce1, vozlisce2):
return math.sqrt((vozlisce2[1] - vozlisce1[1]) ** 2 + (vozlisce2[0] -
vozlisce1[0]) ** 2)
def doloci_trgovine(dom, slovar_koordinat, seznam_izdelkov, kombinacija):
skupine = []
poti = []
for trgovina in kombinacija:
skupine.append(podatki.lokacije(slovar_koordinat, trgovina))
for i in skupine[0]:
dolzina = razdalja(dom, i)
if len(kombinacija) > 1:
for j in skupine[1]:
dolzina += razdalja(i, j)
if len(kombinacija) > 2:
for k in skupine[2]:
dolzina += razdalja(j, k)
if len(kombinacija) > 3:
for m in skupine[3]:
dolzina += razdalja(k, m)
if len(kombinacija) > 4:
for n in skupine[4]:
dolzina += razdalja(m, n)
dolzina += razdalja(n, dom)
poti.append([[dom, i, j, k, m, n], dolzina]
)
dolzina = 0
else:
dolzina += razdalja(m, dom)
poti.append([[dom, i, j, k, m], dolzina])
dolzina = 0
else:
dolzina += razdalja(k, dom)
poti.append([[dom, i, j, k], dolzina])
dolzina = 0
else:
dolzina += razdalja(j, dom)
poti.append([[dom, i, j], dolzina])
dolzina = 0
else:
dolzina *= 2
poti.append([[dom, i], dolzina])
dolzina = 0
dolzine = [el[1] for el in poti]
if dolzine == []:
print('Nakupa ni mogoče opraviti.')
return None
mini = numpy.argmin(dolzine)
return poti[mini]
return dolzina, sez_vozlisc
def doloci_pot(dom, seznam_izdelkov, seznam_trgovin,
seznam_izdelkov_v_kosarici, trgovine_z_izdelki):
vozlisca = []
dolzine = []
trgovine = []
for kombinacija in kombinacije_trgovin_f(set(seznam_izdelkov_v_kosarici
), seznam_trgovin, trgovine_z_izdelki):
par = doloci_trgovine(dom, slovar_koordinat, seznam_izdelkov,
kombinacija)
dolzine.append(par[1])
vozlisca.append(par[0])
trgovine.append(kombinacija)
if dolzine == []:
return None
i = numpy.argmin(dolzine)
v = vozlisca[i]
v.append(dom)
obiskane_trgovine = trgovine[i]
return v, obiskane_trgovine
def razporeditev(obiskane_trgovine, izdelki, slovar):
izdelki2 = izdelki.copy()
razporeditev = []
for trgovina in obiskane_trgovine:
sez = []
for izdelek in izdelki:
if {izdelek}.issubset(slovar[trgovina]):
izd = podatki.id_izdelka_v_opis()[izdelek - 1]
sez.append(izd)
izdelki2.remove(izdelek)
razporeditev.append([trgovina, sez])
return razporeditev
baza.commit()
slovar_koordinat = podatki.slovar_koordinat
kombinacije_trgovin = kombinacije_trgovin_f(set(seznam_izdelkov_v_kosarici),
seznam_trgovin, trgovine_z_izdelki)
pot, obiskane_trgovine = doloci_pot(dom, seznam_izdelkov, seznam_trgovin,
seznam_izdelkov_v_kosarici, trgovine_z_izdelki)
razpredelnica = razporeditev(obiskane_trgovine, seznam_izdelkov_v_kosarici,
podatki.trgovine_z_izdelki)
<|reserved_special_token_1|>
import itertools
import numpy
import math
import psycopg2
import podatki
baza = podatki.baza
dom = podatki.preberi_lokacijo()
seznam_trgovin =["spar", "mercator", "tus", "hofer", "lidl"]
id_in_opis = podatki.id_izdelka_v_opis()
seznam_izdelkov = [el[0] for el in id_in_opis] #['cokolada', 'sladoled', ...]
mnozica_izdelkov = set(seznam_izdelkov)
trgovine_z_izdelki = podatki.trgovine_z_izdelki_f() #slovar: {'trgovina':['id1', 'id2'],...}
seznam_izdelkov_v_kosarici = [el[3] for el in podatki.kosarica]
'''
def zemljevid_trgovin(trgovine):
sez = []
for trgovina in trgovine:
sez.append([trgovina, [])
def kombinacije_trgovin(seznam_izdelkov):
sez_kombinacij = []
for trgovina in trgovine:
kombinacija = []
izdelki = sez_izdelkov
for izdelek in izdelki:
if izdelek in trgovina:
izdelki = izdelki.remove(izdelek)
'''
def kombinacije_trgovin_f(mnozica_izdelkov_v_kosarici, seznam_trgovin, trgovine_z_izdelki):
generator_kombinacij = (set(itertools.compress(seznam_trgovin, el)) for el in itertools.product(*[[0,1]]*len(seznam_trgovin)))
kombinacije = []
for mnozica_trgovin in generator_kombinacij:
izdelki_kombinacije = set()
for trgovina in mnozica_trgovin:
for izdelek in trgovine_z_izdelki[trgovina]:
                izdelki_kombinacije.add(izdelek)  # set of all products available across the stores in this combination
if mnozica_izdelkov_v_kosarici.issubset(izdelki_kombinacije):
kombinacije.append(mnozica_trgovin)
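    # keep only minimal combinations: prune any combination that is a superset of another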
for kombinacija in kombinacije:
for kombinacija2 in kombinacije:
if kombinacija.issubset(kombinacija2) and kombinacija != kombinacija2:
kombinacije.remove(kombinacija2)
elif kombinacija2.issubset(kombinacija) and kombinacija != kombinacija2:
kombinacije.remove(kombinacija)
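    # second pruning pass: removing items from the list while iterating over it can skip
    # entries, so the same superset check is run once more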
for kombinacija in kombinacije:
for kombinacija2 in kombinacije:
if kombinacija.issubset(kombinacija2) and kombinacija != kombinacija2:
kombinacije.remove(kombinacija2)
elif kombinacija2.issubset(kombinacija) and kombinacija != kombinacija2:
kombinacije.remove(kombinacija)
return kombinacije
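# Note on the subset enumeration used in kombinacije_trgovin_f: itertools.product(*[[0, 1]] * n)
# yields every 0/1 mask of length n and itertools.compress keeps the stores whose mask bit is 1,
# i.e. the generator walks the whole power set of seznam_trgovin. Tiny illustration (hypothetical names):
#   >>> stores = ['a', 'b']
#   >>> [set(itertools.compress(stores, m)) for m in itertools.product(*[[0, 1]] * len(stores))]
#   [set(), {'b'}, {'a'}, {'a', 'b'}]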
def razdalja(vozlisce1, vozlisce2):
return math.sqrt((vozlisce2[1] - vozlisce1[1]) ** 2 + (vozlisce2[0] - vozlisce1[0]) ** 2)
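# e.g. razdalja((0, 0), (3, 4)) == 5.0 -- plain Euclidean distance between two (x, y) points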
#dom = [x,y]
def doloci_trgovine(dom, slovar_koordinat, seznam_izdelkov, kombinacija):
    skupine = [] # groups of candidate locations, one group per store chain in the combination
poti = []
for trgovina in kombinacija:
skupine.append(podatki.lokacije(slovar_koordinat, trgovina))
    for i in skupine[0]: # skupine[0] is the list of locations of the first store chain
dolzina = razdalja(dom, i)
if len(kombinacija) > 1:
for j in skupine[1]:
dolzina += razdalja(i, j)
if len(kombinacija) > 2:
for k in skupine[2]:
dolzina += razdalja(j, k)
if len(kombinacija) > 3:
for m in skupine[3]:
dolzina += razdalja(k, m)
if len(kombinacija) > 4:
for n in skupine[4]:
dolzina += razdalja(m, n)
dolzina += razdalja(n, dom)
poti.append([[dom, i, j, k, m, n], dolzina])
dolzina = 0
else:
dolzina += razdalja(m, dom)
poti.append([[dom, i, j, k, m], dolzina])
dolzina = 0
else:
dolzina += razdalja(k, dom)
poti.append([[dom, i, j, k], dolzina])
dolzina = 0
else:
dolzina += razdalja(j, dom)
poti.append([[dom, i, j], dolzina])
dolzina = 0
else:
dolzina *= 2
poti.append([[dom, i], dolzina])
dolzina = 0
dolzine = [el[1] for el in poti]
if dolzine == []:
print("Nakupa ni mogoče opraviti.")
return None
mini = numpy.argmin(dolzine)
    return poti[mini]  # [[pot], dolzina]
def doloci_pot(dom, seznam_izdelkov, seznam_trgovin, seznam_izdelkov_v_kosarici, trgovine_z_izdelki):
vozlisca = []
dolzine = []
trgovine = []
for kombinacija in kombinacije_trgovin_f(set(seznam_izdelkov_v_kosarici), seznam_trgovin, trgovine_z_izdelki):
par = doloci_trgovine(dom, slovar_koordinat, seznam_izdelkov, kombinacija)
dolzine.append(par[1])
vozlisca.append(par[0])
trgovine.append(kombinacija)
if dolzine == []:
return None
i = numpy.argmin(dolzine)
v = vozlisca[i]
v.append(dom)
obiskane_trgovine = trgovine[i]
return v, obiskane_trgovine
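# Sketch of the pair returned by doloci_pot (coordinate values are hypothetical):
#   pot                -> [dom, (x1, y1), (x2, y2), dom]   # stops in visiting order, home first and last
#   obiskane_trgovine  -> {'spar', 'hofer'}                # chains whose stores cover the whole basket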
def razporeditev(obiskane_trgovine, izdelki, slovar):
izdelki2 = izdelki.copy()
razporeditev = []
for trgovina in obiskane_trgovine:
sez = []
for izdelek in izdelki:
if {izdelek}.issubset(slovar[trgovina]):
izd = podatki.id_izdelka_v_opis()[izdelek-1]
sez.append(izd)
izdelki2.remove(izdelek)
razporeditev.append([trgovina, sez])
return razporeditev
baza.commit()
slovar_koordinat = podatki.slovar_koordinat
kombinacije_trgovin = kombinacije_trgovin_f(set(seznam_izdelkov_v_kosarici), seznam_trgovin, trgovine_z_izdelki)
#print(kombinacije_trgovin)
pot, obiskane_trgovine = doloci_pot(dom, seznam_izdelkov, seznam_trgovin, seznam_izdelkov_v_kosarici, trgovine_z_izdelki)
razpredelnica = razporeditev(obiskane_trgovine, seznam_izdelkov_v_kosarici, podatki.trgovine_z_izdelki)
|
flexible
|
{
"blob_id": "5a0702dd869862ebc27c83d10e0b1f0575de68a7",
"index": 2944,
"step-1": "<mask token>\n\n\ndef kombinacije_trgovin_f(mnozica_izdelkov_v_kosarici, seznam_trgovin,\n trgovine_z_izdelki):\n generator_kombinacij = (set(itertools.compress(seznam_trgovin, el)) for\n el in itertools.product(*([[0, 1]] * len(seznam_trgovin))))\n kombinacije = []\n for mnozica_trgovin in generator_kombinacij:\n izdelki_kombinacije = set()\n for trgovina in mnozica_trgovin:\n for izdelek in trgovine_z_izdelki[trgovina]:\n izdelki_kombinacije.add(izdelek)\n if mnozica_izdelkov_v_kosarici.issubset(izdelki_kombinacije):\n kombinacije.append(mnozica_trgovin)\n for kombinacija in kombinacije:\n for kombinacija2 in kombinacije:\n if kombinacija.issubset(kombinacija2\n ) and kombinacija != kombinacija2:\n kombinacije.remove(kombinacija2)\n elif kombinacija2.issubset(kombinacija\n ) and kombinacija != kombinacija2:\n kombinacije.remove(kombinacija)\n for kombinacija in kombinacije:\n for kombinacija2 in kombinacije:\n if kombinacija.issubset(kombinacija2\n ) and kombinacija != kombinacija2:\n kombinacije.remove(kombinacija2)\n elif kombinacija2.issubset(kombinacija\n ) and kombinacija != kombinacija2:\n kombinacije.remove(kombinacija)\n return kombinacije\n return None\n\n\ndef razdalja(vozlisce1, vozlisce2):\n return math.sqrt((vozlisce2[1] - vozlisce1[1]) ** 2 + (vozlisce2[0] -\n vozlisce1[0]) ** 2)\n\n\n<mask token>\n\n\ndef doloci_pot(dom, seznam_izdelkov, seznam_trgovin,\n seznam_izdelkov_v_kosarici, trgovine_z_izdelki):\n vozlisca = []\n dolzine = []\n trgovine = []\n for kombinacija in kombinacije_trgovin_f(set(seznam_izdelkov_v_kosarici\n ), seznam_trgovin, trgovine_z_izdelki):\n par = doloci_trgovine(dom, slovar_koordinat, seznam_izdelkov,\n kombinacija)\n dolzine.append(par[1])\n vozlisca.append(par[0])\n trgovine.append(kombinacija)\n if dolzine == []:\n return None\n i = numpy.argmin(dolzine)\n v = vozlisca[i]\n v.append(dom)\n obiskane_trgovine = trgovine[i]\n return v, obiskane_trgovine\n\n\ndef razporeditev(obiskane_trgovine, izdelki, slovar):\n izdelki2 = izdelki.copy()\n razporeditev = []\n for trgovina in obiskane_trgovine:\n sez = []\n for izdelek in izdelki:\n if {izdelek}.issubset(slovar[trgovina]):\n izd = podatki.id_izdelka_v_opis()[izdelek - 1]\n sez.append(izd)\n izdelki2.remove(izdelek)\n razporeditev.append([trgovina, sez])\n return razporeditev\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef kombinacije_trgovin_f(mnozica_izdelkov_v_kosarici, seznam_trgovin,\n trgovine_z_izdelki):\n generator_kombinacij = (set(itertools.compress(seznam_trgovin, el)) for\n el in itertools.product(*([[0, 1]] * len(seznam_trgovin))))\n kombinacije = []\n for mnozica_trgovin in generator_kombinacij:\n izdelki_kombinacije = set()\n for trgovina in mnozica_trgovin:\n for izdelek in trgovine_z_izdelki[trgovina]:\n izdelki_kombinacije.add(izdelek)\n if mnozica_izdelkov_v_kosarici.issubset(izdelki_kombinacije):\n kombinacije.append(mnozica_trgovin)\n for kombinacija in kombinacije:\n for kombinacija2 in kombinacije:\n if kombinacija.issubset(kombinacija2\n ) and kombinacija != kombinacija2:\n kombinacije.remove(kombinacija2)\n elif kombinacija2.issubset(kombinacija\n ) and kombinacija != kombinacija2:\n kombinacije.remove(kombinacija)\n for kombinacija in kombinacije:\n for kombinacija2 in kombinacije:\n if kombinacija.issubset(kombinacija2\n ) and kombinacija != kombinacija2:\n kombinacije.remove(kombinacija2)\n elif kombinacija2.issubset(kombinacija\n ) and kombinacija != kombinacija2:\n kombinacije.remove(kombinacija)\n return kombinacije\n return None\n\n\ndef razdalja(vozlisce1, vozlisce2):\n return math.sqrt((vozlisce2[1] - vozlisce1[1]) ** 2 + (vozlisce2[0] -\n vozlisce1[0]) ** 2)\n\n\ndef doloci_trgovine(dom, slovar_koordinat, seznam_izdelkov, kombinacija):\n skupine = []\n poti = []\n for trgovina in kombinacija:\n skupine.append(podatki.lokacije(slovar_koordinat, trgovina))\n for i in skupine[0]:\n dolzina = razdalja(dom, i)\n if len(kombinacija) > 1:\n for j in skupine[1]:\n dolzina += razdalja(i, j)\n if len(kombinacija) > 2:\n for k in skupine[2]:\n dolzina += razdalja(j, k)\n if len(kombinacija) > 3:\n for m in skupine[3]:\n dolzina += razdalja(k, m)\n if len(kombinacija) > 4:\n for n in skupine[4]:\n dolzina += razdalja(m, n)\n dolzina += razdalja(n, dom)\n poti.append([[dom, i, j, k, m, n], dolzina]\n )\n dolzina = 0\n else:\n dolzina += razdalja(m, dom)\n poti.append([[dom, i, j, k, m], dolzina])\n dolzina = 0\n else:\n dolzina += razdalja(k, dom)\n poti.append([[dom, i, j, k], dolzina])\n dolzina = 0\n else:\n dolzina += razdalja(j, dom)\n poti.append([[dom, i, j], dolzina])\n dolzina = 0\n else:\n dolzina *= 2\n poti.append([[dom, i], dolzina])\n dolzina = 0\n dolzine = [el[1] for el in poti]\n if dolzine == []:\n print('Nakupa ni mogoče opraviti.')\n return None\n mini = numpy.argmin(dolzine)\n return poti[mini]\n return dolzina, sez_vozlisc\n\n\ndef doloci_pot(dom, seznam_izdelkov, seznam_trgovin,\n seznam_izdelkov_v_kosarici, trgovine_z_izdelki):\n vozlisca = []\n dolzine = []\n trgovine = []\n for kombinacija in kombinacije_trgovin_f(set(seznam_izdelkov_v_kosarici\n ), seznam_trgovin, trgovine_z_izdelki):\n par = doloci_trgovine(dom, slovar_koordinat, seznam_izdelkov,\n kombinacija)\n dolzine.append(par[1])\n vozlisca.append(par[0])\n trgovine.append(kombinacija)\n if dolzine == []:\n return None\n i = numpy.argmin(dolzine)\n v = vozlisca[i]\n v.append(dom)\n obiskane_trgovine = trgovine[i]\n return v, obiskane_trgovine\n\n\ndef razporeditev(obiskane_trgovine, izdelki, slovar):\n izdelki2 = izdelki.copy()\n razporeditev = []\n for trgovina in obiskane_trgovine:\n sez = []\n for izdelek in izdelki:\n if {izdelek}.issubset(slovar[trgovina]):\n izd = podatki.id_izdelka_v_opis()[izdelek - 1]\n sez.append(izd)\n izdelki2.remove(izdelek)\n razporeditev.append([trgovina, sez])\n return razporeditev\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef kombinacije_trgovin_f(mnozica_izdelkov_v_kosarici, seznam_trgovin,\n trgovine_z_izdelki):\n generator_kombinacij = (set(itertools.compress(seznam_trgovin, el)) for\n el in itertools.product(*([[0, 1]] * len(seznam_trgovin))))\n kombinacije = []\n for mnozica_trgovin in generator_kombinacij:\n izdelki_kombinacije = set()\n for trgovina in mnozica_trgovin:\n for izdelek in trgovine_z_izdelki[trgovina]:\n izdelki_kombinacije.add(izdelek)\n if mnozica_izdelkov_v_kosarici.issubset(izdelki_kombinacije):\n kombinacije.append(mnozica_trgovin)\n for kombinacija in kombinacije:\n for kombinacija2 in kombinacije:\n if kombinacija.issubset(kombinacija2\n ) and kombinacija != kombinacija2:\n kombinacije.remove(kombinacija2)\n elif kombinacija2.issubset(kombinacija\n ) and kombinacija != kombinacija2:\n kombinacije.remove(kombinacija)\n for kombinacija in kombinacije:\n for kombinacija2 in kombinacije:\n if kombinacija.issubset(kombinacija2\n ) and kombinacija != kombinacija2:\n kombinacije.remove(kombinacija2)\n elif kombinacija2.issubset(kombinacija\n ) and kombinacija != kombinacija2:\n kombinacije.remove(kombinacija)\n return kombinacije\n return None\n\n\ndef razdalja(vozlisce1, vozlisce2):\n return math.sqrt((vozlisce2[1] - vozlisce1[1]) ** 2 + (vozlisce2[0] -\n vozlisce1[0]) ** 2)\n\n\ndef doloci_trgovine(dom, slovar_koordinat, seznam_izdelkov, kombinacija):\n skupine = []\n poti = []\n for trgovina in kombinacija:\n skupine.append(podatki.lokacije(slovar_koordinat, trgovina))\n for i in skupine[0]:\n dolzina = razdalja(dom, i)\n if len(kombinacija) > 1:\n for j in skupine[1]:\n dolzina += razdalja(i, j)\n if len(kombinacija) > 2:\n for k in skupine[2]:\n dolzina += razdalja(j, k)\n if len(kombinacija) > 3:\n for m in skupine[3]:\n dolzina += razdalja(k, m)\n if len(kombinacija) > 4:\n for n in skupine[4]:\n dolzina += razdalja(m, n)\n dolzina += razdalja(n, dom)\n poti.append([[dom, i, j, k, m, n], dolzina]\n )\n dolzina = 0\n else:\n dolzina += razdalja(m, dom)\n poti.append([[dom, i, j, k, m], dolzina])\n dolzina = 0\n else:\n dolzina += razdalja(k, dom)\n poti.append([[dom, i, j, k], dolzina])\n dolzina = 0\n else:\n dolzina += razdalja(j, dom)\n poti.append([[dom, i, j], dolzina])\n dolzina = 0\n else:\n dolzina *= 2\n poti.append([[dom, i], dolzina])\n dolzina = 0\n dolzine = [el[1] for el in poti]\n if dolzine == []:\n print('Nakupa ni mogoče opraviti.')\n return None\n mini = numpy.argmin(dolzine)\n return poti[mini]\n return dolzina, sez_vozlisc\n\n\ndef doloci_pot(dom, seznam_izdelkov, seznam_trgovin,\n seznam_izdelkov_v_kosarici, trgovine_z_izdelki):\n vozlisca = []\n dolzine = []\n trgovine = []\n for kombinacija in kombinacije_trgovin_f(set(seznam_izdelkov_v_kosarici\n ), seznam_trgovin, trgovine_z_izdelki):\n par = doloci_trgovine(dom, slovar_koordinat, seznam_izdelkov,\n kombinacija)\n dolzine.append(par[1])\n vozlisca.append(par[0])\n trgovine.append(kombinacija)\n if dolzine == []:\n return None\n i = numpy.argmin(dolzine)\n v = vozlisca[i]\n v.append(dom)\n obiskane_trgovine = trgovine[i]\n return v, obiskane_trgovine\n\n\ndef razporeditev(obiskane_trgovine, izdelki, slovar):\n izdelki2 = izdelki.copy()\n razporeditev = []\n for trgovina in obiskane_trgovine:\n sez = []\n for izdelek in izdelki:\n if {izdelek}.issubset(slovar[trgovina]):\n izd = podatki.id_izdelka_v_opis()[izdelek - 1]\n sez.append(izd)\n izdelki2.remove(izdelek)\n razporeditev.append([trgovina, sez])\n return razporeditev\n\n\nbaza.commit()\n<mask token>\n",
"step-4": "import itertools\nimport numpy\nimport math\nimport psycopg2\nimport podatki\nbaza = podatki.baza\ndom = podatki.preberi_lokacijo()\nseznam_trgovin = ['spar', 'mercator', 'tus', 'hofer', 'lidl']\nid_in_opis = podatki.id_izdelka_v_opis()\nseznam_izdelkov = [el[0] for el in id_in_opis]\nmnozica_izdelkov = set(seznam_izdelkov)\ntrgovine_z_izdelki = podatki.trgovine_z_izdelki_f()\nseznam_izdelkov_v_kosarici = [el[3] for el in podatki.kosarica]\n<mask token>\n\n\ndef kombinacije_trgovin_f(mnozica_izdelkov_v_kosarici, seznam_trgovin,\n trgovine_z_izdelki):\n generator_kombinacij = (set(itertools.compress(seznam_trgovin, el)) for\n el in itertools.product(*([[0, 1]] * len(seznam_trgovin))))\n kombinacije = []\n for mnozica_trgovin in generator_kombinacij:\n izdelki_kombinacije = set()\n for trgovina in mnozica_trgovin:\n for izdelek in trgovine_z_izdelki[trgovina]:\n izdelki_kombinacije.add(izdelek)\n if mnozica_izdelkov_v_kosarici.issubset(izdelki_kombinacije):\n kombinacije.append(mnozica_trgovin)\n for kombinacija in kombinacije:\n for kombinacija2 in kombinacije:\n if kombinacija.issubset(kombinacija2\n ) and kombinacija != kombinacija2:\n kombinacije.remove(kombinacija2)\n elif kombinacija2.issubset(kombinacija\n ) and kombinacija != kombinacija2:\n kombinacije.remove(kombinacija)\n for kombinacija in kombinacije:\n for kombinacija2 in kombinacije:\n if kombinacija.issubset(kombinacija2\n ) and kombinacija != kombinacija2:\n kombinacije.remove(kombinacija2)\n elif kombinacija2.issubset(kombinacija\n ) and kombinacija != kombinacija2:\n kombinacije.remove(kombinacija)\n return kombinacije\n return None\n\n\ndef razdalja(vozlisce1, vozlisce2):\n return math.sqrt((vozlisce2[1] - vozlisce1[1]) ** 2 + (vozlisce2[0] -\n vozlisce1[0]) ** 2)\n\n\ndef doloci_trgovine(dom, slovar_koordinat, seznam_izdelkov, kombinacija):\n skupine = []\n poti = []\n for trgovina in kombinacija:\n skupine.append(podatki.lokacije(slovar_koordinat, trgovina))\n for i in skupine[0]:\n dolzina = razdalja(dom, i)\n if len(kombinacija) > 1:\n for j in skupine[1]:\n dolzina += razdalja(i, j)\n if len(kombinacija) > 2:\n for k in skupine[2]:\n dolzina += razdalja(j, k)\n if len(kombinacija) > 3:\n for m in skupine[3]:\n dolzina += razdalja(k, m)\n if len(kombinacija) > 4:\n for n in skupine[4]:\n dolzina += razdalja(m, n)\n dolzina += razdalja(n, dom)\n poti.append([[dom, i, j, k, m, n], dolzina]\n )\n dolzina = 0\n else:\n dolzina += razdalja(m, dom)\n poti.append([[dom, i, j, k, m], dolzina])\n dolzina = 0\n else:\n dolzina += razdalja(k, dom)\n poti.append([[dom, i, j, k], dolzina])\n dolzina = 0\n else:\n dolzina += razdalja(j, dom)\n poti.append([[dom, i, j], dolzina])\n dolzina = 0\n else:\n dolzina *= 2\n poti.append([[dom, i], dolzina])\n dolzina = 0\n dolzine = [el[1] for el in poti]\n if dolzine == []:\n print('Nakupa ni mogoče opraviti.')\n return None\n mini = numpy.argmin(dolzine)\n return poti[mini]\n return dolzina, sez_vozlisc\n\n\ndef doloci_pot(dom, seznam_izdelkov, seznam_trgovin,\n seznam_izdelkov_v_kosarici, trgovine_z_izdelki):\n vozlisca = []\n dolzine = []\n trgovine = []\n for kombinacija in kombinacije_trgovin_f(set(seznam_izdelkov_v_kosarici\n ), seznam_trgovin, trgovine_z_izdelki):\n par = doloci_trgovine(dom, slovar_koordinat, seznam_izdelkov,\n kombinacija)\n dolzine.append(par[1])\n vozlisca.append(par[0])\n trgovine.append(kombinacija)\n if dolzine == []:\n return None\n i = numpy.argmin(dolzine)\n v = vozlisca[i]\n v.append(dom)\n obiskane_trgovine = trgovine[i]\n return v, 
obiskane_trgovine\n\n\ndef razporeditev(obiskane_trgovine, izdelki, slovar):\n izdelki2 = izdelki.copy()\n razporeditev = []\n for trgovina in obiskane_trgovine:\n sez = []\n for izdelek in izdelki:\n if {izdelek}.issubset(slovar[trgovina]):\n izd = podatki.id_izdelka_v_opis()[izdelek - 1]\n sez.append(izd)\n izdelki2.remove(izdelek)\n razporeditev.append([trgovina, sez])\n return razporeditev\n\n\nbaza.commit()\nslovar_koordinat = podatki.slovar_koordinat\nkombinacije_trgovin = kombinacije_trgovin_f(set(seznam_izdelkov_v_kosarici),\n seznam_trgovin, trgovine_z_izdelki)\npot, obiskane_trgovine = doloci_pot(dom, seznam_izdelkov, seznam_trgovin,\n seznam_izdelkov_v_kosarici, trgovine_z_izdelki)\nrazpredelnica = razporeditev(obiskane_trgovine, seznam_izdelkov_v_kosarici,\n podatki.trgovine_z_izdelki)\n",
"step-5": "import itertools\nimport numpy\nimport math\nimport psycopg2\nimport podatki\n\nbaza = podatki.baza\ndom = podatki.preberi_lokacijo()\nseznam_trgovin =[\"spar\", \"mercator\", \"tus\", \"hofer\", \"lidl\"]\nid_in_opis = podatki.id_izdelka_v_opis()\nseznam_izdelkov = [el[0] for el in id_in_opis] #['cokolada', 'sladoled', ...]\nmnozica_izdelkov = set(seznam_izdelkov)\ntrgovine_z_izdelki = podatki.trgovine_z_izdelki_f() #slovar: {'trgovina':['id1', 'id2'],...}\nseznam_izdelkov_v_kosarici = [el[3] for el in podatki.kosarica]\n'''\ndef zemljevid_trgovin(trgovine):\n sez = []\n for trgovina in trgovine:\n sez.append([trgovina, [])\n\ndef kombinacije_trgovin(seznam_izdelkov):\n sez_kombinacij = []\n for trgovina in trgovine:\n kombinacija = []\n izdelki = sez_izdelkov\n for izdelek in izdelki:\n if izdelek in trgovina:\n izdelki = izdelki.remove(izdelek)\n'''\ndef kombinacije_trgovin_f(mnozica_izdelkov_v_kosarici, seznam_trgovin, trgovine_z_izdelki):\n \n generator_kombinacij = (set(itertools.compress(seznam_trgovin, el)) for el in itertools.product(*[[0,1]]*len(seznam_trgovin)))\n kombinacije = []\n for mnozica_trgovin in generator_kombinacij:\n izdelki_kombinacije = set()\n for trgovina in mnozica_trgovin:\n for izdelek in trgovine_z_izdelki[trgovina]:\n izdelki_kombinacije.add(izdelek) #množica vseh izdelkov, ki jih lahko dobiš v danih trgovinah\n if mnozica_izdelkov_v_kosarici.issubset(izdelki_kombinacije):\n kombinacije.append(mnozica_trgovin) \n for kombinacija in kombinacije:\n for kombinacija2 in kombinacije:\n if kombinacija.issubset(kombinacija2) and kombinacija != kombinacija2:\n kombinacije.remove(kombinacija2)\n elif kombinacija2.issubset(kombinacija) and kombinacija != kombinacija2:\n kombinacije.remove(kombinacija)\n for kombinacija in kombinacije:\n for kombinacija2 in kombinacije:\n if kombinacija.issubset(kombinacija2) and kombinacija != kombinacija2:\n kombinacije.remove(kombinacija2)\n elif kombinacija2.issubset(kombinacija) and kombinacija != kombinacija2:\n kombinacije.remove(kombinacija) \n return kombinacije\n \n \n return None\n\ndef razdalja(vozlisce1, vozlisce2):\n return math.sqrt((vozlisce2[1] - vozlisce1[1]) ** 2 + (vozlisce2[0] - vozlisce1[0]) ** 2)\n\n#dom = [x,y] \ndef doloci_trgovine(dom, slovar_koordinat, seznam_izdelkov, kombinacija):\n skupine = [] #skupine vozlišč iste trgovine\n poti = []\n for trgovina in kombinacija:\n skupine.append(podatki.lokacije(slovar_koordinat, trgovina))\n for i in skupine[0]: #skupine[0] je seznam lokacij ene vrste trgovin\n dolzina = razdalja(dom, i)\n if len(kombinacija) > 1:\n for j in skupine[1]:\n dolzina += razdalja(i, j)\n if len(kombinacija) > 2:\n for k in skupine[2]:\n dolzina += razdalja(j, k)\n if len(kombinacija) > 3:\n for m in skupine[3]:\n dolzina += razdalja(k, m)\n if len(kombinacija) > 4:\n for n in skupine[4]:\n dolzina += razdalja(m, n)\n dolzina += razdalja(n, dom)\n poti.append([[dom, i, j, k, m, n], dolzina])\n dolzina = 0\n else:\n dolzina += razdalja(m, dom)\n poti.append([[dom, i, j, k, m], dolzina])\n dolzina = 0\n else:\n dolzina += razdalja(k, dom)\n poti.append([[dom, i, j, k], dolzina])\n dolzina = 0\n else:\n dolzina += razdalja(j, dom)\n poti.append([[dom, i, j], dolzina])\n dolzina = 0\n else:\n dolzina *= 2\n poti.append([[dom, i], dolzina])\n dolzina = 0\n dolzine = [el[1] for el in poti]\n if dolzine == []:\n print(\"Nakupa ni mogoče opraviti.\")\n return None\n mini = numpy.argmin(dolzine)\n return poti[mini] #[[pot], dolzina]\n \n\n \n return (dolzina, sez_vozlisc)\n\ndef 
doloci_pot(dom, seznam_izdelkov, seznam_trgovin, seznam_izdelkov_v_kosarici, trgovine_z_izdelki):\n vozlisca = []\n dolzine = []\n trgovine = []\n for kombinacija in kombinacije_trgovin_f(set(seznam_izdelkov_v_kosarici), seznam_trgovin, trgovine_z_izdelki):\n par = doloci_trgovine(dom, slovar_koordinat, seznam_izdelkov, kombinacija)\n dolzine.append(par[1])\n vozlisca.append(par[0])\n trgovine.append(kombinacija)\n if dolzine == []:\n return None\n i = numpy.argmin(dolzine)\n v = vozlisca[i]\n v.append(dom)\n obiskane_trgovine = trgovine[i]\n return v, obiskane_trgovine\n\ndef razporeditev(obiskane_trgovine, izdelki, slovar):\n izdelki2 = izdelki.copy()\n razporeditev = []\n for trgovina in obiskane_trgovine:\n sez = []\n for izdelek in izdelki:\n if {izdelek}.issubset(slovar[trgovina]):\n izd = podatki.id_izdelka_v_opis()[izdelek-1]\n sez.append(izd)\n izdelki2.remove(izdelek)\n razporeditev.append([trgovina, sez])\n return razporeditev\n \nbaza.commit()\n\nslovar_koordinat = podatki.slovar_koordinat\n\nkombinacije_trgovin = kombinacije_trgovin_f(set(seznam_izdelkov_v_kosarici), seznam_trgovin, trgovine_z_izdelki)\n#print(kombinacije_trgovin)'\npot, obiskane_trgovine = doloci_pot(dom, seznam_izdelkov, seznam_trgovin, seznam_izdelkov_v_kosarici, trgovine_z_izdelki)\nrazpredelnica = razporeditev(obiskane_trgovine, seznam_izdelkov_v_kosarici, podatki.trgovine_z_izdelki)\n",
"step-ids": [
4,
5,
6,
8,
9
]
}
|
[
4,
5,
6,
8,
9
] |
#!/bin/python3
# Implement a stack with push, pop, inc(e, k) operations
# inc (e,k) - Add k to each of bottom e elements
import sys
class Stack(object):
def __init__(self):
self.arr = []
def push(self, val):
self.arr.append(val)
def pop(self):
if len(self.arr):
return self.arr.pop()
def inc(self, e, k):
count = min(len(self.arr), e)
for i in range(count):
self.arr[i] += k
def peek(self):
if len(self.arr):
return self.arr[-1]
else:
return 'EMPTY'
def superStack(operations):
s = Stack()
for o in operations:
op = o.split(' ')
if op[0] == 'push':
s.push(int(op[1]))
print(s.peek())
elif op[0] == 'pop':
s.pop()
print(s.peek())
elif op[0] == 'inc':
s.inc(int(op[1]), int(op[2]))
print(s.peek())
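# Worked example (hypothetical input): the six operations below print the stack top after
# each one -- 4, EMPTY, 3, 5, 2, 3. The final 'inc 3 1' adds 1 to the bottom three
# elements, turning [3, 5, 2] into [4, 6, 3], so the last reported top is 3.
#   push 4
#   pop
#   push 3
#   push 5
#   push 2
#   inc 3 1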
if __name__ == "__main__":
operations_cnt = 0
operations_cnt = int(input())
operations_i = 0
operations = []
while operations_i < operations_cnt:
try:
operations_item = str(input())
except:
operations_item = None
operations.append(operations_item)
operations_i += 1
    res = superStack(operations)
|
normal
|
{
"blob_id": "5ed439a2a7cfb9c941c40ea0c5eba2851a0f2855",
"index": 24,
"step-1": "<mask token>\n\n\nclass Stack(object):\n\n def __init__(self):\n self.arr = []\n\n def push(self, val):\n self.arr.append(val)\n\n def pop(self):\n if len(self.arr):\n return self.arr.pop()\n\n def inc(self, e, k):\n count = min(len(self.arr), e)\n for i in range(count):\n self.arr[i] += k\n <mask token>\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass Stack(object):\n\n def __init__(self):\n self.arr = []\n\n def push(self, val):\n self.arr.append(val)\n\n def pop(self):\n if len(self.arr):\n return self.arr.pop()\n\n def inc(self, e, k):\n count = min(len(self.arr), e)\n for i in range(count):\n self.arr[i] += k\n\n def peek(self):\n if len(self.arr):\n return self.arr[-1]\n else:\n return 'EMPTY'\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass Stack(object):\n\n def __init__(self):\n self.arr = []\n\n def push(self, val):\n self.arr.append(val)\n\n def pop(self):\n if len(self.arr):\n return self.arr.pop()\n\n def inc(self, e, k):\n count = min(len(self.arr), e)\n for i in range(count):\n self.arr[i] += k\n\n def peek(self):\n if len(self.arr):\n return self.arr[-1]\n else:\n return 'EMPTY'\n\n\ndef superStack(operations):\n s = Stack()\n for o in operations:\n op = o.split(' ')\n if op[0] == 'push':\n s.push(int(op[1]))\n print(s.peek())\n elif op[0] == 'pop':\n s.pop()\n print(s.peek())\n elif op[0] == 'inc':\n s.inc(int(op[1]), int(op[2]))\n print(s.peek())\n\n\n<mask token>\n",
"step-4": "<mask token>\n\n\nclass Stack(object):\n\n def __init__(self):\n self.arr = []\n\n def push(self, val):\n self.arr.append(val)\n\n def pop(self):\n if len(self.arr):\n return self.arr.pop()\n\n def inc(self, e, k):\n count = min(len(self.arr), e)\n for i in range(count):\n self.arr[i] += k\n\n def peek(self):\n if len(self.arr):\n return self.arr[-1]\n else:\n return 'EMPTY'\n\n\ndef superStack(operations):\n s = Stack()\n for o in operations:\n op = o.split(' ')\n if op[0] == 'push':\n s.push(int(op[1]))\n print(s.peek())\n elif op[0] == 'pop':\n s.pop()\n print(s.peek())\n elif op[0] == 'inc':\n s.inc(int(op[1]), int(op[2]))\n print(s.peek())\n\n\nif __name__ == '__main__':\n operations_cnt = 0\n operations_cnt = int(input())\n operations_i = 0\n operations = []\n while operations_i < operations_cnt:\n try:\n operations_item = str(input())\n except:\n operations_item = None\n operations.append(operations_item)\n operations_i += 1\n res = superStack(operations)\n",
"step-5": "#!/bin/python3\n\n# Implement a stack with push, pop, inc(e, k) operations\n# inc (e,k) - Add k to each of bottom e elements\nimport sys\n\nclass Stack(object):\n def __init__(self):\n self.arr = []\n\n def push(self, val):\n self.arr.append(val)\n\n def pop(self):\n if len(self.arr):\n return self.arr.pop()\n\n def inc(self, e, k):\n count = min(len(self.arr), e)\n for i in range(count):\n self.arr[i] += k\n\n def peek(self):\n if len(self.arr):\n return self.arr[-1]\n else:\n return 'EMPTY'\n\ndef superStack(operations):\n s = Stack()\n for o in operations:\n op = o.split(' ')\n if op[0] == 'push':\n s.push(int(op[1]))\n print(s.peek())\n elif op[0] == 'pop':\n s.pop()\n print(s.peek())\n elif op[0] == 'inc':\n s.inc(int(op[1]), int(op[2]))\n print(s.peek())\n \n\nif __name__ == \"__main__\":\n operations_cnt = 0\n operations_cnt = int(input())\n operations_i = 0\n operations = []\n while operations_i < operations_cnt:\n try:\n operations_item = str(input())\n except:\n operations_item = None\n operations.append(operations_item)\n operations_i += 1\n\n\n res = superStack(operations);\n \n\n",
"step-ids": [
5,
6,
7,
8,
10
]
}
|
[
5,
6,
7,
8,
10
] |
c = "こ に ち わ "
print (len(c))
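# len() here counts Unicode code points, so under Python 3 this prints 8
# (four hiragana characters plus four spaces), not the UTF-8 byte length (16).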
|
normal
|
{
"blob_id": "26f466a6a2fd09bb108ca89e4537192c070ff83b",
"index": 1335,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nprint(len(c))\n",
"step-3": "c = 'こ に ち わ '\nprint(len(c))\n",
"step-4": "c = \"こ に ち わ \"\nprint (len(c))\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
def rinex_merge(output_fname, rinex_fnames, _err=sys.stderr):
"""
Using teqc, merge *rinex_fnames* and store to the file
*output_fname*. Returns *output_fname*. Redirect error output to
*_err*.
"""
args = ['-pch'] + rinex_fnames
sh.teqc(*args, _out=output_fname, _err=_err)
return output_fname
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def rinex_info(rinex_fname, nav_fname, work_path=None):
"""
Query RINEX file *rinex_fname* and RINEX nav file *nav_fname* for
useful information and return in a key/value mapping. Store
intermediate files in *work_path* (a temporary, automatically
cleaned up area if not specified).
"""
if not os.path.isfile(rinex_fname):
raise ValueError('RINEX observation file {} does not exist'.format(
rinex_fname))
if not os.path.isfile(nav_fname):
raise ValueError('RINEX navigation file {} does not exist'.format(
nav_fname))
info = {}
def process_output(line):
if line.startswith('Receiver type'):
info['receiver'] = line.split(':')[1].split('(')[0].strip()
elif line.lstrip().startswith('antenna WGS 84 (xyz)'):
assert line.rstrip().endswith('(m)')
info['xyz'] = map(float, line.split(':')[1].split('(')[0].split())
elif line.lstrip().startswith('antenna WGS 84 (geo)'):
if line.split(':')[1].lstrip()[0] in ['N', 'S']:
pass
else:
lat, _, lon, _ = line.split(':')[1].split(None, 3)
info['lat'] = float(lat)
lon = float(lon)
while lon > 180:
lon -= 360
info['lon'] = lon
elif line.lstrip().startswith('WGS 84 height'):
assert line.rstrip().endswith('m')
info['height'] = float(line.split(':')[1].rstrip()[:-1])
elif line.startswith('|qc - header| position'):
assert line.rstrip()[-1] == 'm'
info['xyz error'] = float(line.split(':')[1].rstrip()[:-1])
elif line.startswith('Observation interval'):
info['interval'] = float(line.split(':')[1].split()[0])
elif line.startswith('Moving average MP12'):
info['MP12'] = float(line.split(':')[1].rstrip()[:-1])
elif line.startswith('Moving average MP21'):
info['MP21'] = float(line.split(':')[1].rstrip()[:-1])
with SmartTempDir(work_path) as work_path:
intermediate_rinex_fname = replace_path(work_path, rinex_fname)
os.symlink(os.path.abspath(rinex_fname), intermediate_rinex_fname)
intermediate_nav_fname = replace_path(work_path, nav_fname)
os.symlink(os.path.abspath(nav_fname), intermediate_nav_fname)
sh.teqc('+qc', '+quiet', '-R', '-S', '-E', '-C', '-J', '-nav',
intermediate_nav_fname, intermediate_rinex_fname, _cwd=
work_path, _out=process_output, _err=sys.stderr)
os.remove(intermediate_rinex_fname)
os.remove(intermediate_nav_fname)
return info
def rinex_merge(output_fname, rinex_fnames, _err=sys.stderr):
"""
Using teqc, merge *rinex_fnames* and store to the file
*output_fname*. Returns *output_fname*. Redirect error output to
*_err*.
"""
args = ['-pch'] + rinex_fnames
sh.teqc(*args, _out=output_fname, _err=_err)
return output_fname
if __name__ == '__main__':
logging.basicConfig(level=logging.INFO)
logging.getLogger('sh').setLevel(logging.WARNING)
rinex_fname = '/Users/butala/src/absolute_tec/jplm0010.14o'
nav_fname = '/Users/butala/src/absolute_tec/jplm0010.14n'
info = rinex_info(rinex_fname, nav_fname)
for key in sorted(info):
print('{:10s}: {}'.format(key, info[key]))
<|reserved_special_token_1|>
<|reserved_special_token_0|>
logger = logging.getLogger('pyrsss.gps.teqc')
def rinex_info(rinex_fname, nav_fname, work_path=None):
"""
Query RINEX file *rinex_fname* and RINEX nav file *nav_fname* for
useful information and return in a key/value mapping. Store
intermediate files in *work_path* (a temporary, automatically
cleaned up area if not specified).
"""
if not os.path.isfile(rinex_fname):
raise ValueError('RINEX observation file {} does not exist'.format(
rinex_fname))
if not os.path.isfile(nav_fname):
raise ValueError('RINEX navigation file {} does not exist'.format(
nav_fname))
info = {}
def process_output(line):
if line.startswith('Receiver type'):
info['receiver'] = line.split(':')[1].split('(')[0].strip()
elif line.lstrip().startswith('antenna WGS 84 (xyz)'):
assert line.rstrip().endswith('(m)')
info['xyz'] = map(float, line.split(':')[1].split('(')[0].split())
elif line.lstrip().startswith('antenna WGS 84 (geo)'):
if line.split(':')[1].lstrip()[0] in ['N', 'S']:
pass
else:
lat, _, lon, _ = line.split(':')[1].split(None, 3)
info['lat'] = float(lat)
lon = float(lon)
while lon > 180:
lon -= 360
info['lon'] = lon
elif line.lstrip().startswith('WGS 84 height'):
assert line.rstrip().endswith('m')
info['height'] = float(line.split(':')[1].rstrip()[:-1])
elif line.startswith('|qc - header| position'):
assert line.rstrip()[-1] == 'm'
info['xyz error'] = float(line.split(':')[1].rstrip()[:-1])
elif line.startswith('Observation interval'):
info['interval'] = float(line.split(':')[1].split()[0])
elif line.startswith('Moving average MP12'):
info['MP12'] = float(line.split(':')[1].rstrip()[:-1])
elif line.startswith('Moving average MP21'):
info['MP21'] = float(line.split(':')[1].rstrip()[:-1])
with SmartTempDir(work_path) as work_path:
intermediate_rinex_fname = replace_path(work_path, rinex_fname)
os.symlink(os.path.abspath(rinex_fname), intermediate_rinex_fname)
intermediate_nav_fname = replace_path(work_path, nav_fname)
os.symlink(os.path.abspath(nav_fname), intermediate_nav_fname)
sh.teqc('+qc', '+quiet', '-R', '-S', '-E', '-C', '-J', '-nav',
intermediate_nav_fname, intermediate_rinex_fname, _cwd=
work_path, _out=process_output, _err=sys.stderr)
os.remove(intermediate_rinex_fname)
os.remove(intermediate_nav_fname)
return info
def rinex_merge(output_fname, rinex_fnames, _err=sys.stderr):
"""
Using teqc, merge *rinex_fnames* and store to the file
*output_fname*. Returns *output_fname*. Redirect error output to
*_err*.
"""
args = ['-pch'] + rinex_fnames
sh.teqc(*args, _out=output_fname, _err=_err)
return output_fname
if __name__ == '__main__':
logging.basicConfig(level=logging.INFO)
logging.getLogger('sh').setLevel(logging.WARNING)
rinex_fname = '/Users/butala/src/absolute_tec/jplm0010.14o'
nav_fname = '/Users/butala/src/absolute_tec/jplm0010.14n'
info = rinex_info(rinex_fname, nav_fname)
for key in sorted(info):
print('{:10s}: {}'.format(key, info[key]))
<|reserved_special_token_1|>
import sys
import os
import logging
import sh
from ..util.path import SmartTempDir, replace_path
logger = logging.getLogger('pyrsss.gps.teqc')
def rinex_info(rinex_fname, nav_fname, work_path=None):
"""
Query RINEX file *rinex_fname* and RINEX nav file *nav_fname* for
useful information and return in a key/value mapping. Store
intermediate files in *work_path* (a temporary, automatically
cleaned up area if not specified).
"""
if not os.path.isfile(rinex_fname):
raise ValueError('RINEX observation file {} does not exist'.format(
rinex_fname))
if not os.path.isfile(nav_fname):
raise ValueError('RINEX navigation file {} does not exist'.format(
nav_fname))
info = {}
def process_output(line):
if line.startswith('Receiver type'):
info['receiver'] = line.split(':')[1].split('(')[0].strip()
elif line.lstrip().startswith('antenna WGS 84 (xyz)'):
assert line.rstrip().endswith('(m)')
info['xyz'] = map(float, line.split(':')[1].split('(')[0].split())
elif line.lstrip().startswith('antenna WGS 84 (geo)'):
if line.split(':')[1].lstrip()[0] in ['N', 'S']:
pass
else:
lat, _, lon, _ = line.split(':')[1].split(None, 3)
info['lat'] = float(lat)
lon = float(lon)
while lon > 180:
lon -= 360
info['lon'] = lon
elif line.lstrip().startswith('WGS 84 height'):
assert line.rstrip().endswith('m')
info['height'] = float(line.split(':')[1].rstrip()[:-1])
elif line.startswith('|qc - header| position'):
assert line.rstrip()[-1] == 'm'
info['xyz error'] = float(line.split(':')[1].rstrip()[:-1])
elif line.startswith('Observation interval'):
info['interval'] = float(line.split(':')[1].split()[0])
elif line.startswith('Moving average MP12'):
info['MP12'] = float(line.split(':')[1].rstrip()[:-1])
elif line.startswith('Moving average MP21'):
info['MP21'] = float(line.split(':')[1].rstrip()[:-1])
with SmartTempDir(work_path) as work_path:
intermediate_rinex_fname = replace_path(work_path, rinex_fname)
os.symlink(os.path.abspath(rinex_fname), intermediate_rinex_fname)
intermediate_nav_fname = replace_path(work_path, nav_fname)
os.symlink(os.path.abspath(nav_fname), intermediate_nav_fname)
sh.teqc('+qc', '+quiet', '-R', '-S', '-E', '-C', '-J', '-nav',
intermediate_nav_fname, intermediate_rinex_fname, _cwd=
work_path, _out=process_output, _err=sys.stderr)
os.remove(intermediate_rinex_fname)
os.remove(intermediate_nav_fname)
return info
def rinex_merge(output_fname, rinex_fnames, _err=sys.stderr):
"""
Using teqc, merge *rinex_fnames* and store to the file
*output_fname*. Returns *output_fname*. Redirect error output to
*_err*.
"""
args = ['-pch'] + rinex_fnames
sh.teqc(*args, _out=output_fname, _err=_err)
return output_fname
if __name__ == '__main__':
logging.basicConfig(level=logging.INFO)
logging.getLogger('sh').setLevel(logging.WARNING)
rinex_fname = '/Users/butala/src/absolute_tec/jplm0010.14o'
nav_fname = '/Users/butala/src/absolute_tec/jplm0010.14n'
info = rinex_info(rinex_fname, nav_fname)
for key in sorted(info):
print('{:10s}: {}'.format(key, info[key]))
<|reserved_special_token_1|>
import sys
import os
import logging
import sh
from ..util.path import SmartTempDir, replace_path
logger = logging.getLogger('pyrsss.gps.teqc')
def rinex_info(rinex_fname,
nav_fname,
work_path=None):
"""
Query RINEX file *rinex_fname* and RINEX nav file *nav_fname* for
useful information and return in a key/value mapping. Store
intermediate files in *work_path* (a temporary, automatically
cleaned up area if not specified).
"""
if not os.path.isfile(rinex_fname):
raise ValueError('RINEX observation file {} does not exist'.format(rinex_fname))
if not os.path.isfile(nav_fname):
raise ValueError('RINEX navigation file {} does not exist'.format(nav_fname))
# information mapping
info = {}
def process_output(line):
if line.startswith('Receiver type'):
info['receiver'] = line.split(':')[1].split('(')[0].strip()
elif line.lstrip().startswith('antenna WGS 84 (xyz)'):
# make sure units are [m]
assert line.rstrip().endswith('(m)')
info['xyz'] = map(float, line.split(':')[1].split('(')[0].split())
elif line.lstrip().startswith('antenna WGS 84 (geo)'):
if line.split(':')[1].lstrip()[0] in ['N', 'S']:
# skip arcmin, arcsec line
pass
else:
lat, _, lon, _ = line.split(':')[1].split(None, 3)
info['lat'] = float(lat)
lon = float(lon)
while lon > 180:
lon -= 360
info['lon'] = lon
elif line.lstrip().startswith('WGS 84 height'):
assert line.rstrip().endswith('m')
info['height'] = float(line.split(':')[1].rstrip()[:-1])
elif line.startswith('|qc - header| position'):
# make sure units are [m]
assert line.rstrip()[-1] == 'm'
info['xyz error'] = float(line.split(':')[1].rstrip()[:-1])
elif line.startswith('Observation interval'):
info['interval'] = float(line.split(':')[1].split()[0])
elif line.startswith('Moving average MP12'):
info['MP12'] = float(line.split(':')[1].rstrip()[:-1])
elif line.startswith('Moving average MP21'):
info['MP21'] = float(line.split(':')[1].rstrip()[:-1])
# query the RINEX file via teqc quality check --- process in given
# work area to avoid intermediate file pollution
with SmartTempDir(work_path) as work_path:
intermediate_rinex_fname = replace_path(work_path, rinex_fname)
os.symlink(os.path.abspath(rinex_fname),
intermediate_rinex_fname)
intermediate_nav_fname = replace_path(work_path, nav_fname)
os.symlink(os.path.abspath(nav_fname),
intermediate_nav_fname)
sh.teqc('+qc',
'+quiet',
'-R',
'-S',
'-E',
'-C',
'-J',
'-nav', intermediate_nav_fname,
intermediate_rinex_fname,
_cwd=work_path,
_out=process_output,
_err=sys.stderr)
os.remove(intermediate_rinex_fname)
os.remove(intermediate_nav_fname)
return info
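# Keys populated by the parser above: 'receiver', 'xyz', 'lat', 'lon', 'height',
# 'xyz error', 'interval', 'MP12' and 'MP21' (a key is only set if teqc emits the matching line).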
def rinex_merge(output_fname, rinex_fnames, _err=sys.stderr):
"""
Using teqc, merge *rinex_fnames* and store to the file
*output_fname*. Returns *output_fname*. Redirect error output to
*_err*.
"""
args = ['-pch'] + rinex_fnames
sh.teqc(*args,
_out=output_fname,
_err=_err)
return output_fname
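# Minimal usage sketch (hypothetical file names):
#   merged = rinex_merge('site_merged.14o', ['site0010.14o', 'site0020.14o'])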
if __name__ == '__main__':
logging.basicConfig(level=logging.INFO)
logging.getLogger('sh').setLevel(logging.WARNING)
rinex_fname = '/Users/butala/src/absolute_tec/jplm0010.14o'
nav_fname = '/Users/butala/src/absolute_tec/jplm0010.14n'
info = rinex_info(rinex_fname,
nav_fname)
for key in sorted(info):
print('{:10s}: {}'.format(key, info[key]))
|
flexible
|
{
"blob_id": "ec19567b49f686f613308d79e439f6ff9053fa40",
"index": 5064,
"step-1": "<mask token>\n\n\ndef rinex_merge(output_fname, rinex_fnames, _err=sys.stderr):\n \"\"\"\n Using teqc, merge *rinex_fnames* and store to the file\n *output_fname*. Returns *output_fname*. Redirect error output to\n *_err*.\n \"\"\"\n args = ['-pch'] + rinex_fnames\n sh.teqc(*args, _out=output_fname, _err=_err)\n return output_fname\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef rinex_info(rinex_fname, nav_fname, work_path=None):\n \"\"\"\n Query RINEX file *rinex_fname* and RINEX nav file *nav_fname* for\n useful information and return in a key/value mapping. Store\n intermediate files in *work_path* (a temporary, automatically\n cleaned up area if not specified).\n \"\"\"\n if not os.path.isfile(rinex_fname):\n raise ValueError('RINEX observation file {} does not exist'.format(\n rinex_fname))\n if not os.path.isfile(nav_fname):\n raise ValueError('RINEX navigation file {} does not exist'.format(\n nav_fname))\n info = {}\n\n def process_output(line):\n if line.startswith('Receiver type'):\n info['receiver'] = line.split(':')[1].split('(')[0].strip()\n elif line.lstrip().startswith('antenna WGS 84 (xyz)'):\n assert line.rstrip().endswith('(m)')\n info['xyz'] = map(float, line.split(':')[1].split('(')[0].split())\n elif line.lstrip().startswith('antenna WGS 84 (geo)'):\n if line.split(':')[1].lstrip()[0] in ['N', 'S']:\n pass\n else:\n lat, _, lon, _ = line.split(':')[1].split(None, 3)\n info['lat'] = float(lat)\n lon = float(lon)\n while lon > 180:\n lon -= 360\n info['lon'] = lon\n elif line.lstrip().startswith('WGS 84 height'):\n assert line.rstrip().endswith('m')\n info['height'] = float(line.split(':')[1].rstrip()[:-1])\n elif line.startswith('|qc - header| position'):\n assert line.rstrip()[-1] == 'm'\n info['xyz error'] = float(line.split(':')[1].rstrip()[:-1])\n elif line.startswith('Observation interval'):\n info['interval'] = float(line.split(':')[1].split()[0])\n elif line.startswith('Moving average MP12'):\n info['MP12'] = float(line.split(':')[1].rstrip()[:-1])\n elif line.startswith('Moving average MP21'):\n info['MP21'] = float(line.split(':')[1].rstrip()[:-1])\n with SmartTempDir(work_path) as work_path:\n intermediate_rinex_fname = replace_path(work_path, rinex_fname)\n os.symlink(os.path.abspath(rinex_fname), intermediate_rinex_fname)\n intermediate_nav_fname = replace_path(work_path, nav_fname)\n os.symlink(os.path.abspath(nav_fname), intermediate_nav_fname)\n sh.teqc('+qc', '+quiet', '-R', '-S', '-E', '-C', '-J', '-nav',\n intermediate_nav_fname, intermediate_rinex_fname, _cwd=\n work_path, _out=process_output, _err=sys.stderr)\n os.remove(intermediate_rinex_fname)\n os.remove(intermediate_nav_fname)\n return info\n\n\ndef rinex_merge(output_fname, rinex_fnames, _err=sys.stderr):\n \"\"\"\n Using teqc, merge *rinex_fnames* and store to the file\n *output_fname*. Returns *output_fname*. Redirect error output to\n *_err*.\n \"\"\"\n args = ['-pch'] + rinex_fnames\n sh.teqc(*args, _out=output_fname, _err=_err)\n return output_fname\n\n\nif __name__ == '__main__':\n logging.basicConfig(level=logging.INFO)\n logging.getLogger('sh').setLevel(logging.WARNING)\n rinex_fname = '/Users/butala/src/absolute_tec/jplm0010.14o'\n nav_fname = '/Users/butala/src/absolute_tec/jplm0010.14n'\n info = rinex_info(rinex_fname, nav_fname)\n for key in sorted(info):\n print('{:10s}: {}'.format(key, info[key]))\n",
"step-3": "<mask token>\nlogger = logging.getLogger('pyrsss.gps.teqc')\n\n\ndef rinex_info(rinex_fname, nav_fname, work_path=None):\n \"\"\"\n Query RINEX file *rinex_fname* and RINEX nav file *nav_fname* for\n useful information and return in a key/value mapping. Store\n intermediate files in *work_path* (a temporary, automatically\n cleaned up area if not specified).\n \"\"\"\n if not os.path.isfile(rinex_fname):\n raise ValueError('RINEX observation file {} does not exist'.format(\n rinex_fname))\n if not os.path.isfile(nav_fname):\n raise ValueError('RINEX navigation file {} does not exist'.format(\n nav_fname))\n info = {}\n\n def process_output(line):\n if line.startswith('Receiver type'):\n info['receiver'] = line.split(':')[1].split('(')[0].strip()\n elif line.lstrip().startswith('antenna WGS 84 (xyz)'):\n assert line.rstrip().endswith('(m)')\n info['xyz'] = map(float, line.split(':')[1].split('(')[0].split())\n elif line.lstrip().startswith('antenna WGS 84 (geo)'):\n if line.split(':')[1].lstrip()[0] in ['N', 'S']:\n pass\n else:\n lat, _, lon, _ = line.split(':')[1].split(None, 3)\n info['lat'] = float(lat)\n lon = float(lon)\n while lon > 180:\n lon -= 360\n info['lon'] = lon\n elif line.lstrip().startswith('WGS 84 height'):\n assert line.rstrip().endswith('m')\n info['height'] = float(line.split(':')[1].rstrip()[:-1])\n elif line.startswith('|qc - header| position'):\n assert line.rstrip()[-1] == 'm'\n info['xyz error'] = float(line.split(':')[1].rstrip()[:-1])\n elif line.startswith('Observation interval'):\n info['interval'] = float(line.split(':')[1].split()[0])\n elif line.startswith('Moving average MP12'):\n info['MP12'] = float(line.split(':')[1].rstrip()[:-1])\n elif line.startswith('Moving average MP21'):\n info['MP21'] = float(line.split(':')[1].rstrip()[:-1])\n with SmartTempDir(work_path) as work_path:\n intermediate_rinex_fname = replace_path(work_path, rinex_fname)\n os.symlink(os.path.abspath(rinex_fname), intermediate_rinex_fname)\n intermediate_nav_fname = replace_path(work_path, nav_fname)\n os.symlink(os.path.abspath(nav_fname), intermediate_nav_fname)\n sh.teqc('+qc', '+quiet', '-R', '-S', '-E', '-C', '-J', '-nav',\n intermediate_nav_fname, intermediate_rinex_fname, _cwd=\n work_path, _out=process_output, _err=sys.stderr)\n os.remove(intermediate_rinex_fname)\n os.remove(intermediate_nav_fname)\n return info\n\n\ndef rinex_merge(output_fname, rinex_fnames, _err=sys.stderr):\n \"\"\"\n Using teqc, merge *rinex_fnames* and store to the file\n *output_fname*. Returns *output_fname*. Redirect error output to\n *_err*.\n \"\"\"\n args = ['-pch'] + rinex_fnames\n sh.teqc(*args, _out=output_fname, _err=_err)\n return output_fname\n\n\nif __name__ == '__main__':\n logging.basicConfig(level=logging.INFO)\n logging.getLogger('sh').setLevel(logging.WARNING)\n rinex_fname = '/Users/butala/src/absolute_tec/jplm0010.14o'\n nav_fname = '/Users/butala/src/absolute_tec/jplm0010.14n'\n info = rinex_info(rinex_fname, nav_fname)\n for key in sorted(info):\n print('{:10s}: {}'.format(key, info[key]))\n",
"step-4": "import sys\nimport os\nimport logging\nimport sh\nfrom ..util.path import SmartTempDir, replace_path\nlogger = logging.getLogger('pyrsss.gps.teqc')\n\n\ndef rinex_info(rinex_fname, nav_fname, work_path=None):\n \"\"\"\n Query RINEX file *rinex_fname* and RINEX nav file *nav_fname* for\n useful information and return in a key/value mapping. Store\n intermediate files in *work_path* (a temporary, automatically\n cleaned up area if not specified).\n \"\"\"\n if not os.path.isfile(rinex_fname):\n raise ValueError('RINEX observation file {} does not exist'.format(\n rinex_fname))\n if not os.path.isfile(nav_fname):\n raise ValueError('RINEX navigation file {} does not exist'.format(\n nav_fname))\n info = {}\n\n def process_output(line):\n if line.startswith('Receiver type'):\n info['receiver'] = line.split(':')[1].split('(')[0].strip()\n elif line.lstrip().startswith('antenna WGS 84 (xyz)'):\n assert line.rstrip().endswith('(m)')\n info['xyz'] = map(float, line.split(':')[1].split('(')[0].split())\n elif line.lstrip().startswith('antenna WGS 84 (geo)'):\n if line.split(':')[1].lstrip()[0] in ['N', 'S']:\n pass\n else:\n lat, _, lon, _ = line.split(':')[1].split(None, 3)\n info['lat'] = float(lat)\n lon = float(lon)\n while lon > 180:\n lon -= 360\n info['lon'] = lon\n elif line.lstrip().startswith('WGS 84 height'):\n assert line.rstrip().endswith('m')\n info['height'] = float(line.split(':')[1].rstrip()[:-1])\n elif line.startswith('|qc - header| position'):\n assert line.rstrip()[-1] == 'm'\n info['xyz error'] = float(line.split(':')[1].rstrip()[:-1])\n elif line.startswith('Observation interval'):\n info['interval'] = float(line.split(':')[1].split()[0])\n elif line.startswith('Moving average MP12'):\n info['MP12'] = float(line.split(':')[1].rstrip()[:-1])\n elif line.startswith('Moving average MP21'):\n info['MP21'] = float(line.split(':')[1].rstrip()[:-1])\n with SmartTempDir(work_path) as work_path:\n intermediate_rinex_fname = replace_path(work_path, rinex_fname)\n os.symlink(os.path.abspath(rinex_fname), intermediate_rinex_fname)\n intermediate_nav_fname = replace_path(work_path, nav_fname)\n os.symlink(os.path.abspath(nav_fname), intermediate_nav_fname)\n sh.teqc('+qc', '+quiet', '-R', '-S', '-E', '-C', '-J', '-nav',\n intermediate_nav_fname, intermediate_rinex_fname, _cwd=\n work_path, _out=process_output, _err=sys.stderr)\n os.remove(intermediate_rinex_fname)\n os.remove(intermediate_nav_fname)\n return info\n\n\ndef rinex_merge(output_fname, rinex_fnames, _err=sys.stderr):\n \"\"\"\n Using teqc, merge *rinex_fnames* and store to the file\n *output_fname*. Returns *output_fname*. Redirect error output to\n *_err*.\n \"\"\"\n args = ['-pch'] + rinex_fnames\n sh.teqc(*args, _out=output_fname, _err=_err)\n return output_fname\n\n\nif __name__ == '__main__':\n logging.basicConfig(level=logging.INFO)\n logging.getLogger('sh').setLevel(logging.WARNING)\n rinex_fname = '/Users/butala/src/absolute_tec/jplm0010.14o'\n nav_fname = '/Users/butala/src/absolute_tec/jplm0010.14n'\n info = rinex_info(rinex_fname, nav_fname)\n for key in sorted(info):\n print('{:10s}: {}'.format(key, info[key]))\n",
"step-5": "import sys\nimport os\nimport logging\n\nimport sh\n\nfrom ..util.path import SmartTempDir, replace_path\n\nlogger = logging.getLogger('pyrsss.gps.teqc')\n\n\ndef rinex_info(rinex_fname,\n nav_fname,\n work_path=None):\n \"\"\"\n Query RINEX file *rinex_fname* and RINEX nav file *nav_fname* for\n useful information and return in a key/value mapping. Store\n intermediate files in *work_path* (a temporary, automatically\n cleaned up area if not specified).\n \"\"\"\n if not os.path.isfile(rinex_fname):\n raise ValueError('RINEX observation file {} does not exist'.format(rinex_fname))\n if not os.path.isfile(nav_fname):\n raise ValueError('RINEX navigation file {} does not exist'.format(nav_fname))\n # information mapping\n info = {}\n def process_output(line):\n if line.startswith('Receiver type'):\n info['receiver'] = line.split(':')[1].split('(')[0].strip()\n elif line.lstrip().startswith('antenna WGS 84 (xyz)'):\n # make sure units are [m]\n assert line.rstrip().endswith('(m)')\n info['xyz'] = map(float, line.split(':')[1].split('(')[0].split())\n elif line.lstrip().startswith('antenna WGS 84 (geo)'):\n if line.split(':')[1].lstrip()[0] in ['N', 'S']:\n # skip arcmin, arcsec line\n pass\n else:\n lat, _, lon, _ = line.split(':')[1].split(None, 3)\n info['lat'] = float(lat)\n lon = float(lon)\n while lon > 180:\n lon -= 360\n info['lon'] = lon\n elif line.lstrip().startswith('WGS 84 height'):\n assert line.rstrip().endswith('m')\n info['height'] = float(line.split(':')[1].rstrip()[:-1])\n elif line.startswith('|qc - header| position'):\n # make sure units are [m]\n assert line.rstrip()[-1] == 'm'\n info['xyz error'] = float(line.split(':')[1].rstrip()[:-1])\n elif line.startswith('Observation interval'):\n info['interval'] = float(line.split(':')[1].split()[0])\n elif line.startswith('Moving average MP12'):\n info['MP12'] = float(line.split(':')[1].rstrip()[:-1])\n elif line.startswith('Moving average MP21'):\n info['MP21'] = float(line.split(':')[1].rstrip()[:-1])\n # query the RINEX file via teqc quality check --- process in given\n # work area to avoid intermediate file pollution\n with SmartTempDir(work_path) as work_path:\n intermediate_rinex_fname = replace_path(work_path, rinex_fname)\n os.symlink(os.path.abspath(rinex_fname),\n intermediate_rinex_fname)\n intermediate_nav_fname = replace_path(work_path, nav_fname)\n os.symlink(os.path.abspath(nav_fname),\n intermediate_nav_fname)\n sh.teqc('+qc',\n '+quiet',\n '-R',\n '-S',\n '-E',\n '-C',\n '-J',\n '-nav', intermediate_nav_fname,\n intermediate_rinex_fname,\n _cwd=work_path,\n _out=process_output,\n _err=sys.stderr)\n os.remove(intermediate_rinex_fname)\n os.remove(intermediate_nav_fname)\n return info\n\n\ndef rinex_merge(output_fname, rinex_fnames, _err=sys.stderr):\n \"\"\"\n Using teqc, merge *rinex_fnames* and store to the file\n *output_fname*. Returns *output_fname*. Redirect error output to\n *_err*.\n \"\"\"\n args = ['-pch'] + rinex_fnames\n sh.teqc(*args,\n _out=output_fname,\n _err=_err)\n return output_fname\n\n\nif __name__ == '__main__':\n logging.basicConfig(level=logging.INFO)\n logging.getLogger('sh').setLevel(logging.WARNING)\n\n rinex_fname = '/Users/butala/src/absolute_tec/jplm0010.14o'\n nav_fname = '/Users/butala/src/absolute_tec/jplm0010.14n'\n\n info = rinex_info(rinex_fname,\n nav_fname)\n\n for key in sorted(info):\n print('{:10s}: {}'.format(key, info[key]))\n",
"step-ids": [
1,
3,
4,
5,
6
]
}
|
[
1,
3,
4,
5,
6
] |
import wx
import os
# os.environ["HTTPS_PROXY"] = "http://user:[email protected]:3128"
import wikipedia
import wolframalpha
import pyttsx3
import webbrowser
import winshell
import json
import requests
import ctypes
import random
from urllib.request import urlopen
import speech_recognition as sr
import ssl
import urllib.request
import urllib.parse
import re
from regression import Regression
# Remove SSL error
requests.packages.urllib3.disable_warnings()
try:
_create_unverified_https_context = ssl._create_unverified_context
except AttributeError:
# Legacy Python that doesn't verify HTTPS certificates by default
pass
else:
# Handle target environment that doesn't support HTTPS verification
ssl._create_default_https_context = _create_unverified_https_context
headers = {'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_6) '
                         'AppleWebKit/537.36 (KHTML, like Gecko) '
                         'Chrome/53.0.2785.143 Safari/537.36'}
#speak = wincl.Dispatch("SAPI.SpVoice")
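# pyttsx3 text-to-speech engine; voices[1] selects the second voice installed on the system.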
speak = pyttsx3.init()
voices = speak.getProperty('voices')
voice = voices[1]
speak.setProperty('voice', voice.id)
# Requirements
videos = ['C:\\Users\\nEW u\\Videos\\Um4WR.mkv', 'C:\\Users\\nEW u\\Videos\\Jaatishwar.mkv']
app_id = 'GY6T92-YG5RXA85AV'
# GUI creation
class MyFrame(wx.Frame):
def __init__(self):
wx.Frame.__init__(self, None,
pos=wx.DefaultPosition, size=wx.Size(450, 100),
style=wx.MINIMIZE_BOX | wx.SYSTEM_MENU | wx.CAPTION |
wx.CLOSE_BOX | wx.CLIP_CHILDREN,
title="Assistant")
panel = wx.Panel(self)
#ico = wx.Icon('programming.jpg', type=wx.ICON_ASTERISK, desiredWidth=-1, desiredHeight=-1)
#self.SetIcon(ico)
my_sizer = wx.BoxSizer(wx.VERTICAL)
lbl = wx.StaticText(panel,
label="Hello Sir. How can I help you?")
my_sizer.Add(lbl, 0, wx.ALL, 5)
self.txt = wx.TextCtrl(panel, style=wx.TE_PROCESS_ENTER,
size=(400, 30))
self.txt.SetFocus()
self.txt.Bind(wx.EVT_TEXT_ENTER, self.OnEnter)
my_sizer.Add(self.txt, 0, wx.ALL, 5)
panel.SetSizer(my_sizer)
self.Show()
speak.say('''Welcome back Sir, Your assistant at your service.''')
speak.runAndWait()
def OnEnter(self, event):
put = self.txt.GetValue()
put = put.lower()
link = put.split()
r = sr.Recognizer()
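        # If the text box is empty, fall back to microphone input via Google Speech Recognition.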
if put == '':
with sr.Microphone() as src:
r.adjust_for_ambient_noise(src)
speak.say("Yes? How can I help You?")
speak.runAndWait()
audio = r.listen(src)
try:
put = r.recognize_google(audio)
put = put.lower()
link = put.split()
self.txt.SetValue(put)
except sr.UnknownValueError:
print("Google Speech Recognition could not understand audio")
except sr.RequestError as e:
print("Could not request results from Google STT; {0}".format(e))
except:
print("Unknown exception occurred!")
# Open a webpage
if put.startswith('open '):
try:
speak.say("opening "+link[1])
speak.runAndWait()
webbrowser.open('http://www.'+link[1]+'.com')
except:
print('Sorry, No Internet Connection!')
# Play Song on Youtube
elif put.startswith('play '):
try:
link = '+'.join(link[1:])
s = link.replace('+', ' ')
query_string = urllib.parse.urlencode({"search_query" : link})
html_content = urllib.request.urlopen("http://www.youtube.com/results?" + query_string)
search_results = re.findall(r'href=\"\/watch\?v=(.{11})', html_content.read().decode())
print("http://www.youtube.com/watch?v=" + search_results[0])
speak.say("playing "+s)
speak.runAndWait()
webbrowser.open("http://www.youtube.com/watch?v=" + search_results[0])
except:
print('Sorry, No internet connection!')
# Google Search
elif put.startswith('search '):
try:
link = '+'.join(link[1:])
say = link.replace('+', ' ')
speak.say("searching on google for "+say)
speak.runAndWait()
webbrowser.open('https://www.google.co.in/search?q='+link)
except:
print('Sorry, No internet connection!')
# Empty Recycle bin
elif put.startswith('empty '):
try:
winshell.recycle_bin().empty(confirm=False,
show_progress=False, sound=True)
speak.say("Recycle Bin Empty")
speak.runAndWait()
except:
speak.say("Unknown Error")
speak.runAndWait()
# News
elif put.startswith('science '):
try:
jsonObj = urlopen('''https://newsapi.org/v1/articles?source=new-scientist&sortBy=top&apiKey=your_API_here''')
data = json.load(jsonObj)
i = 1
speak.say('''Here are some top science news from new scientist''')
speak.runAndWait()
print(''' ================NEW SCIENTIST=============
'''+'\n')
for item in data['articles']:
print(str(i)+'. '+item['title']+'\n')
print(item['description']+'\n')
i += 1
except:
print('Sorry, No internet connection')
elif put.startswith('headlines '):
try:
jsonObj = urlopen('''https://newsapi.org/v1/articles?source=the-times-of-india&sortBy=top&apiKey=your_API_here''')
data = json.load(jsonObj)
i = 1
speak.say('Here are some top news from the times of india')
speak.runAndWait()
print(''' ===============TIMES OF INDIA============'''
+'\n')
for item in data['articles']:
print(str(i)+'. '+item['title']+'\n')
print(item['description']+'\n')
i += 1
except Exception as e:
print(str(e))
# Lock the device
elif put.startswith('lock '):
try:
speak.say("locking the device")
speak.runAndWait()
ctypes.windll.user32.LockWorkStation()
except Exception as e:
print(str(e))
# Play videos in boredom
elif put.endswith('bored'):
try:
speak.say('''Sir, I\'m playing a video.
Hope you like it''')
speak.runAndWait()
video = random.choice(videos)
os.startfile(video)
except Exception as e:
print(str(e))
# Say Whats up
elif put.startswith('whats up'):
try:
speak.say('''Nothing much, just trying to become the perfect assistant!''')
speak.runAndWait()
except Exception as e:
print(str(e))
#Show stocks
elif put.startswith('show stocks'):
try:
Regression.execute()
except Exception as e:
print(str(e))
# Other Cases
else:
try:
# wolframalpha
client = wolframalpha.Client(app_id)
res = client.query(put)
ans = next(res.results).text
print(ans)
speak.say(ans)
speak.runAndWait()
except:
# wikipedia/google
put = put.split()
put = ' '.join(put[:])
#print(put)
print(wikipedia.summary(put))
speak.say('Searched google for '+put)
speak.runAndWait()
webbrowser.open('https://www.google.co.in/search?q='+put)
# Trigger GUI
if __name__ == "__main__":
app = wx.App(True)
frame = MyFrame()
app.MainLoop()
|
normal
|
{
"blob_id": "8f1e6ea93b2dd7add256cb31d2c621aa69721609",
"index": 8834,
"step-1": "<mask token>\n\n\nclass MyFrame(wx.Frame):\n\n def __init__(self):\n wx.Frame.__init__(self, None, pos=wx.DefaultPosition, size=wx.Size(\n 450, 100), style=wx.MINIMIZE_BOX | wx.SYSTEM_MENU | wx.CAPTION |\n wx.CLOSE_BOX | wx.CLIP_CHILDREN, title='Assistant')\n panel = wx.Panel(self)\n my_sizer = wx.BoxSizer(wx.VERTICAL)\n lbl = wx.StaticText(panel, label='Hello Sir. How can I help you?')\n my_sizer.Add(lbl, 0, wx.ALL, 5)\n self.txt = wx.TextCtrl(panel, style=wx.TE_PROCESS_ENTER, size=(400, 30)\n )\n self.txt.SetFocus()\n self.txt.Bind(wx.EVT_TEXT_ENTER, self.OnEnter)\n my_sizer.Add(self.txt, 0, wx.ALL, 5)\n panel.SetSizer(my_sizer)\n self.Show()\n speak.say('Welcome back Sir, Your assistant at your service.')\n speak.runAndWait()\n\n def OnEnter(self, event):\n put = self.txt.GetValue()\n put = put.lower()\n link = put.split()\n r = sr.Recognizer()\n if put == '':\n with sr.Microphone() as src:\n r.adjust_for_ambient_noise(src)\n speak.say('Yes? How can I help You?')\n speak.runAndWait()\n audio = r.listen(src)\n try:\n put = r.recognize_google(audio)\n put = put.lower()\n link = put.split()\n self.txt.SetValue(put)\n except sr.UnknownValueError:\n print('Google Speech Recognition could not understand audio')\n except sr.RequestError as e:\n print('Could not request results from Google STT; {0}'.\n format(e))\n except:\n print('Unknown exception occurred!')\n if put.startswith('open '):\n try:\n speak.say('opening ' + link[1])\n speak.runAndWait()\n webbrowser.open('http://www.' + link[1] + '.com')\n except:\n print('Sorry, No Internet Connection!')\n elif put.startswith('play '):\n try:\n link = '+'.join(link[1:])\n s = link.replace('+', ' ')\n query_string = urllib.parse.urlencode({'search_query': link})\n html_content = urllib.request.urlopen(\n 'http://www.youtube.com/results?' + query_string)\n search_results = re.findall('href=\\\\\"\\\\/watch\\\\?v=(.{11})',\n html_content.read().decode())\n print('http://www.youtube.com/watch?v=' + search_results[0])\n speak.say('playing ' + s)\n speak.runAndWait()\n webbrowser.open('http://www.youtube.com/watch?v=' +\n search_results[0])\n except:\n print('Sorry, No internet connection!')\n elif put.startswith('search '):\n try:\n link = '+'.join(link[1:])\n say = link.replace('+', ' ')\n speak.say('searching on google for ' + say)\n speak.runAndWait()\n webbrowser.open('https://www.google.co.in/search?q=' + link)\n except:\n print('Sorry, No internet connection!')\n elif put.startswith('empty '):\n try:\n winshell.recycle_bin().empty(confirm=False, show_progress=\n False, sound=True)\n speak.say('Recycle Bin Empty')\n speak.runAndWait()\n except:\n speak.say('Unknown Error')\n speak.runAndWait()\n elif put.startswith('science '):\n try:\n jsonObj = urlopen(\n 'https://newsapi.org/v1/articles?source=new-scientist&sortBy=top&apiKey=your_API_here'\n )\n data = json.load(jsonObj)\n i = 1\n speak.say('Here are some top science news from new scientist')\n speak.runAndWait()\n print(\n \"\"\" ================NEW SCIENTIST=============\n \"\"\"\n + '\\n')\n for item in data['articles']:\n print(str(i) + '. 
' + item['title'] + '\\n')\n print(item['description'] + '\\n')\n i += 1\n except:\n print('Sorry, No internet connection')\n elif put.startswith('headlines '):\n try:\n jsonObj = urlopen(\n 'https://newsapi.org/v1/articles?source=the-times-of-india&sortBy=top&apiKey=your_API_here'\n )\n data = json.load(jsonObj)\n i = 1\n speak.say('Here are some top news from the times of india')\n speak.runAndWait()\n print(\n ' ===============TIMES OF INDIA============' +\n '\\n')\n for item in data['articles']:\n print(str(i) + '. ' + item['title'] + '\\n')\n print(item['description'] + '\\n')\n i += 1\n except Exception as e:\n print(str(e))\n elif put.startswith('lock '):\n try:\n speak.say('locking the device')\n speak.runAndWait()\n ctypes.windll.user32.LockWorkStation()\n except Exception as e:\n print(str(e))\n elif put.endswith('bored'):\n try:\n speak.say(\n \"\"\"Sir, I'm playing a video.\n Hope you like it\"\"\"\n )\n speak.runAndWait()\n video = random.choice(videos)\n os.startfile(video)\n except Exception as e:\n print(str(e))\n elif put.startswith('whats up'):\n try:\n speak.say(\n 'Nothing much, just trying to become the perfect assistant!'\n )\n speak.runAndWait()\n except Exception as e:\n print(str(e))\n elif put.startswith('show stocks'):\n try:\n Regression.execute()\n except Exception as e:\n print(str(e))\n else:\n try:\n client = wolframalpha.Client(app_id)\n res = client.query(put)\n ans = next(res.results).text\n print(ans)\n speak.say(ans)\n speak.runAndWait()\n except:\n put = put.split()\n put = ' '.join(put[:])\n print(wikipedia.summary(put))\n speak.say('Searched google for ' + put)\n speak.runAndWait()\n webbrowser.open('https://www.google.co.in/search?q=' + put)\n\n\n<mask token>\n",
"step-2": "<mask token>\nrequests.packages.urllib3.disable_warnings()\ntry:\n _create_unverified_https_context = ssl._create_unverified_context\nexcept AttributeError:\n pass\nelse:\n ssl._create_default_https_context = _create_unverified_https_context\n<mask token>\nspeak.setProperty('voice', voice.id)\n<mask token>\n\n\nclass MyFrame(wx.Frame):\n\n def __init__(self):\n wx.Frame.__init__(self, None, pos=wx.DefaultPosition, size=wx.Size(\n 450, 100), style=wx.MINIMIZE_BOX | wx.SYSTEM_MENU | wx.CAPTION |\n wx.CLOSE_BOX | wx.CLIP_CHILDREN, title='Assistant')\n panel = wx.Panel(self)\n my_sizer = wx.BoxSizer(wx.VERTICAL)\n lbl = wx.StaticText(panel, label='Hello Sir. How can I help you?')\n my_sizer.Add(lbl, 0, wx.ALL, 5)\n self.txt = wx.TextCtrl(panel, style=wx.TE_PROCESS_ENTER, size=(400, 30)\n )\n self.txt.SetFocus()\n self.txt.Bind(wx.EVT_TEXT_ENTER, self.OnEnter)\n my_sizer.Add(self.txt, 0, wx.ALL, 5)\n panel.SetSizer(my_sizer)\n self.Show()\n speak.say('Welcome back Sir, Your assistant at your service.')\n speak.runAndWait()\n\n def OnEnter(self, event):\n put = self.txt.GetValue()\n put = put.lower()\n link = put.split()\n r = sr.Recognizer()\n if put == '':\n with sr.Microphone() as src:\n r.adjust_for_ambient_noise(src)\n speak.say('Yes? How can I help You?')\n speak.runAndWait()\n audio = r.listen(src)\n try:\n put = r.recognize_google(audio)\n put = put.lower()\n link = put.split()\n self.txt.SetValue(put)\n except sr.UnknownValueError:\n print('Google Speech Recognition could not understand audio')\n except sr.RequestError as e:\n print('Could not request results from Google STT; {0}'.\n format(e))\n except:\n print('Unknown exception occurred!')\n if put.startswith('open '):\n try:\n speak.say('opening ' + link[1])\n speak.runAndWait()\n webbrowser.open('http://www.' + link[1] + '.com')\n except:\n print('Sorry, No Internet Connection!')\n elif put.startswith('play '):\n try:\n link = '+'.join(link[1:])\n s = link.replace('+', ' ')\n query_string = urllib.parse.urlencode({'search_query': link})\n html_content = urllib.request.urlopen(\n 'http://www.youtube.com/results?' + query_string)\n search_results = re.findall('href=\\\\\"\\\\/watch\\\\?v=(.{11})',\n html_content.read().decode())\n print('http://www.youtube.com/watch?v=' + search_results[0])\n speak.say('playing ' + s)\n speak.runAndWait()\n webbrowser.open('http://www.youtube.com/watch?v=' +\n search_results[0])\n except:\n print('Sorry, No internet connection!')\n elif put.startswith('search '):\n try:\n link = '+'.join(link[1:])\n say = link.replace('+', ' ')\n speak.say('searching on google for ' + say)\n speak.runAndWait()\n webbrowser.open('https://www.google.co.in/search?q=' + link)\n except:\n print('Sorry, No internet connection!')\n elif put.startswith('empty '):\n try:\n winshell.recycle_bin().empty(confirm=False, show_progress=\n False, sound=True)\n speak.say('Recycle Bin Empty')\n speak.runAndWait()\n except:\n speak.say('Unknown Error')\n speak.runAndWait()\n elif put.startswith('science '):\n try:\n jsonObj = urlopen(\n 'https://newsapi.org/v1/articles?source=new-scientist&sortBy=top&apiKey=your_API_here'\n )\n data = json.load(jsonObj)\n i = 1\n speak.say('Here are some top science news from new scientist')\n speak.runAndWait()\n print(\n \"\"\" ================NEW SCIENTIST=============\n \"\"\"\n + '\\n')\n for item in data['articles']:\n print(str(i) + '. 
' + item['title'] + '\\n')\n print(item['description'] + '\\n')\n i += 1\n except:\n print('Sorry, No internet connection')\n elif put.startswith('headlines '):\n try:\n jsonObj = urlopen(\n 'https://newsapi.org/v1/articles?source=the-times-of-india&sortBy=top&apiKey=your_API_here'\n )\n data = json.load(jsonObj)\n i = 1\n speak.say('Here are some top news from the times of india')\n speak.runAndWait()\n print(\n ' ===============TIMES OF INDIA============' +\n '\\n')\n for item in data['articles']:\n print(str(i) + '. ' + item['title'] + '\\n')\n print(item['description'] + '\\n')\n i += 1\n except Exception as e:\n print(str(e))\n elif put.startswith('lock '):\n try:\n speak.say('locking the device')\n speak.runAndWait()\n ctypes.windll.user32.LockWorkStation()\n except Exception as e:\n print(str(e))\n elif put.endswith('bored'):\n try:\n speak.say(\n \"\"\"Sir, I'm playing a video.\n Hope you like it\"\"\"\n )\n speak.runAndWait()\n video = random.choice(videos)\n os.startfile(video)\n except Exception as e:\n print(str(e))\n elif put.startswith('whats up'):\n try:\n speak.say(\n 'Nothing much, just trying to become the perfect assistant!'\n )\n speak.runAndWait()\n except Exception as e:\n print(str(e))\n elif put.startswith('show stocks'):\n try:\n Regression.execute()\n except Exception as e:\n print(str(e))\n else:\n try:\n client = wolframalpha.Client(app_id)\n res = client.query(put)\n ans = next(res.results).text\n print(ans)\n speak.say(ans)\n speak.runAndWait()\n except:\n put = put.split()\n put = ' '.join(put[:])\n print(wikipedia.summary(put))\n speak.say('Searched google for ' + put)\n speak.runAndWait()\n webbrowser.open('https://www.google.co.in/search?q=' + put)\n\n\nif __name__ == '__main__':\n app = wx.App(True)\n frame = MyFrame()\n app.MainLoop()\n",
"step-3": "<mask token>\nrequests.packages.urllib3.disable_warnings()\ntry:\n _create_unverified_https_context = ssl._create_unverified_context\nexcept AttributeError:\n pass\nelse:\n ssl._create_default_https_context = _create_unverified_https_context\nheaders = {\n \"\"\"user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_6)\n AppleWebKit/537.36 (KHTML, like Gecko)\n Chrome/53.0.2785.143 Safari/537.36\"\"\"\n }\nspeak = pyttsx3.init()\nvoices = speak.getProperty('voices')\nvoice = voices[1]\nspeak.setProperty('voice', voice.id)\nvideos = ['C:\\\\Users\\\\nEW u\\\\Videos\\\\Um4WR.mkv',\n 'C:\\\\Users\\\\nEW u\\\\Videos\\\\Jaatishwar.mkv']\napp_id = 'GY6T92-YG5RXA85AV'\n\n\nclass MyFrame(wx.Frame):\n\n def __init__(self):\n wx.Frame.__init__(self, None, pos=wx.DefaultPosition, size=wx.Size(\n 450, 100), style=wx.MINIMIZE_BOX | wx.SYSTEM_MENU | wx.CAPTION |\n wx.CLOSE_BOX | wx.CLIP_CHILDREN, title='Assistant')\n panel = wx.Panel(self)\n my_sizer = wx.BoxSizer(wx.VERTICAL)\n lbl = wx.StaticText(panel, label='Hello Sir. How can I help you?')\n my_sizer.Add(lbl, 0, wx.ALL, 5)\n self.txt = wx.TextCtrl(panel, style=wx.TE_PROCESS_ENTER, size=(400, 30)\n )\n self.txt.SetFocus()\n self.txt.Bind(wx.EVT_TEXT_ENTER, self.OnEnter)\n my_sizer.Add(self.txt, 0, wx.ALL, 5)\n panel.SetSizer(my_sizer)\n self.Show()\n speak.say('Welcome back Sir, Your assistant at your service.')\n speak.runAndWait()\n\n def OnEnter(self, event):\n put = self.txt.GetValue()\n put = put.lower()\n link = put.split()\n r = sr.Recognizer()\n if put == '':\n with sr.Microphone() as src:\n r.adjust_for_ambient_noise(src)\n speak.say('Yes? How can I help You?')\n speak.runAndWait()\n audio = r.listen(src)\n try:\n put = r.recognize_google(audio)\n put = put.lower()\n link = put.split()\n self.txt.SetValue(put)\n except sr.UnknownValueError:\n print('Google Speech Recognition could not understand audio')\n except sr.RequestError as e:\n print('Could not request results from Google STT; {0}'.\n format(e))\n except:\n print('Unknown exception occurred!')\n if put.startswith('open '):\n try:\n speak.say('opening ' + link[1])\n speak.runAndWait()\n webbrowser.open('http://www.' + link[1] + '.com')\n except:\n print('Sorry, No Internet Connection!')\n elif put.startswith('play '):\n try:\n link = '+'.join(link[1:])\n s = link.replace('+', ' ')\n query_string = urllib.parse.urlencode({'search_query': link})\n html_content = urllib.request.urlopen(\n 'http://www.youtube.com/results?' 
+ query_string)\n search_results = re.findall('href=\\\\\"\\\\/watch\\\\?v=(.{11})',\n html_content.read().decode())\n print('http://www.youtube.com/watch?v=' + search_results[0])\n speak.say('playing ' + s)\n speak.runAndWait()\n webbrowser.open('http://www.youtube.com/watch?v=' +\n search_results[0])\n except:\n print('Sorry, No internet connection!')\n elif put.startswith('search '):\n try:\n link = '+'.join(link[1:])\n say = link.replace('+', ' ')\n speak.say('searching on google for ' + say)\n speak.runAndWait()\n webbrowser.open('https://www.google.co.in/search?q=' + link)\n except:\n print('Sorry, No internet connection!')\n elif put.startswith('empty '):\n try:\n winshell.recycle_bin().empty(confirm=False, show_progress=\n False, sound=True)\n speak.say('Recycle Bin Empty')\n speak.runAndWait()\n except:\n speak.say('Unknown Error')\n speak.runAndWait()\n elif put.startswith('science '):\n try:\n jsonObj = urlopen(\n 'https://newsapi.org/v1/articles?source=new-scientist&sortBy=top&apiKey=your_API_here'\n )\n data = json.load(jsonObj)\n i = 1\n speak.say('Here are some top science news from new scientist')\n speak.runAndWait()\n print(\n \"\"\" ================NEW SCIENTIST=============\n \"\"\"\n + '\\n')\n for item in data['articles']:\n print(str(i) + '. ' + item['title'] + '\\n')\n print(item['description'] + '\\n')\n i += 1\n except:\n print('Sorry, No internet connection')\n elif put.startswith('headlines '):\n try:\n jsonObj = urlopen(\n 'https://newsapi.org/v1/articles?source=the-times-of-india&sortBy=top&apiKey=your_API_here'\n )\n data = json.load(jsonObj)\n i = 1\n speak.say('Here are some top news from the times of india')\n speak.runAndWait()\n print(\n ' ===============TIMES OF INDIA============' +\n '\\n')\n for item in data['articles']:\n print(str(i) + '. ' + item['title'] + '\\n')\n print(item['description'] + '\\n')\n i += 1\n except Exception as e:\n print(str(e))\n elif put.startswith('lock '):\n try:\n speak.say('locking the device')\n speak.runAndWait()\n ctypes.windll.user32.LockWorkStation()\n except Exception as e:\n print(str(e))\n elif put.endswith('bored'):\n try:\n speak.say(\n \"\"\"Sir, I'm playing a video.\n Hope you like it\"\"\"\n )\n speak.runAndWait()\n video = random.choice(videos)\n os.startfile(video)\n except Exception as e:\n print(str(e))\n elif put.startswith('whats up'):\n try:\n speak.say(\n 'Nothing much, just trying to become the perfect assistant!'\n )\n speak.runAndWait()\n except Exception as e:\n print(str(e))\n elif put.startswith('show stocks'):\n try:\n Regression.execute()\n except Exception as e:\n print(str(e))\n else:\n try:\n client = wolframalpha.Client(app_id)\n res = client.query(put)\n ans = next(res.results).text\n print(ans)\n speak.say(ans)\n speak.runAndWait()\n except:\n put = put.split()\n put = ' '.join(put[:])\n print(wikipedia.summary(put))\n speak.say('Searched google for ' + put)\n speak.runAndWait()\n webbrowser.open('https://www.google.co.in/search?q=' + put)\n\n\nif __name__ == '__main__':\n app = wx.App(True)\n frame = MyFrame()\n app.MainLoop()\n",
"step-4": "import wx\nimport os\nimport wikipedia\nimport wolframalpha\nimport pyttsx3\nimport webbrowser\nimport winshell\nimport json\nimport requests\nimport ctypes\nimport random\nfrom urllib.request import urlopen\nimport speech_recognition as sr\nimport ssl\nimport urllib.request\nimport urllib.parse\nimport re\nfrom regression import Regression\nrequests.packages.urllib3.disable_warnings()\ntry:\n _create_unverified_https_context = ssl._create_unverified_context\nexcept AttributeError:\n pass\nelse:\n ssl._create_default_https_context = _create_unverified_https_context\nheaders = {\n \"\"\"user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_6)\n AppleWebKit/537.36 (KHTML, like Gecko)\n Chrome/53.0.2785.143 Safari/537.36\"\"\"\n }\nspeak = pyttsx3.init()\nvoices = speak.getProperty('voices')\nvoice = voices[1]\nspeak.setProperty('voice', voice.id)\nvideos = ['C:\\\\Users\\\\nEW u\\\\Videos\\\\Um4WR.mkv',\n 'C:\\\\Users\\\\nEW u\\\\Videos\\\\Jaatishwar.mkv']\napp_id = 'GY6T92-YG5RXA85AV'\n\n\nclass MyFrame(wx.Frame):\n\n def __init__(self):\n wx.Frame.__init__(self, None, pos=wx.DefaultPosition, size=wx.Size(\n 450, 100), style=wx.MINIMIZE_BOX | wx.SYSTEM_MENU | wx.CAPTION |\n wx.CLOSE_BOX | wx.CLIP_CHILDREN, title='Assistant')\n panel = wx.Panel(self)\n my_sizer = wx.BoxSizer(wx.VERTICAL)\n lbl = wx.StaticText(panel, label='Hello Sir. How can I help you?')\n my_sizer.Add(lbl, 0, wx.ALL, 5)\n self.txt = wx.TextCtrl(panel, style=wx.TE_PROCESS_ENTER, size=(400, 30)\n )\n self.txt.SetFocus()\n self.txt.Bind(wx.EVT_TEXT_ENTER, self.OnEnter)\n my_sizer.Add(self.txt, 0, wx.ALL, 5)\n panel.SetSizer(my_sizer)\n self.Show()\n speak.say('Welcome back Sir, Your assistant at your service.')\n speak.runAndWait()\n\n def OnEnter(self, event):\n put = self.txt.GetValue()\n put = put.lower()\n link = put.split()\n r = sr.Recognizer()\n if put == '':\n with sr.Microphone() as src:\n r.adjust_for_ambient_noise(src)\n speak.say('Yes? How can I help You?')\n speak.runAndWait()\n audio = r.listen(src)\n try:\n put = r.recognize_google(audio)\n put = put.lower()\n link = put.split()\n self.txt.SetValue(put)\n except sr.UnknownValueError:\n print('Google Speech Recognition could not understand audio')\n except sr.RequestError as e:\n print('Could not request results from Google STT; {0}'.\n format(e))\n except:\n print('Unknown exception occurred!')\n if put.startswith('open '):\n try:\n speak.say('opening ' + link[1])\n speak.runAndWait()\n webbrowser.open('http://www.' + link[1] + '.com')\n except:\n print('Sorry, No Internet Connection!')\n elif put.startswith('play '):\n try:\n link = '+'.join(link[1:])\n s = link.replace('+', ' ')\n query_string = urllib.parse.urlencode({'search_query': link})\n html_content = urllib.request.urlopen(\n 'http://www.youtube.com/results?' 
+ query_string)\n search_results = re.findall('href=\\\\\"\\\\/watch\\\\?v=(.{11})',\n html_content.read().decode())\n print('http://www.youtube.com/watch?v=' + search_results[0])\n speak.say('playing ' + s)\n speak.runAndWait()\n webbrowser.open('http://www.youtube.com/watch?v=' +\n search_results[0])\n except:\n print('Sorry, No internet connection!')\n elif put.startswith('search '):\n try:\n link = '+'.join(link[1:])\n say = link.replace('+', ' ')\n speak.say('searching on google for ' + say)\n speak.runAndWait()\n webbrowser.open('https://www.google.co.in/search?q=' + link)\n except:\n print('Sorry, No internet connection!')\n elif put.startswith('empty '):\n try:\n winshell.recycle_bin().empty(confirm=False, show_progress=\n False, sound=True)\n speak.say('Recycle Bin Empty')\n speak.runAndWait()\n except:\n speak.say('Unknown Error')\n speak.runAndWait()\n elif put.startswith('science '):\n try:\n jsonObj = urlopen(\n 'https://newsapi.org/v1/articles?source=new-scientist&sortBy=top&apiKey=your_API_here'\n )\n data = json.load(jsonObj)\n i = 1\n speak.say('Here are some top science news from new scientist')\n speak.runAndWait()\n print(\n \"\"\" ================NEW SCIENTIST=============\n \"\"\"\n + '\\n')\n for item in data['articles']:\n print(str(i) + '. ' + item['title'] + '\\n')\n print(item['description'] + '\\n')\n i += 1\n except:\n print('Sorry, No internet connection')\n elif put.startswith('headlines '):\n try:\n jsonObj = urlopen(\n 'https://newsapi.org/v1/articles?source=the-times-of-india&sortBy=top&apiKey=your_API_here'\n )\n data = json.load(jsonObj)\n i = 1\n speak.say('Here are some top news from the times of india')\n speak.runAndWait()\n print(\n ' ===============TIMES OF INDIA============' +\n '\\n')\n for item in data['articles']:\n print(str(i) + '. ' + item['title'] + '\\n')\n print(item['description'] + '\\n')\n i += 1\n except Exception as e:\n print(str(e))\n elif put.startswith('lock '):\n try:\n speak.say('locking the device')\n speak.runAndWait()\n ctypes.windll.user32.LockWorkStation()\n except Exception as e:\n print(str(e))\n elif put.endswith('bored'):\n try:\n speak.say(\n \"\"\"Sir, I'm playing a video.\n Hope you like it\"\"\"\n )\n speak.runAndWait()\n video = random.choice(videos)\n os.startfile(video)\n except Exception as e:\n print(str(e))\n elif put.startswith('whats up'):\n try:\n speak.say(\n 'Nothing much, just trying to become the perfect assistant!'\n )\n speak.runAndWait()\n except Exception as e:\n print(str(e))\n elif put.startswith('show stocks'):\n try:\n Regression.execute()\n except Exception as e:\n print(str(e))\n else:\n try:\n client = wolframalpha.Client(app_id)\n res = client.query(put)\n ans = next(res.results).text\n print(ans)\n speak.say(ans)\n speak.runAndWait()\n except:\n put = put.split()\n put = ' '.join(put[:])\n print(wikipedia.summary(put))\n speak.say('Searched google for ' + put)\n speak.runAndWait()\n webbrowser.open('https://www.google.co.in/search?q=' + put)\n\n\nif __name__ == '__main__':\n app = wx.App(True)\n frame = MyFrame()\n app.MainLoop()\n",
"step-5": "import wx\nimport os\n# os.environ[\"HTTPS_PROXY\"] = \"http://user:[email protected]:3128\"\nimport wikipedia\nimport wolframalpha\nimport pyttsx3\nimport webbrowser\nimport winshell\nimport json\nimport requests\nimport ctypes\nimport random\nfrom urllib.request import urlopen\nimport speech_recognition as sr\nimport ssl\nimport urllib.request\nimport urllib.parse\nimport re\nfrom regression import Regression\n# Remove SSL error\nrequests.packages.urllib3.disable_warnings()\n\ntry:\n _create_unverified_https_context = ssl._create_unverified_context\nexcept AttributeError:\n # Legacy Python that doesn't verify HTTPS certificates by default\n pass\nelse:\n # Handle target environment that doesn't support HTTPS verification\n ssl._create_default_https_context = _create_unverified_https_context\n\n\nheaders = {'''user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_6)\n AppleWebKit/537.36 (KHTML, like Gecko)\n Chrome/53.0.2785.143 Safari/537.36'''}\n\n#speak = wincl.Dispatch(\"SAPI.SpVoice\")\nspeak = pyttsx3.init()\nvoices = speak.getProperty('voices')\nvoice = voices[1]\nspeak.setProperty('voice', voice.id)\n\n# Requirements\nvideos = ['C:\\\\Users\\\\nEW u\\\\Videos\\\\Um4WR.mkv', 'C:\\\\Users\\\\nEW u\\\\Videos\\\\Jaatishwar.mkv']\napp_id = 'GY6T92-YG5RXA85AV'\n\n\n# GUI creation\nclass MyFrame(wx.Frame):\n def __init__(self):\n wx.Frame.__init__(self, None,\n pos=wx.DefaultPosition, size=wx.Size(450, 100),\n style=wx.MINIMIZE_BOX | wx.SYSTEM_MENU | wx.CAPTION |\n wx.CLOSE_BOX | wx.CLIP_CHILDREN,\n title=\"Assistant\")\n panel = wx.Panel(self)\n\n #ico = wx.Icon('programming.jpg', type=wx.ICON_ASTERISK, desiredWidth=-1, desiredHeight=-1)\n #self.SetIcon(ico)\n \n my_sizer = wx.BoxSizer(wx.VERTICAL)\n lbl = wx.StaticText(panel,\n label=\"Hello Sir. How can I help you?\")\n my_sizer.Add(lbl, 0, wx.ALL, 5)\n self.txt = wx.TextCtrl(panel, style=wx.TE_PROCESS_ENTER,\n size=(400, 30))\n self.txt.SetFocus()\n self.txt.Bind(wx.EVT_TEXT_ENTER, self.OnEnter)\n my_sizer.Add(self.txt, 0, wx.ALL, 5)\n panel.SetSizer(my_sizer)\n self.Show()\n speak.say('''Welcome back Sir, Your assistant at your service.''')\n speak.runAndWait()\n\n\n def OnEnter(self, event):\n put = self.txt.GetValue()\n put = put.lower()\n link = put.split()\n r = sr.Recognizer()\n if put == '':\n with sr.Microphone() as src:\n r.adjust_for_ambient_noise(src) \n speak.say(\"Yes? 
How can I help You?\")\n speak.runAndWait()\n audio = r.listen(src)\n try:\n put = r.recognize_google(audio)\n put = put.lower()\n link = put.split()\n self.txt.SetValue(put)\n\n except sr.UnknownValueError:\n print(\"Google Speech Recognition could not understand audio\")\n except sr.RequestError as e:\n print(\"Could not request results from Google STT; {0}\".format(e))\n except:\n print(\"Unknown exception occurred!\")\n\n # Open a webpage\n if put.startswith('open '):\n try:\n speak.say(\"opening \"+link[1])\n speak.runAndWait()\n webbrowser.open('http://www.'+link[1]+'.com')\n except:\n print('Sorry, No Internet Connection!')\n # Play Song on Youtube\n elif put.startswith('play '):\n try:\n link = '+'.join(link[1:])\n s = link.replace('+', ' ')\n query_string = urllib.parse.urlencode({\"search_query\" : link})\n html_content = urllib.request.urlopen(\"http://www.youtube.com/results?\" + query_string)\n search_results = re.findall(r'href=\\\"\\/watch\\?v=(.{11})', html_content.read().decode())\n print(\"http://www.youtube.com/watch?v=\" + search_results[0])\n speak.say(\"playing \"+s)\n speak.runAndWait()\n webbrowser.open(\"http://www.youtube.com/watch?v=\" + search_results[0])\n except:\n print('Sorry, No internet connection!')\n # Google Search\n elif put.startswith('search '):\n try:\n link = '+'.join(link[1:])\n say = link.replace('+', ' ')\n speak.say(\"searching on google for \"+say)\n speak.runAndWait()\n webbrowser.open('https://www.google.co.in/search?q='+link)\n except:\n print('Sorry, No internet connection!')\n # Empty Recycle bin\n elif put.startswith('empty '):\n try:\n winshell.recycle_bin().empty(confirm=False,\n show_progress=False, sound=True)\n speak.say(\"Recycle Bin Empty\")\n speak.runAndWait()\n except:\n speak.say(\"Unknown Error\")\n speak.runAndWait()\n # News\n elif put.startswith('science '):\n try:\n jsonObj = urlopen('''https://newsapi.org/v1/articles?source=new-scientist&sortBy=top&apiKey=your_API_here''')\n data = json.load(jsonObj)\n i = 1\n speak.say('''Here are some top science news from new scientist''')\n speak.runAndWait()\n print(''' ================NEW SCIENTIST=============\n '''+'\\n')\n for item in data['articles']:\n print(str(i)+'. '+item['title']+'\\n')\n print(item['description']+'\\n')\n i += 1\n except:\n print('Sorry, No internet connection')\n elif put.startswith('headlines '):\n try:\n jsonObj = urlopen('''https://newsapi.org/v1/articles?source=the-times-of-india&sortBy=top&apiKey=your_API_here''')\n data = json.load(jsonObj)\n i = 1\n speak.say('Here are some top news from the times of india')\n speak.runAndWait()\n print(''' ===============TIMES OF INDIA============'''\n +'\\n')\n for item in data['articles']:\n print(str(i)+'. 
'+item['title']+'\\n')\n print(item['description']+'\\n')\n i += 1\n except Exception as e:\n print(str(e))\n # Lock the device\n elif put.startswith('lock '):\n try:\n speak.say(\"locking the device\")\n speak.runAndWait()\n ctypes.windll.user32.LockWorkStation()\n except Exception as e:\n print(str(e)) \n # Play videos in boredom\n elif put.endswith('bored'):\n try:\n speak.say('''Sir, I\\'m playing a video.\n Hope you like it''')\n speak.runAndWait()\n video = random.choice(videos)\n os.startfile(video)\n except Exception as e:\n print(str(e)) \n # Say Whats up \n elif put.startswith('whats up'):\n try:\n speak.say('''Nothing much, just trying to become the perfect assistant!''')\n speak.runAndWait()\n except Exception as e:\n print(str(e)) \n #Show stocks\n elif put.startswith('show stocks'):\n try:\n Regression.execute()\n except Exception as e:\n print(str(e))\n \n # Other Cases\n else:\n try:\n # wolframalpha\n client = wolframalpha.Client(app_id)\n res = client.query(put)\n ans = next(res.results).text\n print(ans)\n speak.say(ans)\n speak.runAndWait()\n\n except:\n # wikipedia/google\n put = put.split()\n put = ' '.join(put[:])\n #print(put)\n print(wikipedia.summary(put))\n speak.say('Searched google for '+put)\n speak.runAndWait()\n webbrowser.open('https://www.google.co.in/search?q='+put)\n\n\n# Trigger GUI\nif __name__ == \"__main__\":\n app = wx.App(True)\n frame = MyFrame()\n app.MainLoop()",
"step-ids": [
3,
4,
5,
6,
7
]
}
|
[
3,
4,
5,
6,
7
] |
class NumMatrix(object):
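    # Summed-area table (2D prefix sums): dp[i][j] holds the sum of matrix[0:i][0:j],
    # so any rectangular region sum is answered in O(1) after O(rows*cols) preprocessing.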
def __init__(self, matrix):
if matrix:
self.dp = [[0] * (len(matrix[0]) + 1) for i in range(len(matrix)+1)]
            for i in range(1, len(matrix)+1):
                for j in range(1, len(matrix[0])+1):
self.dp[i][j] = self.dp[i-1][j] + self.dp[i][j-1] + matrix[i-1][j-1] - self.dp[i-1][j-1]
def sumRegion(self, row1, col1, row2, col2):
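        # Inclusion-exclusion on the prefix sums: whole rectangle minus the strip above
        # and the strip to the left, plus the corner area that was subtracted twice.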
return self.dp[row2+1][col2+1] + self.dp[row1][col1] - self.dp[row1][col2+1] - self.dp[row2+1][col1]
# Your NumMatrix object will be instantiated and called as such:
matrix = [[3,0,1,4,2],[5,6,3,2,1],[1,2,0,1,5],[4,1,0,1,7],[1,0,3,0,5]]
for m in matrix:
    print(m)
print()
numMatrix = NumMatrix(matrix)
print(numMatrix.sumRegion(2, 1, 4, 3))
print(numMatrix.sumRegion(1, 2, 3, 4))
|
normal
|
{
"blob_id": "443ce5c2ec86b9f89ad39ef2ac6772fa002e7e16",
"index": 8377,
"step-1": "class NumMatrix(object):\n\n def __init__(self, matrix):\n if matrix:\n self.dp = [[0] * (len(matrix[0]) + 1) for i in range(len(matrix)+1)]\n for i in xrange(1,len(matrix)+1):\n for j in xrange(1,len(matrix[0])+1):\n self.dp[i][j] = self.dp[i-1][j] + self.dp[i][j-1] + matrix[i-1][j-1] - self.dp[i-1][j-1]\n\n\n\n def sumRegion(self, row1, col1, row2, col2):\n\n return self.dp[row2+1][col2+1] + self.dp[row1][col1] - self.dp[row1][col2+1] - self.dp[row2+1][col1]\n\n\n\n# Your NumMatrix object will be instantiated and called as such:\nmatrix = [[3,0,1,4,2],[5,6,3,2,1],[1,2,0,1,5],[4,1,0,1,7],[1,0,3,0,5]]\nfor m in matrix:\n print m\nprint\nnumMatrix = NumMatrix(matrix)\nprint numMatrix.sumRegion(2, 1, 4, 3)\nprint numMatrix.sumRegion(1, 2, 3, 4)\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
<|reserved_special_token_0|>
class ArbertmoPreprocessor:
<|reserved_special_token_0|>
def __init__(self, model_name, keep_emojis=False, remove_html_markup=
True, replace_urls_emails_mentions=True, strip_tashkeel=True,
strip_tatweel=True, insert_white_spaces=True, remove_elongation=True):
"""
model_name (:obj:`str`): model name from the HuggingFace Models page without the aubmindlab tag. Defaults to "bert-base-arabertv02". Current accepted models are:
- :obj:`"bert-base-arabertv01"`: No farasa segmentation.
- :obj:`"bert-base-arabert"`: with farasa segmentation.
- :obj:`"bert-base-arabertv02"`: No farasas egmentation.
- :obj:`"bert-base-arabertv2"`: with farasa segmentation.
- :obj:`"bert-large-arabertv02"`: No farasas egmentation.
- :obj:`"bert-large-arabertv2"`: with farasa segmentation.
- :obj:`"araelectra-base"`: No farasa segmentation.
- :obj:`"araelectra-base-discriminator"`: No farasa segmentation.
- :obj:`"araelectra-base-generator"`: No farasa segmentation.
- :obj:`"aragpt2-base"`: No farasa segmentation.
- :obj:`"aragpt2-medium"`: No farasa segmentation.
- :obj:`"aragpt2-large"`: No farasa segmentation.
- :obj:`"aragpt2-mega"`: No farasa segmentation.
keep_emojis(:obj: `bool`): don't remove emojis while preprocessing. Defaults to False
        remove_html_markup(:obj: `bool`): Whether to remove html artifacts, should be set to False when preprocessing TyDi QA. Defaults to True
        replace_urls_emails_mentions(:obj: `bool`): Whether to replace emails, urls and mentions by special tokens. Defaults to True
strip_tashkeel(:obj: `bool`): remove diacritics (FATHATAN, DAMMATAN, KASRATAN, FATHA, DAMMA, KASRA, SUKUN, SHADDA)
strip_tatweel(:obj: `bool`): remove tatweel '\\u0640'
insert_white_spaces(:obj: `bool`): insert whitespace before and after all non Arabic digits or English digits or Arabic and English Alphabet or the 2 brackets, then inserts whitespace between words and numbers or numbers and words
remove_elongation(:obj: `bool`): replace repetition of more than 2 non-digit character with 2 of this character
"""
model_name = model_name.replace('aubmindlab/', '')
if model_name not in ACCEPTED_MODELS:
logging.warning(
"Model provided is not in the accepted model list. Assuming you don't want Farasa Segmentation"
)
self.model_name = 'bert-base-arabertv02'
else:
self.model_name = model_name
if self.model_name in SEGMENTED_MODELS:
logging.info(
'Selected Model requires pre-segmentation, Initializing FarasaSegmenter'
)
try:
from farasa.segmenter import FarasaSegmenter
self.farasa_segmenter = FarasaSegmenter(interactive=True)
except:
logging.warning(
                    "farasapy is not installed, you won't be able to process text for AraBERTv1 and v2. Install it using: pip install farasapy"
)
else:
logging.info(
"Selected Model doesn't require pre-segmentation, skipping FarasaSegmenter initialization"
)
self.keep_emojis = keep_emojis
if self.keep_emojis:
import emoji
self.emoji = emoji
if self.model_name in SEGMENTED_MODELS:
logging.warning(
'Keeping tweets with Farasa Segmentation is 10 times slower'
)
self.remove_html_markup = remove_html_markup
self.replace_urls_emails_mentions = replace_urls_emails_mentions
self.strip_tashkeel = strip_tashkeel
self.strip_tatweel = strip_tatweel
self.insert_white_spaces = insert_white_spaces
self.remove_elongation = remove_elongation
def preprocess(self, text):
"""
        Preprocess takes an input text line and applies the same preprocessing used in AraBERT
        pretraining
        Args:
            text (:obj:`str`): input text string
Returns:
string: A preprocessed string depending on which model was selected
"""
if self.model_name == 'bert-base-arabert':
return self._old_preprocess(text, do_farasa_tokenization=True)
if self.model_name == 'bert-base-arabertv01':
return self._old_preprocess(text, do_farasa_tokenization=False)
text = str(text)
text = html.unescape(text)
if self.strip_tashkeel:
text = araby.strip_tashkeel(text)
if self.strip_tatweel:
text = araby.strip_tatweel(text)
if self.replace_urls_emails_mentions:
for reg in url_regexes:
text = re.sub(reg, ' [رابط] ', text)
for reg in email_regexes:
text = re.sub(reg, ' [بريد] ', text)
text = re.sub(user_mention_regex, ' [مستخدم] ', text)
if self.remove_html_markup:
text = re.sub('<br />', ' ', text)
text = re.sub('</?[^>]+>', ' ', text)
if self.remove_elongation:
text = self._remove_elongation(text)
if self.insert_white_spaces:
text = re.sub('([^0-9ء-غف-ي٠-٩a-zA-Z\\[\\]])', ' \\1 ', text)
text = re.sub('(\\d+)([ء-غف-ي٠-٬]+)', ' \\1 \\2 ', text)
text = re.sub('([ء-غف-ي٠-٬]+)(\\d+)', ' \\1 \\2 ', text)
if self.keep_emojis:
emoji_regex = ''.join(list(self.emoji.UNICODE_EMOJI['en'].keys()))
rejected_chars_regex2 = '[^%s%s]' % (chars_regex, emoji_regex)
text = re.sub(rejected_chars_regex2, ' ', text)
else:
text = re.sub(rejected_chars_regex, ' ', text)
text = ' '.join(text.replace('️', '').split())
if (self.model_name == 'bert-base-arabertv2' or self.model_name ==
'bert-large-arabertv2'):
if self.keep_emojis:
new_text = []
for word in text.split():
if word in list(self.emoji.UNICODE_EMOJI['en'].keys()):
new_text.append(word)
else:
new_text.append(self.farasa_segmenter.segment(word))
text = ' '.join(new_text)
else:
text = self.farasa_segmenter.segment(text)
return self._farasa_segment(text)
return text
def unpreprocess(self, text, desegment=True):
"""Re-formats the text to a classic format where punctuations, brackets, parenthesis are not seperated by whitespaces.
The objective is to make the generated text of any model appear natural and not preprocessed.
Args:
text (str): input text to be un-preprocessed
desegment (bool, optional): [whether or not to remove farasa pre-segmentation before]. Defaults to True.
Returns:
str: The unpreprocessed (and possibly Farasa-desegmented) text.
"""
if self.model_name in SEGMENTED_MODELS and desegment:
text = self.desegment(text)
text = re.sub(white_spaced_double_quotation_regex, '"' + '\\1' +
'"', text)
text = re.sub(white_spaced_single_quotation_regex, "'" + '\\1' +
"'", text)
text = re.sub(white_spaced_back_quotation_regex, '\\`' + '\\1' +
'\\`', text)
text = re.sub(white_spaced_back_quotation_regex, '\\—' + '\\1' +
'\\—', text)
text = text.replace('.', ' . ')
text = ' '.join(text.split())
text = re.sub('(\\d+) \\. (\\d+)', '\\1.\\2', text)
text = re.sub('(\\d+) \\, (\\d+)', '\\1,\\2', text)
text = re.sub(left_and_right_spaced_chars, '\\1', text)
text = re.sub(left_spaced_chars, '\\1', text)
text = re.sub(right_spaced_chars, '\\1', text)
return text
def desegment(self, text):
"""
Use this function if sentence tokenization was done using
`from arabert.preprocess_arabert import preprocess` with Farasa enabled
AraBERT segmentation using Farasa adds a space after the '+' for prefixes,
        and before the '+' for suffixes
Example:
>>> desegment('ال+ دراس +ات')
الدراسات
"""
text = text.replace('+ ', '+')
text = text.replace(' +', '+')
text = ' '.join([self._desegmentword(word) for word in text.split(' ')]
)
return text
def _desegmentword(self, orig_word: str) ->str:
"""
Word segmentor that takes a Farasa Segmented Word and removes the '+' signs
Example:
>>> _desegmentword("ال+يومي+ة")
اليومية
"""
word = orig_word.replace('ل+ال+', 'لل')
if 'ال+ال' not in orig_word:
word = word.replace('ل+ال', 'لل')
word = word.replace('+', '')
word = word.replace('للل', 'لل')
return word
def _old_preprocess(self, text, do_farasa_tokenization):
"""
AraBERTv1 preprocessing Function
"""
text = str(text)
if self.strip_tashkeel:
text = araby.strip_tashkeel(text)
text = re.sub('\\d+\\/[ء-ي]+\\/\\d+\\]', '', text)
text = re.sub('ـ', '', text)
text = re.sub('[«»]', ' " ', text)
if self.replace_urls_emails_mentions:
text = re.sub(regex_url_step1, '[رابط]', text)
text = re.sub(regex_url_step2, '[رابط]', text)
text = re.sub(regex_url, '[رابط]', text)
text = re.sub(regex_email, '[بريد]', text)
text = re.sub(regex_mention, '[مستخدم]', text)
text = re.sub('…', '\\.', text).strip()
text = self._remove_redundant_punct(text)
if self.replace_urls_emails_mentions:
text = re.sub('\\[ رابط \\]|\\[ رابط\\]|\\[رابط \\]',
' [رابط] ', text)
text = re.sub('\\[ بريد \\]|\\[ بريد\\]|\\[بريد \\]',
' [بريد] ', text)
text = re.sub('\\[ مستخدم \\]|\\[ مستخدم\\]|\\[مستخدم \\]',
' [مستخدم] ', text)
if self.remove_elongation:
text = self._remove_elongation(text)
if self.insert_white_spaces:
text = re.sub('([^0-9ء-غف-٩ٱ-ٳa-zA-Z\\[\\]])', ' \\1 ', text)
if do_farasa_tokenization:
text = self._tokenize_arabic_words_farasa(text)
return text.strip()
def _farasa_segment(self, text):
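        # Re-joins Farasa's '+'-marked tokens word by word while keeping the
        # [رابط]/[بريد]/[مستخدم] placeholders intact.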
line_farasa = text.split()
segmented_line = []
for index, word in enumerate(line_farasa):
if word in ['[', ']']:
continue
if word in ['رابط', 'بريد', 'مستخدم'] and line_farasa[index - 1
] in ['[', ']']:
segmented_line.append('[' + word + ']')
continue
if '+' not in word:
segmented_line.append(word)
continue
segmented_word = self._split_farasa_output(word)
segmented_line.extend(segmented_word)
return ' '.join(segmented_line)
def _split_farasa_output(self, word):
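        # Walks a '+'-delimited Farasa word and re-attaches each '+' to known
        # prefixes (as 'prefix+') and suffixes (as '+suffix').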
segmented_word = []
temp_token = ''
for i, c in enumerate(word):
if c == '+':
if temp_token == 'ك':
if i == 1:
segmented_word.append(temp_token + '+')
temp_token = ''
elif word[i - 2] == '+':
if segmented_word[-1][-1] == '+':
segmented_word.append(temp_token + '+')
temp_token = ''
else:
segmented_word.append('+' + temp_token)
temp_token = ''
elif temp_token in prefix_list:
segmented_word.append(temp_token + '+')
temp_token = ''
elif temp_token in suffix_list:
segmented_word.append('+' + temp_token)
temp_token = ''
else:
segmented_word.append(temp_token)
temp_token = ''
continue
temp_token += c
if temp_token != '':
if temp_token in suffix_list:
segmented_word.append('+' + temp_token)
else:
segmented_word.append(temp_token)
return segmented_word
def _tokenize_arabic_words_farasa(self, line_input):
if self.keep_emojis:
line_farasa = []
for word in line_input.split():
if word in list(self.emoji.UNICODE_EMOJI['en'].keys()):
line_farasa.append(word)
else:
line_farasa.append(self.farasa_segmenter.segment(word))
else:
line_farasa = self.farasa_segmenter.segment(line_input).split()
segmented_line = []
for index, word in enumerate(line_farasa):
if word in ['[', ']']:
continue
if word in ['رابط', 'بريد', 'مستخدم'] and line_farasa[index - 1
] in ['[', ']']:
segmented_line.append('[' + word + ']')
continue
segmented_word = []
for token in word.split('+'):
if token in prefix_list:
segmented_word.append(token + '+')
elif token in suffix_list:
segmented_word.append('+' + token)
else:
segmented_word.append(token)
segmented_line.extend(segmented_word)
return ' '.join(segmented_line)
def _remove_elongation(self, text):
"""
:param text: the input text to remove elongation
:return: delongated text
"""
for index_ in range(len(re.findall(regex_tatweel, text))):
elongation = re.search(regex_tatweel, text)
if elongation:
elongation_pattern = elongation.group()
elongation_replacement = elongation_pattern[0]
elongation_pattern = re.escape(elongation_pattern)
text = re.sub(elongation_pattern, elongation_replacement,
text, flags=re.MULTILINE)
else:
break
return text
def _remove_redundant_punct(self, text):
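        # Collapses each run of repeated punctuation into a single space-padded,
        # order-preserving set of the characters it contained.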
text_ = text
result = re.search(redundant_punct_pattern, text)
dif = 0
while result:
sub = result.group()
sub = sorted(set(sub), key=sub.index)
sub = ' ' + ''.join(list(sub)) + ' '
text = ''.join((text[:result.span()[0] + dif], sub, text[result
.span()[1] + dif:]))
text_ = ''.join((text_[:result.span()[0]], text_[result.span()[
1]:])).strip()
dif = abs(len(text) - len(text_))
result = re.search(redundant_punct_pattern, text_)
text = re.sub('\\s+', ' ', text)
return text.strip()
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class ArbertmoPreprocessor:
"""
A Preprocessor class that cleans and preprocesses text for all models in the AraBERT repo.
    It can also unprocess the text output of generation models.
Args:
model_name (:obj:`str`): model name from the HuggingFace Models page without the aubmindlab tag. Defaults to "bert-base-arabertv02". Current accepted models are:
- :obj:`"bert-base-arabertv01"`: No farasa segmentation.
- :obj:`"bert-base-arabert"`: with farasa segmentation.
- :obj:`"bert-base-arabertv02"`: No farasas egmentation.
- :obj:`"bert-base-arabertv2"`: with farasa segmentation.
- :obj:`"bert-large-arabertv02"`: No farasas egmentation.
- :obj:`"bert-large-arabertv2"`: with farasa segmentation.
- :obj:`"araelectra-base"`: No farasa segmentation.
- :obj:`"araelectra-base-discriminator"`: No farasa segmentation.
- :obj:`"araelectra-base-generator"`: No farasa segmentation.
- :obj:`"aragpt2-base"`: No farasa segmentation.
- :obj:`"aragpt2-medium"`: No farasa segmentation.
- :obj:`"aragpt2-large"`: No farasa segmentation.
- :obj:`"aragpt2-mega"`: No farasa segmentation.
keep_emojis(:obj: `bool`): don't remove emojis while preprocessing. Defaults to False
        remove_html_markup(:obj: `bool`): Whether to remove html artifacts, should be set to False when preprocessing TyDi QA. Defaults to True
        replace_urls_emails_mentions(:obj: `bool`): Whether to replace emails, urls and mentions by special tokens. Defaults to True
strip_tashkeel(:obj: `bool`): remove diacritics (FATHATAN, DAMMATAN, KASRATAN, FATHA, DAMMA, KASRA, SUKUN, SHADDA)
strip_tatweel(:obj: `bool`): remove tatweel '\\u0640'
insert_white_spaces(:obj: `bool`): insert whitespace before and after all non Arabic digits or English digits or Arabic and English Alphabet or the 2 brackets, then inserts whitespace between words and numbers or numbers and words
remove_elongation(:obj: `bool`): replace repetition of more than 2 non-digit character with 2 of this character
Returns:
        ArbertmoPreprocessor: the preprocessor class
    Example:
        from preprocess import ArbertmoPreprocessor
        arabert_prep = ArbertmoPreprocessor("aubmindlab/bert-base-arabertv2")
arabert_prep.preprocess("SOME ARABIC TEXT")
"""
def __init__(self, model_name, keep_emojis=False, remove_html_markup=
True, replace_urls_emails_mentions=True, strip_tashkeel=True,
strip_tatweel=True, insert_white_spaces=True, remove_elongation=True):
"""
model_name (:obj:`str`): model name from the HuggingFace Models page without the aubmindlab tag. Defaults to "bert-base-arabertv02". Current accepted models are:
- :obj:`"bert-base-arabertv01"`: No farasa segmentation.
- :obj:`"bert-base-arabert"`: with farasa segmentation.
- :obj:`"bert-base-arabertv02"`: No farasas egmentation.
- :obj:`"bert-base-arabertv2"`: with farasa segmentation.
- :obj:`"bert-large-arabertv02"`: No farasas egmentation.
- :obj:`"bert-large-arabertv2"`: with farasa segmentation.
- :obj:`"araelectra-base"`: No farasa segmentation.
- :obj:`"araelectra-base-discriminator"`: No farasa segmentation.
- :obj:`"araelectra-base-generator"`: No farasa segmentation.
- :obj:`"aragpt2-base"`: No farasa segmentation.
- :obj:`"aragpt2-medium"`: No farasa segmentation.
- :obj:`"aragpt2-large"`: No farasa segmentation.
- :obj:`"aragpt2-mega"`: No farasa segmentation.
keep_emojis(:obj: `bool`): don't remove emojis while preprocessing. Defaults to False
        remove_html_markup(:obj: `bool`): Whether to remove HTML artifacts; should be set to False when preprocessing TyDi QA. Defaults to True
replace_urls_emails_mentions(:obj: `bool`): Whether to replace email urls and mentions by special tokens. Defaults to True
strip_tashkeel(:obj: `bool`): remove diacritics (FATHATAN, DAMMATAN, KASRATAN, FATHA, DAMMA, KASRA, SUKUN, SHADDA)
strip_tatweel(:obj: `bool`): remove tatweel '\\u0640'
insert_white_spaces(:obj: `bool`): insert whitespace before and after all non Arabic digits or English digits or Arabic and English Alphabet or the 2 brackets, then inserts whitespace between words and numbers or numbers and words
remove_elongation(:obj: `bool`): replace repetition of more than 2 non-digit character with 2 of this character
"""
model_name = model_name.replace('aubmindlab/', '')
if model_name not in ACCEPTED_MODELS:
logging.warning(
"Model provided is not in the accepted model list. Assuming you don't want Farasa Segmentation"
)
self.model_name = 'bert-base-arabertv02'
else:
self.model_name = model_name
if self.model_name in SEGMENTED_MODELS:
logging.info(
'Selected Model requires pre-segmentation, Initializing FarasaSegmenter'
)
try:
from farasa.segmenter import FarasaSegmenter
self.farasa_segmenter = FarasaSegmenter(interactive=True)
except:
logging.warning(
                    'farasapy is not installed, you will not be able to process text for AraBERTv1 and v2. Install it using: pip install farasapy'
)
else:
logging.info(
"Selected Model doesn't require pre-segmentation, skipping FarasaSegmenter initialization"
)
self.keep_emojis = keep_emojis
if self.keep_emojis:
import emoji
self.emoji = emoji
if self.model_name in SEGMENTED_MODELS:
logging.warning(
'Keeping tweets with Farasa Segmentation is 10 times slower'
)
self.remove_html_markup = remove_html_markup
self.replace_urls_emails_mentions = replace_urls_emails_mentions
self.strip_tashkeel = strip_tashkeel
self.strip_tatweel = strip_tatweel
self.insert_white_spaces = insert_white_spaces
self.remove_elongation = remove_elongation
def preprocess(self, text):
"""
        Preprocess takes an input text line and applies the same preprocessing used in AraBERT
pretraining
Args:
            text (:obj:`str`): input text string
Returns:
string: A preprocessed string depending on which model was selected
"""
if self.model_name == 'bert-base-arabert':
return self._old_preprocess(text, do_farasa_tokenization=True)
if self.model_name == 'bert-base-arabertv01':
return self._old_preprocess(text, do_farasa_tokenization=False)
text = str(text)
text = html.unescape(text)
if self.strip_tashkeel:
text = araby.strip_tashkeel(text)
if self.strip_tatweel:
text = araby.strip_tatweel(text)
if self.replace_urls_emails_mentions:
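            # replace URLs, e-mails and @-mentions with the special tokens [رابط], [بريد] and [مستخدم]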
for reg in url_regexes:
text = re.sub(reg, ' [رابط] ', text)
for reg in email_regexes:
text = re.sub(reg, ' [بريد] ', text)
text = re.sub(user_mention_regex, ' [مستخدم] ', text)
if self.remove_html_markup:
text = re.sub('<br />', ' ', text)
text = re.sub('</?[^>]+>', ' ', text)
if self.remove_elongation:
text = self._remove_elongation(text)
if self.insert_white_spaces:
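            # insert whitespace around characters that are not Arabic/Latin letters, digits or brackets, and between digits and Arabic words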
text = re.sub('([^0-9ء-غف-ي٠-٩a-zA-Z\\[\\]])', ' \\1 ', text)
text = re.sub('(\\d+)([ء-غف-ي٠-٬]+)', ' \\1 \\2 ', text)
text = re.sub('([ء-غف-ي٠-٬]+)(\\d+)', ' \\1 \\2 ', text)
if self.keep_emojis:
emoji_regex = ''.join(list(self.emoji.UNICODE_EMOJI['en'].keys()))
rejected_chars_regex2 = '[^%s%s]' % (chars_regex, emoji_regex)
text = re.sub(rejected_chars_regex2, ' ', text)
else:
text = re.sub(rejected_chars_regex, ' ', text)
text = ' '.join(text.replace('️', '').split())
if (self.model_name == 'bert-base-arabertv2' or self.model_name ==
'bert-large-arabertv2'):
if self.keep_emojis:
new_text = []
for word in text.split():
if word in list(self.emoji.UNICODE_EMOJI['en'].keys()):
new_text.append(word)
else:
new_text.append(self.farasa_segmenter.segment(word))
text = ' '.join(new_text)
else:
text = self.farasa_segmenter.segment(text)
return self._farasa_segment(text)
return text
    def unpreprocess(self, text, desegment=True):
        """Re-formats the text to a classic format where punctuation, brackets and parentheses are not separated by whitespaces.
The objective is to make the generated text of any model appear natural and not preprocessed.
Args:
text (str): input text to be un-preprocessed
desegment (bool, optional): [whether or not to remove farasa pre-segmentation before]. Defaults to True.
Returns:
str: The unpreprocessed (and possibly Farasa-desegmented) text.
"""
if self.model_name in SEGMENTED_MODELS and desegment:
text = self.desegment(text)
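        # remove the spaces around quotation marks, e.g. i " ate " an apple -> i "ate" an apple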
text = re.sub(white_spaced_double_quotation_regex, '"' + '\\1' +
'"', text)
text = re.sub(white_spaced_single_quotation_regex, "'" + '\\1' +
"'", text)
text = re.sub(white_spaced_back_quotation_regex, '\\`' + '\\1' +
'\\`', text)
        text = re.sub(white_spaced_em_dash, '\\—' + '\\1' + '\\—', text)
text = text.replace('.', ' . ')
text = ' '.join(text.split())
text = re.sub('(\\d+) \\. (\\d+)', '\\1.\\2', text)
text = re.sub('(\\d+) \\, (\\d+)', '\\1,\\2', text)
text = re.sub(left_and_right_spaced_chars, '\\1', text)
text = re.sub(left_spaced_chars, '\\1', text)
text = re.sub(right_spaced_chars, '\\1', text)
return text
def desegment(self, text):
"""
Use this function if sentence tokenization was done using
`from arabert.preprocess_arabert import preprocess` with Farasa enabled
AraBERT segmentation using Farasa adds a space after the '+' for prefixes,
        and a space before the '+' for suffixes
Example:
>>> desegment('ال+ دراس +ات')
الدراسات
"""
text = text.replace('+ ', '+')
text = text.replace(' +', '+')
text = ' '.join([self._desegmentword(word) for word in text.split(' ')]
)
return text
def _desegmentword(self, orig_word: str) ->str:
"""
Word segmentor that takes a Farasa Segmented Word and removes the '+' signs
Example:
>>> _desegmentword("ال+يومي+ة")
اليومية
"""
word = orig_word.replace('ل+ال+', 'لل')
if 'ال+ال' not in orig_word:
word = word.replace('ل+ال', 'لل')
word = word.replace('+', '')
word = word.replace('للل', 'لل')
return word
def _old_preprocess(self, text, do_farasa_tokenization):
"""
AraBERTv1 preprocessing Function
"""
text = str(text)
if self.strip_tashkeel:
text = araby.strip_tashkeel(text)
text = re.sub('\\d+\\/[ء-ي]+\\/\\d+\\]', '', text)
text = re.sub('ـ', '', text)
text = re.sub('[«»]', ' " ', text)
if self.replace_urls_emails_mentions:
text = re.sub(regex_url_step1, '[رابط]', text)
text = re.sub(regex_url_step2, '[رابط]', text)
text = re.sub(regex_url, '[رابط]', text)
text = re.sub(regex_email, '[بريد]', text)
text = re.sub(regex_mention, '[مستخدم]', text)
text = re.sub('…', '\\.', text).strip()
text = self._remove_redundant_punct(text)
if self.replace_urls_emails_mentions:
text = re.sub('\\[ رابط \\]|\\[ رابط\\]|\\[رابط \\]',
' [رابط] ', text)
text = re.sub('\\[ بريد \\]|\\[ بريد\\]|\\[بريد \\]',
' [بريد] ', text)
text = re.sub('\\[ مستخدم \\]|\\[ مستخدم\\]|\\[مستخدم \\]',
' [مستخدم] ', text)
if self.remove_elongation:
text = self._remove_elongation(text)
if self.insert_white_spaces:
text = re.sub('([^0-9ء-غف-٩ٱ-ٳa-zA-Z\\[\\]])', ' \\1 ', text)
if do_farasa_tokenization:
text = self._tokenize_arabic_words_farasa(text)
return text.strip()
def _farasa_segment(self, text):
line_farasa = text.split()
segmented_line = []
for index, word in enumerate(line_farasa):
if word in ['[', ']']:
continue
if word in ['رابط', 'بريد', 'مستخدم'] and line_farasa[index - 1
] in ['[', ']']:
segmented_line.append('[' + word + ']')
continue
if '+' not in word:
segmented_line.append(word)
continue
segmented_word = self._split_farasa_output(word)
segmented_line.extend(segmented_word)
return ' '.join(segmented_line)
def _split_farasa_output(self, word):
segmented_word = []
temp_token = ''
for i, c in enumerate(word):
if c == '+':
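                # the KAF token 'ك' can be either a prefix or a suffix, so disambiguate it by its position in the word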
if temp_token == 'ك':
if i == 1:
segmented_word.append(temp_token + '+')
temp_token = ''
elif word[i - 2] == '+':
if segmented_word[-1][-1] == '+':
segmented_word.append(temp_token + '+')
temp_token = ''
else:
segmented_word.append('+' + temp_token)
temp_token = ''
elif temp_token in prefix_list:
segmented_word.append(temp_token + '+')
temp_token = ''
elif temp_token in suffix_list:
segmented_word.append('+' + temp_token)
temp_token = ''
else:
segmented_word.append(temp_token)
temp_token = ''
continue
temp_token += c
if temp_token != '':
if temp_token in suffix_list:
segmented_word.append('+' + temp_token)
else:
segmented_word.append(temp_token)
return segmented_word
def _tokenize_arabic_words_farasa(self, line_input):
if self.keep_emojis:
line_farasa = []
for word in line_input.split():
if word in list(self.emoji.UNICODE_EMOJI['en'].keys()):
line_farasa.append(word)
else:
line_farasa.append(self.farasa_segmenter.segment(word))
else:
line_farasa = self.farasa_segmenter.segment(line_input).split()
segmented_line = []
for index, word in enumerate(line_farasa):
if word in ['[', ']']:
continue
if word in ['رابط', 'بريد', 'مستخدم'] and line_farasa[index - 1
] in ['[', ']']:
segmented_line.append('[' + word + ']')
continue
segmented_word = []
for token in word.split('+'):
if token in prefix_list:
segmented_word.append(token + '+')
elif token in suffix_list:
segmented_word.append('+' + token)
else:
segmented_word.append(token)
segmented_line.extend(segmented_word)
return ' '.join(segmented_line)
def _remove_elongation(self, text):
"""
:param text: the input text to remove elongation
        :return: the de-elongated text
"""
for index_ in range(len(re.findall(regex_tatweel, text))):
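            # collapse the next run of 3 or more repeated non-digit characters down to a single character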
elongation = re.search(regex_tatweel, text)
if elongation:
elongation_pattern = elongation.group()
elongation_replacement = elongation_pattern[0]
elongation_pattern = re.escape(elongation_pattern)
text = re.sub(elongation_pattern, elongation_replacement,
text, flags=re.MULTILINE)
else:
break
return text
def _remove_redundant_punct(self, text):
text_ = text
result = re.search(redundant_punct_pattern, text)
dif = 0
while result:
sub = result.group()
sub = sorted(set(sub), key=sub.index)
sub = ' ' + ''.join(list(sub)) + ' '
text = ''.join((text[:result.span()[0] + dif], sub, text[result
.span()[1] + dif:]))
text_ = ''.join((text_[:result.span()[0]], text_[result.span()[
1]:])).strip()
dif = abs(len(text) - len(text_))
result = re.search(redundant_punct_pattern, text_)
text = re.sub('\\s+', ' ', text)
return text.strip()
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
ACCEPTED_MODELS = ['bert-base-arabertv01', 'bert-base-arabert',
'bert-base-arabertv02', 'bert-base-arabertv2', 'bert-large-arabertv02',
'bert-large-arabertv2', 'araelectra-base',
'araelectra-base-discriminator', 'araelectra-base-generator',
'aragpt2-base', 'aragpt2-medium', 'aragpt2-large', 'aragpt2-mega']
SEGMENTED_MODELS = ['bert-base-arabert', 'bert-base-arabertv2',
'bert-large-arabertv2']
class ArbertmoPreprocessor:
"""
A Preprocessor class that cleans and preprocesses text for all models in the AraBERT repo.
    It can also un-process the text output of generative models
Args:
model_name (:obj:`str`): model name from the HuggingFace Models page without the aubmindlab tag. Defaults to "bert-base-arabertv02". Current accepted models are:
- :obj:`"bert-base-arabertv01"`: No farasa segmentation.
- :obj:`"bert-base-arabert"`: with farasa segmentation.
        - :obj:`"bert-base-arabertv02"`: No farasa segmentation.
        - :obj:`"bert-base-arabertv2"`: with farasa segmentation.
        - :obj:`"bert-large-arabertv02"`: No farasa segmentation.
- :obj:`"bert-large-arabertv2"`: with farasa segmentation.
- :obj:`"araelectra-base"`: No farasa segmentation.
- :obj:`"araelectra-base-discriminator"`: No farasa segmentation.
- :obj:`"araelectra-base-generator"`: No farasa segmentation.
- :obj:`"aragpt2-base"`: No farasa segmentation.
- :obj:`"aragpt2-medium"`: No farasa segmentation.
- :obj:`"aragpt2-large"`: No farasa segmentation.
- :obj:`"aragpt2-mega"`: No farasa segmentation.
keep_emojis(:obj: `bool`): don't remove emojis while preprocessing. Defaults to False
    remove_html_markup(:obj: `bool`): Whether to remove HTML artifacts; should be set to False when preprocessing TyDi QA. Defaults to True
replace_urls_emails_mentions(:obj: `bool`): Whether to replace email urls and mentions by special tokens. Defaults to True
strip_tashkeel(:obj: `bool`): remove diacritics (FATHATAN, DAMMATAN, KASRATAN, FATHA, DAMMA, KASRA, SUKUN, SHADDA)
strip_tatweel(:obj: `bool`): remove tatweel '\\u0640'
insert_white_spaces(:obj: `bool`): insert whitespace before and after all non Arabic digits or English digits or Arabic and English Alphabet or the 2 brackets, then inserts whitespace between words and numbers or numbers and words
remove_elongation(:obj: `bool`): replace repetition of more than 2 non-digit character with 2 of this character
Returns:
        ArbertmoPreprocessor: the preprocessor class
    Example:
        from preprocess import ArbertmoPreprocessor
        arabert_prep = ArbertmoPreprocessor("aubmindlab/bert-base-arabertv2")
arabert_prep.preprocess("SOME ARABIC TEXT")
"""
def __init__(self, model_name, keep_emojis=False, remove_html_markup=
True, replace_urls_emails_mentions=True, strip_tashkeel=True,
strip_tatweel=True, insert_white_spaces=True, remove_elongation=True):
"""
model_name (:obj:`str`): model name from the HuggingFace Models page without the aubmindlab tag. Defaults to "bert-base-arabertv02". Current accepted models are:
- :obj:`"bert-base-arabertv01"`: No farasa segmentation.
- :obj:`"bert-base-arabert"`: with farasa segmentation.
            - :obj:`"bert-base-arabertv02"`: No farasa segmentation.
            - :obj:`"bert-base-arabertv2"`: with farasa segmentation.
            - :obj:`"bert-large-arabertv02"`: No farasa segmentation.
- :obj:`"bert-large-arabertv2"`: with farasa segmentation.
- :obj:`"araelectra-base"`: No farasa segmentation.
- :obj:`"araelectra-base-discriminator"`: No farasa segmentation.
- :obj:`"araelectra-base-generator"`: No farasa segmentation.
- :obj:`"aragpt2-base"`: No farasa segmentation.
- :obj:`"aragpt2-medium"`: No farasa segmentation.
- :obj:`"aragpt2-large"`: No farasa segmentation.
- :obj:`"aragpt2-mega"`: No farasa segmentation.
keep_emojis(:obj: `bool`): don't remove emojis while preprocessing. Defaults to False
        remove_html_markup(:obj: `bool`): Whether to remove HTML artifacts; should be set to False when preprocessing TyDi QA. Defaults to True
replace_urls_emails_mentions(:obj: `bool`): Whether to replace email urls and mentions by special tokens. Defaults to True
strip_tashkeel(:obj: `bool`): remove diacritics (FATHATAN, DAMMATAN, KASRATAN, FATHA, DAMMA, KASRA, SUKUN, SHADDA)
strip_tatweel(:obj: `bool`): remove tatweel '\\u0640'
insert_white_spaces(:obj: `bool`): insert whitespace before and after all non Arabic digits or English digits or Arabic and English Alphabet or the 2 brackets, then inserts whitespace between words and numbers or numbers and words
remove_elongation(:obj: `bool`): replace repetition of more than 2 non-digit character with 2 of this character
"""
model_name = model_name.replace('aubmindlab/', '')
if model_name not in ACCEPTED_MODELS:
logging.warning(
"Model provided is not in the accepted model list. Assuming you don't want Farasa Segmentation"
)
self.model_name = 'bert-base-arabertv02'
else:
self.model_name = model_name
if self.model_name in SEGMENTED_MODELS:
logging.info(
'Selected Model requires pre-segmentation, Initializing FarasaSegmenter'
)
try:
from farasa.segmenter import FarasaSegmenter
self.farasa_segmenter = FarasaSegmenter(interactive=True)
except:
logging.warning(
                    'farasapy is not installed, you will not be able to process text for AraBERTv1 and v2. Install it using: pip install farasapy'
)
else:
logging.info(
"Selected Model doesn't require pre-segmentation, skipping FarasaSegmenter initialization"
)
self.keep_emojis = keep_emojis
if self.keep_emojis:
import emoji
self.emoji = emoji
if self.model_name in SEGMENTED_MODELS:
logging.warning(
'Keeping tweets with Farasa Segmentation is 10 times slower'
)
self.remove_html_markup = remove_html_markup
self.replace_urls_emails_mentions = replace_urls_emails_mentions
self.strip_tashkeel = strip_tashkeel
self.strip_tatweel = strip_tatweel
self.insert_white_spaces = insert_white_spaces
self.remove_elongation = remove_elongation
def preprocess(self, text):
"""
        Preprocess takes an input text line and applies the same preprocessing used in AraBERT
pretraining
Args:
            text (:obj:`str`): input text string
Returns:
string: A preprocessed string depending on which model was selected
"""
if self.model_name == 'bert-base-arabert':
return self._old_preprocess(text, do_farasa_tokenization=True)
if self.model_name == 'bert-base-arabertv01':
return self._old_preprocess(text, do_farasa_tokenization=False)
text = str(text)
text = html.unescape(text)
if self.strip_tashkeel:
text = araby.strip_tashkeel(text)
if self.strip_tatweel:
text = araby.strip_tatweel(text)
if self.replace_urls_emails_mentions:
for reg in url_regexes:
text = re.sub(reg, ' [رابط] ', text)
for reg in email_regexes:
text = re.sub(reg, ' [بريد] ', text)
text = re.sub(user_mention_regex, ' [مستخدم] ', text)
if self.remove_html_markup:
text = re.sub('<br />', ' ', text)
text = re.sub('</?[^>]+>', ' ', text)
if self.remove_elongation:
text = self._remove_elongation(text)
if self.insert_white_spaces:
text = re.sub('([^0-9ء-غف-ي٠-٩a-zA-Z\\[\\]])', ' \\1 ', text)
text = re.sub('(\\d+)([ء-غف-ي٠-٬]+)', ' \\1 \\2 ', text)
text = re.sub('([ء-غف-ي٠-٬]+)(\\d+)', ' \\1 \\2 ', text)
if self.keep_emojis:
emoji_regex = ''.join(list(self.emoji.UNICODE_EMOJI['en'].keys()))
rejected_chars_regex2 = '[^%s%s]' % (chars_regex, emoji_regex)
text = re.sub(rejected_chars_regex2, ' ', text)
else:
text = re.sub(rejected_chars_regex, ' ', text)
text = ' '.join(text.replace('️', '').split())
if (self.model_name == 'bert-base-arabertv2' or self.model_name ==
'bert-large-arabertv2'):
if self.keep_emojis:
new_text = []
for word in text.split():
if word in list(self.emoji.UNICODE_EMOJI['en'].keys()):
new_text.append(word)
else:
new_text.append(self.farasa_segmenter.segment(word))
text = ' '.join(new_text)
else:
text = self.farasa_segmenter.segment(text)
return self._farasa_segment(text)
return text
    def unpreprocess(self, text, desegment=True):
        """Re-formats the text to a classic format where punctuation, brackets and parentheses are not separated by whitespaces.
The objective is to make the generated text of any model appear natural and not preprocessed.
Args:
text (str): input text to be un-preprocessed
desegment (bool, optional): [whether or not to remove farasa pre-segmentation before]. Defaults to True.
Returns:
str: The unpreprocessed (and possibly Farasa-desegmented) text.
"""
if self.model_name in SEGMENTED_MODELS and desegment:
text = self.desegment(text)
text = re.sub(white_spaced_double_quotation_regex, '"' + '\\1' +
'"', text)
text = re.sub(white_spaced_single_quotation_regex, "'" + '\\1' +
"'", text)
text = re.sub(white_spaced_back_quotation_regex, '\\`' + '\\1' +
'\\`', text)
        text = re.sub(white_spaced_em_dash, '\\—' + '\\1' + '\\—', text)
text = text.replace('.', ' . ')
text = ' '.join(text.split())
text = re.sub('(\\d+) \\. (\\d+)', '\\1.\\2', text)
text = re.sub('(\\d+) \\, (\\d+)', '\\1,\\2', text)
text = re.sub(left_and_right_spaced_chars, '\\1', text)
text = re.sub(left_spaced_chars, '\\1', text)
text = re.sub(right_spaced_chars, '\\1', text)
return text
def desegment(self, text):
"""
Use this function if sentence tokenization was done using
`from arabert.preprocess_arabert import preprocess` with Farasa enabled
AraBERT segmentation using Farasa adds a space after the '+' for prefixes,
        and a space before the '+' for suffixes
Example:
>>> desegment('ال+ دراس +ات')
الدراسات
"""
text = text.replace('+ ', '+')
text = text.replace(' +', '+')
text = ' '.join([self._desegmentword(word) for word in text.split(' ')]
)
return text
def _desegmentword(self, orig_word: str) ->str:
"""
Word segmentor that takes a Farasa Segmented Word and removes the '+' signs
Example:
>>> _desegmentword("ال+يومي+ة")
اليومية
"""
word = orig_word.replace('ل+ال+', 'لل')
if 'ال+ال' not in orig_word:
word = word.replace('ل+ال', 'لل')
word = word.replace('+', '')
word = word.replace('للل', 'لل')
return word
def _old_preprocess(self, text, do_farasa_tokenization):
"""
AraBERTv1 preprocessing Function
"""
text = str(text)
if self.strip_tashkeel:
text = araby.strip_tashkeel(text)
text = re.sub('\\d+\\/[ء-ي]+\\/\\d+\\]', '', text)
text = re.sub('ـ', '', text)
text = re.sub('[«»]', ' " ', text)
if self.replace_urls_emails_mentions:
text = re.sub(regex_url_step1, '[رابط]', text)
text = re.sub(regex_url_step2, '[رابط]', text)
text = re.sub(regex_url, '[رابط]', text)
text = re.sub(regex_email, '[بريد]', text)
text = re.sub(regex_mention, '[مستخدم]', text)
text = re.sub('…', '\\.', text).strip()
text = self._remove_redundant_punct(text)
if self.replace_urls_emails_mentions:
text = re.sub('\\[ رابط \\]|\\[ رابط\\]|\\[رابط \\]',
' [رابط] ', text)
text = re.sub('\\[ بريد \\]|\\[ بريد\\]|\\[بريد \\]',
' [بريد] ', text)
text = re.sub('\\[ مستخدم \\]|\\[ مستخدم\\]|\\[مستخدم \\]',
' [مستخدم] ', text)
if self.remove_elongation:
text = self._remove_elongation(text)
if self.insert_white_spaces:
text = re.sub('([^0-9ء-غف-٩ٱ-ٳa-zA-Z\\[\\]])', ' \\1 ', text)
if do_farasa_tokenization:
text = self._tokenize_arabic_words_farasa(text)
return text.strip()
def _farasa_segment(self, text):
line_farasa = text.split()
segmented_line = []
for index, word in enumerate(line_farasa):
if word in ['[', ']']:
continue
if word in ['رابط', 'بريد', 'مستخدم'] and line_farasa[index - 1
] in ['[', ']']:
segmented_line.append('[' + word + ']')
continue
if '+' not in word:
segmented_line.append(word)
continue
segmented_word = self._split_farasa_output(word)
segmented_line.extend(segmented_word)
return ' '.join(segmented_line)
def _split_farasa_output(self, word):
segmented_word = []
temp_token = ''
for i, c in enumerate(word):
if c == '+':
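                # the KAF token 'ك' can be either a prefix or a suffix, so disambiguate it by its position in the word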
if temp_token == 'ك':
if i == 1:
segmented_word.append(temp_token + '+')
temp_token = ''
elif word[i - 2] == '+':
if segmented_word[-1][-1] == '+':
segmented_word.append(temp_token + '+')
temp_token = ''
else:
segmented_word.append('+' + temp_token)
temp_token = ''
elif temp_token in prefix_list:
segmented_word.append(temp_token + '+')
temp_token = ''
elif temp_token in suffix_list:
segmented_word.append('+' + temp_token)
temp_token = ''
else:
segmented_word.append(temp_token)
temp_token = ''
continue
temp_token += c
if temp_token != '':
if temp_token in suffix_list:
segmented_word.append('+' + temp_token)
else:
segmented_word.append(temp_token)
return segmented_word
def _tokenize_arabic_words_farasa(self, line_input):
if self.keep_emojis:
line_farasa = []
for word in line_input.split():
if word in list(self.emoji.UNICODE_EMOJI['en'].keys()):
line_farasa.append(word)
else:
line_farasa.append(self.farasa_segmenter.segment(word))
else:
line_farasa = self.farasa_segmenter.segment(line_input).split()
segmented_line = []
for index, word in enumerate(line_farasa):
if word in ['[', ']']:
continue
if word in ['رابط', 'بريد', 'مستخدم'] and line_farasa[index - 1
] in ['[', ']']:
segmented_line.append('[' + word + ']')
continue
segmented_word = []
for token in word.split('+'):
if token in prefix_list:
segmented_word.append(token + '+')
elif token in suffix_list:
segmented_word.append('+' + token)
else:
segmented_word.append(token)
segmented_line.extend(segmented_word)
return ' '.join(segmented_line)
def _remove_elongation(self, text):
"""
:param text: the input text to remove elongation
        :return: the de-elongated text
"""
for index_ in range(len(re.findall(regex_tatweel, text))):
elongation = re.search(regex_tatweel, text)
if elongation:
elongation_pattern = elongation.group()
elongation_replacement = elongation_pattern[0]
elongation_pattern = re.escape(elongation_pattern)
text = re.sub(elongation_pattern, elongation_replacement,
text, flags=re.MULTILINE)
else:
break
return text
def _remove_redundant_punct(self, text):
text_ = text
result = re.search(redundant_punct_pattern, text)
dif = 0
while result:
sub = result.group()
sub = sorted(set(sub), key=sub.index)
sub = ' ' + ''.join(list(sub)) + ' '
text = ''.join((text[:result.span()[0] + dif], sub, text[result
.span()[1] + dif:]))
text_ = ''.join((text_[:result.span()[0]], text_[result.span()[
1]:])).strip()
dif = abs(len(text) - len(text_))
result = re.search(redundant_punct_pattern, text_)
text = re.sub('\\s+', ' ', text)
return text.strip()
prefix_list = ['ال', 'و', 'ف', 'ب', 'ك', 'ل', 'لل', 'ال', 'و', 'ف', 'ب',
'ك', 'ل', 'لل', 'س']
suffix_list = ['ه', 'ها', 'ك', 'ي', 'هما', 'كما', 'نا', 'كم', 'هم', 'هن',
'كن', 'ا', 'ان', 'ين', 'ون', 'وا', 'ات', 'ت', 'ن', 'ة', 'ه', 'ها', 'ك',
'ي', 'هما', 'كما', 'نا', 'كم', 'هم', 'هن', 'كن', 'ا', 'ان', 'ين', 'ون',
'وا', 'ات', 'ت', 'ن', 'ة']
other_tokens = ['[رابط]', '[مستخدم]', '[بريد]']
prefix_symbols = [(x + '+') for x in prefix_list]
suffix_symblos = [('+' + x) for x in suffix_list]
never_split_tokens = list(set(prefix_symbols + suffix_symblos + other_tokens))
url_regexes = [
'(http(s)?:\\/\\/.)?(www\\.)?[-a-zA-Z0-9@:%._\\+~#=]{2,256}\\.[a-z]{2,6}\\b([-a-zA-Z0-9@:%_\\+.~#?&//=]*)'
, '@(https?|ftp)://(-\\.)?([^\\s/?\\.#-]+\\.?)+(/[^\\s]*)?$@iS',
'http[s]?://[a-zA-Z0-9_\\-./~\\?=%&]+', 'www[a-zA-Z0-9_\\-?=%&/.~]+',
'[a-zA-Z]+\\.com', '(?=http)[^\\s]+', '(?=www)[^\\s]+', '://']
user_mention_regex = '@[\\w\\d]+'
email_regexes = ['[\\w-]+@([\\w-]+\\.)+[\\w-]+', '\\S+@\\S+']
redundant_punct_pattern = (
'([!\\"#\\$%\\\'\\(\\)\\*\\+,\\.:;\\-<=·>?@\\[\\\\\\]\\^_ـ`{\\|}~—٪’،؟`୍“؛”ۚ【»؛\\s+«–…‘]{2,})'
)
regex_tatweel = '(\\D)\\1{2,}'
rejected_chars_regex = (
'[^0-9\\u0621-\\u063A\\u0640-\\u066C\\u0671-\\u0674a-zA-Z\\[\\]!\\"#\\$%\\\'\\(\\)\\*\\+,\\.:;\\-<=·>?@\\[\\\\\\]\\^_ـ`{\\|}~—٪’،؟`୍“؛”ۚ»؛\\s+«–…‘]'
)
regex_url_step1 = '(?=http)[^\\s]+'
regex_url_step2 = '(?=www)[^\\s]+'
regex_url = (
'(http(s)?:\\/\\/.)?(www\\.)?[-a-zA-Z0-9@:%._\\+~#=]{2,256}\\.[a-z]{2,6}\\b([-a-zA-Z0-9@:%_\\+.~#?&//=]*)'
)
regex_mention = '@[\\w\\d]+'
regex_email = '\\S+@\\S+'
chars_regex = (
'0-9\\u0621-\\u063A\\u0640-\\u066C\\u0671-\\u0674a-zA-Z\\[\\]!\\"#\\$%\\\'\\(\\)\\*\\+,\\.:;\\-<=·>?@\\[\\\\\\]\\^_ـ`{\\|}~—٪’،؟`୍“؛”ۚ»؛\\s+«–…‘'
)
white_spaced_double_quotation_regex = '\\"\\s+([^"]+)\\s+\\"'
white_spaced_single_quotation_regex = "\\'\\s+([^']+)\\s+\\'"
white_spaced_back_quotation_regex = '\\`\\s+([^`]+)\\s+\\`'
white_spaced_em_dash = '\\—\\s+([^—]+)\\s+\\—'
left_spaced_chars = ' ([\\]!#\\$%\\),\\.:;\\?}٪’،؟”؛…»·])'
right_spaced_chars = '([\\[\\(\\{“«‘*\\~]) '
left_and_right_spaced_chars = ' ([\\+\\-\\<\\=\\>\\@\\\\\\^\\_\\|\\–]) '
<|reserved_special_token_1|>
import html
import logging
import re
import pyarabic.araby as araby
ACCEPTED_MODELS = ['bert-base-arabertv01', 'bert-base-arabert',
'bert-base-arabertv02', 'bert-base-arabertv2', 'bert-large-arabertv02',
'bert-large-arabertv2', 'araelectra-base',
'araelectra-base-discriminator', 'araelectra-base-generator',
'aragpt2-base', 'aragpt2-medium', 'aragpt2-large', 'aragpt2-mega']
SEGMENTED_MODELS = ['bert-base-arabert', 'bert-base-arabertv2',
'bert-large-arabertv2']
class ArbertmoPreprocessor:
"""
A Preprocessor class that cleans and preprocesses text for all models in the AraBERT repo.
    It can also un-process the text output of generative models
Args:
model_name (:obj:`str`): model name from the HuggingFace Models page without the aubmindlab tag. Defaults to "bert-base-arabertv02". Current accepted models are:
- :obj:`"bert-base-arabertv01"`: No farasa segmentation.
- :obj:`"bert-base-arabert"`: with farasa segmentation.
        - :obj:`"bert-base-arabertv02"`: No farasa segmentation.
        - :obj:`"bert-base-arabertv2"`: with farasa segmentation.
        - :obj:`"bert-large-arabertv02"`: No farasa segmentation.
- :obj:`"bert-large-arabertv2"`: with farasa segmentation.
- :obj:`"araelectra-base"`: No farasa segmentation.
- :obj:`"araelectra-base-discriminator"`: No farasa segmentation.
- :obj:`"araelectra-base-generator"`: No farasa segmentation.
- :obj:`"aragpt2-base"`: No farasa segmentation.
- :obj:`"aragpt2-medium"`: No farasa segmentation.
- :obj:`"aragpt2-large"`: No farasa segmentation.
- :obj:`"aragpt2-mega"`: No farasa segmentation.
keep_emojis(:obj: `bool`): don't remove emojis while preprocessing. Defaults to False
    remove_html_markup(:obj: `bool`): Whether to remove HTML artifacts; should be set to False when preprocessing TyDi QA. Defaults to True
replace_urls_emails_mentions(:obj: `bool`): Whether to replace email urls and mentions by special tokens. Defaults to True
strip_tashkeel(:obj: `bool`): remove diacritics (FATHATAN, DAMMATAN, KASRATAN, FATHA, DAMMA, KASRA, SUKUN, SHADDA)
strip_tatweel(:obj: `bool`): remove tatweel '\\u0640'
insert_white_spaces(:obj: `bool`): insert whitespace before and after all non Arabic digits or English digits or Arabic and English Alphabet or the 2 brackets, then inserts whitespace between words and numbers or numbers and words
remove_elongation(:obj: `bool`): replace repetition of more than 2 non-digit character with 2 of this character
Returns:
        ArbertmoPreprocessor: the preprocessor class
    Example:
        from preprocess import ArbertmoPreprocessor
        arabert_prep = ArbertmoPreprocessor("aubmindlab/bert-base-arabertv2")
arabert_prep.preprocess("SOME ARABIC TEXT")
"""
def __init__(self, model_name, keep_emojis=False, remove_html_markup=
True, replace_urls_emails_mentions=True, strip_tashkeel=True,
strip_tatweel=True, insert_white_spaces=True, remove_elongation=True):
"""
model_name (:obj:`str`): model name from the HuggingFace Models page without the aubmindlab tag. Defaults to "bert-base-arabertv02". Current accepted models are:
- :obj:`"bert-base-arabertv01"`: No farasa segmentation.
- :obj:`"bert-base-arabert"`: with farasa segmentation.
            - :obj:`"bert-base-arabertv02"`: No farasa segmentation.
            - :obj:`"bert-base-arabertv2"`: with farasa segmentation.
            - :obj:`"bert-large-arabertv02"`: No farasa segmentation.
- :obj:`"bert-large-arabertv2"`: with farasa segmentation.
- :obj:`"araelectra-base"`: No farasa segmentation.
- :obj:`"araelectra-base-discriminator"`: No farasa segmentation.
- :obj:`"araelectra-base-generator"`: No farasa segmentation.
- :obj:`"aragpt2-base"`: No farasa segmentation.
- :obj:`"aragpt2-medium"`: No farasa segmentation.
- :obj:`"aragpt2-large"`: No farasa segmentation.
- :obj:`"aragpt2-mega"`: No farasa segmentation.
keep_emojis(:obj: `bool`): don't remove emojis while preprocessing. Defaults to False
        remove_html_markup(:obj: `bool`): Whether to remove HTML artifacts; should be set to False when preprocessing TyDi QA. Defaults to True
replace_urls_emails_mentions(:obj: `bool`): Whether to replace email urls and mentions by special tokens. Defaults to True
strip_tashkeel(:obj: `bool`): remove diacritics (FATHATAN, DAMMATAN, KASRATAN, FATHA, DAMMA, KASRA, SUKUN, SHADDA)
strip_tatweel(:obj: `bool`): remove tatweel '\\u0640'
insert_white_spaces(:obj: `bool`): insert whitespace before and after all non Arabic digits or English digits or Arabic and English Alphabet or the 2 brackets, then inserts whitespace between words and numbers or numbers and words
remove_elongation(:obj: `bool`): replace repetition of more than 2 non-digit character with 2 of this character
"""
model_name = model_name.replace('aubmindlab/', '')
if model_name not in ACCEPTED_MODELS:
logging.warning(
"Model provided is not in the accepted model list. Assuming you don't want Farasa Segmentation"
)
self.model_name = 'bert-base-arabertv02'
else:
self.model_name = model_name
if self.model_name in SEGMENTED_MODELS:
logging.info(
'Selected Model requires pre-segmentation, Initializing FarasaSegmenter'
)
try:
from farasa.segmenter import FarasaSegmenter
self.farasa_segmenter = FarasaSegmenter(interactive=True)
except:
logging.warning(
                    'farasapy is not installed, you will not be able to process text for AraBERTv1 and v2. Install it using: pip install farasapy'
)
else:
logging.info(
"Selected Model doesn't require pre-segmentation, skipping FarasaSegmenter initialization"
)
self.keep_emojis = keep_emojis
if self.keep_emojis:
import emoji
self.emoji = emoji
if self.model_name in SEGMENTED_MODELS:
logging.warning(
'Keeping tweets with Farasa Segmentation is 10 times slower'
)
self.remove_html_markup = remove_html_markup
self.replace_urls_emails_mentions = replace_urls_emails_mentions
self.strip_tashkeel = strip_tashkeel
self.strip_tatweel = strip_tatweel
self.insert_white_spaces = insert_white_spaces
self.remove_elongation = remove_elongation
def preprocess(self, text):
"""
        Preprocess takes an input text line and applies the same preprocessing used in AraBERT
pretraining
Args:
            text (:obj:`str`): input text string
Returns:
string: A preprocessed string depending on which model was selected
"""
if self.model_name == 'bert-base-arabert':
return self._old_preprocess(text, do_farasa_tokenization=True)
if self.model_name == 'bert-base-arabertv01':
return self._old_preprocess(text, do_farasa_tokenization=False)
text = str(text)
text = html.unescape(text)
if self.strip_tashkeel:
text = araby.strip_tashkeel(text)
if self.strip_tatweel:
text = araby.strip_tatweel(text)
if self.replace_urls_emails_mentions:
for reg in url_regexes:
text = re.sub(reg, ' [رابط] ', text)
for reg in email_regexes:
text = re.sub(reg, ' [بريد] ', text)
text = re.sub(user_mention_regex, ' [مستخدم] ', text)
if self.remove_html_markup:
text = re.sub('<br />', ' ', text)
text = re.sub('</?[^>]+>', ' ', text)
if self.remove_elongation:
text = self._remove_elongation(text)
if self.insert_white_spaces:
text = re.sub('([^0-9ء-غف-ي٠-٩a-zA-Z\\[\\]])', ' \\1 ', text)
text = re.sub('(\\d+)([ء-غف-ي٠-٬]+)', ' \\1 \\2 ', text)
text = re.sub('([ء-غف-ي٠-٬]+)(\\d+)', ' \\1 \\2 ', text)
if self.keep_emojis:
emoji_regex = ''.join(list(self.emoji.UNICODE_EMOJI['en'].keys()))
rejected_chars_regex2 = '[^%s%s]' % (chars_regex, emoji_regex)
text = re.sub(rejected_chars_regex2, ' ', text)
else:
text = re.sub(rejected_chars_regex, ' ', text)
text = ' '.join(text.replace('️', '').split())
if (self.model_name == 'bert-base-arabertv2' or self.model_name ==
'bert-large-arabertv2'):
if self.keep_emojis:
new_text = []
for word in text.split():
if word in list(self.emoji.UNICODE_EMOJI['en'].keys()):
new_text.append(word)
else:
new_text.append(self.farasa_segmenter.segment(word))
text = ' '.join(new_text)
else:
text = self.farasa_segmenter.segment(text)
return self._farasa_segment(text)
return text
    def unpreprocess(self, text, desegment=True):
        """Re-formats the text to a classic format where punctuation, brackets and parentheses are not separated by whitespaces.
The objective is to make the generated text of any model appear natural and not preprocessed.
Args:
text (str): input text to be un-preprocessed
desegment (bool, optional): [whether or not to remove farasa pre-segmentation before]. Defaults to True.
Returns:
str: The unpreprocessed (and possibly Farasa-desegmented) text.
"""
if self.model_name in SEGMENTED_MODELS and desegment:
text = self.desegment(text)
text = re.sub(white_spaced_double_quotation_regex, '"' + '\\1' +
'"', text)
text = re.sub(white_spaced_single_quotation_regex, "'" + '\\1' +
"'", text)
text = re.sub(white_spaced_back_quotation_regex, '\\`' + '\\1' +
'\\`', text)
        text = re.sub(white_spaced_em_dash, '\\—' + '\\1' + '\\—', text)
text = text.replace('.', ' . ')
text = ' '.join(text.split())
text = re.sub('(\\d+) \\. (\\d+)', '\\1.\\2', text)
text = re.sub('(\\d+) \\, (\\d+)', '\\1,\\2', text)
text = re.sub(left_and_right_spaced_chars, '\\1', text)
text = re.sub(left_spaced_chars, '\\1', text)
text = re.sub(right_spaced_chars, '\\1', text)
return text
def desegment(self, text):
"""
Use this function if sentence tokenization was done using
`from arabert.preprocess_arabert import preprocess` with Farasa enabled
AraBERT segmentation using Farasa adds a space after the '+' for prefixes,
        and a space before the '+' for suffixes
Example:
>>> desegment('ال+ دراس +ات')
الدراسات
"""
text = text.replace('+ ', '+')
text = text.replace(' +', '+')
text = ' '.join([self._desegmentword(word) for word in text.split(' ')]
)
return text
def _desegmentword(self, orig_word: str) ->str:
"""
Word segmentor that takes a Farasa Segmented Word and removes the '+' signs
Example:
>>> _desegmentword("ال+يومي+ة")
اليومية
"""
word = orig_word.replace('ل+ال+', 'لل')
if 'ال+ال' not in orig_word:
word = word.replace('ل+ال', 'لل')
word = word.replace('+', '')
word = word.replace('للل', 'لل')
return word
def _old_preprocess(self, text, do_farasa_tokenization):
"""
AraBERTv1 preprocessing Function
"""
text = str(text)
if self.strip_tashkeel:
text = araby.strip_tashkeel(text)
text = re.sub('\\d+\\/[ء-ي]+\\/\\d+\\]', '', text)
text = re.sub('ـ', '', text)
text = re.sub('[«»]', ' " ', text)
if self.replace_urls_emails_mentions:
text = re.sub(regex_url_step1, '[رابط]', text)
text = re.sub(regex_url_step2, '[رابط]', text)
text = re.sub(regex_url, '[رابط]', text)
text = re.sub(regex_email, '[بريد]', text)
text = re.sub(regex_mention, '[مستخدم]', text)
text = re.sub('…', '\\.', text).strip()
text = self._remove_redundant_punct(text)
if self.replace_urls_emails_mentions:
text = re.sub('\\[ رابط \\]|\\[ رابط\\]|\\[رابط \\]',
' [رابط] ', text)
text = re.sub('\\[ بريد \\]|\\[ بريد\\]|\\[بريد \\]',
' [بريد] ', text)
text = re.sub('\\[ مستخدم \\]|\\[ مستخدم\\]|\\[مستخدم \\]',
' [مستخدم] ', text)
if self.remove_elongation:
text = self._remove_elongation(text)
if self.insert_white_spaces:
text = re.sub('([^0-9ء-غف-٩ٱ-ٳa-zA-Z\\[\\]])', ' \\1 ', text)
if do_farasa_tokenization:
text = self._tokenize_arabic_words_farasa(text)
return text.strip()
def _farasa_segment(self, text):
line_farasa = text.split()
segmented_line = []
for index, word in enumerate(line_farasa):
if word in ['[', ']']:
continue
if word in ['رابط', 'بريد', 'مستخدم'] and line_farasa[index - 1
] in ['[', ']']:
segmented_line.append('[' + word + ']')
continue
if '+' not in word:
segmented_line.append(word)
continue
segmented_word = self._split_farasa_output(word)
segmented_line.extend(segmented_word)
return ' '.join(segmented_line)
def _split_farasa_output(self, word):
segmented_word = []
temp_token = ''
for i, c in enumerate(word):
if c == '+':
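                # the KAF token 'ك' can be either a prefix or a suffix, so disambiguate it by its position in the word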
if temp_token == 'ك':
if i == 1:
segmented_word.append(temp_token + '+')
temp_token = ''
elif word[i - 2] == '+':
if segmented_word[-1][-1] == '+':
segmented_word.append(temp_token + '+')
temp_token = ''
else:
segmented_word.append('+' + temp_token)
temp_token = ''
elif temp_token in prefix_list:
segmented_word.append(temp_token + '+')
temp_token = ''
elif temp_token in suffix_list:
segmented_word.append('+' + temp_token)
temp_token = ''
else:
segmented_word.append(temp_token)
temp_token = ''
continue
temp_token += c
if temp_token != '':
if temp_token in suffix_list:
segmented_word.append('+' + temp_token)
else:
segmented_word.append(temp_token)
return segmented_word
def _tokenize_arabic_words_farasa(self, line_input):
if self.keep_emojis:
line_farasa = []
for word in line_input.split():
if word in list(self.emoji.UNICODE_EMOJI['en'].keys()):
line_farasa.append(word)
else:
line_farasa.append(self.farasa_segmenter.segment(word))
else:
line_farasa = self.farasa_segmenter.segment(line_input).split()
segmented_line = []
for index, word in enumerate(line_farasa):
if word in ['[', ']']:
continue
if word in ['رابط', 'بريد', 'مستخدم'] and line_farasa[index - 1
] in ['[', ']']:
segmented_line.append('[' + word + ']')
continue
segmented_word = []
for token in word.split('+'):
if token in prefix_list:
segmented_word.append(token + '+')
elif token in suffix_list:
segmented_word.append('+' + token)
else:
segmented_word.append(token)
segmented_line.extend(segmented_word)
return ' '.join(segmented_line)
def _remove_elongation(self, text):
"""
:param text: the input text to remove elongation
        :return: the de-elongated text
"""
for index_ in range(len(re.findall(regex_tatweel, text))):
elongation = re.search(regex_tatweel, text)
if elongation:
elongation_pattern = elongation.group()
elongation_replacement = elongation_pattern[0]
elongation_pattern = re.escape(elongation_pattern)
text = re.sub(elongation_pattern, elongation_replacement,
text, flags=re.MULTILINE)
else:
break
return text
def _remove_redundant_punct(self, text):
text_ = text
result = re.search(redundant_punct_pattern, text)
dif = 0
while result:
sub = result.group()
sub = sorted(set(sub), key=sub.index)
sub = ' ' + ''.join(list(sub)) + ' '
text = ''.join((text[:result.span()[0] + dif], sub, text[result
.span()[1] + dif:]))
text_ = ''.join((text_[:result.span()[0]], text_[result.span()[
1]:])).strip()
dif = abs(len(text) - len(text_))
result = re.search(redundant_punct_pattern, text_)
text = re.sub('\\s+', ' ', text)
return text.strip()
prefix_list = ['ال', 'و', 'ف', 'ب', 'ك', 'ل', 'لل', 'ال', 'و', 'ف', 'ب',
'ك', 'ل', 'لل', 'س']
suffix_list = ['ه', 'ها', 'ك', 'ي', 'هما', 'كما', 'نا', 'كم', 'هم', 'هن',
'كن', 'ا', 'ان', 'ين', 'ون', 'وا', 'ات', 'ت', 'ن', 'ة', 'ه', 'ها', 'ك',
'ي', 'هما', 'كما', 'نا', 'كم', 'هم', 'هن', 'كن', 'ا', 'ان', 'ين', 'ون',
'وا', 'ات', 'ت', 'ن', 'ة']
other_tokens = ['[رابط]', '[مستخدم]', '[بريد]']
prefix_symbols = [(x + '+') for x in prefix_list]
suffix_symblos = [('+' + x) for x in suffix_list]
never_split_tokens = list(set(prefix_symbols + suffix_symblos + other_tokens))
url_regexes = [
'(http(s)?:\\/\\/.)?(www\\.)?[-a-zA-Z0-9@:%._\\+~#=]{2,256}\\.[a-z]{2,6}\\b([-a-zA-Z0-9@:%_\\+.~#?&//=]*)'
, '@(https?|ftp)://(-\\.)?([^\\s/?\\.#-]+\\.?)+(/[^\\s]*)?$@iS',
'http[s]?://[a-zA-Z0-9_\\-./~\\?=%&]+', 'www[a-zA-Z0-9_\\-?=%&/.~]+',
'[a-zA-Z]+\\.com', '(?=http)[^\\s]+', '(?=www)[^\\s]+', '://']
user_mention_regex = '@[\\w\\d]+'
email_regexes = ['[\\w-]+@([\\w-]+\\.)+[\\w-]+', '\\S+@\\S+']
redundant_punct_pattern = (
'([!\\"#\\$%\\\'\\(\\)\\*\\+,\\.:;\\-<=·>?@\\[\\\\\\]\\^_ـ`{\\|}~—٪’،؟`୍“؛”ۚ【»؛\\s+«–…‘]{2,})'
)
regex_tatweel = '(\\D)\\1{2,}'
rejected_chars_regex = (
'[^0-9\\u0621-\\u063A\\u0640-\\u066C\\u0671-\\u0674a-zA-Z\\[\\]!\\"#\\$%\\\'\\(\\)\\*\\+,\\.:;\\-<=·>?@\\[\\\\\\]\\^_ـ`{\\|}~—٪’،؟`୍“؛”ۚ»؛\\s+«–…‘]'
)
regex_url_step1 = '(?=http)[^\\s]+'
regex_url_step2 = '(?=www)[^\\s]+'
regex_url = (
'(http(s)?:\\/\\/.)?(www\\.)?[-a-zA-Z0-9@:%._\\+~#=]{2,256}\\.[a-z]{2,6}\\b([-a-zA-Z0-9@:%_\\+.~#?&//=]*)'
)
regex_mention = '@[\\w\\d]+'
regex_email = '\\S+@\\S+'
chars_regex = (
'0-9\\u0621-\\u063A\\u0640-\\u066C\\u0671-\\u0674a-zA-Z\\[\\]!\\"#\\$%\\\'\\(\\)\\*\\+,\\.:;\\-<=·>?@\\[\\\\\\]\\^_ـ`{\\|}~—٪’،؟`୍“؛”ۚ»؛\\s+«–…‘'
)
white_spaced_double_quotation_regex = '\\"\\s+([^"]+)\\s+\\"'
white_spaced_single_quotation_regex = "\\'\\s+([^']+)\\s+\\'"
white_spaced_back_quotation_regex = '\\`\\s+([^`]+)\\s+\\`'
white_spaced_em_dash = '\\—\\s+([^—]+)\\s+\\—'
left_spaced_chars = ' ([\\]!#\\$%\\),\\.:;\\?}٪’،؟”؛…»·])'
right_spaced_chars = '([\\[\\(\\{“«‘*\\~]) '
left_and_right_spaced_chars = ' ([\\+\\-\\<\\=\\>\\@\\\\\\^\\_\\|\\–]) '
<|reserved_special_token_1|>
import html
import logging
import re
import pyarabic.araby as araby
ACCEPTED_MODELS = [
"bert-base-arabertv01",
"bert-base-arabert",
"bert-base-arabertv02",
"bert-base-arabertv2",
"bert-large-arabertv02",
"bert-large-arabertv2",
"araelectra-base",
"araelectra-base-discriminator",
"araelectra-base-generator",
"aragpt2-base",
"aragpt2-medium",
"aragpt2-large",
"aragpt2-mega",
]
SEGMENTED_MODELS = [
"bert-base-arabert",
"bert-base-arabertv2",
"bert-large-arabertv2",
]
class ArbertmoPreprocessor:
"""
A Preprocessor class that cleans and preprocesses text for all models in the AraBERT repo.
    It can also un-process the text output of generative models
Args:
model_name (:obj:`str`): model name from the HuggingFace Models page without the aubmindlab tag. Defaults to "bert-base-arabertv02". Current accepted models are:
- :obj:`"bert-base-arabertv01"`: No farasa segmentation.
- :obj:`"bert-base-arabert"`: with farasa segmentation.
        - :obj:`"bert-base-arabertv02"`: No farasa segmentation.
        - :obj:`"bert-base-arabertv2"`: with farasa segmentation.
        - :obj:`"bert-large-arabertv02"`: No farasa segmentation.
- :obj:`"bert-large-arabertv2"`: with farasa segmentation.
- :obj:`"araelectra-base"`: No farasa segmentation.
- :obj:`"araelectra-base-discriminator"`: No farasa segmentation.
- :obj:`"araelectra-base-generator"`: No farasa segmentation.
- :obj:`"aragpt2-base"`: No farasa segmentation.
- :obj:`"aragpt2-medium"`: No farasa segmentation.
- :obj:`"aragpt2-large"`: No farasa segmentation.
- :obj:`"aragpt2-mega"`: No farasa segmentation.
keep_emojis(:obj: `bool`): don't remove emojis while preprocessing. Defaults to False
    remove_html_markup(:obj: `bool`): Whether to remove HTML artifacts; should be set to False when preprocessing TyDi QA. Defaults to True
replace_urls_emails_mentions(:obj: `bool`): Whether to replace email urls and mentions by special tokens. Defaults to True
strip_tashkeel(:obj: `bool`): remove diacritics (FATHATAN, DAMMATAN, KASRATAN, FATHA, DAMMA, KASRA, SUKUN, SHADDA)
strip_tatweel(:obj: `bool`): remove tatweel '\\u0640'
insert_white_spaces(:obj: `bool`): insert whitespace before and after all non Arabic digits or English digits or Arabic and English Alphabet or the 2 brackets, then inserts whitespace between words and numbers or numbers and words
remove_elongation(:obj: `bool`): replace repetition of more than 2 non-digit character with 2 of this character
Returns:
        ArbertmoPreprocessor: the preprocessor class
    Example:
        from preprocess import ArbertmoPreprocessor
        arabert_prep = ArbertmoPreprocessor("aubmindlab/bert-base-arabertv2")
arabert_prep.preprocess("SOME ARABIC TEXT")
"""
def __init__(
self,
model_name,
keep_emojis=False,
remove_html_markup=True,
replace_urls_emails_mentions=True,
strip_tashkeel=True,
strip_tatweel=True,
insert_white_spaces=True,
remove_elongation=True,
):
"""
model_name (:obj:`str`): model name from the HuggingFace Models page without the aubmindlab tag. Defaults to "bert-base-arabertv02". Current accepted models are:
- :obj:`"bert-base-arabertv01"`: No farasa segmentation.
- :obj:`"bert-base-arabert"`: with farasa segmentation.
            - :obj:`"bert-base-arabertv02"`: No farasa segmentation.
            - :obj:`"bert-base-arabertv2"`: with farasa segmentation.
            - :obj:`"bert-large-arabertv02"`: No farasa segmentation.
- :obj:`"bert-large-arabertv2"`: with farasa segmentation.
- :obj:`"araelectra-base"`: No farasa segmentation.
- :obj:`"araelectra-base-discriminator"`: No farasa segmentation.
- :obj:`"araelectra-base-generator"`: No farasa segmentation.
- :obj:`"aragpt2-base"`: No farasa segmentation.
- :obj:`"aragpt2-medium"`: No farasa segmentation.
- :obj:`"aragpt2-large"`: No farasa segmentation.
- :obj:`"aragpt2-mega"`: No farasa segmentation.
keep_emojis(:obj: `bool`): don't remove emojis while preprocessing. Defaults to False
        remove_html_markup(:obj: `bool`): Whether to remove HTML artifacts; should be set to False when preprocessing TyDi QA. Defaults to True
replace_urls_emails_mentions(:obj: `bool`): Whether to replace email urls and mentions by special tokens. Defaults to True
strip_tashkeel(:obj: `bool`): remove diacritics (FATHATAN, DAMMATAN, KASRATAN, FATHA, DAMMA, KASRA, SUKUN, SHADDA)
strip_tatweel(:obj: `bool`): remove tatweel '\\u0640'
insert_white_spaces(:obj: `bool`): insert whitespace before and after all non Arabic digits or English digits or Arabic and English Alphabet or the 2 brackets, then inserts whitespace between words and numbers or numbers and words
remove_elongation(:obj: `bool`): replace repetition of more than 2 non-digit character with 2 of this character
"""
model_name = model_name.replace("aubmindlab/", "")
if model_name not in ACCEPTED_MODELS:
logging.warning(
"Model provided is not in the accepted model list. Assuming you don't want Farasa Segmentation"
)
self.model_name = "bert-base-arabertv02"
else:
self.model_name = model_name
if self.model_name in SEGMENTED_MODELS:
logging.info(
"Selected Model requires pre-segmentation, Initializing FarasaSegmenter"
)
try:
from farasa.segmenter import FarasaSegmenter
self.farasa_segmenter = FarasaSegmenter(interactive=True)
except:
                logging.warning(
                    "farasapy is not installed, you will not be able to process text for AraBERTv1 and v2. Install it using: pip install farasapy"
)
else:
logging.info(
"Selected Model doesn't require pre-segmentation, skipping FarasaSegmenter initialization"
)
self.keep_emojis = keep_emojis
if self.keep_emojis:
import emoji
self.emoji = emoji
if self.model_name in SEGMENTED_MODELS:
logging.warning(
"Keeping tweets with Farasa Segmentation is 10 times slower"
)
self.remove_html_markup = remove_html_markup
self.replace_urls_emails_mentions = replace_urls_emails_mentions
self.strip_tashkeel = strip_tashkeel
self.strip_tatweel = strip_tatweel
self.insert_white_spaces = insert_white_spaces
self.remove_elongation = remove_elongation
def preprocess(self, text):
"""
        Preprocess takes an input text line and applies the same preprocessing used in AraBERT
pretraining
Args:
            text (:obj:`str`): input text string
Returns:
string: A preprocessed string depending on which model was selected
"""
if self.model_name == "bert-base-arabert":
return self._old_preprocess(
text,
do_farasa_tokenization=True,
)
if self.model_name == "bert-base-arabertv01":
return self._old_preprocess(text, do_farasa_tokenization=False)
text = str(text)
text = html.unescape(text)
if self.strip_tashkeel:
text = araby.strip_tashkeel(text)
if self.strip_tatweel:
text = araby.strip_tatweel(text)
if self.replace_urls_emails_mentions:
# replace all possible URLs
for reg in url_regexes:
text = re.sub(reg, " [رابط] ", text)
            # Replace Emails with [بريد]
for reg in email_regexes:
text = re.sub(reg, " [بريد] ", text)
# replace mentions with [مستخدم]
text = re.sub(user_mention_regex, " [مستخدم] ", text)
if self.remove_html_markup:
# remove html line breaks
text = re.sub("<br />", " ", text)
# remove html markup
text = re.sub("</?[^>]+>", " ", text)
# remove repeated characters >2
if self.remove_elongation:
text = self._remove_elongation(text)
# insert whitespace before and after all non Arabic digits or English Digits and Alphabet and the 2 brackets
if self.insert_white_spaces:
text = re.sub(
"([^0-9\u0621-\u063A\u0641-\u064A\u0660-\u0669a-zA-Z\[\]])",
r" \1 ",
text,
)
# insert whitespace between words and numbers or numbers and words
text = re.sub(
"(\d+)([\u0621-\u063A\u0641-\u064A\u0660-\u066C]+)", r" \1 \2 ", text
)
text = re.sub(
"([\u0621-\u063A\u0641-\u064A\u0660-\u066C]+)(\d+)", r" \1 \2 ", text
)
# remove unwanted characters
if self.keep_emojis:
emoji_regex = "".join(list(self.emoji.UNICODE_EMOJI["en"].keys()))
rejected_chars_regex2 = "[^%s%s]" % (chars_regex, emoji_regex)
text = re.sub(rejected_chars_regex2, " ", text)
else:
text = re.sub(rejected_chars_regex, " ", text)
# remove extra spaces
text = " ".join(text.replace("\uFE0F", "").split())
if (
self.model_name == "bert-base-arabertv2"
or self.model_name == "bert-large-arabertv2"
):
if self.keep_emojis:
new_text = []
for word in text.split():
if word in list(self.emoji.UNICODE_EMOJI["en"].keys()):
new_text.append(word)
else:
new_text.append(self.farasa_segmenter.segment(word))
text = " ".join(new_text)
else:
text = self.farasa_segmenter.segment(text)
return self._farasa_segment(text)
# ALl the other models dont require Farasa Segmentation
return text
    def unpreprocess(self, text, desegment=True):
        """Re-formats the text to a classic format where punctuation, brackets and parentheses are not separated by whitespaces.
The objective is to make the generated text of any model appear natural and not preprocessed.
Args:
text (str): input text to be un-preprocessed
desegment (bool, optional): [whether or not to remove farasa pre-segmentation before]. Defaults to True.
Returns:
str: The unpreprocessed (and possibly Farasa-desegmented) text.
"""
if self.model_name in SEGMENTED_MODELS and desegment:
text = self.desegment(text)
# removes the spaces around quotation marks ex: i " ate " an apple --> i "ate" an apple
# https://stackoverflow.com/a/53436792/5381220
text = re.sub(white_spaced_double_quotation_regex, '"' + r"\1" + '"', text)
text = re.sub(white_spaced_single_quotation_regex, "'" + r"\1" + "'", text)
text = re.sub(white_spaced_back_quotation_regex, "\`" + r"\1" + "\`", text)
        text = re.sub(white_spaced_em_dash, "\—" + r"\1" + "\—", text)
# during generation, sometimes the models don't put a space after the dot, this handles it
text = text.replace(".", " . ")
text = " ".join(text.split())
# handle decimals
text = re.sub(r"(\d+) \. (\d+)", r"\1.\2", text)
text = re.sub(r"(\d+) \, (\d+)", r"\1,\2", text)
text = re.sub(left_and_right_spaced_chars, r"\1", text)
text = re.sub(left_spaced_chars, r"\1", text)
text = re.sub(right_spaced_chars, r"\1", text)
return text
def desegment(self, text):
"""
Use this function if sentence tokenization was done using
`from arabert.preprocess_arabert import preprocess` with Farasa enabled
AraBERT segmentation using Farasa adds a space after the '+' for prefixes,
        and a space before the '+' for suffixes
Example:
>>> desegment('ال+ دراس +ات')
الدراسات
"""
text = text.replace("+ ", "+")
text = text.replace(" +", "+")
text = " ".join([self._desegmentword(word) for word in text.split(" ")])
return text
def _desegmentword(self, orig_word: str) -> str:
"""
Word segmentor that takes a Farasa Segmented Word and removes the '+' signs
Example:
>>> _desegmentword("ال+يومي+ة")
اليومية
"""
word = orig_word.replace("ل+ال+", "لل")
if "ال+ال" not in orig_word:
word = word.replace("ل+ال", "لل")
word = word.replace("+", "")
word = word.replace("للل", "لل")
return word
def _old_preprocess(self, text, do_farasa_tokenization):
"""
AraBERTv1 preprocessing Function
"""
text = str(text)
if self.strip_tashkeel:
text = araby.strip_tashkeel(text)
text = re.sub(r"\d+\/[ء-ي]+\/\d+\]", "", text)
text = re.sub("ـ", "", text)
text = re.sub("[«»]", ' " ', text)
if self.replace_urls_emails_mentions:
# replace the [رابط] token with space if you want to clean links
text = re.sub(regex_url_step1, "[رابط]", text)
text = re.sub(regex_url_step2, "[رابط]", text)
text = re.sub(regex_url, "[رابط]", text)
text = re.sub(regex_email, "[بريد]", text)
text = re.sub(regex_mention, "[مستخدم]", text)
text = re.sub("…", r"\.", text).strip()
text = self._remove_redundant_punct(text)
if self.replace_urls_emails_mentions:
text = re.sub(r"\[ رابط \]|\[ رابط\]|\[رابط \]", " [رابط] ", text)
text = re.sub(r"\[ بريد \]|\[ بريد\]|\[بريد \]", " [بريد] ", text)
text = re.sub(r"\[ مستخدم \]|\[ مستخدم\]|\[مستخدم \]", " [مستخدم] ", text)
if self.remove_elongation:
text = self._remove_elongation(text)
if self.insert_white_spaces:
text = re.sub(
"([^0-9\u0621-\u063A\u0641-\u0669\u0671-\u0673a-zA-Z\[\]])",
r" \1 ",
text,
)
if do_farasa_tokenization:
text = self._tokenize_arabic_words_farasa(text)
return text.strip()
def _farasa_segment(self, text):
line_farasa = text.split()
segmented_line = []
for index, word in enumerate(line_farasa):
if word in ["[", "]"]:
continue
if word in ["رابط", "بريد", "مستخدم"] and line_farasa[index - 1] in [
"[",
"]",
]:
segmented_line.append("[" + word + "]")
continue
if "+" not in word:
segmented_line.append(word)
continue
segmented_word = self._split_farasa_output(word)
segmented_line.extend(segmented_word)
return " ".join(segmented_line)
def _split_farasa_output(self, word):
segmented_word = []
temp_token = ""
for i, c in enumerate(word):
if c == "+":
# if the token is KAF, it could be a suffix or prefix
if temp_token == "ك":
# if we are at the second token, then KAF is surely a prefix
if i == 1:
segmented_word.append(temp_token + "+")
temp_token = ""
# If the KAF token is between 2 tokens
elif word[i - 2] == "+":
# if the previous token is prefix, then this KAF must be a prefix
if segmented_word[-1][-1] == "+":
segmented_word.append(temp_token + "+")
temp_token = ""
# else it is a suffix, this KAF could not be a second suffix
else:
segmented_word.append("+" + temp_token)
temp_token = ""
# if Kaf is at the end, this is handled with the statement after the loop
elif temp_token in prefix_list:
segmented_word.append(temp_token + "+")
temp_token = ""
elif temp_token in suffix_list:
segmented_word.append("+" + temp_token)
temp_token = ""
else:
segmented_word.append(temp_token)
temp_token = ""
continue
temp_token += c
if temp_token != "":
if temp_token in suffix_list:
segmented_word.append("+" + temp_token)
else:
segmented_word.append(temp_token)
return segmented_word
def _tokenize_arabic_words_farasa(self, line_input):
if self.keep_emojis:
# insert whitespace before and after all non Arabic digits or English Digits and Alphabet and the 2 brackets
line_farasa = []
for word in line_input.split():
if word in list(self.emoji.UNICODE_EMOJI["en"].keys()):
line_farasa.append(word)
else:
line_farasa.append(self.farasa_segmenter.segment(word))
else:
line_farasa = self.farasa_segmenter.segment(line_input).split()
segmented_line = []
for index, word in enumerate(line_farasa):
if word in ["[", "]"]:
continue
if word in ["رابط", "بريد", "مستخدم"] and line_farasa[index - 1] in [
"[",
"]",
]:
segmented_line.append("[" + word + "]")
continue
segmented_word = []
for token in word.split("+"):
if token in prefix_list:
segmented_word.append(token + "+")
elif token in suffix_list:
segmented_word.append("+" + token)
else:
segmented_word.append(token)
segmented_line.extend(segmented_word)
return " ".join(segmented_line)
def _remove_elongation(self, text):
"""
:param text: the input text to remove elongation
:return: delongated text
"""
# loop over the number of times the regex matched the text
for index_ in range(len(re.findall(regex_tatweel, text))):
elongation = re.search(regex_tatweel, text)
if elongation:
elongation_pattern = elongation.group()
elongation_replacement = elongation_pattern[0]
elongation_pattern = re.escape(elongation_pattern)
text = re.sub(
elongation_pattern, elongation_replacement, text, flags=re.MULTILINE
)
else:
break
return text
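    # Illustrative sketch (added for clarity, not part of the original source):
    # regex_tatweel matches a run of 3 or more identical non-digit characters,
    # and each run is collapsed to a single character,
    # e.g. _remove_elongation("رااااائع") -> "رائع".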
def _remove_redundant_punct(self, text):
text_ = text
result = re.search(redundant_punct_pattern, text)
dif = 0
while result:
sub = result.group()
sub = sorted(set(sub), key=sub.index)
sub = " " + "".join(list(sub)) + " "
text = "".join(
(text[: result.span()[0] + dif], sub, text[result.span()[1] + dif :])
)
text_ = "".join(
(text_[: result.span()[0]], text_[result.span()[1] :])
).strip()
dif = abs(len(text) - len(text_))
result = re.search(redundant_punct_pattern, text_)
text = re.sub(r"\s+", " ", text)
return text.strip()
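    # Illustrative sketch (added for clarity, not part of the original source):
    # a run of repeated punctuation is reduced to one occurrence of each distinct
    # mark, padded with spaces, e.g. _remove_redundant_punct("رائع!!!؟؟") -> "رائع !؟".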
prefix_list = [
"ال",
"و",
"ف",
"ب",
"ك",
"ل",
"لل",
"\u0627\u0644",
"\u0648",
"\u0641",
"\u0628",
"\u0643",
"\u0644",
"\u0644\u0644",
"س",
]
suffix_list = [
"ه",
"ها",
"ك",
"ي",
"هما",
"كما",
"نا",
"كم",
"هم",
"هن",
"كن",
"ا",
"ان",
"ين",
"ون",
"وا",
"ات",
"ت",
"ن",
"ة",
"\u0647",
"\u0647\u0627",
"\u0643",
"\u064a",
"\u0647\u0645\u0627",
"\u0643\u0645\u0627",
"\u0646\u0627",
"\u0643\u0645",
"\u0647\u0645",
"\u0647\u0646",
"\u0643\u0646",
"\u0627",
"\u0627\u0646",
"\u064a\u0646",
"\u0648\u0646",
"\u0648\u0627",
"\u0627\u062a",
"\u062a",
"\u0646",
"\u0629",
]
other_tokens = ["[رابط]", "[مستخدم]", "[بريد]"]
# the never_split list is used with the transformers library
prefix_symbols = [x + "+" for x in prefix_list]
suffix_symbols = ["+" + x for x in suffix_list]
never_split_tokens = list(set(prefix_symbols + suffix_symbols + other_tokens))
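# Hedged usage sketch (added for illustration, not part of the original source):
# the never_split list is typically passed to a slow (Python) transformers
# tokenizer so the segmentation markers survive wordpiece tokenization.
# Kept as comments to avoid import-time side effects; exact kwargs may vary by
# transformers version, and the model name below is only an example.
#
#   from transformers import BertTokenizer
#   tokenizer = BertTokenizer.from_pretrained(
#       "aubmindlab/bert-base-arabertv2",
#       never_split=never_split_tokens,
#   )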
url_regexes = [
r"(http(s)?:\/\/.)?(www\.)?[-a-zA-Z0-9@:%._\+~#=]{2,256}\.[a-z]{2,6}\b([-a-zA-Z0-9@:%_\+.~#?&//=]*)",
r"@(https?|ftp)://(-\.)?([^\s/?\.#-]+\.?)+(/[^\s]*)?$@iS",
r"http[s]?://[a-zA-Z0-9_\-./~\?=%&]+",
r"www[a-zA-Z0-9_\-?=%&/.~]+",
r"[a-zA-Z]+\.com",
r"(?=http)[^\s]+",
r"(?=www)[^\s]+",
r"://",
]
user_mention_regex = r"@[\w\d]+"
email_regexes = [r"[\w-]+@([\w-]+\.)+[\w-]+", r"\S+@\S+"]
redundant_punct_pattern = (
r"([!\"#\$%\'\(\)\*\+,\.:;\-<=·>?@\[\\\]\^_ـ`{\|}~—٪’،؟`୍“؛”ۚ【»؛\s+«–…‘]{2,})"
)
regex_tatweel = r"(\D)\1{2,}"
rejected_chars_regex = r"[^0-9\u0621-\u063A\u0640-\u066C\u0671-\u0674a-zA-Z\[\]!\"#\$%\'\(\)\*\+,\.:;\-<=·>?@\[\\\]\^_ـ`{\|}~—٪’،؟`୍“؛”ۚ»؛\s+«–…‘]"
regex_url_step1 = r"(?=http)[^\s]+"
regex_url_step2 = r"(?=www)[^\s]+"
regex_url = r"(http(s)?:\/\/.)?(www\.)?[-a-zA-Z0-9@:%._\+~#=]{2,256}\.[a-z]{2,6}\b([-a-zA-Z0-9@:%_\+.~#?&//=]*)"
regex_mention = r"@[\w\d]+"
regex_email = r"\S+@\S+"
chars_regex = r"0-9\u0621-\u063A\u0640-\u066C\u0671-\u0674a-zA-Z\[\]!\"#\$%\'\(\)\*\+,\.:;\-<=·>?@\[\\\]\^_ـ`{\|}~—٪’،؟`୍“؛”ۚ»؛\s+«–…‘"
white_spaced_double_quotation_regex = r'\"\s+([^"]+)\s+\"'
white_spaced_single_quotation_regex = r"\'\s+([^']+)\s+\'"
white_spaced_back_quotation_regex = r"\`\s+([^`]+)\s+\`"
white_spaced_em_dash = r"\—\s+([^—]+)\s+\—"
left_spaced_chars = r" ([\]!#\$%\),\.:;\?}٪’،؟”؛…»·])"
right_spaced_chars = r"([\[\(\{“«‘*\~]) "
left_and_right_spaced_chars = r" ([\+\-\<\=\>\@\\\^\_\|\–]) "
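
# Minimal usage sketch (added for illustration, not part of the original module).
# It assumes pyarabic is installed and picks a model from ACCEPTED_MODELS that
# does not require the optional farasapy dependency.
if __name__ == "__main__":
    prep = ArbertmoPreprocessor("bert-base-arabertv02")
    print(prep.preprocess("زوروا موقعنا http://example.com يا @user"))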
flexible
{
"blob_id": "6c3f60f05adbebe521ba08d7a7e9fc10b1cc914f",
"index": 2907,
"step-1": "<mask token>\n\n\nclass ArbertmoPreprocessor:\n <mask token>\n\n def __init__(self, model_name, keep_emojis=False, remove_html_markup=\n True, replace_urls_emails_mentions=True, strip_tashkeel=True,\n strip_tatweel=True, insert_white_spaces=True, remove_elongation=True):\n \"\"\"\n model_name (:obj:`str`): model name from the HuggingFace Models page without the aubmindlab tag. Defaults to \"bert-base-arabertv02\". Current accepted models are:\n\n - :obj:`\"bert-base-arabertv01\"`: No farasa segmentation.\n - :obj:`\"bert-base-arabert\"`: with farasa segmentation.\n - :obj:`\"bert-base-arabertv02\"`: No farasas egmentation.\n - :obj:`\"bert-base-arabertv2\"`: with farasa segmentation.\n - :obj:`\"bert-large-arabertv02\"`: No farasas egmentation.\n - :obj:`\"bert-large-arabertv2\"`: with farasa segmentation.\n - :obj:`\"araelectra-base\"`: No farasa segmentation.\n - :obj:`\"araelectra-base-discriminator\"`: No farasa segmentation.\n - :obj:`\"araelectra-base-generator\"`: No farasa segmentation.\n - :obj:`\"aragpt2-base\"`: No farasa segmentation.\n - :obj:`\"aragpt2-medium\"`: No farasa segmentation.\n - :obj:`\"aragpt2-large\"`: No farasa segmentation.\n - :obj:`\"aragpt2-mega\"`: No farasa segmentation.\n\n keep_emojis(:obj: `bool`): don't remove emojis while preprocessing. Defaults to False\n\n remove_html_markup(:obj: `bool`): Whether to remove html artfacts, should be set to False when preprocessing TyDi QA. Defaults to True\n\n replace_urls_emails_mentions(:obj: `bool`): Whether to replace email urls and mentions by special tokens. Defaults to True\n\n strip_tashkeel(:obj: `bool`): remove diacritics (FATHATAN, DAMMATAN, KASRATAN, FATHA, DAMMA, KASRA, SUKUN, SHADDA)\n\n strip_tatweel(:obj: `bool`): remove tatweel '\\\\u0640'\n\n insert_white_spaces(:obj: `bool`): insert whitespace before and after all non Arabic digits or English digits or Arabic and English Alphabet or the 2 brackets, then inserts whitespace between words and numbers or numbers and words\n\n remove_elongation(:obj: `bool`): replace repetition of more than 2 non-digit character with 2 of this character\n\n \"\"\"\n model_name = model_name.replace('aubmindlab/', '')\n if model_name not in ACCEPTED_MODELS:\n logging.warning(\n \"Model provided is not in the accepted model list. Assuming you don't want Farasa Segmentation\"\n )\n self.model_name = 'bert-base-arabertv02'\n else:\n self.model_name = model_name\n if self.model_name in SEGMENTED_MODELS:\n logging.info(\n 'Selected Model requires pre-segmentation, Initializing FarasaSegmenter'\n )\n try:\n from farasa.segmenter import FarasaSegmenter\n self.farasa_segmenter = FarasaSegmenter(interactive=True)\n except:\n logging.warning(\n 'farasapy is not installed, you want be able to process text for AraBERTv1 and v2. 
Install it using: pip install farasapy'\n )\n else:\n logging.info(\n \"Selected Model doesn't require pre-segmentation, skipping FarasaSegmenter initialization\"\n )\n self.keep_emojis = keep_emojis\n if self.keep_emojis:\n import emoji\n self.emoji = emoji\n if self.model_name in SEGMENTED_MODELS:\n logging.warning(\n 'Keeping tweets with Farasa Segmentation is 10 times slower'\n )\n self.remove_html_markup = remove_html_markup\n self.replace_urls_emails_mentions = replace_urls_emails_mentions\n self.strip_tashkeel = strip_tashkeel\n self.strip_tatweel = strip_tatweel\n self.insert_white_spaces = insert_white_spaces\n self.remove_elongation = remove_elongation\n\n def preprocess(self, text):\n \"\"\"\n Preprocess takes an input text line an applies the same preprocessing used in AraBERT\n pretraining\n\n Args:\n\n text (:obj:`str`): inout text string\n\n Returns:\n\n string: A preprocessed string depending on which model was selected\n \"\"\"\n if self.model_name == 'bert-base-arabert':\n return self._old_preprocess(text, do_farasa_tokenization=True)\n if self.model_name == 'bert-base-arabertv01':\n return self._old_preprocess(text, do_farasa_tokenization=False)\n text = str(text)\n text = html.unescape(text)\n if self.strip_tashkeel:\n text = araby.strip_tashkeel(text)\n if self.strip_tatweel:\n text = araby.strip_tatweel(text)\n if self.replace_urls_emails_mentions:\n for reg in url_regexes:\n text = re.sub(reg, ' [رابط] ', text)\n for reg in email_regexes:\n text = re.sub(reg, ' [بريد] ', text)\n text = re.sub(user_mention_regex, ' [مستخدم] ', text)\n if self.remove_html_markup:\n text = re.sub('<br />', ' ', text)\n text = re.sub('</?[^>]+>', ' ', text)\n if self.remove_elongation:\n text = self._remove_elongation(text)\n if self.insert_white_spaces:\n text = re.sub('([^0-9ء-غف-ي٠-٩a-zA-Z\\\\[\\\\]])', ' \\\\1 ', text)\n text = re.sub('(\\\\d+)([ء-غف-ي٠-٬]+)', ' \\\\1 \\\\2 ', text)\n text = re.sub('([ء-غف-ي٠-٬]+)(\\\\d+)', ' \\\\1 \\\\2 ', text)\n if self.keep_emojis:\n emoji_regex = ''.join(list(self.emoji.UNICODE_EMOJI['en'].keys()))\n rejected_chars_regex2 = '[^%s%s]' % (chars_regex, emoji_regex)\n text = re.sub(rejected_chars_regex2, ' ', text)\n else:\n text = re.sub(rejected_chars_regex, ' ', text)\n text = ' '.join(text.replace('️', '').split())\n if (self.model_name == 'bert-base-arabertv2' or self.model_name ==\n 'bert-large-arabertv2'):\n if self.keep_emojis:\n new_text = []\n for word in text.split():\n if word in list(self.emoji.UNICODE_EMOJI['en'].keys()):\n new_text.append(word)\n else:\n new_text.append(self.farasa_segmenter.segment(word))\n text = ' '.join(new_text)\n else:\n text = self.farasa_segmenter.segment(text)\n return self._farasa_segment(text)\n return text\n\n def unpreprocess(self, text, desegment=True):\n \"\"\"Re-formats the text to a classic format where punctuations, brackets, parenthesis are not seperated by whitespaces.\n The objective is to make the generated text of any model appear natural and not preprocessed.\n\n Args:\n text (str): input text to be un-preprocessed\n desegment (bool, optional): [whether or not to remove farasa pre-segmentation before]. 
Defaults to True.\n\n Returns:\n str: The unpreprocessed (and possibly Farasa-desegmented) text.\n \"\"\"\n if self.model_name in SEGMENTED_MODELS and desegment:\n text = self.desegment(text)\n text = re.sub(white_spaced_double_quotation_regex, '\"' + '\\\\1' +\n '\"', text)\n text = re.sub(white_spaced_single_quotation_regex, \"'\" + '\\\\1' +\n \"'\", text)\n text = re.sub(white_spaced_back_quotation_regex, '\\\\`' + '\\\\1' +\n '\\\\`', text)\n text = re.sub(white_spaced_back_quotation_regex, '\\\\—' + '\\\\1' +\n '\\\\—', text)\n text = text.replace('.', ' . ')\n text = ' '.join(text.split())\n text = re.sub('(\\\\d+) \\\\. (\\\\d+)', '\\\\1.\\\\2', text)\n text = re.sub('(\\\\d+) \\\\, (\\\\d+)', '\\\\1,\\\\2', text)\n text = re.sub(left_and_right_spaced_chars, '\\\\1', text)\n text = re.sub(left_spaced_chars, '\\\\1', text)\n text = re.sub(right_spaced_chars, '\\\\1', text)\n return text\n\n def desegment(self, text):\n \"\"\"\n Use this function if sentence tokenization was done using\n `from arabert.preprocess_arabert import preprocess` with Farasa enabled\n AraBERT segmentation using Farasa adds a space after the '+' for prefixes,\n and after before the '+' for suffixes\n\n Example:\n >>> desegment('ال+ دراس +ات')\n الدراسات\n \"\"\"\n text = text.replace('+ ', '+')\n text = text.replace(' +', '+')\n text = ' '.join([self._desegmentword(word) for word in text.split(' ')]\n )\n return text\n\n def _desegmentword(self, orig_word: str) ->str:\n \"\"\"\n Word segmentor that takes a Farasa Segmented Word and removes the '+' signs\n\n Example:\n >>> _desegmentword(\"ال+يومي+ة\")\n اليومية\n \"\"\"\n word = orig_word.replace('ل+ال+', 'لل')\n if 'ال+ال' not in orig_word:\n word = word.replace('ل+ال', 'لل')\n word = word.replace('+', '')\n word = word.replace('للل', 'لل')\n return word\n\n def _old_preprocess(self, text, do_farasa_tokenization):\n \"\"\"\n AraBERTv1 preprocessing Function\n \"\"\"\n text = str(text)\n if self.strip_tashkeel:\n text = araby.strip_tashkeel(text)\n text = re.sub('\\\\d+\\\\/[ء-ي]+\\\\/\\\\d+\\\\]', '', text)\n text = re.sub('ـ', '', text)\n text = re.sub('[«»]', ' \" ', text)\n if self.replace_urls_emails_mentions:\n text = re.sub(regex_url_step1, '[رابط]', text)\n text = re.sub(regex_url_step2, '[رابط]', text)\n text = re.sub(regex_url, '[رابط]', text)\n text = re.sub(regex_email, '[بريد]', text)\n text = re.sub(regex_mention, '[مستخدم]', text)\n text = re.sub('…', '\\\\.', text).strip()\n text = self._remove_redundant_punct(text)\n if self.replace_urls_emails_mentions:\n text = re.sub('\\\\[ رابط \\\\]|\\\\[ رابط\\\\]|\\\\[رابط \\\\]',\n ' [رابط] ', text)\n text = re.sub('\\\\[ بريد \\\\]|\\\\[ بريد\\\\]|\\\\[بريد \\\\]',\n ' [بريد] ', text)\n text = re.sub('\\\\[ مستخدم \\\\]|\\\\[ مستخدم\\\\]|\\\\[مستخدم \\\\]',\n ' [مستخدم] ', text)\n if self.remove_elongation:\n text = self._remove_elongation(text)\n if self.insert_white_spaces:\n text = re.sub('([^0-9ء-غف-٩ٱ-ٳa-zA-Z\\\\[\\\\]])', ' \\\\1 ', text)\n if do_farasa_tokenization:\n text = self._tokenize_arabic_words_farasa(text)\n return text.strip()\n\n def _farasa_segment(self, text):\n line_farasa = text.split()\n segmented_line = []\n for index, word in enumerate(line_farasa):\n if word in ['[', ']']:\n continue\n if word in ['رابط', 'بريد', 'مستخدم'] and line_farasa[index - 1\n ] in ['[', ']']:\n segmented_line.append('[' + word + ']')\n continue\n if '+' not in word:\n segmented_line.append(word)\n continue\n segmented_word = self._split_farasa_output(word)\n segmented_line.extend(segmented_word)\n 
return ' '.join(segmented_line)\n\n def _split_farasa_output(self, word):\n segmented_word = []\n temp_token = ''\n for i, c in enumerate(word):\n if c == '+':\n if temp_token == 'ك':\n if i == 1:\n segmented_word.append(temp_token + '+')\n temp_token = ''\n elif word[i - 2] == '+':\n if segmented_word[-1][-1] == '+':\n segmented_word.append(temp_token + '+')\n temp_token = ''\n else:\n segmented_word.append('+' + temp_token)\n temp_token = ''\n elif temp_token in prefix_list:\n segmented_word.append(temp_token + '+')\n temp_token = ''\n elif temp_token in suffix_list:\n segmented_word.append('+' + temp_token)\n temp_token = ''\n else:\n segmented_word.append(temp_token)\n temp_token = ''\n continue\n temp_token += c\n if temp_token != '':\n if temp_token in suffix_list:\n segmented_word.append('+' + temp_token)\n else:\n segmented_word.append(temp_token)\n return segmented_word\n\n def _tokenize_arabic_words_farasa(self, line_input):\n if self.keep_emojis:\n line_farasa = []\n for word in line_input.split():\n if word in list(self.emoji.UNICODE_EMOJI['en'].keys()):\n line_farasa.append(word)\n else:\n line_farasa.append(self.farasa_segmenter.segment(word))\n else:\n line_farasa = self.farasa_segmenter.segment(line_input).split()\n segmented_line = []\n for index, word in enumerate(line_farasa):\n if word in ['[', ']']:\n continue\n if word in ['رابط', 'بريد', 'مستخدم'] and line_farasa[index - 1\n ] in ['[', ']']:\n segmented_line.append('[' + word + ']')\n continue\n segmented_word = []\n for token in word.split('+'):\n if token in prefix_list:\n segmented_word.append(token + '+')\n elif token in suffix_list:\n segmented_word.append('+' + token)\n else:\n segmented_word.append(token)\n segmented_line.extend(segmented_word)\n return ' '.join(segmented_line)\n\n def _remove_elongation(self, text):\n \"\"\"\n :param text: the input text to remove elongation\n :return: delongated text\n \"\"\"\n for index_ in range(len(re.findall(regex_tatweel, text))):\n elongation = re.search(regex_tatweel, text)\n if elongation:\n elongation_pattern = elongation.group()\n elongation_replacement = elongation_pattern[0]\n elongation_pattern = re.escape(elongation_pattern)\n text = re.sub(elongation_pattern, elongation_replacement,\n text, flags=re.MULTILINE)\n else:\n break\n return text\n\n def _remove_redundant_punct(self, text):\n text_ = text\n result = re.search(redundant_punct_pattern, text)\n dif = 0\n while result:\n sub = result.group()\n sub = sorted(set(sub), key=sub.index)\n sub = ' ' + ''.join(list(sub)) + ' '\n text = ''.join((text[:result.span()[0] + dif], sub, text[result\n .span()[1] + dif:]))\n text_ = ''.join((text_[:result.span()[0]], text_[result.span()[\n 1]:])).strip()\n dif = abs(len(text) - len(text_))\n result = re.search(redundant_punct_pattern, text_)\n text = re.sub('\\\\s+', ' ', text)\n return text.strip()\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass ArbertmoPreprocessor:\n \"\"\"\n A Preprocessor class that cleans and preprocesses text for all models in the AraBERT repo.\n It also can unprocess the text ouput of the generated text\n\n Args:\n\n model_name (:obj:`str`): model name from the HuggingFace Models page without the aubmindlab tag. Defaults to \"bert-base-arabertv02\". Current accepted models are:\n\n - :obj:`\"bert-base-arabertv01\"`: No farasa segmentation.\n - :obj:`\"bert-base-arabert\"`: with farasa segmentation.\n - :obj:`\"bert-base-arabertv02\"`: No farasas egmentation.\n - :obj:`\"bert-base-arabertv2\"`: with farasa segmentation.\n - :obj:`\"bert-large-arabertv02\"`: No farasas egmentation.\n - :obj:`\"bert-large-arabertv2\"`: with farasa segmentation.\n - :obj:`\"araelectra-base\"`: No farasa segmentation.\n - :obj:`\"araelectra-base-discriminator\"`: No farasa segmentation.\n - :obj:`\"araelectra-base-generator\"`: No farasa segmentation.\n - :obj:`\"aragpt2-base\"`: No farasa segmentation.\n - :obj:`\"aragpt2-medium\"`: No farasa segmentation.\n - :obj:`\"aragpt2-large\"`: No farasa segmentation.\n - :obj:`\"aragpt2-mega\"`: No farasa segmentation.\n\n keep_emojis(:obj: `bool`): don't remove emojis while preprocessing. Defaults to False\n\n remove_html_markup(:obj: `bool`): Whether to remove html artfacts, should be set to False when preprocessing TyDi QA. Defaults to True\n\n replace_urls_emails_mentions(:obj: `bool`): Whether to replace email urls and mentions by special tokens. Defaults to True\n\n strip_tashkeel(:obj: `bool`): remove diacritics (FATHATAN, DAMMATAN, KASRATAN, FATHA, DAMMA, KASRA, SUKUN, SHADDA)\n\n strip_tatweel(:obj: `bool`): remove tatweel '\\\\u0640'\n\n insert_white_spaces(:obj: `bool`): insert whitespace before and after all non Arabic digits or English digits or Arabic and English Alphabet or the 2 brackets, then inserts whitespace between words and numbers or numbers and words\n\n remove_elongation(:obj: `bool`): replace repetition of more than 2 non-digit character with 2 of this character\n\n\n Returns:\n\n ArBERTMoPreprocessor: the preprocessor class\n\n Example:\n\n from preprocess import ArBERTMoPreprocessor\n\n arabert_prep = ArBERTMoPreprocessor(\"aubmindlab/bert-base-arabertv2\")\n\n arabert_prep.preprocess(\"SOME ARABIC TEXT\")\n \"\"\"\n\n def __init__(self, model_name, keep_emojis=False, remove_html_markup=\n True, replace_urls_emails_mentions=True, strip_tashkeel=True,\n strip_tatweel=True, insert_white_spaces=True, remove_elongation=True):\n \"\"\"\n model_name (:obj:`str`): model name from the HuggingFace Models page without the aubmindlab tag. Defaults to \"bert-base-arabertv02\". 
Current accepted models are:\n\n - :obj:`\"bert-base-arabertv01\"`: No farasa segmentation.\n - :obj:`\"bert-base-arabert\"`: with farasa segmentation.\n - :obj:`\"bert-base-arabertv02\"`: No farasas egmentation.\n - :obj:`\"bert-base-arabertv2\"`: with farasa segmentation.\n - :obj:`\"bert-large-arabertv02\"`: No farasas egmentation.\n - :obj:`\"bert-large-arabertv2\"`: with farasa segmentation.\n - :obj:`\"araelectra-base\"`: No farasa segmentation.\n - :obj:`\"araelectra-base-discriminator\"`: No farasa segmentation.\n - :obj:`\"araelectra-base-generator\"`: No farasa segmentation.\n - :obj:`\"aragpt2-base\"`: No farasa segmentation.\n - :obj:`\"aragpt2-medium\"`: No farasa segmentation.\n - :obj:`\"aragpt2-large\"`: No farasa segmentation.\n - :obj:`\"aragpt2-mega\"`: No farasa segmentation.\n\n keep_emojis(:obj: `bool`): don't remove emojis while preprocessing. Defaults to False\n\n remove_html_markup(:obj: `bool`): Whether to remove html artfacts, should be set to False when preprocessing TyDi QA. Defaults to True\n\n replace_urls_emails_mentions(:obj: `bool`): Whether to replace email urls and mentions by special tokens. Defaults to True\n\n strip_tashkeel(:obj: `bool`): remove diacritics (FATHATAN, DAMMATAN, KASRATAN, FATHA, DAMMA, KASRA, SUKUN, SHADDA)\n\n strip_tatweel(:obj: `bool`): remove tatweel '\\\\u0640'\n\n insert_white_spaces(:obj: `bool`): insert whitespace before and after all non Arabic digits or English digits or Arabic and English Alphabet or the 2 brackets, then inserts whitespace between words and numbers or numbers and words\n\n remove_elongation(:obj: `bool`): replace repetition of more than 2 non-digit character with 2 of this character\n\n \"\"\"\n model_name = model_name.replace('aubmindlab/', '')\n if model_name not in ACCEPTED_MODELS:\n logging.warning(\n \"Model provided is not in the accepted model list. Assuming you don't want Farasa Segmentation\"\n )\n self.model_name = 'bert-base-arabertv02'\n else:\n self.model_name = model_name\n if self.model_name in SEGMENTED_MODELS:\n logging.info(\n 'Selected Model requires pre-segmentation, Initializing FarasaSegmenter'\n )\n try:\n from farasa.segmenter import FarasaSegmenter\n self.farasa_segmenter = FarasaSegmenter(interactive=True)\n except:\n logging.warning(\n 'farasapy is not installed, you want be able to process text for AraBERTv1 and v2. 
Install it using: pip install farasapy'\n )\n else:\n logging.info(\n \"Selected Model doesn't require pre-segmentation, skipping FarasaSegmenter initialization\"\n )\n self.keep_emojis = keep_emojis\n if self.keep_emojis:\n import emoji\n self.emoji = emoji\n if self.model_name in SEGMENTED_MODELS:\n logging.warning(\n 'Keeping tweets with Farasa Segmentation is 10 times slower'\n )\n self.remove_html_markup = remove_html_markup\n self.replace_urls_emails_mentions = replace_urls_emails_mentions\n self.strip_tashkeel = strip_tashkeel\n self.strip_tatweel = strip_tatweel\n self.insert_white_spaces = insert_white_spaces\n self.remove_elongation = remove_elongation\n\n def preprocess(self, text):\n \"\"\"\n Preprocess takes an input text line an applies the same preprocessing used in AraBERT\n pretraining\n\n Args:\n\n text (:obj:`str`): inout text string\n\n Returns:\n\n string: A preprocessed string depending on which model was selected\n \"\"\"\n if self.model_name == 'bert-base-arabert':\n return self._old_preprocess(text, do_farasa_tokenization=True)\n if self.model_name == 'bert-base-arabertv01':\n return self._old_preprocess(text, do_farasa_tokenization=False)\n text = str(text)\n text = html.unescape(text)\n if self.strip_tashkeel:\n text = araby.strip_tashkeel(text)\n if self.strip_tatweel:\n text = araby.strip_tatweel(text)\n if self.replace_urls_emails_mentions:\n for reg in url_regexes:\n text = re.sub(reg, ' [رابط] ', text)\n for reg in email_regexes:\n text = re.sub(reg, ' [بريد] ', text)\n text = re.sub(user_mention_regex, ' [مستخدم] ', text)\n if self.remove_html_markup:\n text = re.sub('<br />', ' ', text)\n text = re.sub('</?[^>]+>', ' ', text)\n if self.remove_elongation:\n text = self._remove_elongation(text)\n if self.insert_white_spaces:\n text = re.sub('([^0-9ء-غف-ي٠-٩a-zA-Z\\\\[\\\\]])', ' \\\\1 ', text)\n text = re.sub('(\\\\d+)([ء-غف-ي٠-٬]+)', ' \\\\1 \\\\2 ', text)\n text = re.sub('([ء-غف-ي٠-٬]+)(\\\\d+)', ' \\\\1 \\\\2 ', text)\n if self.keep_emojis:\n emoji_regex = ''.join(list(self.emoji.UNICODE_EMOJI['en'].keys()))\n rejected_chars_regex2 = '[^%s%s]' % (chars_regex, emoji_regex)\n text = re.sub(rejected_chars_regex2, ' ', text)\n else:\n text = re.sub(rejected_chars_regex, ' ', text)\n text = ' '.join(text.replace('️', '').split())\n if (self.model_name == 'bert-base-arabertv2' or self.model_name ==\n 'bert-large-arabertv2'):\n if self.keep_emojis:\n new_text = []\n for word in text.split():\n if word in list(self.emoji.UNICODE_EMOJI['en'].keys()):\n new_text.append(word)\n else:\n new_text.append(self.farasa_segmenter.segment(word))\n text = ' '.join(new_text)\n else:\n text = self.farasa_segmenter.segment(text)\n return self._farasa_segment(text)\n return text\n\n def unpreprocess(self, text, desegment=True):\n \"\"\"Re-formats the text to a classic format where punctuations, brackets, parenthesis are not seperated by whitespaces.\n The objective is to make the generated text of any model appear natural and not preprocessed.\n\n Args:\n text (str): input text to be un-preprocessed\n desegment (bool, optional): [whether or not to remove farasa pre-segmentation before]. 
Defaults to True.\n\n Returns:\n str: The unpreprocessed (and possibly Farasa-desegmented) text.\n \"\"\"\n if self.model_name in SEGMENTED_MODELS and desegment:\n text = self.desegment(text)\n text = re.sub(white_spaced_double_quotation_regex, '\"' + '\\\\1' +\n '\"', text)\n text = re.sub(white_spaced_single_quotation_regex, \"'\" + '\\\\1' +\n \"'\", text)\n text = re.sub(white_spaced_back_quotation_regex, '\\\\`' + '\\\\1' +\n '\\\\`', text)\n text = re.sub(white_spaced_back_quotation_regex, '\\\\—' + '\\\\1' +\n '\\\\—', text)\n text = text.replace('.', ' . ')\n text = ' '.join(text.split())\n text = re.sub('(\\\\d+) \\\\. (\\\\d+)', '\\\\1.\\\\2', text)\n text = re.sub('(\\\\d+) \\\\, (\\\\d+)', '\\\\1,\\\\2', text)\n text = re.sub(left_and_right_spaced_chars, '\\\\1', text)\n text = re.sub(left_spaced_chars, '\\\\1', text)\n text = re.sub(right_spaced_chars, '\\\\1', text)\n return text\n\n def desegment(self, text):\n \"\"\"\n Use this function if sentence tokenization was done using\n `from arabert.preprocess_arabert import preprocess` with Farasa enabled\n AraBERT segmentation using Farasa adds a space after the '+' for prefixes,\n and after before the '+' for suffixes\n\n Example:\n >>> desegment('ال+ دراس +ات')\n الدراسات\n \"\"\"\n text = text.replace('+ ', '+')\n text = text.replace(' +', '+')\n text = ' '.join([self._desegmentword(word) for word in text.split(' ')]\n )\n return text\n\n def _desegmentword(self, orig_word: str) ->str:\n \"\"\"\n Word segmentor that takes a Farasa Segmented Word and removes the '+' signs\n\n Example:\n >>> _desegmentword(\"ال+يومي+ة\")\n اليومية\n \"\"\"\n word = orig_word.replace('ل+ال+', 'لل')\n if 'ال+ال' not in orig_word:\n word = word.replace('ل+ال', 'لل')\n word = word.replace('+', '')\n word = word.replace('للل', 'لل')\n return word\n\n def _old_preprocess(self, text, do_farasa_tokenization):\n \"\"\"\n AraBERTv1 preprocessing Function\n \"\"\"\n text = str(text)\n if self.strip_tashkeel:\n text = araby.strip_tashkeel(text)\n text = re.sub('\\\\d+\\\\/[ء-ي]+\\\\/\\\\d+\\\\]', '', text)\n text = re.sub('ـ', '', text)\n text = re.sub('[«»]', ' \" ', text)\n if self.replace_urls_emails_mentions:\n text = re.sub(regex_url_step1, '[رابط]', text)\n text = re.sub(regex_url_step2, '[رابط]', text)\n text = re.sub(regex_url, '[رابط]', text)\n text = re.sub(regex_email, '[بريد]', text)\n text = re.sub(regex_mention, '[مستخدم]', text)\n text = re.sub('…', '\\\\.', text).strip()\n text = self._remove_redundant_punct(text)\n if self.replace_urls_emails_mentions:\n text = re.sub('\\\\[ رابط \\\\]|\\\\[ رابط\\\\]|\\\\[رابط \\\\]',\n ' [رابط] ', text)\n text = re.sub('\\\\[ بريد \\\\]|\\\\[ بريد\\\\]|\\\\[بريد \\\\]',\n ' [بريد] ', text)\n text = re.sub('\\\\[ مستخدم \\\\]|\\\\[ مستخدم\\\\]|\\\\[مستخدم \\\\]',\n ' [مستخدم] ', text)\n if self.remove_elongation:\n text = self._remove_elongation(text)\n if self.insert_white_spaces:\n text = re.sub('([^0-9ء-غف-٩ٱ-ٳa-zA-Z\\\\[\\\\]])', ' \\\\1 ', text)\n if do_farasa_tokenization:\n text = self._tokenize_arabic_words_farasa(text)\n return text.strip()\n\n def _farasa_segment(self, text):\n line_farasa = text.split()\n segmented_line = []\n for index, word in enumerate(line_farasa):\n if word in ['[', ']']:\n continue\n if word in ['رابط', 'بريد', 'مستخدم'] and line_farasa[index - 1\n ] in ['[', ']']:\n segmented_line.append('[' + word + ']')\n continue\n if '+' not in word:\n segmented_line.append(word)\n continue\n segmented_word = self._split_farasa_output(word)\n segmented_line.extend(segmented_word)\n 
return ' '.join(segmented_line)\n\n def _split_farasa_output(self, word):\n segmented_word = []\n temp_token = ''\n for i, c in enumerate(word):\n if c == '+':\n if temp_token == 'ك':\n if i == 1:\n segmented_word.append(temp_token + '+')\n temp_token = ''\n elif word[i - 2] == '+':\n if segmented_word[-1][-1] == '+':\n segmented_word.append(temp_token + '+')\n temp_token = ''\n else:\n segmented_word.append('+' + temp_token)\n temp_token = ''\n elif temp_token in prefix_list:\n segmented_word.append(temp_token + '+')\n temp_token = ''\n elif temp_token in suffix_list:\n segmented_word.append('+' + temp_token)\n temp_token = ''\n else:\n segmented_word.append(temp_token)\n temp_token = ''\n continue\n temp_token += c\n if temp_token != '':\n if temp_token in suffix_list:\n segmented_word.append('+' + temp_token)\n else:\n segmented_word.append(temp_token)\n return segmented_word\n\n def _tokenize_arabic_words_farasa(self, line_input):\n if self.keep_emojis:\n line_farasa = []\n for word in line_input.split():\n if word in list(self.emoji.UNICODE_EMOJI['en'].keys()):\n line_farasa.append(word)\n else:\n line_farasa.append(self.farasa_segmenter.segment(word))\n else:\n line_farasa = self.farasa_segmenter.segment(line_input).split()\n segmented_line = []\n for index, word in enumerate(line_farasa):\n if word in ['[', ']']:\n continue\n if word in ['رابط', 'بريد', 'مستخدم'] and line_farasa[index - 1\n ] in ['[', ']']:\n segmented_line.append('[' + word + ']')\n continue\n segmented_word = []\n for token in word.split('+'):\n if token in prefix_list:\n segmented_word.append(token + '+')\n elif token in suffix_list:\n segmented_word.append('+' + token)\n else:\n segmented_word.append(token)\n segmented_line.extend(segmented_word)\n return ' '.join(segmented_line)\n\n def _remove_elongation(self, text):\n \"\"\"\n :param text: the input text to remove elongation\n :return: delongated text\n \"\"\"\n for index_ in range(len(re.findall(regex_tatweel, text))):\n elongation = re.search(regex_tatweel, text)\n if elongation:\n elongation_pattern = elongation.group()\n elongation_replacement = elongation_pattern[0]\n elongation_pattern = re.escape(elongation_pattern)\n text = re.sub(elongation_pattern, elongation_replacement,\n text, flags=re.MULTILINE)\n else:\n break\n return text\n\n def _remove_redundant_punct(self, text):\n text_ = text\n result = re.search(redundant_punct_pattern, text)\n dif = 0\n while result:\n sub = result.group()\n sub = sorted(set(sub), key=sub.index)\n sub = ' ' + ''.join(list(sub)) + ' '\n text = ''.join((text[:result.span()[0] + dif], sub, text[result\n .span()[1] + dif:]))\n text_ = ''.join((text_[:result.span()[0]], text_[result.span()[\n 1]:])).strip()\n dif = abs(len(text) - len(text_))\n result = re.search(redundant_punct_pattern, text_)\n text = re.sub('\\\\s+', ' ', text)\n return text.strip()\n\n\n<mask token>\n",
"step-3": "<mask token>\nACCEPTED_MODELS = ['bert-base-arabertv01', 'bert-base-arabert',\n 'bert-base-arabertv02', 'bert-base-arabertv2', 'bert-large-arabertv02',\n 'bert-large-arabertv2', 'araelectra-base',\n 'araelectra-base-discriminator', 'araelectra-base-generator',\n 'aragpt2-base', 'aragpt2-medium', 'aragpt2-large', 'aragpt2-mega']\nSEGMENTED_MODELS = ['bert-base-arabert', 'bert-base-arabertv2',\n 'bert-large-arabertv2']\n\n\nclass ArbertmoPreprocessor:\n \"\"\"\n A Preprocessor class that cleans and preprocesses text for all models in the AraBERT repo.\n It also can unprocess the text ouput of the generated text\n\n Args:\n\n model_name (:obj:`str`): model name from the HuggingFace Models page without the aubmindlab tag. Defaults to \"bert-base-arabertv02\". Current accepted models are:\n\n - :obj:`\"bert-base-arabertv01\"`: No farasa segmentation.\n - :obj:`\"bert-base-arabert\"`: with farasa segmentation.\n - :obj:`\"bert-base-arabertv02\"`: No farasas egmentation.\n - :obj:`\"bert-base-arabertv2\"`: with farasa segmentation.\n - :obj:`\"bert-large-arabertv02\"`: No farasas egmentation.\n - :obj:`\"bert-large-arabertv2\"`: with farasa segmentation.\n - :obj:`\"araelectra-base\"`: No farasa segmentation.\n - :obj:`\"araelectra-base-discriminator\"`: No farasa segmentation.\n - :obj:`\"araelectra-base-generator\"`: No farasa segmentation.\n - :obj:`\"aragpt2-base\"`: No farasa segmentation.\n - :obj:`\"aragpt2-medium\"`: No farasa segmentation.\n - :obj:`\"aragpt2-large\"`: No farasa segmentation.\n - :obj:`\"aragpt2-mega\"`: No farasa segmentation.\n\n keep_emojis(:obj: `bool`): don't remove emojis while preprocessing. Defaults to False\n\n remove_html_markup(:obj: `bool`): Whether to remove html artfacts, should be set to False when preprocessing TyDi QA. Defaults to True\n\n replace_urls_emails_mentions(:obj: `bool`): Whether to replace email urls and mentions by special tokens. Defaults to True\n\n strip_tashkeel(:obj: `bool`): remove diacritics (FATHATAN, DAMMATAN, KASRATAN, FATHA, DAMMA, KASRA, SUKUN, SHADDA)\n\n strip_tatweel(:obj: `bool`): remove tatweel '\\\\u0640'\n\n insert_white_spaces(:obj: `bool`): insert whitespace before and after all non Arabic digits or English digits or Arabic and English Alphabet or the 2 brackets, then inserts whitespace between words and numbers or numbers and words\n\n remove_elongation(:obj: `bool`): replace repetition of more than 2 non-digit character with 2 of this character\n\n\n Returns:\n\n ArBERTMoPreprocessor: the preprocessor class\n\n Example:\n\n from preprocess import ArBERTMoPreprocessor\n\n arabert_prep = ArBERTMoPreprocessor(\"aubmindlab/bert-base-arabertv2\")\n\n arabert_prep.preprocess(\"SOME ARABIC TEXT\")\n \"\"\"\n\n def __init__(self, model_name, keep_emojis=False, remove_html_markup=\n True, replace_urls_emails_mentions=True, strip_tashkeel=True,\n strip_tatweel=True, insert_white_spaces=True, remove_elongation=True):\n \"\"\"\n model_name (:obj:`str`): model name from the HuggingFace Models page without the aubmindlab tag. Defaults to \"bert-base-arabertv02\". 
Current accepted models are:\n\n - :obj:`\"bert-base-arabertv01\"`: No farasa segmentation.\n - :obj:`\"bert-base-arabert\"`: with farasa segmentation.\n - :obj:`\"bert-base-arabertv02\"`: No farasas egmentation.\n - :obj:`\"bert-base-arabertv2\"`: with farasa segmentation.\n - :obj:`\"bert-large-arabertv02\"`: No farasas egmentation.\n - :obj:`\"bert-large-arabertv2\"`: with farasa segmentation.\n - :obj:`\"araelectra-base\"`: No farasa segmentation.\n - :obj:`\"araelectra-base-discriminator\"`: No farasa segmentation.\n - :obj:`\"araelectra-base-generator\"`: No farasa segmentation.\n - :obj:`\"aragpt2-base\"`: No farasa segmentation.\n - :obj:`\"aragpt2-medium\"`: No farasa segmentation.\n - :obj:`\"aragpt2-large\"`: No farasa segmentation.\n - :obj:`\"aragpt2-mega\"`: No farasa segmentation.\n\n keep_emojis(:obj: `bool`): don't remove emojis while preprocessing. Defaults to False\n\n remove_html_markup(:obj: `bool`): Whether to remove html artfacts, should be set to False when preprocessing TyDi QA. Defaults to True\n\n replace_urls_emails_mentions(:obj: `bool`): Whether to replace email urls and mentions by special tokens. Defaults to True\n\n strip_tashkeel(:obj: `bool`): remove diacritics (FATHATAN, DAMMATAN, KASRATAN, FATHA, DAMMA, KASRA, SUKUN, SHADDA)\n\n strip_tatweel(:obj: `bool`): remove tatweel '\\\\u0640'\n\n insert_white_spaces(:obj: `bool`): insert whitespace before and after all non Arabic digits or English digits or Arabic and English Alphabet or the 2 brackets, then inserts whitespace between words and numbers or numbers and words\n\n remove_elongation(:obj: `bool`): replace repetition of more than 2 non-digit character with 2 of this character\n\n \"\"\"\n model_name = model_name.replace('aubmindlab/', '')\n if model_name not in ACCEPTED_MODELS:\n logging.warning(\n \"Model provided is not in the accepted model list. Assuming you don't want Farasa Segmentation\"\n )\n self.model_name = 'bert-base-arabertv02'\n else:\n self.model_name = model_name\n if self.model_name in SEGMENTED_MODELS:\n logging.info(\n 'Selected Model requires pre-segmentation, Initializing FarasaSegmenter'\n )\n try:\n from farasa.segmenter import FarasaSegmenter\n self.farasa_segmenter = FarasaSegmenter(interactive=True)\n except:\n logging.warning(\n 'farasapy is not installed, you want be able to process text for AraBERTv1 and v2. 
Install it using: pip install farasapy'\n )\n else:\n logging.info(\n \"Selected Model doesn't require pre-segmentation, skipping FarasaSegmenter initialization\"\n )\n self.keep_emojis = keep_emojis\n if self.keep_emojis:\n import emoji\n self.emoji = emoji\n if self.model_name in SEGMENTED_MODELS:\n logging.warning(\n 'Keeping tweets with Farasa Segmentation is 10 times slower'\n )\n self.remove_html_markup = remove_html_markup\n self.replace_urls_emails_mentions = replace_urls_emails_mentions\n self.strip_tashkeel = strip_tashkeel\n self.strip_tatweel = strip_tatweel\n self.insert_white_spaces = insert_white_spaces\n self.remove_elongation = remove_elongation\n\n def preprocess(self, text):\n \"\"\"\n Preprocess takes an input text line an applies the same preprocessing used in AraBERT\n pretraining\n\n Args:\n\n text (:obj:`str`): inout text string\n\n Returns:\n\n string: A preprocessed string depending on which model was selected\n \"\"\"\n if self.model_name == 'bert-base-arabert':\n return self._old_preprocess(text, do_farasa_tokenization=True)\n if self.model_name == 'bert-base-arabertv01':\n return self._old_preprocess(text, do_farasa_tokenization=False)\n text = str(text)\n text = html.unescape(text)\n if self.strip_tashkeel:\n text = araby.strip_tashkeel(text)\n if self.strip_tatweel:\n text = araby.strip_tatweel(text)\n if self.replace_urls_emails_mentions:\n for reg in url_regexes:\n text = re.sub(reg, ' [رابط] ', text)\n for reg in email_regexes:\n text = re.sub(reg, ' [بريد] ', text)\n text = re.sub(user_mention_regex, ' [مستخدم] ', text)\n if self.remove_html_markup:\n text = re.sub('<br />', ' ', text)\n text = re.sub('</?[^>]+>', ' ', text)\n if self.remove_elongation:\n text = self._remove_elongation(text)\n if self.insert_white_spaces:\n text = re.sub('([^0-9ء-غف-ي٠-٩a-zA-Z\\\\[\\\\]])', ' \\\\1 ', text)\n text = re.sub('(\\\\d+)([ء-غف-ي٠-٬]+)', ' \\\\1 \\\\2 ', text)\n text = re.sub('([ء-غف-ي٠-٬]+)(\\\\d+)', ' \\\\1 \\\\2 ', text)\n if self.keep_emojis:\n emoji_regex = ''.join(list(self.emoji.UNICODE_EMOJI['en'].keys()))\n rejected_chars_regex2 = '[^%s%s]' % (chars_regex, emoji_regex)\n text = re.sub(rejected_chars_regex2, ' ', text)\n else:\n text = re.sub(rejected_chars_regex, ' ', text)\n text = ' '.join(text.replace('️', '').split())\n if (self.model_name == 'bert-base-arabertv2' or self.model_name ==\n 'bert-large-arabertv2'):\n if self.keep_emojis:\n new_text = []\n for word in text.split():\n if word in list(self.emoji.UNICODE_EMOJI['en'].keys()):\n new_text.append(word)\n else:\n new_text.append(self.farasa_segmenter.segment(word))\n text = ' '.join(new_text)\n else:\n text = self.farasa_segmenter.segment(text)\n return self._farasa_segment(text)\n return text\n\n def unpreprocess(self, text, desegment=True):\n \"\"\"Re-formats the text to a classic format where punctuations, brackets, parenthesis are not seperated by whitespaces.\n The objective is to make the generated text of any model appear natural and not preprocessed.\n\n Args:\n text (str): input text to be un-preprocessed\n desegment (bool, optional): [whether or not to remove farasa pre-segmentation before]. 
Defaults to True.\n\n Returns:\n str: The unpreprocessed (and possibly Farasa-desegmented) text.\n \"\"\"\n if self.model_name in SEGMENTED_MODELS and desegment:\n text = self.desegment(text)\n text = re.sub(white_spaced_double_quotation_regex, '\"' + '\\\\1' +\n '\"', text)\n text = re.sub(white_spaced_single_quotation_regex, \"'\" + '\\\\1' +\n \"'\", text)\n text = re.sub(white_spaced_back_quotation_regex, '\\\\`' + '\\\\1' +\n '\\\\`', text)\n text = re.sub(white_spaced_back_quotation_regex, '\\\\—' + '\\\\1' +\n '\\\\—', text)\n text = text.replace('.', ' . ')\n text = ' '.join(text.split())\n text = re.sub('(\\\\d+) \\\\. (\\\\d+)', '\\\\1.\\\\2', text)\n text = re.sub('(\\\\d+) \\\\, (\\\\d+)', '\\\\1,\\\\2', text)\n text = re.sub(left_and_right_spaced_chars, '\\\\1', text)\n text = re.sub(left_spaced_chars, '\\\\1', text)\n text = re.sub(right_spaced_chars, '\\\\1', text)\n return text\n\n def desegment(self, text):\n \"\"\"\n Use this function if sentence tokenization was done using\n `from arabert.preprocess_arabert import preprocess` with Farasa enabled\n AraBERT segmentation using Farasa adds a space after the '+' for prefixes,\n and after before the '+' for suffixes\n\n Example:\n >>> desegment('ال+ دراس +ات')\n الدراسات\n \"\"\"\n text = text.replace('+ ', '+')\n text = text.replace(' +', '+')\n text = ' '.join([self._desegmentword(word) for word in text.split(' ')]\n )\n return text\n\n def _desegmentword(self, orig_word: str) ->str:\n \"\"\"\n Word segmentor that takes a Farasa Segmented Word and removes the '+' signs\n\n Example:\n >>> _desegmentword(\"ال+يومي+ة\")\n اليومية\n \"\"\"\n word = orig_word.replace('ل+ال+', 'لل')\n if 'ال+ال' not in orig_word:\n word = word.replace('ل+ال', 'لل')\n word = word.replace('+', '')\n word = word.replace('للل', 'لل')\n return word\n\n def _old_preprocess(self, text, do_farasa_tokenization):\n \"\"\"\n AraBERTv1 preprocessing Function\n \"\"\"\n text = str(text)\n if self.strip_tashkeel:\n text = araby.strip_tashkeel(text)\n text = re.sub('\\\\d+\\\\/[ء-ي]+\\\\/\\\\d+\\\\]', '', text)\n text = re.sub('ـ', '', text)\n text = re.sub('[«»]', ' \" ', text)\n if self.replace_urls_emails_mentions:\n text = re.sub(regex_url_step1, '[رابط]', text)\n text = re.sub(regex_url_step2, '[رابط]', text)\n text = re.sub(regex_url, '[رابط]', text)\n text = re.sub(regex_email, '[بريد]', text)\n text = re.sub(regex_mention, '[مستخدم]', text)\n text = re.sub('…', '\\\\.', text).strip()\n text = self._remove_redundant_punct(text)\n if self.replace_urls_emails_mentions:\n text = re.sub('\\\\[ رابط \\\\]|\\\\[ رابط\\\\]|\\\\[رابط \\\\]',\n ' [رابط] ', text)\n text = re.sub('\\\\[ بريد \\\\]|\\\\[ بريد\\\\]|\\\\[بريد \\\\]',\n ' [بريد] ', text)\n text = re.sub('\\\\[ مستخدم \\\\]|\\\\[ مستخدم\\\\]|\\\\[مستخدم \\\\]',\n ' [مستخدم] ', text)\n if self.remove_elongation:\n text = self._remove_elongation(text)\n if self.insert_white_spaces:\n text = re.sub('([^0-9ء-غف-٩ٱ-ٳa-zA-Z\\\\[\\\\]])', ' \\\\1 ', text)\n if do_farasa_tokenization:\n text = self._tokenize_arabic_words_farasa(text)\n return text.strip()\n\n def _farasa_segment(self, text):\n line_farasa = text.split()\n segmented_line = []\n for index, word in enumerate(line_farasa):\n if word in ['[', ']']:\n continue\n if word in ['رابط', 'بريد', 'مستخدم'] and line_farasa[index - 1\n ] in ['[', ']']:\n segmented_line.append('[' + word + ']')\n continue\n if '+' not in word:\n segmented_line.append(word)\n continue\n segmented_word = self._split_farasa_output(word)\n segmented_line.extend(segmented_word)\n 
return ' '.join(segmented_line)\n\n def _split_farasa_output(self, word):\n segmented_word = []\n temp_token = ''\n for i, c in enumerate(word):\n if c == '+':\n if temp_token == 'ك':\n if i == 1:\n segmented_word.append(temp_token + '+')\n temp_token = ''\n elif word[i - 2] == '+':\n if segmented_word[-1][-1] == '+':\n segmented_word.append(temp_token + '+')\n temp_token = ''\n else:\n segmented_word.append('+' + temp_token)\n temp_token = ''\n elif temp_token in prefix_list:\n segmented_word.append(temp_token + '+')\n temp_token = ''\n elif temp_token in suffix_list:\n segmented_word.append('+' + temp_token)\n temp_token = ''\n else:\n segmented_word.append(temp_token)\n temp_token = ''\n continue\n temp_token += c\n if temp_token != '':\n if temp_token in suffix_list:\n segmented_word.append('+' + temp_token)\n else:\n segmented_word.append(temp_token)\n return segmented_word\n\n def _tokenize_arabic_words_farasa(self, line_input):\n if self.keep_emojis:\n line_farasa = []\n for word in line_input.split():\n if word in list(self.emoji.UNICODE_EMOJI['en'].keys()):\n line_farasa.append(word)\n else:\n line_farasa.append(self.farasa_segmenter.segment(word))\n else:\n line_farasa = self.farasa_segmenter.segment(line_input).split()\n segmented_line = []\n for index, word in enumerate(line_farasa):\n if word in ['[', ']']:\n continue\n if word in ['رابط', 'بريد', 'مستخدم'] and line_farasa[index - 1\n ] in ['[', ']']:\n segmented_line.append('[' + word + ']')\n continue\n segmented_word = []\n for token in word.split('+'):\n if token in prefix_list:\n segmented_word.append(token + '+')\n elif token in suffix_list:\n segmented_word.append('+' + token)\n else:\n segmented_word.append(token)\n segmented_line.extend(segmented_word)\n return ' '.join(segmented_line)\n\n def _remove_elongation(self, text):\n \"\"\"\n :param text: the input text to remove elongation\n :return: delongated text\n \"\"\"\n for index_ in range(len(re.findall(regex_tatweel, text))):\n elongation = re.search(regex_tatweel, text)\n if elongation:\n elongation_pattern = elongation.group()\n elongation_replacement = elongation_pattern[0]\n elongation_pattern = re.escape(elongation_pattern)\n text = re.sub(elongation_pattern, elongation_replacement,\n text, flags=re.MULTILINE)\n else:\n break\n return text\n\n def _remove_redundant_punct(self, text):\n text_ = text\n result = re.search(redundant_punct_pattern, text)\n dif = 0\n while result:\n sub = result.group()\n sub = sorted(set(sub), key=sub.index)\n sub = ' ' + ''.join(list(sub)) + ' '\n text = ''.join((text[:result.span()[0] + dif], sub, text[result\n .span()[1] + dif:]))\n text_ = ''.join((text_[:result.span()[0]], text_[result.span()[\n 1]:])).strip()\n dif = abs(len(text) - len(text_))\n result = re.search(redundant_punct_pattern, text_)\n text = re.sub('\\\\s+', ' ', text)\n return text.strip()\n\n\nprefix_list = ['ال', 'و', 'ف', 'ب', 'ك', 'ل', 'لل', 'ال', 'و', 'ف', 'ب',\n 'ك', 'ل', 'لل', 'س']\nsuffix_list = ['ه', 'ها', 'ك', 'ي', 'هما', 'كما', 'نا', 'كم', 'هم', 'هن',\n 'كن', 'ا', 'ان', 'ين', 'ون', 'وا', 'ات', 'ت', 'ن', 'ة', 'ه', 'ها', 'ك',\n 'ي', 'هما', 'كما', 'نا', 'كم', 'هم', 'هن', 'كن', 'ا', 'ان', 'ين', 'ون',\n 'وا', 'ات', 'ت', 'ن', 'ة']\nother_tokens = ['[رابط]', '[مستخدم]', '[بريد]']\nprefix_symbols = [(x + '+') for x in prefix_list]\nsuffix_symblos = [('+' + x) for x in suffix_list]\nnever_split_tokens = list(set(prefix_symbols + suffix_symblos + other_tokens))\nurl_regexes = [\n 
'(http(s)?:\\\\/\\\\/.)?(www\\\\.)?[-a-zA-Z0-9@:%._\\\\+~#=]{2,256}\\\\.[a-z]{2,6}\\\\b([-a-zA-Z0-9@:%_\\\\+.~#?&//=]*)'\n , '@(https?|ftp)://(-\\\\.)?([^\\\\s/?\\\\.#-]+\\\\.?)+(/[^\\\\s]*)?$@iS',\n 'http[s]?://[a-zA-Z0-9_\\\\-./~\\\\?=%&]+', 'www[a-zA-Z0-9_\\\\-?=%&/.~]+',\n '[a-zA-Z]+\\\\.com', '(?=http)[^\\\\s]+', '(?=www)[^\\\\s]+', '://']\nuser_mention_regex = '@[\\\\w\\\\d]+'\nemail_regexes = ['[\\\\w-]+@([\\\\w-]+\\\\.)+[\\\\w-]+', '\\\\S+@\\\\S+']\nredundant_punct_pattern = (\n '([!\\\\\"#\\\\$%\\\\\\'\\\\(\\\\)\\\\*\\\\+,\\\\.:;\\\\-<=·>?@\\\\[\\\\\\\\\\\\]\\\\^_ـ`{\\\\|}~—٪’،؟`୍“؛”ۚ【»؛\\\\s+«–…‘]{2,})'\n )\nregex_tatweel = '(\\\\D)\\\\1{2,}'\nrejected_chars_regex = (\n '[^0-9\\\\u0621-\\\\u063A\\\\u0640-\\\\u066C\\\\u0671-\\\\u0674a-zA-Z\\\\[\\\\]!\\\\\"#\\\\$%\\\\\\'\\\\(\\\\)\\\\*\\\\+,\\\\.:;\\\\-<=·>?@\\\\[\\\\\\\\\\\\]\\\\^_ـ`{\\\\|}~—٪’،؟`୍“؛”ۚ»؛\\\\s+«–…‘]'\n )\nregex_url_step1 = '(?=http)[^\\\\s]+'\nregex_url_step2 = '(?=www)[^\\\\s]+'\nregex_url = (\n '(http(s)?:\\\\/\\\\/.)?(www\\\\.)?[-a-zA-Z0-9@:%._\\\\+~#=]{2,256}\\\\.[a-z]{2,6}\\\\b([-a-zA-Z0-9@:%_\\\\+.~#?&//=]*)'\n )\nregex_mention = '@[\\\\w\\\\d]+'\nregex_email = '\\\\S+@\\\\S+'\nchars_regex = (\n '0-9\\\\u0621-\\\\u063A\\\\u0640-\\\\u066C\\\\u0671-\\\\u0674a-zA-Z\\\\[\\\\]!\\\\\"#\\\\$%\\\\\\'\\\\(\\\\)\\\\*\\\\+,\\\\.:;\\\\-<=·>?@\\\\[\\\\\\\\\\\\]\\\\^_ـ`{\\\\|}~—٪’،؟`୍“؛”ۚ»؛\\\\s+«–…‘'\n )\nwhite_spaced_double_quotation_regex = '\\\\\"\\\\s+([^\"]+)\\\\s+\\\\\"'\nwhite_spaced_single_quotation_regex = \"\\\\'\\\\s+([^']+)\\\\s+\\\\'\"\nwhite_spaced_back_quotation_regex = '\\\\`\\\\s+([^`]+)\\\\s+\\\\`'\nwhite_spaced_em_dash = '\\\\—\\\\s+([^—]+)\\\\s+\\\\—'\nleft_spaced_chars = ' ([\\\\]!#\\\\$%\\\\),\\\\.:;\\\\?}٪’،؟”؛…»·])'\nright_spaced_chars = '([\\\\[\\\\(\\\\{“«‘*\\\\~]) '\nleft_and_right_spaced_chars = ' ([\\\\+\\\\-\\\\<\\\\=\\\\>\\\\@\\\\\\\\\\\\^\\\\_\\\\|\\\\–]) '\n",
"step-4": "import html\nimport logging\nimport re\nimport pyarabic.araby as araby\nACCEPTED_MODELS = ['bert-base-arabertv01', 'bert-base-arabert',\n 'bert-base-arabertv02', 'bert-base-arabertv2', 'bert-large-arabertv02',\n 'bert-large-arabertv2', 'araelectra-base',\n 'araelectra-base-discriminator', 'araelectra-base-generator',\n 'aragpt2-base', 'aragpt2-medium', 'aragpt2-large', 'aragpt2-mega']\nSEGMENTED_MODELS = ['bert-base-arabert', 'bert-base-arabertv2',\n 'bert-large-arabertv2']\n\n\nclass ArbertmoPreprocessor:\n \"\"\"\n A Preprocessor class that cleans and preprocesses text for all models in the AraBERT repo.\n It also can unprocess the text ouput of the generated text\n\n Args:\n\n model_name (:obj:`str`): model name from the HuggingFace Models page without the aubmindlab tag. Defaults to \"bert-base-arabertv02\". Current accepted models are:\n\n - :obj:`\"bert-base-arabertv01\"`: No farasa segmentation.\n - :obj:`\"bert-base-arabert\"`: with farasa segmentation.\n - :obj:`\"bert-base-arabertv02\"`: No farasas egmentation.\n - :obj:`\"bert-base-arabertv2\"`: with farasa segmentation.\n - :obj:`\"bert-large-arabertv02\"`: No farasas egmentation.\n - :obj:`\"bert-large-arabertv2\"`: with farasa segmentation.\n - :obj:`\"araelectra-base\"`: No farasa segmentation.\n - :obj:`\"araelectra-base-discriminator\"`: No farasa segmentation.\n - :obj:`\"araelectra-base-generator\"`: No farasa segmentation.\n - :obj:`\"aragpt2-base\"`: No farasa segmentation.\n - :obj:`\"aragpt2-medium\"`: No farasa segmentation.\n - :obj:`\"aragpt2-large\"`: No farasa segmentation.\n - :obj:`\"aragpt2-mega\"`: No farasa segmentation.\n\n keep_emojis(:obj: `bool`): don't remove emojis while preprocessing. Defaults to False\n\n remove_html_markup(:obj: `bool`): Whether to remove html artfacts, should be set to False when preprocessing TyDi QA. Defaults to True\n\n replace_urls_emails_mentions(:obj: `bool`): Whether to replace email urls and mentions by special tokens. Defaults to True\n\n strip_tashkeel(:obj: `bool`): remove diacritics (FATHATAN, DAMMATAN, KASRATAN, FATHA, DAMMA, KASRA, SUKUN, SHADDA)\n\n strip_tatweel(:obj: `bool`): remove tatweel '\\\\u0640'\n\n insert_white_spaces(:obj: `bool`): insert whitespace before and after all non Arabic digits or English digits or Arabic and English Alphabet or the 2 brackets, then inserts whitespace between words and numbers or numbers and words\n\n remove_elongation(:obj: `bool`): replace repetition of more than 2 non-digit character with 2 of this character\n\n\n Returns:\n\n ArBERTMoPreprocessor: the preprocessor class\n\n Example:\n\n from preprocess import ArBERTMoPreprocessor\n\n arabert_prep = ArBERTMoPreprocessor(\"aubmindlab/bert-base-arabertv2\")\n\n arabert_prep.preprocess(\"SOME ARABIC TEXT\")\n \"\"\"\n\n def __init__(self, model_name, keep_emojis=False, remove_html_markup=\n True, replace_urls_emails_mentions=True, strip_tashkeel=True,\n strip_tatweel=True, insert_white_spaces=True, remove_elongation=True):\n \"\"\"\n model_name (:obj:`str`): model name from the HuggingFace Models page without the aubmindlab tag. Defaults to \"bert-base-arabertv02\". 
Current accepted models are:\n\n - :obj:`\"bert-base-arabertv01\"`: No farasa segmentation.\n - :obj:`\"bert-base-arabert\"`: with farasa segmentation.\n - :obj:`\"bert-base-arabertv02\"`: No farasas egmentation.\n - :obj:`\"bert-base-arabertv2\"`: with farasa segmentation.\n - :obj:`\"bert-large-arabertv02\"`: No farasas egmentation.\n - :obj:`\"bert-large-arabertv2\"`: with farasa segmentation.\n - :obj:`\"araelectra-base\"`: No farasa segmentation.\n - :obj:`\"araelectra-base-discriminator\"`: No farasa segmentation.\n - :obj:`\"araelectra-base-generator\"`: No farasa segmentation.\n - :obj:`\"aragpt2-base\"`: No farasa segmentation.\n - :obj:`\"aragpt2-medium\"`: No farasa segmentation.\n - :obj:`\"aragpt2-large\"`: No farasa segmentation.\n - :obj:`\"aragpt2-mega\"`: No farasa segmentation.\n\n keep_emojis(:obj: `bool`): don't remove emojis while preprocessing. Defaults to False\n\n remove_html_markup(:obj: `bool`): Whether to remove html artfacts, should be set to False when preprocessing TyDi QA. Defaults to True\n\n replace_urls_emails_mentions(:obj: `bool`): Whether to replace email urls and mentions by special tokens. Defaults to True\n\n strip_tashkeel(:obj: `bool`): remove diacritics (FATHATAN, DAMMATAN, KASRATAN, FATHA, DAMMA, KASRA, SUKUN, SHADDA)\n\n strip_tatweel(:obj: `bool`): remove tatweel '\\\\u0640'\n\n insert_white_spaces(:obj: `bool`): insert whitespace before and after all non Arabic digits or English digits or Arabic and English Alphabet or the 2 brackets, then inserts whitespace between words and numbers or numbers and words\n\n remove_elongation(:obj: `bool`): replace repetition of more than 2 non-digit character with 2 of this character\n\n \"\"\"\n model_name = model_name.replace('aubmindlab/', '')\n if model_name not in ACCEPTED_MODELS:\n logging.warning(\n \"Model provided is not in the accepted model list. Assuming you don't want Farasa Segmentation\"\n )\n self.model_name = 'bert-base-arabertv02'\n else:\n self.model_name = model_name\n if self.model_name in SEGMENTED_MODELS:\n logging.info(\n 'Selected Model requires pre-segmentation, Initializing FarasaSegmenter'\n )\n try:\n from farasa.segmenter import FarasaSegmenter\n self.farasa_segmenter = FarasaSegmenter(interactive=True)\n except:\n logging.warning(\n 'farasapy is not installed, you want be able to process text for AraBERTv1 and v2. 
Install it using: pip install farasapy'\n )\n else:\n logging.info(\n \"Selected Model doesn't require pre-segmentation, skipping FarasaSegmenter initialization\"\n )\n self.keep_emojis = keep_emojis\n if self.keep_emojis:\n import emoji\n self.emoji = emoji\n if self.model_name in SEGMENTED_MODELS:\n logging.warning(\n 'Keeping tweets with Farasa Segmentation is 10 times slower'\n )\n self.remove_html_markup = remove_html_markup\n self.replace_urls_emails_mentions = replace_urls_emails_mentions\n self.strip_tashkeel = strip_tashkeel\n self.strip_tatweel = strip_tatweel\n self.insert_white_spaces = insert_white_spaces\n self.remove_elongation = remove_elongation\n\n def preprocess(self, text):\n \"\"\"\n Preprocess takes an input text line an applies the same preprocessing used in AraBERT\n pretraining\n\n Args:\n\n text (:obj:`str`): inout text string\n\n Returns:\n\n string: A preprocessed string depending on which model was selected\n \"\"\"\n if self.model_name == 'bert-base-arabert':\n return self._old_preprocess(text, do_farasa_tokenization=True)\n if self.model_name == 'bert-base-arabertv01':\n return self._old_preprocess(text, do_farasa_tokenization=False)\n text = str(text)\n text = html.unescape(text)\n if self.strip_tashkeel:\n text = araby.strip_tashkeel(text)\n if self.strip_tatweel:\n text = araby.strip_tatweel(text)\n if self.replace_urls_emails_mentions:\n for reg in url_regexes:\n text = re.sub(reg, ' [رابط] ', text)\n for reg in email_regexes:\n text = re.sub(reg, ' [بريد] ', text)\n text = re.sub(user_mention_regex, ' [مستخدم] ', text)\n if self.remove_html_markup:\n text = re.sub('<br />', ' ', text)\n text = re.sub('</?[^>]+>', ' ', text)\n if self.remove_elongation:\n text = self._remove_elongation(text)\n if self.insert_white_spaces:\n text = re.sub('([^0-9ء-غف-ي٠-٩a-zA-Z\\\\[\\\\]])', ' \\\\1 ', text)\n text = re.sub('(\\\\d+)([ء-غف-ي٠-٬]+)', ' \\\\1 \\\\2 ', text)\n text = re.sub('([ء-غف-ي٠-٬]+)(\\\\d+)', ' \\\\1 \\\\2 ', text)\n if self.keep_emojis:\n emoji_regex = ''.join(list(self.emoji.UNICODE_EMOJI['en'].keys()))\n rejected_chars_regex2 = '[^%s%s]' % (chars_regex, emoji_regex)\n text = re.sub(rejected_chars_regex2, ' ', text)\n else:\n text = re.sub(rejected_chars_regex, ' ', text)\n text = ' '.join(text.replace('️', '').split())\n if (self.model_name == 'bert-base-arabertv2' or self.model_name ==\n 'bert-large-arabertv2'):\n if self.keep_emojis:\n new_text = []\n for word in text.split():\n if word in list(self.emoji.UNICODE_EMOJI['en'].keys()):\n new_text.append(word)\n else:\n new_text.append(self.farasa_segmenter.segment(word))\n text = ' '.join(new_text)\n else:\n text = self.farasa_segmenter.segment(text)\n return self._farasa_segment(text)\n return text\n\n def unpreprocess(self, text, desegment=True):\n \"\"\"Re-formats the text to a classic format where punctuations, brackets, parenthesis are not seperated by whitespaces.\n The objective is to make the generated text of any model appear natural and not preprocessed.\n\n Args:\n text (str): input text to be un-preprocessed\n desegment (bool, optional): [whether or not to remove farasa pre-segmentation before]. 
Defaults to True.\n\n Returns:\n str: The unpreprocessed (and possibly Farasa-desegmented) text.\n \"\"\"\n if self.model_name in SEGMENTED_MODELS and desegment:\n text = self.desegment(text)\n text = re.sub(white_spaced_double_quotation_regex, '\"' + '\\\\1' +\n '\"', text)\n text = re.sub(white_spaced_single_quotation_regex, \"'\" + '\\\\1' +\n \"'\", text)\n text = re.sub(white_spaced_back_quotation_regex, '\\\\`' + '\\\\1' +\n '\\\\`', text)\n text = re.sub(white_spaced_back_quotation_regex, '\\\\—' + '\\\\1' +\n '\\\\—', text)\n text = text.replace('.', ' . ')\n text = ' '.join(text.split())\n text = re.sub('(\\\\d+) \\\\. (\\\\d+)', '\\\\1.\\\\2', text)\n text = re.sub('(\\\\d+) \\\\, (\\\\d+)', '\\\\1,\\\\2', text)\n text = re.sub(left_and_right_spaced_chars, '\\\\1', text)\n text = re.sub(left_spaced_chars, '\\\\1', text)\n text = re.sub(right_spaced_chars, '\\\\1', text)\n return text\n\n def desegment(self, text):\n \"\"\"\n Use this function if sentence tokenization was done using\n `from arabert.preprocess_arabert import preprocess` with Farasa enabled\n AraBERT segmentation using Farasa adds a space after the '+' for prefixes,\n and after before the '+' for suffixes\n\n Example:\n >>> desegment('ال+ دراس +ات')\n الدراسات\n \"\"\"\n text = text.replace('+ ', '+')\n text = text.replace(' +', '+')\n text = ' '.join([self._desegmentword(word) for word in text.split(' ')]\n )\n return text\n\n def _desegmentword(self, orig_word: str) ->str:\n \"\"\"\n Word segmentor that takes a Farasa Segmented Word and removes the '+' signs\n\n Example:\n >>> _desegmentword(\"ال+يومي+ة\")\n اليومية\n \"\"\"\n word = orig_word.replace('ل+ال+', 'لل')\n if 'ال+ال' not in orig_word:\n word = word.replace('ل+ال', 'لل')\n word = word.replace('+', '')\n word = word.replace('للل', 'لل')\n return word\n\n def _old_preprocess(self, text, do_farasa_tokenization):\n \"\"\"\n AraBERTv1 preprocessing Function\n \"\"\"\n text = str(text)\n if self.strip_tashkeel:\n text = araby.strip_tashkeel(text)\n text = re.sub('\\\\d+\\\\/[ء-ي]+\\\\/\\\\d+\\\\]', '', text)\n text = re.sub('ـ', '', text)\n text = re.sub('[«»]', ' \" ', text)\n if self.replace_urls_emails_mentions:\n text = re.sub(regex_url_step1, '[رابط]', text)\n text = re.sub(regex_url_step2, '[رابط]', text)\n text = re.sub(regex_url, '[رابط]', text)\n text = re.sub(regex_email, '[بريد]', text)\n text = re.sub(regex_mention, '[مستخدم]', text)\n text = re.sub('…', '\\\\.', text).strip()\n text = self._remove_redundant_punct(text)\n if self.replace_urls_emails_mentions:\n text = re.sub('\\\\[ رابط \\\\]|\\\\[ رابط\\\\]|\\\\[رابط \\\\]',\n ' [رابط] ', text)\n text = re.sub('\\\\[ بريد \\\\]|\\\\[ بريد\\\\]|\\\\[بريد \\\\]',\n ' [بريد] ', text)\n text = re.sub('\\\\[ مستخدم \\\\]|\\\\[ مستخدم\\\\]|\\\\[مستخدم \\\\]',\n ' [مستخدم] ', text)\n if self.remove_elongation:\n text = self._remove_elongation(text)\n if self.insert_white_spaces:\n text = re.sub('([^0-9ء-غف-٩ٱ-ٳa-zA-Z\\\\[\\\\]])', ' \\\\1 ', text)\n if do_farasa_tokenization:\n text = self._tokenize_arabic_words_farasa(text)\n return text.strip()\n\n def _farasa_segment(self, text):\n line_farasa = text.split()\n segmented_line = []\n for index, word in enumerate(line_farasa):\n if word in ['[', ']']:\n continue\n if word in ['رابط', 'بريد', 'مستخدم'] and line_farasa[index - 1\n ] in ['[', ']']:\n segmented_line.append('[' + word + ']')\n continue\n if '+' not in word:\n segmented_line.append(word)\n continue\n segmented_word = self._split_farasa_output(word)\n segmented_line.extend(segmented_word)\n 
return ' '.join(segmented_line)\n\n def _split_farasa_output(self, word):\n segmented_word = []\n temp_token = ''\n for i, c in enumerate(word):\n if c == '+':\n if temp_token == 'ك':\n if i == 1:\n segmented_word.append(temp_token + '+')\n temp_token = ''\n elif word[i - 2] == '+':\n if segmented_word[-1][-1] == '+':\n segmented_word.append(temp_token + '+')\n temp_token = ''\n else:\n segmented_word.append('+' + temp_token)\n temp_token = ''\n elif temp_token in prefix_list:\n segmented_word.append(temp_token + '+')\n temp_token = ''\n elif temp_token in suffix_list:\n segmented_word.append('+' + temp_token)\n temp_token = ''\n else:\n segmented_word.append(temp_token)\n temp_token = ''\n continue\n temp_token += c\n if temp_token != '':\n if temp_token in suffix_list:\n segmented_word.append('+' + temp_token)\n else:\n segmented_word.append(temp_token)\n return segmented_word\n\n def _tokenize_arabic_words_farasa(self, line_input):\n if self.keep_emojis:\n line_farasa = []\n for word in line_input.split():\n if word in list(self.emoji.UNICODE_EMOJI['en'].keys()):\n line_farasa.append(word)\n else:\n line_farasa.append(self.farasa_segmenter.segment(word))\n else:\n line_farasa = self.farasa_segmenter.segment(line_input).split()\n segmented_line = []\n for index, word in enumerate(line_farasa):\n if word in ['[', ']']:\n continue\n if word in ['رابط', 'بريد', 'مستخدم'] and line_farasa[index - 1\n ] in ['[', ']']:\n segmented_line.append('[' + word + ']')\n continue\n segmented_word = []\n for token in word.split('+'):\n if token in prefix_list:\n segmented_word.append(token + '+')\n elif token in suffix_list:\n segmented_word.append('+' + token)\n else:\n segmented_word.append(token)\n segmented_line.extend(segmented_word)\n return ' '.join(segmented_line)\n\n def _remove_elongation(self, text):\n \"\"\"\n :param text: the input text to remove elongation\n :return: delongated text\n \"\"\"\n for index_ in range(len(re.findall(regex_tatweel, text))):\n elongation = re.search(regex_tatweel, text)\n if elongation:\n elongation_pattern = elongation.group()\n elongation_replacement = elongation_pattern[0]\n elongation_pattern = re.escape(elongation_pattern)\n text = re.sub(elongation_pattern, elongation_replacement,\n text, flags=re.MULTILINE)\n else:\n break\n return text\n\n def _remove_redundant_punct(self, text):\n text_ = text\n result = re.search(redundant_punct_pattern, text)\n dif = 0\n while result:\n sub = result.group()\n sub = sorted(set(sub), key=sub.index)\n sub = ' ' + ''.join(list(sub)) + ' '\n text = ''.join((text[:result.span()[0] + dif], sub, text[result\n .span()[1] + dif:]))\n text_ = ''.join((text_[:result.span()[0]], text_[result.span()[\n 1]:])).strip()\n dif = abs(len(text) - len(text_))\n result = re.search(redundant_punct_pattern, text_)\n text = re.sub('\\\\s+', ' ', text)\n return text.strip()\n\n\nprefix_list = ['ال', 'و', 'ف', 'ب', 'ك', 'ل', 'لل', 'ال', 'و', 'ف', 'ب',\n 'ك', 'ل', 'لل', 'س']\nsuffix_list = ['ه', 'ها', 'ك', 'ي', 'هما', 'كما', 'نا', 'كم', 'هم', 'هن',\n 'كن', 'ا', 'ان', 'ين', 'ون', 'وا', 'ات', 'ت', 'ن', 'ة', 'ه', 'ها', 'ك',\n 'ي', 'هما', 'كما', 'نا', 'كم', 'هم', 'هن', 'كن', 'ا', 'ان', 'ين', 'ون',\n 'وا', 'ات', 'ت', 'ن', 'ة']\nother_tokens = ['[رابط]', '[مستخدم]', '[بريد]']\nprefix_symbols = [(x + '+') for x in prefix_list]\nsuffix_symblos = [('+' + x) for x in suffix_list]\nnever_split_tokens = list(set(prefix_symbols + suffix_symblos + other_tokens))\nurl_regexes = [\n 
'(http(s)?:\\\\/\\\\/.)?(www\\\\.)?[-a-zA-Z0-9@:%._\\\\+~#=]{2,256}\\\\.[a-z]{2,6}\\\\b([-a-zA-Z0-9@:%_\\\\+.~#?&//=]*)'\n , '@(https?|ftp)://(-\\\\.)?([^\\\\s/?\\\\.#-]+\\\\.?)+(/[^\\\\s]*)?$@iS',\n 'http[s]?://[a-zA-Z0-9_\\\\-./~\\\\?=%&]+', 'www[a-zA-Z0-9_\\\\-?=%&/.~]+',\n '[a-zA-Z]+\\\\.com', '(?=http)[^\\\\s]+', '(?=www)[^\\\\s]+', '://']\nuser_mention_regex = '@[\\\\w\\\\d]+'\nemail_regexes = ['[\\\\w-]+@([\\\\w-]+\\\\.)+[\\\\w-]+', '\\\\S+@\\\\S+']\nredundant_punct_pattern = (\n '([!\\\\\"#\\\\$%\\\\\\'\\\\(\\\\)\\\\*\\\\+,\\\\.:;\\\\-<=·>?@\\\\[\\\\\\\\\\\\]\\\\^_ـ`{\\\\|}~—٪’،؟`୍“؛”ۚ【»؛\\\\s+«–…‘]{2,})'\n )\nregex_tatweel = '(\\\\D)\\\\1{2,}'\nrejected_chars_regex = (\n '[^0-9\\\\u0621-\\\\u063A\\\\u0640-\\\\u066C\\\\u0671-\\\\u0674a-zA-Z\\\\[\\\\]!\\\\\"#\\\\$%\\\\\\'\\\\(\\\\)\\\\*\\\\+,\\\\.:;\\\\-<=·>?@\\\\[\\\\\\\\\\\\]\\\\^_ـ`{\\\\|}~—٪’،؟`୍“؛”ۚ»؛\\\\s+«–…‘]'\n )\nregex_url_step1 = '(?=http)[^\\\\s]+'\nregex_url_step2 = '(?=www)[^\\\\s]+'\nregex_url = (\n '(http(s)?:\\\\/\\\\/.)?(www\\\\.)?[-a-zA-Z0-9@:%._\\\\+~#=]{2,256}\\\\.[a-z]{2,6}\\\\b([-a-zA-Z0-9@:%_\\\\+.~#?&//=]*)'\n )\nregex_mention = '@[\\\\w\\\\d]+'\nregex_email = '\\\\S+@\\\\S+'\nchars_regex = (\n '0-9\\\\u0621-\\\\u063A\\\\u0640-\\\\u066C\\\\u0671-\\\\u0674a-zA-Z\\\\[\\\\]!\\\\\"#\\\\$%\\\\\\'\\\\(\\\\)\\\\*\\\\+,\\\\.:;\\\\-<=·>?@\\\\[\\\\\\\\\\\\]\\\\^_ـ`{\\\\|}~—٪’،؟`୍“؛”ۚ»؛\\\\s+«–…‘'\n )\nwhite_spaced_double_quotation_regex = '\\\\\"\\\\s+([^\"]+)\\\\s+\\\\\"'\nwhite_spaced_single_quotation_regex = \"\\\\'\\\\s+([^']+)\\\\s+\\\\'\"\nwhite_spaced_back_quotation_regex = '\\\\`\\\\s+([^`]+)\\\\s+\\\\`'\nwhite_spaced_em_dash = '\\\\—\\\\s+([^—]+)\\\\s+\\\\—'\nleft_spaced_chars = ' ([\\\\]!#\\\\$%\\\\),\\\\.:;\\\\?}٪’،؟”؛…»·])'\nright_spaced_chars = '([\\\\[\\\\(\\\\{“«‘*\\\\~]) '\nleft_and_right_spaced_chars = ' ([\\\\+\\\\-\\\\<\\\\=\\\\>\\\\@\\\\\\\\\\\\^\\\\_\\\\|\\\\–]) '\n",
"step-5": "import html\nimport logging\nimport re\n\nimport pyarabic.araby as araby\n\nACCEPTED_MODELS = [\n \"bert-base-arabertv01\",\n \"bert-base-arabert\",\n \"bert-base-arabertv02\",\n \"bert-base-arabertv2\",\n \"bert-large-arabertv02\",\n \"bert-large-arabertv2\",\n \"araelectra-base\",\n \"araelectra-base-discriminator\",\n \"araelectra-base-generator\",\n \"aragpt2-base\",\n \"aragpt2-medium\",\n \"aragpt2-large\",\n \"aragpt2-mega\",\n]\n\nSEGMENTED_MODELS = [\n \"bert-base-arabert\",\n \"bert-base-arabertv2\",\n \"bert-large-arabertv2\",\n]\n\n\nclass ArbertmoPreprocessor:\n \"\"\"\n A Preprocessor class that cleans and preprocesses text for all models in the AraBERT repo.\n It also can unprocess the text ouput of the generated text\n\n Args:\n\n model_name (:obj:`str`): model name from the HuggingFace Models page without the aubmindlab tag. Defaults to \"bert-base-arabertv02\". Current accepted models are:\n\n - :obj:`\"bert-base-arabertv01\"`: No farasa segmentation.\n - :obj:`\"bert-base-arabert\"`: with farasa segmentation.\n - :obj:`\"bert-base-arabertv02\"`: No farasas egmentation.\n - :obj:`\"bert-base-arabertv2\"`: with farasa segmentation.\n - :obj:`\"bert-large-arabertv02\"`: No farasas egmentation.\n - :obj:`\"bert-large-arabertv2\"`: with farasa segmentation.\n - :obj:`\"araelectra-base\"`: No farasa segmentation.\n - :obj:`\"araelectra-base-discriminator\"`: No farasa segmentation.\n - :obj:`\"araelectra-base-generator\"`: No farasa segmentation.\n - :obj:`\"aragpt2-base\"`: No farasa segmentation.\n - :obj:`\"aragpt2-medium\"`: No farasa segmentation.\n - :obj:`\"aragpt2-large\"`: No farasa segmentation.\n - :obj:`\"aragpt2-mega\"`: No farasa segmentation.\n\n keep_emojis(:obj: `bool`): don't remove emojis while preprocessing. Defaults to False\n\n remove_html_markup(:obj: `bool`): Whether to remove html artfacts, should be set to False when preprocessing TyDi QA. Defaults to True\n\n replace_urls_emails_mentions(:obj: `bool`): Whether to replace email urls and mentions by special tokens. Defaults to True\n\n strip_tashkeel(:obj: `bool`): remove diacritics (FATHATAN, DAMMATAN, KASRATAN, FATHA, DAMMA, KASRA, SUKUN, SHADDA)\n\n strip_tatweel(:obj: `bool`): remove tatweel '\\\\u0640'\n\n insert_white_spaces(:obj: `bool`): insert whitespace before and after all non Arabic digits or English digits or Arabic and English Alphabet or the 2 brackets, then inserts whitespace between words and numbers or numbers and words\n\n remove_elongation(:obj: `bool`): replace repetition of more than 2 non-digit character with 2 of this character\n\n\n Returns:\n\n ArBERTMoPreprocessor: the preprocessor class\n\n Example:\n\n from preprocess import ArBERTMoPreprocessor\n\n arabert_prep = ArBERTMoPreprocessor(\"aubmindlab/bert-base-arabertv2\")\n\n arabert_prep.preprocess(\"SOME ARABIC TEXT\")\n \"\"\"\n\n def __init__(\n self,\n model_name,\n keep_emojis=False,\n remove_html_markup=True,\n replace_urls_emails_mentions=True,\n strip_tashkeel=True,\n strip_tatweel=True,\n insert_white_spaces=True,\n remove_elongation=True,\n ):\n \"\"\"\n model_name (:obj:`str`): model name from the HuggingFace Models page without the aubmindlab tag. Defaults to \"bert-base-arabertv02\". 
Current accepted models are:\n\n - :obj:`\"bert-base-arabertv01\"`: No farasa segmentation.\n - :obj:`\"bert-base-arabert\"`: with farasa segmentation.\n - :obj:`\"bert-base-arabertv02\"`: No farasas egmentation.\n - :obj:`\"bert-base-arabertv2\"`: with farasa segmentation.\n - :obj:`\"bert-large-arabertv02\"`: No farasas egmentation.\n - :obj:`\"bert-large-arabertv2\"`: with farasa segmentation.\n - :obj:`\"araelectra-base\"`: No farasa segmentation.\n - :obj:`\"araelectra-base-discriminator\"`: No farasa segmentation.\n - :obj:`\"araelectra-base-generator\"`: No farasa segmentation.\n - :obj:`\"aragpt2-base\"`: No farasa segmentation.\n - :obj:`\"aragpt2-medium\"`: No farasa segmentation.\n - :obj:`\"aragpt2-large\"`: No farasa segmentation.\n - :obj:`\"aragpt2-mega\"`: No farasa segmentation.\n\n keep_emojis(:obj: `bool`): don't remove emojis while preprocessing. Defaults to False\n\n remove_html_markup(:obj: `bool`): Whether to remove html artfacts, should be set to False when preprocessing TyDi QA. Defaults to True\n\n replace_urls_emails_mentions(:obj: `bool`): Whether to replace email urls and mentions by special tokens. Defaults to True\n\n strip_tashkeel(:obj: `bool`): remove diacritics (FATHATAN, DAMMATAN, KASRATAN, FATHA, DAMMA, KASRA, SUKUN, SHADDA)\n\n strip_tatweel(:obj: `bool`): remove tatweel '\\\\u0640'\n\n insert_white_spaces(:obj: `bool`): insert whitespace before and after all non Arabic digits or English digits or Arabic and English Alphabet or the 2 brackets, then inserts whitespace between words and numbers or numbers and words\n\n remove_elongation(:obj: `bool`): replace repetition of more than 2 non-digit character with 2 of this character\n\n \"\"\"\n model_name = model_name.replace(\"aubmindlab/\", \"\")\n\n if model_name not in ACCEPTED_MODELS:\n logging.warning(\n \"Model provided is not in the accepted model list. Assuming you don't want Farasa Segmentation\"\n )\n self.model_name = \"bert-base-arabertv02\"\n else:\n self.model_name = model_name\n\n if self.model_name in SEGMENTED_MODELS:\n logging.info(\n \"Selected Model requires pre-segmentation, Initializing FarasaSegmenter\"\n )\n try:\n from farasa.segmenter import FarasaSegmenter\n\n self.farasa_segmenter = FarasaSegmenter(interactive=True)\n except:\n logging.warning(\n \"farasapy is not installed, you want be able to process text for AraBERTv1 and v2. 
Install it using: pip install farasapy\"\n )\n else:\n logging.info(\n \"Selected Model doesn't require pre-segmentation, skipping FarasaSegmenter initialization\"\n )\n\n self.keep_emojis = keep_emojis\n if self.keep_emojis:\n import emoji\n\n self.emoji = emoji\n if self.model_name in SEGMENTED_MODELS:\n logging.warning(\n \"Keeping tweets with Farasa Segmentation is 10 times slower\"\n )\n\n self.remove_html_markup = remove_html_markup\n self.replace_urls_emails_mentions = replace_urls_emails_mentions\n self.strip_tashkeel = strip_tashkeel\n self.strip_tatweel = strip_tatweel\n self.insert_white_spaces = insert_white_spaces\n self.remove_elongation = remove_elongation\n\n def preprocess(self, text):\n \"\"\"\n Preprocess takes an input text line an applies the same preprocessing used in AraBERT\n pretraining\n\n Args:\n\n text (:obj:`str`): inout text string\n\n Returns:\n\n string: A preprocessed string depending on which model was selected\n \"\"\"\n if self.model_name == \"bert-base-arabert\":\n return self._old_preprocess(\n text,\n do_farasa_tokenization=True,\n )\n\n if self.model_name == \"bert-base-arabertv01\":\n return self._old_preprocess(text, do_farasa_tokenization=False)\n\n text = str(text)\n text = html.unescape(text)\n if self.strip_tashkeel:\n text = araby.strip_tashkeel(text)\n if self.strip_tatweel:\n text = araby.strip_tatweel(text)\n\n if self.replace_urls_emails_mentions:\n # replace all possible URLs\n for reg in url_regexes:\n text = re.sub(reg, \" [رابط] \", text)\n # REplace Emails with [بريد]\n for reg in email_regexes:\n text = re.sub(reg, \" [بريد] \", text)\n # replace mentions with [مستخدم]\n text = re.sub(user_mention_regex, \" [مستخدم] \", text)\n\n if self.remove_html_markup:\n # remove html line breaks\n text = re.sub(\"<br />\", \" \", text)\n # remove html markup\n text = re.sub(\"</?[^>]+>\", \" \", text)\n\n # remove repeated characters >2\n if self.remove_elongation:\n text = self._remove_elongation(text)\n\n # insert whitespace before and after all non Arabic digits or English Digits and Alphabet and the 2 brackets\n if self.insert_white_spaces:\n text = re.sub(\n \"([^0-9\\u0621-\\u063A\\u0641-\\u064A\\u0660-\\u0669a-zA-Z\\[\\]])\",\n r\" \\1 \",\n text,\n )\n\n # insert whitespace between words and numbers or numbers and words\n text = re.sub(\n \"(\\d+)([\\u0621-\\u063A\\u0641-\\u064A\\u0660-\\u066C]+)\", r\" \\1 \\2 \", text\n )\n text = re.sub(\n \"([\\u0621-\\u063A\\u0641-\\u064A\\u0660-\\u066C]+)(\\d+)\", r\" \\1 \\2 \", text\n )\n\n # remove unwanted characters\n if self.keep_emojis:\n emoji_regex = \"\".join(list(self.emoji.UNICODE_EMOJI[\"en\"].keys()))\n rejected_chars_regex2 = \"[^%s%s]\" % (chars_regex, emoji_regex)\n text = re.sub(rejected_chars_regex2, \" \", text)\n else:\n text = re.sub(rejected_chars_regex, \" \", text)\n\n # remove extra spaces\n text = \" \".join(text.replace(\"\\uFE0F\", \"\").split())\n\n if (\n self.model_name == \"bert-base-arabertv2\"\n or self.model_name == \"bert-large-arabertv2\"\n ):\n if self.keep_emojis:\n new_text = []\n for word in text.split():\n if word in list(self.emoji.UNICODE_EMOJI[\"en\"].keys()):\n new_text.append(word)\n else:\n new_text.append(self.farasa_segmenter.segment(word))\n text = \" \".join(new_text)\n else:\n text = self.farasa_segmenter.segment(text)\n return self._farasa_segment(text)\n\n # ALl the other models dont require Farasa Segmentation\n return text\n\n def unpreprocess(self, text, desegment=True):\n \"\"\"Re-formats the text to a classic format where punctuations, 
brackets, parenthesis are not seperated by whitespaces.\n The objective is to make the generated text of any model appear natural and not preprocessed.\n\n Args:\n text (str): input text to be un-preprocessed\n desegment (bool, optional): [whether or not to remove farasa pre-segmentation before]. Defaults to True.\n\n Returns:\n str: The unpreprocessed (and possibly Farasa-desegmented) text.\n \"\"\"\n\n if self.model_name in SEGMENTED_MODELS and desegment:\n text = self.desegment(text)\n\n # removes the spaces around quotation marks ex: i \" ate \" an apple --> i \"ate\" an apple\n # https://stackoverflow.com/a/53436792/5381220\n text = re.sub(white_spaced_double_quotation_regex, '\"' + r\"\\1\" + '\"', text)\n text = re.sub(white_spaced_single_quotation_regex, \"'\" + r\"\\1\" + \"'\", text)\n text = re.sub(white_spaced_back_quotation_regex, \"\\`\" + r\"\\1\" + \"\\`\", text)\n text = re.sub(white_spaced_back_quotation_regex, \"\\—\" + r\"\\1\" + \"\\—\", text)\n\n # during generation, sometimes the models don't put a space after the dot, this handles it\n text = text.replace(\".\", \" . \")\n text = \" \".join(text.split())\n\n # handle decimals\n text = re.sub(r\"(\\d+) \\. (\\d+)\", r\"\\1.\\2\", text)\n text = re.sub(r\"(\\d+) \\, (\\d+)\", r\"\\1,\\2\", text)\n\n text = re.sub(left_and_right_spaced_chars, r\"\\1\", text)\n text = re.sub(left_spaced_chars, r\"\\1\", text)\n text = re.sub(right_spaced_chars, r\"\\1\", text)\n\n return text\n\n def desegment(self, text):\n \"\"\"\n Use this function if sentence tokenization was done using\n `from arabert.preprocess_arabert import preprocess` with Farasa enabled\n AraBERT segmentation using Farasa adds a space after the '+' for prefixes,\n and after before the '+' for suffixes\n\n Example:\n >>> desegment('ال+ دراس +ات')\n الدراسات\n \"\"\"\n text = text.replace(\"+ \", \"+\")\n text = text.replace(\" +\", \"+\")\n text = \" \".join([self._desegmentword(word) for word in text.split(\" \")])\n return text\n\n def _desegmentword(self, orig_word: str) -> str:\n \"\"\"\n Word segmentor that takes a Farasa Segmented Word and removes the '+' signs\n\n Example:\n >>> _desegmentword(\"ال+يومي+ة\")\n اليومية\n \"\"\"\n word = orig_word.replace(\"ل+ال+\", \"لل\")\n if \"ال+ال\" not in orig_word:\n word = word.replace(\"ل+ال\", \"لل\")\n word = word.replace(\"+\", \"\")\n word = word.replace(\"للل\", \"لل\")\n return word\n\n def _old_preprocess(self, text, do_farasa_tokenization):\n \"\"\"\n AraBERTv1 preprocessing Function\n \"\"\"\n text = str(text)\n if self.strip_tashkeel:\n text = araby.strip_tashkeel(text)\n\n text = re.sub(r\"\\d+\\/[ء-ي]+\\/\\d+\\]\", \"\", text)\n text = re.sub(\"ـ\", \"\", text)\n text = re.sub(\"[«»]\", ' \" ', text)\n\n if self.replace_urls_emails_mentions:\n # replace the [رابط] token with space if you want to clean links\n text = re.sub(regex_url_step1, \"[رابط]\", text)\n text = re.sub(regex_url_step2, \"[رابط]\", text)\n text = re.sub(regex_url, \"[رابط]\", text)\n text = re.sub(regex_email, \"[بريد]\", text)\n text = re.sub(regex_mention, \"[مستخدم]\", text)\n text = re.sub(\"…\", r\"\\.\", text).strip()\n text = self._remove_redundant_punct(text)\n\n if self.replace_urls_emails_mentions:\n text = re.sub(r\"\\[ رابط \\]|\\[ رابط\\]|\\[رابط \\]\", \" [رابط] \", text)\n text = re.sub(r\"\\[ بريد \\]|\\[ بريد\\]|\\[بريد \\]\", \" [بريد] \", text)\n text = re.sub(r\"\\[ مستخدم \\]|\\[ مستخدم\\]|\\[مستخدم \\]\", \" [مستخدم] \", text)\n\n if self.remove_elongation:\n text = self._remove_elongation(text)\n\n if 
self.insert_white_spaces:\n text = re.sub(\n \"([^0-9\\u0621-\\u063A\\u0641-\\u0669\\u0671-\\u0673a-zA-Z\\[\\]])\",\n r\" \\1 \",\n text,\n )\n if do_farasa_tokenization:\n text = self._tokenize_arabic_words_farasa(text)\n\n return text.strip()\n\n def _farasa_segment(self, text):\n line_farasa = text.split()\n segmented_line = []\n for index, word in enumerate(line_farasa):\n if word in [\"[\", \"]\"]:\n continue\n if word in [\"رابط\", \"بريد\", \"مستخدم\"] and line_farasa[index - 1] in [\n \"[\",\n \"]\",\n ]:\n segmented_line.append(\"[\" + word + \"]\")\n continue\n if \"+\" not in word:\n segmented_line.append(word)\n continue\n segmented_word = self._split_farasa_output(word)\n segmented_line.extend(segmented_word)\n\n return \" \".join(segmented_line)\n\n def _split_farasa_output(self, word):\n segmented_word = []\n temp_token = \"\"\n for i, c in enumerate(word):\n if c == \"+\":\n # if the token is KAF, it could be a suffix or prefix\n if temp_token == \"ك\":\n # if we are at the second token, then KAF is surely a prefix\n if i == 1:\n segmented_word.append(temp_token + \"+\")\n temp_token = \"\"\n # If the KAF token is between 2 tokens\n elif word[i - 2] == \"+\":\n # if the previous token is prefix, then this KAF must be a prefix\n if segmented_word[-1][-1] == \"+\":\n segmented_word.append(temp_token + \"+\")\n temp_token = \"\"\n # else it is a suffix, this KAF could not be a second suffix\n else:\n segmented_word.append(\"+\" + temp_token)\n temp_token = \"\"\n # if Kaf is at the end, this is handled with the statement after the loop\n elif temp_token in prefix_list:\n segmented_word.append(temp_token + \"+\")\n temp_token = \"\"\n elif temp_token in suffix_list:\n segmented_word.append(\"+\" + temp_token)\n temp_token = \"\"\n else:\n segmented_word.append(temp_token)\n temp_token = \"\"\n continue\n temp_token += c\n if temp_token != \"\":\n if temp_token in suffix_list:\n segmented_word.append(\"+\" + temp_token)\n else:\n segmented_word.append(temp_token)\n return segmented_word\n\n def _tokenize_arabic_words_farasa(self, line_input):\n\n if self.keep_emojis:\n # insert whitespace before and after all non Arabic digits or English Digits and Alphabet and the 2 brackets\n line_farasa = []\n for word in line_input.split():\n if word in list(self.emoji.UNICODE_EMOJI[\"en\"].keys()):\n line_farasa.append(word)\n else:\n line_farasa.append(self.farasa_segmenter.segment(word))\n else:\n line_farasa = self.farasa_segmenter.segment(line_input).split()\n\n segmented_line = []\n for index, word in enumerate(line_farasa):\n if word in [\"[\", \"]\"]:\n continue\n if word in [\"رابط\", \"بريد\", \"مستخدم\"] and line_farasa[index - 1] in [\n \"[\",\n \"]\",\n ]:\n segmented_line.append(\"[\" + word + \"]\")\n continue\n segmented_word = []\n for token in word.split(\"+\"):\n if token in prefix_list:\n segmented_word.append(token + \"+\")\n elif token in suffix_list:\n segmented_word.append(\"+\" + token)\n else:\n segmented_word.append(token)\n segmented_line.extend(segmented_word)\n return \" \".join(segmented_line)\n\n def _remove_elongation(self, text):\n \"\"\"\n :param text: the input text to remove elongation\n :return: delongated text\n \"\"\"\n # loop over the number of times the regex matched the text\n for index_ in range(len(re.findall(regex_tatweel, text))):\n elongation = re.search(regex_tatweel, text)\n if elongation:\n elongation_pattern = elongation.group()\n elongation_replacement = elongation_pattern[0]\n elongation_pattern = re.escape(elongation_pattern)\n text = 
re.sub(\n elongation_pattern, elongation_replacement, text, flags=re.MULTILINE\n )\n else:\n break\n return text\n\n def _remove_redundant_punct(self, text):\n text_ = text\n result = re.search(redundant_punct_pattern, text)\n dif = 0\n while result:\n sub = result.group()\n sub = sorted(set(sub), key=sub.index)\n sub = \" \" + \"\".join(list(sub)) + \" \"\n text = \"\".join(\n (text[: result.span()[0] + dif], sub, text[result.span()[1] + dif :])\n )\n text_ = \"\".join(\n (text_[: result.span()[0]], text_[result.span()[1] :])\n ).strip()\n dif = abs(len(text) - len(text_))\n result = re.search(redundant_punct_pattern, text_)\n text = re.sub(r\"\\s+\", \" \", text)\n return text.strip()\n\n\nprefix_list = [\n \"ال\",\n \"و\",\n \"ف\",\n \"ب\",\n \"ك\",\n \"ل\",\n \"لل\",\n \"\\u0627\\u0644\",\n \"\\u0648\",\n \"\\u0641\",\n \"\\u0628\",\n \"\\u0643\",\n \"\\u0644\",\n \"\\u0644\\u0644\",\n \"س\",\n]\nsuffix_list = [\n \"ه\",\n \"ها\",\n \"ك\",\n \"ي\",\n \"هما\",\n \"كما\",\n \"نا\",\n \"كم\",\n \"هم\",\n \"هن\",\n \"كن\",\n \"ا\",\n \"ان\",\n \"ين\",\n \"ون\",\n \"وا\",\n \"ات\",\n \"ت\",\n \"ن\",\n \"ة\",\n \"\\u0647\",\n \"\\u0647\\u0627\",\n \"\\u0643\",\n \"\\u064a\",\n \"\\u0647\\u0645\\u0627\",\n \"\\u0643\\u0645\\u0627\",\n \"\\u0646\\u0627\",\n \"\\u0643\\u0645\",\n \"\\u0647\\u0645\",\n \"\\u0647\\u0646\",\n \"\\u0643\\u0646\",\n \"\\u0627\",\n \"\\u0627\\u0646\",\n \"\\u064a\\u0646\",\n \"\\u0648\\u0646\",\n \"\\u0648\\u0627\",\n \"\\u0627\\u062a\",\n \"\\u062a\",\n \"\\u0646\",\n \"\\u0629\",\n]\nother_tokens = [\"[رابط]\", \"[مستخدم]\", \"[بريد]\"]\n\n# the never_split list is ussed with the transformers library\nprefix_symbols = [x + \"+\" for x in prefix_list]\nsuffix_symblos = [\"+\" + x for x in suffix_list]\nnever_split_tokens = list(set(prefix_symbols + suffix_symblos + other_tokens))\n\nurl_regexes = [\n r\"(http(s)?:\\/\\/.)?(www\\.)?[-a-zA-Z0-9@:%._\\+~#=]{2,256}\\.[a-z]{2,6}\\b([-a-zA-Z0-9@:%_\\+.~#?&//=]*)\",\n r\"@(https?|ftp)://(-\\.)?([^\\s/?\\.#-]+\\.?)+(/[^\\s]*)?$@iS\",\n r\"http[s]?://[a-zA-Z0-9_\\-./~\\?=%&]+\",\n r\"www[a-zA-Z0-9_\\-?=%&/.~]+\",\n r\"[a-zA-Z]+\\.com\",\n r\"(?=http)[^\\s]+\",\n r\"(?=www)[^\\s]+\",\n r\"://\",\n]\nuser_mention_regex = r\"@[\\w\\d]+\"\nemail_regexes = [r\"[\\w-]+@([\\w-]+\\.)+[\\w-]+\", r\"\\S+@\\S+\"]\nredundant_punct_pattern = (\n r\"([!\\\"#\\$%\\'\\(\\)\\*\\+,\\.:;\\-<=·>?@\\[\\\\\\]\\^_ـ`{\\|}~—٪’،؟`୍“؛”ۚ【»؛\\s+«–…‘]{2,})\"\n)\nregex_tatweel = r\"(\\D)\\1{2,}\"\nrejected_chars_regex = r\"[^0-9\\u0621-\\u063A\\u0640-\\u066C\\u0671-\\u0674a-zA-Z\\[\\]!\\\"#\\$%\\'\\(\\)\\*\\+,\\.:;\\-<=·>?@\\[\\\\\\]\\^_ـ`{\\|}~—٪’،؟`୍“؛”ۚ»؛\\s+«–…‘]\"\n\nregex_url_step1 = r\"(?=http)[^\\s]+\"\nregex_url_step2 = r\"(?=www)[^\\s]+\"\nregex_url = r\"(http(s)?:\\/\\/.)?(www\\.)?[-a-zA-Z0-9@:%._\\+~#=]{2,256}\\.[a-z]{2,6}\\b([-a-zA-Z0-9@:%_\\+.~#?&//=]*)\"\nregex_mention = r\"@[\\w\\d]+\"\nregex_email = r\"\\S+@\\S+\"\n\nchars_regex = r\"0-9\\u0621-\\u063A\\u0640-\\u066C\\u0671-\\u0674a-zA-Z\\[\\]!\\\"#\\$%\\'\\(\\)\\*\\+,\\.:;\\-<=·>?@\\[\\\\\\]\\^_ـ`{\\|}~—٪’،؟`୍“؛”ۚ»؛\\s+«–…‘\"\n\nwhite_spaced_double_quotation_regex = r'\\\"\\s+([^\"]+)\\s+\\\"'\nwhite_spaced_single_quotation_regex = r\"\\'\\s+([^']+)\\s+\\'\"\nwhite_spaced_back_quotation_regex = r\"\\`\\s+([^`]+)\\s+\\`\"\nwhite_spaced_em_dash = r\"\\—\\s+([^—]+)\\s+\\—\"\n\nleft_spaced_chars = r\" ([\\]!#\\$%\\),\\.:;\\?}٪’،؟”؛…»·])\"\nright_spaced_chars = r\"([\\[\\(\\{“«‘*\\~]) \"\nleft_and_right_spaced_chars = r\" ([\\+\\-\\<\\=\\>\\@\\\\\\^\\_\\|\\–]) \"\n",
"step-ids": [
12,
13,
14,
15,
16
]
}
|
[
12,
13,
14,
15,
16
] |
import operator
import theano.tensor as T
from collections import OrderedDict
from lasagne.layers import get_output
from stanza.research import config
from neural import SimpleLasagneModel, NeuralLearner
from vectorizers import SequenceVectorizer, BucketsVectorizer
from neural import OPTIMIZERS, get_named_layers
from listener import LISTENERS, PRIORS as LISTENER_PRIORS
from speaker import SPEAKERS, PRIORS as SPEAKER_PRIORS
parser = config.get_options_parser()
parser.add_argument('--rsa_listeners', type=int, default=1,
help='Number of listeners to use in RSA cooperative nets graph')
parser.add_argument('--rsa_speakers', type=int, default=1,
help='Number of speakers to use in RSA cooperative nets graph')
parser.add_argument('--listener_class', default=['Listener'], choices=LISTENERS.keys(), nargs='+',
help='The name of the listener model to use in the RSA network.')
parser.add_argument('--speaker_class', default=['Speaker'], choices=SPEAKERS.keys(), nargs='+',
help='The name of the speaker model to use in the RSA network.')
parser.add_argument('--eval_agent', type=int, default=0,
help='Index of the agent (listener/speaker) to use as the primary object '
'of evaluation. Whether this agent is a listener or speaker will be '
'inferred from the --listener flag.')
parser.add_argument('--rsa_optimizer', choices=OPTIMIZERS.keys(), default='rmsprop',
help='The optimization (update) algorithm to use for RSA training.')
parser.add_argument('--rsa_learning_rate', type=float, default=0.1,
help='The learning rate to use for RSA training.')
parser.add_argument('--rsa_alpha', type=float, nargs='*', default=[1.0],
help='Weights for the log-likelihood of the dataset according to the '
'listeners. Provide as many values as there are listeners.')
parser.add_argument('--rsa_beta', type=float, nargs='*', default=[1.0],
help='Weights for the log-likelihood of the dataset according to the '
'speakers. Provide as many values as there are speakers.')
parser.add_argument('--rsa_mu', type=float, nargs='*', default=[1.0],
help='Weights for KL(L_j||S_k). Provide values to fill a '
'rsa_listeners x rsa_speakers matrix, in row-major order '
'(i.e. all speakers for first listener, then all speakers for second '
'listener, etc.).')
parser.add_argument('--rsa_nu', type=float, nargs='*', default=[1.0],
help='Weights for KL(S_k||L_j). Provide values to fill a '
'rsa_listeners x rsa_speakers matrix, in row-major order '
'(i.e. all speakers for first listener, then all speakers for second '
'listener, etc.).')
parser.add_argument('--listener_samples', type=int, default=128,
help='Number of samples to draw from the listener per minibatch.')
parser.add_argument('--speaker_samples', type=int, default=128,
help='Number of samples to draw from the speaker per minibatch.')
parser.add_argument('--monitor_sublosses', type=config.boolean, default=False,
help='If `True`, return sub-losses for monitoring and write them to the '
'TensorBoard events file. This will likely increase compilation time.')
parser.add_argument('--monitor_subgrads', type=config.boolean, default=False,
help='If `True`, return sub-gradients for monitoring and write them to the '
'TensorBoard events file. This will likely increase compilation time.')
parser.add_argument('--grad_of_est', type=config.boolean, default=False,
help='If `True`, optimize using the gradient of the estimated loss; '
'otherwise, use the manually-derived estimate of the gradient of '
'the true loss.')
parser.add_argument('--layer_by_layer', type=config.boolean, default=False,
help='If `True`, train RSA agents layer-by-layer (only use the log-likelihood '
'sub-gradients, equivalent to training each agent on data generated from '
'the other agents); otherwise, use the gradient of the full RSA '
'objective.')
parser.add_argument('--listener_sample_smoothed', type=config.boolean, default=False,
help='If `True`, take samples from the smoothed utterance prior; otherwise, '
'sample from the empirical utterance prior.')
parser.add_argument('--speaker_sample_smoothed', type=config.boolean, default=False,
help='If `True`, take samples from the smoothed world prior; otherwise, '
'sample from the empirical world prior.')
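# Example of how the weight options line up with the agents (hypothetical
# invocation; the actual entry-point script name depends on the experiment harness):
#
#   python run_rsa.py --rsa_listeners 2 --rsa_speakers 3 \
#       --rsa_alpha 1.0 1.0 --rsa_beta 1.0 1.0 1.0 \
#       --rsa_mu 1 1 1 0.5 0.5 0.5 --rsa_nu 1 1 1 0.5 0.5 0.5
#
# Here --rsa_alpha gives one weight per listener, --rsa_beta one per speaker,
# and --rsa_mu / --rsa_nu fill the 2x3 listener-by-speaker matrix in row-major
# order: the first three values weight (L0, S0..S2), the last three (L1, S0..S2).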
class AggregatePrior(object):
def __init__(self, listeners, speakers, prior_name='prior_emp'):
self.listeners = listeners
self.speakers = speakers
self.prior_name = prior_name
def train(self, training_instances, listener=False):
for agent in self.listeners:
getattr(agent, self.prior_name).train(training_instances, listener=listener)
for agent in self.speakers:
getattr(agent, self.prior_name).train(training_instances, listener=listener)
def apply(self, input_vars):
assert False, ("AggregatePrior.apply shouldn't be called; "
"only individual model priors are used in RSA coop nets model")
class RSASubModel(SimpleLasagneModel):
'''
A SimpleLasagneModel for a subcomponent of an RSA graph.
'''
def __init__(self, input_vars, target_vars, l_out, loss, optimizer,
learning_rate=0.001, id=None):
super(RSASubModel, self).__init__(input_vars, target_vars, l_out, loss, optimizer,
learning_rate=learning_rate, id=id)
if len(target_vars) != 1:
raise ValueError('target_vars should be a sequence of length 1, instead got %s' %
(target_vars,))
self.target_var = target_vars[0]
def build_sample_vars(self, num_other_agents):
self.sample_inputs_self = [v.type('%s_sample_self' % (v.name,))
for v in self.input_vars]
self.sample_inputs_others = [[v.type('%s_sample_other%d' % (v.name, i))
for v in self.input_vars]
for i in range(num_other_agents)]
t = self.target_var
self.sample_target_self = t.type('%s_sample_self' % (t.name,))
self.sample_target_others = [t.type('%s_sample_other%d' % (t.name, i))
for i in range(num_other_agents)]
self.all_synth_vars = (self.sample_inputs_self +
[self.sample_target_self] +
[v
for o_inputs, o_target in zip(self.sample_inputs_others,
self.sample_target_others)
for v in o_inputs + [o_target]])
def data_to_synth_arrays(self, agent, samples_self, samples_others):
def flatten(arrays):
inputs, targets = arrays
return inputs + targets
return [arr
for i, samples in enumerate([samples_self] + samples_others)
for arr in flatten(agent._data_to_arrays(samples, inverted=(i != 0)))]
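# Note on the synthetic sample variables built above: for a sub-model with input
# variables (x1, ..., xm) and a single target t, all_synth_vars is laid out as
#   [x1_self, ..., xm_self, t_self,
#    x1_other0, ..., xm_other0, t_other0,
#    x1_other1, ..., xm_other1, t_other1, ...]
# and data_to_synth_arrays() returns numpy arrays in the same order, converting
# the other agents' samples through _data_to_arrays(..., inverted=True).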
class RSAGraphModel(SimpleLasagneModel):
def __init__(self, listeners, speakers, eval_agent, id=None):
self.get_options()
self.listeners = listeners
self.speakers = speakers
self.eval_agent = eval_agent
input_vars = ([v for listener in listeners for v in listener.model.input_vars] +
[v for speaker in speakers for v in speaker.model.input_vars])
target_vars = ([listener.model.target_var for listener in listeners] +
[speaker.model.target_var for speaker in speakers])
super(RSAGraphModel, self).__init__(input_vars, target_vars,
l_out=eval_agent.model.l_out, loss=None,
optimizer=OPTIMIZERS[self.options.rsa_optimizer],
learning_rate=self.options.rsa_learning_rate,
id=id)
def params(self):
result = []
for listener in self.listeners:
result.extend(listener.params())
for speaker in self.speakers:
result.extend(speaker.params())
return result
def get_train_loss(self, target_vars, params):
for agent in self.speakers:
agent.model.build_sample_vars(len(self.listeners))
for agent in self.listeners:
agent.model.build_sample_vars(len(self.speakers))
monitored = self.get_est_loss(layer_by_layer=self.options.layer_by_layer)
if self.options.grad_of_est:
est_grad, monitored_grads = self.get_grad_of_est(monitored, params)
else:
est_grad, monitored_grads = self.get_est_grad(
params, layer_by_layer=self.options.layer_by_layer)
monitored.update(monitored_grads)
synth_vars = [v
for agent in self.listeners + self.speakers
for v in agent.model.all_synth_vars]
return monitored, est_grad, synth_vars
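# get_train_loss() therefore returns a triple: `monitored`, an OrderedDict that
# always contains 'loss' (plus optional sub-losses and gradients for TensorBoard);
# `est_grad`, a list of gradient expressions aligned with `params`; and
# `synth_vars`, the extra Theano inputs that the minibatches() generator fills
# with arrays built from the agents' own samples on every batch.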
def get_est_loss(self, layer_by_layer=False):
def kl(agent_p, agent_q, other_idx):
if layer_by_layer:
return agent_q.loss_out(agent_q.model.sample_inputs_others[other_idx],
agent_q.model.sample_target_others[other_idx]).mean()
else:
return (
agent_p.log_joint_emp(agent_p.model.sample_inputs_self,
agent_p.model.sample_target_self) -
agent_q.log_joint_smooth(agent_q.model.sample_inputs_others[other_idx],
agent_q.model.sample_target_others[other_idx])
).mean()
id_tag_log = (self.id + ': ') if self.id else ''
id_tag = (self.id + '/') if self.id else ''
# \alpha * KL(dataset || L) = \alpha * log L(dataset) + C
if self.options.verbosity >= 4:
print(id_tag_log + 'loss: KL(dataset || L)')
alpha_losses = [
('%salpha_%s' % (id_tag, listener.id), alpha * listener.loss_out().mean())
for alpha, listener in zip(self.options.rsa_alpha, self.listeners)
]
# \beta * KL(dataset || S) = \beta * log S(dataset) + C
if self.options.verbosity >= 4:
print(id_tag_log + 'loss: KL(dataset || S)')
beta_losses = [
('%sbeta_%s' % (id_tag, speaker.id), beta * speaker.loss_out().mean())
for beta, speaker in zip(self.options.rsa_beta, self.speakers)
]
# \mu * KL(L || S)
if self.options.verbosity >= 4:
print(id_tag_log + 'loss: KL(L || S)')
mu_losses = [
('%smu_%s_%s' % (id_tag, listener.id, speaker.id), mu * kl(listener, speaker, j))
for mu, (listener, j, speaker, k) in zip(self.options.rsa_mu, self.dyads())
]
# \nu * KL(S || L)
if self.options.verbosity >= 4:
print(id_tag_log + 'loss: KL(S || L)')
nu_losses = [
('%snu_%s_%s' % (id_tag, speaker.id, listener.id), nu * kl(speaker, listener, k))
for nu, (listener, j, speaker, k) in zip(self.options.rsa_nu, self.dyads())
]
all_sublosses = alpha_losses + beta_losses + mu_losses + nu_losses
est_loss = t_sum(loss for tag, loss in all_sublosses)
monitored = OrderedDict([('loss', est_loss)])
if self.options.monitor_sublosses:
monitored.update(all_sublosses)
if self.options.monitor_activations:
for agent in self.listeners + self.speakers:
for name, layer in get_named_layers(agent.l_out).iteritems():
monitored['activation/' + name] = get_output(layer)
return monitored
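# Collecting the four families of terms, the estimated objective reported as
# 'loss' is (up to additive constants hidden in the KL estimates):
#   J = sum_j alpha_j KL(data || L_j) + sum_k beta_k KL(data || S_k)
#     + sum_{j,k} mu_jk KL(L_j || S_k) + sum_{j,k} nu_jk KL(S_k || L_j)
# With --layer_by_layer, the mu/nu terms reduce to plain negative log-likelihoods
# of each agent on the other agents' samples.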
def get_est_grad(self, params, layer_by_layer=False):
def mean_weighted_grad(weights, loss):
# Lop to the rescue! Here I was calling T.jacobian and trying to
# broadcast things and elementwise-multiply through the resulting lists,
# when a function already existed to do all of that for me...
return T.Lop(loss, params, weights / T.cast(weights.shape[0], 'float32'),
disconnected_inputs='ignore')
# TODO: control variates?
def mean_grad(loss):
return T.grad(loss.mean(), params, disconnected_inputs='ignore')
id_tag = (self.id + ': ') if self.id else ''
# alpha and beta: train the agents directly against the dataset.
# \alpha_j E_D [-d/d\theta_j log L(c | m; \theta_j)]
if self.options.verbosity >= 4:
print(id_tag + 'grad: alpha')
all_subgrads = [
('grad_alpha/%s' % (listener.id,),
mean_grad(alpha * listener.loss_out()))
for alpha, listener in zip(self.options.rsa_alpha, self.listeners)
]
# \beta_k E_D [-d/d\phi_k log S(m | c; \phi_k)]
if self.options.verbosity >= 4:
print(id_tag + 'grad: beta')
all_subgrads.extend([
('grad_beta/%s' % (speaker.id,),
mean_grad(beta * speaker.loss_out()))
for beta, speaker in zip(self.options.rsa_beta, self.speakers)
])
# The "simple" mu and nu terms: train the agents directly against each other.
# These are still ordinary log-likelihood terms; the complexity comes from
# identifying the right input variables and iterating over the m x n dyads.
# sum_k \nu_jk E_{G_S(\phi_k)} [-d/d\theta_j log L(c | m; \theta_j)]
if self.options.verbosity >= 4:
print(id_tag + 'grad: nu co-training')
all_subgrads.extend([
('grad_nu_co/%s_%s' % (listener.id, speaker.id),
mean_grad(nu * listener.loss_out(listener.model.sample_inputs_others[k],
listener.model.sample_target_others[k])))
for nu, (listener, j, speaker, k) in zip(self.options.rsa_nu, self.dyads())
])
# sum_j \mu_jk E_{G_L(\theta_j)} [-d/d\phi_k log S(m | c; \phi_k)]
if self.options.verbosity >= 4:
print(id_tag + 'grad: mu co-training')
all_subgrads.extend([
('grad_mu_co/%s_%s' % (listener.id, speaker.id),
mean_grad(mu * speaker.loss_out(speaker.model.sample_inputs_others[j],
speaker.model.sample_target_others[j])))
for mu, (listener, j, speaker, k) in zip(self.options.rsa_mu, self.dyads())
])
# The "hard" mu and nu terms: regularize the agents with maximum entropy and
# accommodating other agents' priors.
#
# Zero out these subgradients if we're doing layer-by-layer training.
if not layer_by_layer:
# sum_k \mu_jk E_{G_L(\theta_j)}
# [(1 + log G_L(c, m; \theta_j) - log H_S(c, m; \phi_k)) *
# d/d\theta_j log L(c | m; \theta_j)]
if self.options.verbosity >= 4:
print(id_tag + 'grad: mu regularizer')
all_subgrads.extend([
('grad_mu_reg/%s_%s' % (listener.id, speaker.id),
mean_weighted_grad(
mu *
(1 + listener.log_joint_emp(listener.model.sample_inputs_self,
listener.model.sample_target_self) -
speaker.log_joint_smooth(speaker.model.sample_inputs_others[j],
speaker.model.sample_target_others[j])),
listener.loss_out(listener.model.sample_inputs_self,
listener.model.sample_target_self)))
for mu, (listener, j, speaker, k) in zip(self.options.rsa_mu, self.dyads())
])
# sum_j \nu_jk E_{G_S(\phi_k)}
# [(1 + log G_S(c, m; \phi_k) - log H_L(c, m; \theta_j)) *
# d/d\phi_k log S(m | c; \phi_k)]
if self.options.verbosity >= 4:
print(id_tag + 'grad: nu regularizer')
all_subgrads.extend([
('grad_nu_reg/%s_%s' % (listener.id, speaker.id),
mean_weighted_grad(
nu *
(1 + speaker.log_joint_emp(speaker.model.sample_inputs_self,
speaker.model.sample_target_self) -
listener.log_joint_smooth(listener.model.sample_inputs_others[k],
listener.model.sample_target_others[k])),
speaker.loss_out(speaker.model.sample_inputs_self,
speaker.model.sample_target_self)))
for nu, (listener, j, speaker, k) in zip(self.options.rsa_nu, self.dyads())
])
est_grad = t_sum([grads for tag, grads in all_subgrads], nested=True)
monitored = OrderedDict()
if self.options.monitor_grads:
monitored.update([
('grad/' + param.name, grad)
for param, grad in zip(params, est_grad)
])
if self.options.monitor_subgrads:
monitored.update([
(tag + '/' + param.name, grad)
for tag, grads in all_subgrads
for param, grad in zip(params, grads)
])
return est_grad, monitored
def get_grad_of_est(self, monitored, params):
grad_of_est = T.grad(monitored['loss'], params)
monitored_grads = OrderedDict()
if self.options.monitor_grads:
monitored_grads.update([
('grad/' + param.name, grad)
for param, grad in zip(params, grad_of_est)
])
if self.options.monitor_subgrads:
monitored_grads.update([
(tag + '/' + param.name, grad)
for tag, subloss in monitored.iteritems() if tag != 'loss'
for param, grad in zip(params, T.grad(subloss, params,
disconnected_inputs='ignore'))
])
return grad_of_est, monitored_grads
def dyads(self):
for j, listener in enumerate(self.listeners):
for k, speaker in enumerate(self.speakers):
yield (listener, j, speaker, k)
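# dyads() walks the full listener x speaker grid in row-major order; e.g. with
# two of each it yields (L0, 0, S0, 0), (L0, 0, S1, 1), (L1, 1, S0, 0),
# (L1, 1, S1, 1), which is the same order in which --rsa_mu and --rsa_nu are consumed.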
def minibatches(self, inputs, targets, batch_size, shuffle=False):
agents = self.listeners + self.speakers
batches = super(RSAGraphModel, self).minibatches(inputs, targets, batch_size,
shuffle=shuffle)
for dataset_inputs, dataset_targets, _synth in batches:
inputs_batch = []
targets_batch = []
synth_batch = []
filtered = self.filter_arrays(dataset_inputs, dataset_targets)
for agent, (agent_inputs, agent_targets) in zip(agents, filtered):
inputs_batch.extend(agent_inputs)
targets_batch.extend(agent_targets)
input_types = [a.shape for a in agent_inputs]
target_types = [a.shape for a in agent_targets]
if self.options.verbosity >= 8:
print('%s: %s -> %s' % (agent.id, input_types, target_types))
listener_samples = [listener.sample_joint_smooth(self.options.listener_samples)
if self.options.listener_sample_smoothed else
listener.sample_joint_emp(self.options.listener_samples)
for listener in self.listeners]
speaker_samples = [speaker.sample_joint_smooth(self.options.speaker_samples)
if self.options.speaker_sample_smoothed else
speaker.sample_joint_emp(self.options.speaker_samples)
for speaker in self.speakers]
for listener, samples in zip(self.listeners, listener_samples):
arrays = listener.model.data_to_synth_arrays(listener, samples,
speaker_samples)
synth_batch.extend(arrays)
synth_types = [a.shape for a in arrays]
if self.options.verbosity >= 8:
print('%s synth: %s' % (listener.id, synth_types))
for speaker, samples in zip(self.speakers, speaker_samples):
arrays = speaker.model.data_to_synth_arrays(speaker, samples,
listener_samples)
synth_batch.extend(arrays)
synth_types = [a.shape for a in arrays]
if self.options.verbosity >= 8:
print('%s synth: %s' % (speaker.id, synth_types))
yield inputs_batch, targets_batch, synth_batch
def filter_arrays(self, inputs, targets):
result = []
input_idx = 0
for agent, target in zip(self.listeners + self.speakers, targets):
assert input_idx + len(agent.model.input_vars) <= len(inputs), \
(input_idx, len(agent.model.input_vars), len(inputs))
agent_inputs = inputs[input_idx:input_idx + len(agent.model.input_vars)]
agent_targets = [target]
result.append((agent_inputs, agent_targets))
input_idx += len(agent.model.input_vars)
return result
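# filter_arrays() splits the flat minibatch back into per-agent pieces: e.g. if
# two listeners take 2 input arrays each and one speaker takes 3, a flat list of
# 7 inputs is partitioned as inputs[0:2], inputs[2:4], inputs[4:7], each paired
# with that agent's single target array.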
class RSALearner(NeuralLearner):
def __init__(self, id=None):
self.get_options()
self.init_submodels(id)
super(RSALearner, self).__init__(id=id)
color_resolution = (self.options.listener_color_resolution
if self.options.listener else
self.options.speaker_color_resolution)
self.seq_vec = SequenceVectorizer()
self.color_vec = BucketsVectorizer(color_resolution, hsv=self.options.speaker_hsv)
def init_submodels(self, id=None):
id_tag = (id + '/') if id else ''
self.get_options()
listener_classes = self.options.listener_class
speaker_classes = self.options.speaker_class
if len(listener_classes) != self.options.rsa_listeners:
assert len(listener_classes) == 1, len(listener_classes)
listener_classes = listener_classes * self.options.rsa_listeners
if len(speaker_classes) != self.options.rsa_speakers:
assert len(speaker_classes) == 1, len(speaker_classes)
speaker_classes = speaker_classes * self.options.rsa_speakers
self.listeners = [LISTENERS[listener_classes[j]](id='%sL%d' % (id_tag, j))
for j in range(self.options.rsa_listeners)]
self.speakers = [SPEAKERS[speaker_classes[k]](id='%sS%d' % (id_tag, k))
for k in range(self.options.rsa_speakers)]
agents = self.listeners if self.options.listener else self.speakers
self.eval_agent = agents[self.options.eval_agent]
def predict(self, eval_instances, verbosity=0):
return self.eval_agent.predict(eval_instances, verbosity=verbosity)
def score(self, eval_instances, verbosity=0):
return self.eval_agent.score(eval_instances, verbosity=verbosity)
def predict_and_score(self, eval_instances, verbosity=0):
return self.eval_agent.predict_and_score(eval_instances, verbosity=verbosity)
def on_iter_end(self, step, writer):
for agent in self.speakers + self.listeners:
agent.on_iter_end(step, writer)
def sample_joint_smooth(self, num_samples):
return self.eval_agent.sample_joint_smooth(num_samples)
def _data_to_arrays(self, training_instances,
init_vectorizer=False, test=False, inverted=False):
input_arrays = []
target_arrays = []
if self.options.listener != inverted:
listener_dataset = training_instances
speaker_dataset = [inst.inverted() for inst in training_instances]
else:
listener_dataset = [inst.inverted() for inst in training_instances]
speaker_dataset = training_instances
for listener in self.listeners:
if not test:
listener.dataset = listener_dataset
inputs, targets = listener._data_to_arrays(listener_dataset, test=test,
init_vectorizer=init_vectorizer)
input_arrays.extend(inputs)
target_arrays.extend(targets)
for speaker in self.speakers:
if not test:
speaker.dataset = speaker_dataset
inputs, targets = speaker._data_to_arrays(speaker_dataset, test=test,
init_vectorizer=init_vectorizer)
input_arrays.extend(inputs)
target_arrays.extend(targets)
return input_arrays, target_arrays
def _build_model(self):
for agent in self.listeners + self.speakers:
agent._build_model(RSASubModel)
self.build_aggregate_model()
def train_priors(self, training_instances, listener_data=False):
prior_class = (LISTENER_PRIORS[self.options.listener_prior]
if self.options.listener else
SPEAKER_PRIORS[self.options.speaker_prior])
self.prior_emp = prior_class()
self.prior_smooth = prior_class()
self.prior_emp.train(training_instances, listener_data=listener_data)
self.prior_smooth.train(training_instances, listener_data=listener_data)
for agent in self.listeners + self.speakers:
agent.train_priors(training_instances, listener_data=listener_data)
def build_aggregate_model(self):
self.model = RSAGraphModel(self.listeners, self.speakers, self.eval_agent)
self.prior_emp = AggregatePrior(self.listeners, self.speakers, 'prior_emp')
self.prior_smooth = AggregatePrior(self.listeners, self.speakers, 'prior_smooth')
def __getstate__(self):
return (self.seq_vec, self.color_vec,
[agent.__getstate__() for agent in self.listeners + self.speakers])
def __setstate__(self, state):
self.seq_vec, self.color_vec, submodels = state
self.init_submodels()
for agent, substate in zip(self.listeners + self.speakers, submodels):
agent.unpickle(substate, RSASubModel)
self.build_aggregate_model()
def t_sum(seq, start=None, nested=False):
'''A version of sum that doesn't start with 0, for constructing
Theano graphs without superfluous TensorConstants.
If `nested` is True, sum expressions embedded within lists,
elementwise (for use with the output of T.jacobian).
>>> t_sum([1, 2, 3])
6
>>> t_sum(xrange(1, 4), start=4)
10
>>> t_sum([[1, 2], [3, 4], [5, 6]], nested=True)
[9, 12]
>>> t_sum([[1, 2], [3, 4], [5, 6]], start=[-1, -2], nested=True)
[8, 10]
'''
if nested:
if not isinstance(seq, list):
seq = list(seq)
if start:
return [t_sum(subseq, start_elem) for subseq, start_elem in zip(zip(*seq), start)]
else:
return [t_sum(subseq) for subseq in zip(*seq)]
seq_list = list(seq)
if seq_list:
reduced = reduce(operator.add, seq_list)
if start:
reduced = start + reduced
return reduced
elif start:
return start
else:
return 0
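# Optional hook (an assumption, not required by the training pipeline) for
# exercising the t_sum doctests above when the module is run directly:
if __name__ == '__main__':
    import doctest
    doctest.testmod()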
|
normal
|
{
"blob_id": "3496216de9f6b7d9d3db69eb4d8f8c0fdcd5123c",
"index": 1358,
"step-1": "<mask token>\n\n\nclass RSAGraphModel(SimpleLasagneModel):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n\nclass RSALearner(NeuralLearner):\n\n def __init__(self, id=None):\n self.get_options()\n self.init_submodels(id)\n super(RSALearner, self).__init__(id=id)\n color_resolution = (self.options.listener_color_resolution if self.\n options.listener else self.options.speaker_color_resolution)\n self.seq_vec = SequenceVectorizer()\n self.color_vec = BucketsVectorizer(color_resolution, hsv=self.\n options.speaker_hsv)\n\n def init_submodels(self, id=None):\n id_tag = id + '/' if id else ''\n self.get_options()\n listener_classes = self.options.listener_class\n speaker_classes = self.options.speaker_class\n if len(listener_classes) != self.options.rsa_listeners:\n assert len(listener_classes) == 1, len(listener_classes)\n listener_classes = listener_classes * self.options.rsa_listeners\n if len(speaker_classes) != self.options.rsa_speakers:\n assert len(speaker_classes) == 1, len(speaker_classes)\n speaker_classes = speaker_classes * self.options.rsa_speakers\n self.listeners = [LISTENERS[listener_classes[j]](id='%sL%d' % (\n id_tag, j)) for j in range(self.options.rsa_listeners)]\n self.speakers = [SPEAKERS[speaker_classes[k]](id='%sS%d' % (id_tag,\n k)) for k in range(self.options.rsa_speakers)]\n agents = self.listeners if self.options.listener else self.speakers\n self.eval_agent = agents[self.options.eval_agent]\n\n def predict(self, eval_instances, verbosity=0):\n return self.eval_agent.predict(eval_instances, verbosity=verbosity)\n\n def score(self, eval_instances, verbosity=0):\n return self.eval_agent.score(eval_instances, verbosity=verbosity)\n\n def predict_and_score(self, eval_instances, verbosity=0):\n return self.eval_agent.predict_and_score(eval_instances, verbosity=\n verbosity)\n\n def on_iter_end(self, step, writer):\n for agent in (self.speakers + self.listeners):\n agent.on_iter_end(step, writer)\n\n def sample_joint_smooth(self, num_samples):\n return self.eval_agent.sample_joint_smooth(num_samples)\n\n def _data_to_arrays(self, training_instances, init_vectorizer=False,\n test=False, inverted=False):\n input_arrays = []\n target_arrays = []\n if self.options.listener != inverted:\n listener_dataset = training_instances\n speaker_dataset = [inst.inverted() for inst in training_instances]\n else:\n listener_dataset = [inst.inverted() for inst in training_instances]\n speaker_dataset = training_instances\n for listener in self.listeners:\n if not test:\n listener.dataset = listener_dataset\n inputs, targets = listener._data_to_arrays(listener_dataset,\n test=test, init_vectorizer=init_vectorizer)\n input_arrays.extend(inputs)\n target_arrays.extend(targets)\n for speaker in self.speakers:\n if not test:\n speaker.dataset = speaker_dataset\n inputs, targets = speaker._data_to_arrays(speaker_dataset, test\n =test, init_vectorizer=init_vectorizer)\n input_arrays.extend(inputs)\n target_arrays.extend(targets)\n return input_arrays, target_arrays\n\n def _build_model(self):\n for agent in (self.listeners + self.speakers):\n agent._build_model(RSASubModel)\n self.build_aggregate_model()\n\n def train_priors(self, training_instances, listener_data=False):\n prior_class = LISTENER_PRIORS[self.options.listener_prior\n ] if self.options.listener else SPEAKER_PRIORS[self.options.\n speaker_prior]\n self.prior_emp = prior_class()\n self.prior_smooth = prior_class()\n 
self.prior_emp.train(training_instances, listener_data=listener_data)\n self.prior_smooth.train(training_instances, listener_data=listener_data\n )\n for agent in (self.listeners + self.speakers):\n agent.train_priors(training_instances, listener_data=listener_data)\n\n def build_aggregate_model(self):\n self.model = RSAGraphModel(self.listeners, self.speakers, self.\n eval_agent)\n self.prior_emp = AggregatePrior(self.listeners, self.speakers,\n 'prior_emp')\n self.prior_smooth = AggregatePrior(self.listeners, self.speakers,\n 'prior_smooth')\n\n def __getstate__(self):\n return self.seq_vec, self.color_vec, [agent.__getstate__() for\n agent in self.listeners + self.speakers]\n\n def __setstate__(self, state):\n self.seq_vec, self.color_vec, submodels = state\n self.init_submodels()\n for agent, substate in zip(self.listeners + self.speakers, submodels):\n agent.unpickle(substate, RSASubModel)\n self.build_aggregate_model()\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass RSAGraphModel(SimpleLasagneModel):\n <mask token>\n\n def params(self):\n result = []\n for listener in self.listeners:\n result.extend(listener.params())\n for speaker in self.speakers:\n result.extend(speaker.params())\n return result\n\n def get_train_loss(self, target_vars, params):\n for agent in self.speakers:\n agent.model.build_sample_vars(len(self.listeners))\n for agent in self.listeners:\n agent.model.build_sample_vars(len(self.speakers))\n monitored = self.get_est_loss(layer_by_layer=self.options.\n layer_by_layer)\n if self.options.grad_of_est:\n est_grad, monitored_grads = self.get_grad_of_est(monitored, params)\n else:\n est_grad, monitored_grads = self.get_est_grad(params,\n layer_by_layer=self.options.layer_by_layer)\n monitored.update(monitored_grads)\n synth_vars = [v for agent in self.listeners + self.speakers for v in\n agent.model.all_synth_vars]\n return monitored, est_grad, synth_vars\n\n def get_est_loss(self, layer_by_layer=False):\n\n def kl(agent_p, agent_q, other_idx):\n if layer_by_layer:\n return agent_q.loss_out(agent_q.model.sample_inputs_others[\n other_idx], agent_q.model.sample_target_others[other_idx]\n ).mean()\n else:\n return (agent_p.log_joint_emp(agent_p.model.\n sample_inputs_self, agent_p.model.sample_target_self) -\n agent_q.log_joint_smooth(agent_q.model.\n sample_inputs_others[other_idx], agent_q.model.\n sample_target_others[other_idx])).mean()\n id_tag_log = self.id + ': ' if self.id else ''\n id_tag = self.id + '/' if self.id else ''\n if self.options.verbosity >= 4:\n print(id_tag_log + 'loss: KL(dataset || L)')\n alpha_losses = [('%salpha_%s' % (id_tag, listener.id), alpha *\n listener.loss_out().mean()) for alpha, listener in zip(self.\n options.rsa_alpha, self.listeners)]\n if self.options.verbosity >= 4:\n print(id_tag_log + 'loss: KL(dataset || S)')\n beta_losses = [('%sbeta_%s' % (id_tag, speaker.id), beta * speaker.\n loss_out().mean()) for beta, speaker in zip(self.options.\n rsa_beta, self.speakers)]\n if self.options.verbosity >= 4:\n print(id_tag_log + 'loss: KL(L || S)')\n mu_losses = [('%smu_%s_%s' % (id_tag, listener.id, speaker.id), mu *\n kl(listener, speaker, j)) for mu, (listener, j, speaker, k) in\n zip(self.options.rsa_mu, self.dyads())]\n if self.options.verbosity >= 4:\n print(id_tag_log + 'loss: KL(S || L)')\n nu_losses = [('%snu_%s_%s' % (id_tag, speaker.id, listener.id), nu *\n kl(speaker, listener, k)) for nu, (listener, j, speaker, k) in\n zip(self.options.rsa_nu, self.dyads())]\n all_sublosses = alpha_losses + beta_losses + mu_losses + nu_losses\n est_loss = t_sum(loss for tag, loss in all_sublosses)\n monitored = OrderedDict([('loss', est_loss)])\n if self.options.monitor_sublosses:\n monitored.update(all_sublosses)\n if self.options.monitor_activations:\n for agent in (self.listeners + self.speakers):\n for name, layer in get_named_layers(agent.l_out).iteritems():\n monitored['activation/' + name] = get_output(layer)\n return monitored\n <mask token>\n <mask token>\n\n def dyads(self):\n for j, listener in enumerate(self.listeners):\n for k, speaker in enumerate(self.speakers):\n yield listener, j, speaker, k\n\n def minibatches(self, inputs, targets, batch_size, shuffle=False):\n agents = self.listeners + self.speakers\n batches = super(RSAGraphModel, self).minibatches(inputs, targets,\n batch_size, shuffle=shuffle)\n for dataset_inputs, dataset_targets, _synth in batches:\n inputs_batch = []\n targets_batch = []\n synth_batch = []\n filtered = 
self.filter_arrays(dataset_inputs, dataset_targets)\n for agent, (agent_inputs, agent_targets) in zip(agents, filtered):\n inputs_batch.extend(agent_inputs)\n targets_batch.extend(agent_targets)\n input_types = [a.shape for a in agent_inputs]\n target_types = [a.shape for a in agent_targets]\n if self.options.verbosity >= 8:\n print('%s: %s -> %s' % (agent.id, input_types,\n target_types))\n listener_samples = [(listener.sample_joint_smooth(self.options.\n listener_samples) if self.options.listener_sample_smoothed else\n listener.sample_joint_emp(self.options.listener_samples)) for\n listener in self.listeners]\n speaker_samples = [(speaker.sample_joint_smooth(self.options.\n speaker_samples) if self.options.speaker_sample_smoothed else\n speaker.sample_joint_emp(self.options.listener_samples)) for\n speaker in self.speakers]\n for listener, samples in zip(self.listeners, listener_samples):\n arrays = listener.model.data_to_synth_arrays(listener,\n samples, speaker_samples)\n synth_batch.extend(arrays)\n synth_types = [a.shape for a in arrays]\n if self.options.verbosity >= 8:\n print('%s synth: %s' % (listener.id, synth_types))\n for speaker, samples in zip(self.speakers, speaker_samples):\n arrays = speaker.model.data_to_synth_arrays(speaker,\n samples, listener_samples)\n synth_batch.extend(arrays)\n synth_types = [a.shape for a in arrays]\n if self.options.verbosity >= 8:\n print('%s synth: %s' % (speaker.id, synth_types))\n yield inputs_batch, targets_batch, synth_batch\n\n def filter_arrays(self, inputs, targets):\n result = []\n input_idx = 0\n for agent, target in zip(self.listeners + self.speakers, targets):\n assert input_idx + len(agent.model.input_vars) <= len(inputs), (\n input_idx, len(agent.model.input_vars), len(inputs))\n agent_inputs = inputs[input_idx:input_idx + len(agent.model.\n input_vars)]\n agent_targets = [target]\n result.append((agent_inputs, agent_targets))\n input_idx += len(agent.model.input_vars)\n return result\n\n\nclass RSALearner(NeuralLearner):\n\n def __init__(self, id=None):\n self.get_options()\n self.init_submodels(id)\n super(RSALearner, self).__init__(id=id)\n color_resolution = (self.options.listener_color_resolution if self.\n options.listener else self.options.speaker_color_resolution)\n self.seq_vec = SequenceVectorizer()\n self.color_vec = BucketsVectorizer(color_resolution, hsv=self.\n options.speaker_hsv)\n\n def init_submodels(self, id=None):\n id_tag = id + '/' if id else ''\n self.get_options()\n listener_classes = self.options.listener_class\n speaker_classes = self.options.speaker_class\n if len(listener_classes) != self.options.rsa_listeners:\n assert len(listener_classes) == 1, len(listener_classes)\n listener_classes = listener_classes * self.options.rsa_listeners\n if len(speaker_classes) != self.options.rsa_speakers:\n assert len(speaker_classes) == 1, len(speaker_classes)\n speaker_classes = speaker_classes * self.options.rsa_speakers\n self.listeners = [LISTENERS[listener_classes[j]](id='%sL%d' % (\n id_tag, j)) for j in range(self.options.rsa_listeners)]\n self.speakers = [SPEAKERS[speaker_classes[k]](id='%sS%d' % (id_tag,\n k)) for k in range(self.options.rsa_speakers)]\n agents = self.listeners if self.options.listener else self.speakers\n self.eval_agent = agents[self.options.eval_agent]\n\n def predict(self, eval_instances, verbosity=0):\n return self.eval_agent.predict(eval_instances, verbosity=verbosity)\n\n def score(self, eval_instances, verbosity=0):\n return self.eval_agent.score(eval_instances, 
verbosity=verbosity)\n\n def predict_and_score(self, eval_instances, verbosity=0):\n return self.eval_agent.predict_and_score(eval_instances, verbosity=\n verbosity)\n\n def on_iter_end(self, step, writer):\n for agent in (self.speakers + self.listeners):\n agent.on_iter_end(step, writer)\n\n def sample_joint_smooth(self, num_samples):\n return self.eval_agent.sample_joint_smooth(num_samples)\n\n def _data_to_arrays(self, training_instances, init_vectorizer=False,\n test=False, inverted=False):\n input_arrays = []\n target_arrays = []\n if self.options.listener != inverted:\n listener_dataset = training_instances\n speaker_dataset = [inst.inverted() for inst in training_instances]\n else:\n listener_dataset = [inst.inverted() for inst in training_instances]\n speaker_dataset = training_instances\n for listener in self.listeners:\n if not test:\n listener.dataset = listener_dataset\n inputs, targets = listener._data_to_arrays(listener_dataset,\n test=test, init_vectorizer=init_vectorizer)\n input_arrays.extend(inputs)\n target_arrays.extend(targets)\n for speaker in self.speakers:\n if not test:\n speaker.dataset = speaker_dataset\n inputs, targets = speaker._data_to_arrays(speaker_dataset, test\n =test, init_vectorizer=init_vectorizer)\n input_arrays.extend(inputs)\n target_arrays.extend(targets)\n return input_arrays, target_arrays\n\n def _build_model(self):\n for agent in (self.listeners + self.speakers):\n agent._build_model(RSASubModel)\n self.build_aggregate_model()\n\n def train_priors(self, training_instances, listener_data=False):\n prior_class = LISTENER_PRIORS[self.options.listener_prior\n ] if self.options.listener else SPEAKER_PRIORS[self.options.\n speaker_prior]\n self.prior_emp = prior_class()\n self.prior_smooth = prior_class()\n self.prior_emp.train(training_instances, listener_data=listener_data)\n self.prior_smooth.train(training_instances, listener_data=listener_data\n )\n for agent in (self.listeners + self.speakers):\n agent.train_priors(training_instances, listener_data=listener_data)\n\n def build_aggregate_model(self):\n self.model = RSAGraphModel(self.listeners, self.speakers, self.\n eval_agent)\n self.prior_emp = AggregatePrior(self.listeners, self.speakers,\n 'prior_emp')\n self.prior_smooth = AggregatePrior(self.listeners, self.speakers,\n 'prior_smooth')\n\n def __getstate__(self):\n return self.seq_vec, self.color_vec, [agent.__getstate__() for\n agent in self.listeners + self.speakers]\n\n def __setstate__(self, state):\n self.seq_vec, self.color_vec, submodels = state\n self.init_submodels()\n for agent, substate in zip(self.listeners + self.speakers, submodels):\n agent.unpickle(substate, RSASubModel)\n self.build_aggregate_model()\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass RSAGraphModel(SimpleLasagneModel):\n\n def __init__(self, listeners, speakers, eval_agent, id=None):\n self.get_options()\n self.listeners = listeners\n self.speakers = speakers\n self.eval_agent = eval_agent\n input_vars = [v for listener in listeners for v in listener.model.\n input_vars] + [v for speaker in speakers for v in speaker.model\n .input_vars]\n target_vars = [listener.model.target_var for listener in listeners] + [\n speaker.model.target_var for speaker in speakers]\n super(RSAGraphModel, self).__init__(input_vars, target_vars, l_out=\n eval_agent.model.l_out, loss=None, optimizer=OPTIMIZERS[self.\n options.rsa_optimizer], learning_rate=self.options.\n rsa_learning_rate, id=id)\n\n def params(self):\n result = []\n for listener in self.listeners:\n result.extend(listener.params())\n for speaker in self.speakers:\n result.extend(speaker.params())\n return result\n\n def get_train_loss(self, target_vars, params):\n for agent in self.speakers:\n agent.model.build_sample_vars(len(self.listeners))\n for agent in self.listeners:\n agent.model.build_sample_vars(len(self.speakers))\n monitored = self.get_est_loss(layer_by_layer=self.options.\n layer_by_layer)\n if self.options.grad_of_est:\n est_grad, monitored_grads = self.get_grad_of_est(monitored, params)\n else:\n est_grad, monitored_grads = self.get_est_grad(params,\n layer_by_layer=self.options.layer_by_layer)\n monitored.update(monitored_grads)\n synth_vars = [v for agent in self.listeners + self.speakers for v in\n agent.model.all_synth_vars]\n return monitored, est_grad, synth_vars\n\n def get_est_loss(self, layer_by_layer=False):\n\n def kl(agent_p, agent_q, other_idx):\n if layer_by_layer:\n return agent_q.loss_out(agent_q.model.sample_inputs_others[\n other_idx], agent_q.model.sample_target_others[other_idx]\n ).mean()\n else:\n return (agent_p.log_joint_emp(agent_p.model.\n sample_inputs_self, agent_p.model.sample_target_self) -\n agent_q.log_joint_smooth(agent_q.model.\n sample_inputs_others[other_idx], agent_q.model.\n sample_target_others[other_idx])).mean()\n id_tag_log = self.id + ': ' if self.id else ''\n id_tag = self.id + '/' if self.id else ''\n if self.options.verbosity >= 4:\n print(id_tag_log + 'loss: KL(dataset || L)')\n alpha_losses = [('%salpha_%s' % (id_tag, listener.id), alpha *\n listener.loss_out().mean()) for alpha, listener in zip(self.\n options.rsa_alpha, self.listeners)]\n if self.options.verbosity >= 4:\n print(id_tag_log + 'loss: KL(dataset || S)')\n beta_losses = [('%sbeta_%s' % (id_tag, speaker.id), beta * speaker.\n loss_out().mean()) for beta, speaker in zip(self.options.\n rsa_beta, self.speakers)]\n if self.options.verbosity >= 4:\n print(id_tag_log + 'loss: KL(L || S)')\n mu_losses = [('%smu_%s_%s' % (id_tag, listener.id, speaker.id), mu *\n kl(listener, speaker, j)) for mu, (listener, j, speaker, k) in\n zip(self.options.rsa_mu, self.dyads())]\n if self.options.verbosity >= 4:\n print(id_tag_log + 'loss: KL(S || L)')\n nu_losses = [('%snu_%s_%s' % (id_tag, speaker.id, listener.id), nu *\n kl(speaker, listener, k)) for nu, (listener, j, speaker, k) in\n zip(self.options.rsa_nu, self.dyads())]\n all_sublosses = alpha_losses + beta_losses + mu_losses + nu_losses\n est_loss = t_sum(loss for tag, loss in all_sublosses)\n monitored = OrderedDict([('loss', est_loss)])\n if self.options.monitor_sublosses:\n monitored.update(all_sublosses)\n if self.options.monitor_activations:\n for agent in (self.listeners + self.speakers):\n for name, layer in 
get_named_layers(agent.l_out).iteritems():\n monitored['activation/' + name] = get_output(layer)\n return monitored\n <mask token>\n\n def get_grad_of_est(self, monitored, params):\n grad_of_est = T.grad(monitored['loss'], params)\n monitored_grads = OrderedDict()\n if self.options.monitor_grads:\n monitored_grads.update([('grad/' + param.name, grad) for param,\n grad in zip(params, grad_of_est)])\n if self.options.monitor_subgrads:\n monitored_grads.update([(tag + '/' + param.name, grad) for tag,\n subloss in monitored.iteritems() if tag != 'loss' for param,\n grad in zip(params, T.grad(subloss, params,\n disconnected_inputs='ignore'))])\n return grad_of_est, monitored_grads\n\n def dyads(self):\n for j, listener in enumerate(self.listeners):\n for k, speaker in enumerate(self.speakers):\n yield listener, j, speaker, k\n\n def minibatches(self, inputs, targets, batch_size, shuffle=False):\n agents = self.listeners + self.speakers\n batches = super(RSAGraphModel, self).minibatches(inputs, targets,\n batch_size, shuffle=shuffle)\n for dataset_inputs, dataset_targets, _synth in batches:\n inputs_batch = []\n targets_batch = []\n synth_batch = []\n filtered = self.filter_arrays(dataset_inputs, dataset_targets)\n for agent, (agent_inputs, agent_targets) in zip(agents, filtered):\n inputs_batch.extend(agent_inputs)\n targets_batch.extend(agent_targets)\n input_types = [a.shape for a in agent_inputs]\n target_types = [a.shape for a in agent_targets]\n if self.options.verbosity >= 8:\n print('%s: %s -> %s' % (agent.id, input_types,\n target_types))\n listener_samples = [(listener.sample_joint_smooth(self.options.\n listener_samples) if self.options.listener_sample_smoothed else\n listener.sample_joint_emp(self.options.listener_samples)) for\n listener in self.listeners]\n speaker_samples = [(speaker.sample_joint_smooth(self.options.\n speaker_samples) if self.options.speaker_sample_smoothed else\n speaker.sample_joint_emp(self.options.listener_samples)) for\n speaker in self.speakers]\n for listener, samples in zip(self.listeners, listener_samples):\n arrays = listener.model.data_to_synth_arrays(listener,\n samples, speaker_samples)\n synth_batch.extend(arrays)\n synth_types = [a.shape for a in arrays]\n if self.options.verbosity >= 8:\n print('%s synth: %s' % (listener.id, synth_types))\n for speaker, samples in zip(self.speakers, speaker_samples):\n arrays = speaker.model.data_to_synth_arrays(speaker,\n samples, listener_samples)\n synth_batch.extend(arrays)\n synth_types = [a.shape for a in arrays]\n if self.options.verbosity >= 8:\n print('%s synth: %s' % (speaker.id, synth_types))\n yield inputs_batch, targets_batch, synth_batch\n\n def filter_arrays(self, inputs, targets):\n result = []\n input_idx = 0\n for agent, target in zip(self.listeners + self.speakers, targets):\n assert input_idx + len(agent.model.input_vars) <= len(inputs), (\n input_idx, len(agent.model.input_vars), len(inputs))\n agent_inputs = inputs[input_idx:input_idx + len(agent.model.\n input_vars)]\n agent_targets = [target]\n result.append((agent_inputs, agent_targets))\n input_idx += len(agent.model.input_vars)\n return result\n\n\nclass RSALearner(NeuralLearner):\n\n def __init__(self, id=None):\n self.get_options()\n self.init_submodels(id)\n super(RSALearner, self).__init__(id=id)\n color_resolution = (self.options.listener_color_resolution if self.\n options.listener else self.options.speaker_color_resolution)\n self.seq_vec = SequenceVectorizer()\n self.color_vec = BucketsVectorizer(color_resolution, hsv=self.\n 
options.speaker_hsv)\n\n def init_submodels(self, id=None):\n id_tag = id + '/' if id else ''\n self.get_options()\n listener_classes = self.options.listener_class\n speaker_classes = self.options.speaker_class\n if len(listener_classes) != self.options.rsa_listeners:\n assert len(listener_classes) == 1, len(listener_classes)\n listener_classes = listener_classes * self.options.rsa_listeners\n if len(speaker_classes) != self.options.rsa_speakers:\n assert len(speaker_classes) == 1, len(speaker_classes)\n speaker_classes = speaker_classes * self.options.rsa_speakers\n self.listeners = [LISTENERS[listener_classes[j]](id='%sL%d' % (\n id_tag, j)) for j in range(self.options.rsa_listeners)]\n self.speakers = [SPEAKERS[speaker_classes[k]](id='%sS%d' % (id_tag,\n k)) for k in range(self.options.rsa_speakers)]\n agents = self.listeners if self.options.listener else self.speakers\n self.eval_agent = agents[self.options.eval_agent]\n\n def predict(self, eval_instances, verbosity=0):\n return self.eval_agent.predict(eval_instances, verbosity=verbosity)\n\n def score(self, eval_instances, verbosity=0):\n return self.eval_agent.score(eval_instances, verbosity=verbosity)\n\n def predict_and_score(self, eval_instances, verbosity=0):\n return self.eval_agent.predict_and_score(eval_instances, verbosity=\n verbosity)\n\n def on_iter_end(self, step, writer):\n for agent in (self.speakers + self.listeners):\n agent.on_iter_end(step, writer)\n\n def sample_joint_smooth(self, num_samples):\n return self.eval_agent.sample_joint_smooth(num_samples)\n\n def _data_to_arrays(self, training_instances, init_vectorizer=False,\n test=False, inverted=False):\n input_arrays = []\n target_arrays = []\n if self.options.listener != inverted:\n listener_dataset = training_instances\n speaker_dataset = [inst.inverted() for inst in training_instances]\n else:\n listener_dataset = [inst.inverted() for inst in training_instances]\n speaker_dataset = training_instances\n for listener in self.listeners:\n if not test:\n listener.dataset = listener_dataset\n inputs, targets = listener._data_to_arrays(listener_dataset,\n test=test, init_vectorizer=init_vectorizer)\n input_arrays.extend(inputs)\n target_arrays.extend(targets)\n for speaker in self.speakers:\n if not test:\n speaker.dataset = speaker_dataset\n inputs, targets = speaker._data_to_arrays(speaker_dataset, test\n =test, init_vectorizer=init_vectorizer)\n input_arrays.extend(inputs)\n target_arrays.extend(targets)\n return input_arrays, target_arrays\n\n def _build_model(self):\n for agent in (self.listeners + self.speakers):\n agent._build_model(RSASubModel)\n self.build_aggregate_model()\n\n def train_priors(self, training_instances, listener_data=False):\n prior_class = LISTENER_PRIORS[self.options.listener_prior\n ] if self.options.listener else SPEAKER_PRIORS[self.options.\n speaker_prior]\n self.prior_emp = prior_class()\n self.prior_smooth = prior_class()\n self.prior_emp.train(training_instances, listener_data=listener_data)\n self.prior_smooth.train(training_instances, listener_data=listener_data\n )\n for agent in (self.listeners + self.speakers):\n agent.train_priors(training_instances, listener_data=listener_data)\n\n def build_aggregate_model(self):\n self.model = RSAGraphModel(self.listeners, self.speakers, self.\n eval_agent)\n self.prior_emp = AggregatePrior(self.listeners, self.speakers,\n 'prior_emp')\n self.prior_smooth = AggregatePrior(self.listeners, self.speakers,\n 'prior_smooth')\n\n def __getstate__(self):\n return self.seq_vec, self.color_vec, 
[agent.__getstate__() for\n agent in self.listeners + self.speakers]\n\n def __setstate__(self, state):\n self.seq_vec, self.color_vec, submodels = state\n self.init_submodels()\n for agent, substate in zip(self.listeners + self.speakers, submodels):\n agent.unpickle(substate, RSASubModel)\n self.build_aggregate_model()\n\n\n<mask token>\n",
"step-4": "<mask token>\nparser = config.get_options_parser()\nparser.add_argument('--rsa_listeners', type=int, default=1, help=\n 'Number of listeners to use in RSA cooperative nets graph')\nparser.add_argument('--rsa_speakers', type=int, default=1, help=\n 'Number of speakers to use in RSA cooperative nets graph')\nparser.add_argument('--listener_class', default=['Listener'], choices=\n LISTENERS.keys(), nargs='+', help=\n 'The name of the listener model to use in the RSA network.')\nparser.add_argument('--speaker_class', default=['Speaker'], choices=\n SPEAKERS.keys(), nargs='+', help=\n 'The name of the speaker model to use in the RSA network.')\nparser.add_argument('--eval_agent', type=int, default=0, help=\n 'Index of the agent (listener/speaker) to use as the primary object of evaluation. Whether this agent is a listener or speaker will be inferred from the --listener flag.'\n )\nparser.add_argument('--rsa_optimizer', choices=OPTIMIZERS.keys(), default=\n 'rmsprop', help=\n 'The optimization (update) algorithm to use for RSA training.')\nparser.add_argument('--rsa_learning_rate', type=float, default=0.1, help=\n 'The learning rate to use for RSA training.')\nparser.add_argument('--rsa_alpha', type=float, nargs='*', default=[1.0],\n help=\n 'Weights for the log-likelihood of the dataset according to the listeners. Provide as many values as there are listeners.'\n )\nparser.add_argument('--rsa_beta', type=float, nargs='*', default=[1.0],\n help=\n 'Weights for the log-likelihood of the dataset according to the speakers. Provide as many values as there are speakers.'\n )\nparser.add_argument('--rsa_mu', type=float, nargs='*', default=[1.0], help=\n 'Weights for KL(L_j||S_k). Provide values to fill a rsa_listeners x rsa_speakers matrix, in row-major order (i.e. all speakers for first listener, then all speakers for second listener, etc.).'\n )\nparser.add_argument('--rsa_nu', type=float, nargs='*', default=[1.0], help=\n 'Weights for KL(S_k||L_j). Provide values to fill a rsa_listeners x rsa_speakers matrix, in row-major order (i.e. all speakers for first listener, then all speakers for second listener, etc.).'\n )\nparser.add_argument('--listener_samples', type=int, default=128, help=\n 'Number of samples to draw from the listener per minibatch.')\nparser.add_argument('--speaker_samples', type=int, default=128, help=\n 'Number of samples to draw from the speaker per minibatch.')\nparser.add_argument('--monitor_sublosses', type=config.boolean, default=\n False, help=\n 'If `True`, return sub-losses for monitoring and write them to the TensorBoard events file. This will likely increase compilation time.'\n )\nparser.add_argument('--monitor_subgrads', type=config.boolean, default=\n False, help=\n 'If `True`, return sub-gradients for monitoring and write them to the TensorBoard events file. 
This will likely increase compilation time.'\n )\nparser.add_argument('--grad_of_est', type=config.boolean, default=False,\n help=\n 'If `True`, optimize using the gradient of the estimated loss; otherwise, use the manually-derived estimate of the gradient of the true loss.'\n )\nparser.add_argument('--layer_by_layer', type=config.boolean, default=False,\n help=\n 'If `True`, train RSA agents layer-by-layer (only use the log-likelihood sub-gradients, equivalent to training each agent on data generated from the other agents); otherwise, use the gradient of the full RSA objective.'\n )\nparser.add_argument('--listener_sample_smoothed', type=config.boolean,\n default=False, help=\n 'If `True`, take samples from the smoothed utterance prior; otherwise, sample from the empirical utterance prior.'\n )\nparser.add_argument('--speaker_sample_smoothed', type=config.boolean,\n default=False, help=\n 'If `True`, take samples from the smoothed world prior; otherwise, sample from the empirical world prior.'\n )\n\n\nclass AggregatePrior(object):\n\n def __init__(self, listeners, speakers, prior_name='prior_emp'):\n self.listeners = listeners\n self.speakers = speakers\n self.prior_name = prior_name\n\n def train(self, training_instances, listener=False):\n for agent in self.listeners:\n getattr(agent, self.prior_name).train(training_instances,\n listener=listener)\n for agent in self.speakers:\n getattr(agent, self.prior_name).train(training_instances,\n listener=listener)\n\n def apply(self, input_vars):\n assert False, \"AggregatePrior.apply shouldn't be called; only individual model priors are used in RSA coop nets model\"\n\n\nclass RSASubModel(SimpleLasagneModel):\n \"\"\"\n A SimpleLasagneModel for a subcomponent of an RSA graph.\n \"\"\"\n\n def __init__(self, input_vars, target_vars, l_out, loss, optimizer,\n learning_rate=0.001, id=None):\n super(RSASubModel, self).__init__(input_vars, target_vars, l_out,\n loss, optimizer, learning_rate=learning_rate, id=id)\n if len(target_vars) != 1:\n raise ValueError(\n 'target_vars should be a sequence of length 1, instead got %s'\n % (target_vars,))\n self.target_var = target_vars[0]\n\n def build_sample_vars(self, num_other_agents):\n self.sample_inputs_self = [v.type('%s_sample_self' % (v.name,)) for\n v in self.input_vars]\n self.sample_inputs_others = [[v.type('%s_sample_other%d' % (v.name,\n i)) for v in self.input_vars] for i in range(num_other_agents)]\n t = self.target_var\n self.sample_target_self = t.type('%s_sample_self' % (t.name,))\n self.sample_target_others = [t.type('%s_sample_other%d' % (t.name,\n i)) for i in range(num_other_agents)]\n self.all_synth_vars = self.sample_inputs_self + [self.\n sample_target_self] + [v for o_inputs, o_target in zip(self.\n sample_inputs_others, self.sample_target_others) for v in \n o_inputs + [o_target]]\n\n def data_to_synth_arrays(self, agent, samples_self, samples_others):\n\n def flatten(arrays):\n inputs, targets = arrays\n return inputs + targets\n return [arr for i, samples in enumerate([samples_self] +\n samples_others) for arr in flatten(agent._data_to_arrays(\n samples, inverted=i != 0))]\n\n\nclass RSAGraphModel(SimpleLasagneModel):\n\n def __init__(self, listeners, speakers, eval_agent, id=None):\n self.get_options()\n self.listeners = listeners\n self.speakers = speakers\n self.eval_agent = eval_agent\n input_vars = [v for listener in listeners for v in listener.model.\n input_vars] + [v for speaker in speakers for v in speaker.model\n .input_vars]\n target_vars = [listener.model.target_var 
for listener in listeners] + [\n speaker.model.target_var for speaker in speakers]\n super(RSAGraphModel, self).__init__(input_vars, target_vars, l_out=\n eval_agent.model.l_out, loss=None, optimizer=OPTIMIZERS[self.\n options.rsa_optimizer], learning_rate=self.options.\n rsa_learning_rate, id=id)\n\n def params(self):\n result = []\n for listener in self.listeners:\n result.extend(listener.params())\n for speaker in self.speakers:\n result.extend(speaker.params())\n return result\n\n def get_train_loss(self, target_vars, params):\n for agent in self.speakers:\n agent.model.build_sample_vars(len(self.listeners))\n for agent in self.listeners:\n agent.model.build_sample_vars(len(self.speakers))\n monitored = self.get_est_loss(layer_by_layer=self.options.\n layer_by_layer)\n if self.options.grad_of_est:\n est_grad, monitored_grads = self.get_grad_of_est(monitored, params)\n else:\n est_grad, monitored_grads = self.get_est_grad(params,\n layer_by_layer=self.options.layer_by_layer)\n monitored.update(monitored_grads)\n synth_vars = [v for agent in self.listeners + self.speakers for v in\n agent.model.all_synth_vars]\n return monitored, est_grad, synth_vars\n\n def get_est_loss(self, layer_by_layer=False):\n\n def kl(agent_p, agent_q, other_idx):\n if layer_by_layer:\n return agent_q.loss_out(agent_q.model.sample_inputs_others[\n other_idx], agent_q.model.sample_target_others[other_idx]\n ).mean()\n else:\n return (agent_p.log_joint_emp(agent_p.model.\n sample_inputs_self, agent_p.model.sample_target_self) -\n agent_q.log_joint_smooth(agent_q.model.\n sample_inputs_others[other_idx], agent_q.model.\n sample_target_others[other_idx])).mean()\n id_tag_log = self.id + ': ' if self.id else ''\n id_tag = self.id + '/' if self.id else ''\n if self.options.verbosity >= 4:\n print(id_tag_log + 'loss: KL(dataset || L)')\n alpha_losses = [('%salpha_%s' % (id_tag, listener.id), alpha *\n listener.loss_out().mean()) for alpha, listener in zip(self.\n options.rsa_alpha, self.listeners)]\n if self.options.verbosity >= 4:\n print(id_tag_log + 'loss: KL(dataset || S)')\n beta_losses = [('%sbeta_%s' % (id_tag, speaker.id), beta * speaker.\n loss_out().mean()) for beta, speaker in zip(self.options.\n rsa_beta, self.speakers)]\n if self.options.verbosity >= 4:\n print(id_tag_log + 'loss: KL(L || S)')\n mu_losses = [('%smu_%s_%s' % (id_tag, listener.id, speaker.id), mu *\n kl(listener, speaker, j)) for mu, (listener, j, speaker, k) in\n zip(self.options.rsa_mu, self.dyads())]\n if self.options.verbosity >= 4:\n print(id_tag_log + 'loss: KL(S || L)')\n nu_losses = [('%snu_%s_%s' % (id_tag, speaker.id, listener.id), nu *\n kl(speaker, listener, k)) for nu, (listener, j, speaker, k) in\n zip(self.options.rsa_nu, self.dyads())]\n all_sublosses = alpha_losses + beta_losses + mu_losses + nu_losses\n est_loss = t_sum(loss for tag, loss in all_sublosses)\n monitored = OrderedDict([('loss', est_loss)])\n if self.options.monitor_sublosses:\n monitored.update(all_sublosses)\n if self.options.monitor_activations:\n for agent in (self.listeners + self.speakers):\n for name, layer in get_named_layers(agent.l_out).iteritems():\n monitored['activation/' + name] = get_output(layer)\n return monitored\n\n def get_est_grad(self, params, layer_by_layer=False):\n\n def mean_weighted_grad(weights, loss):\n return T.Lop(loss, params, weights / T.cast(weights.shape[0],\n 'float32'), disconnected_inputs='ignore')\n\n def mean_grad(loss):\n return T.grad(loss.mean(), params, disconnected_inputs='ignore')\n id_tag = self.id + ': ' if 
self.id else ''\n if self.options.verbosity >= 4:\n print(id_tag + 'grad: alpha')\n all_subgrads = [('grad_alpha/%s' % (listener.id,), mean_grad(alpha *\n listener.loss_out())) for alpha, listener in zip(self.options.\n rsa_alpha, self.listeners)]\n if self.options.verbosity >= 4:\n print(id_tag + 'grad: beta')\n all_subgrads.extend([('grad_beta/%s' % (speaker.id,), mean_grad(\n beta * speaker.loss_out())) for beta, speaker in zip(self.\n options.rsa_beta, self.speakers)])\n if self.options.verbosity >= 4:\n print(id_tag + 'grad: nu co-training')\n all_subgrads.extend([('grad_nu_co/%s_%s' % (listener.id, speaker.id\n ), mean_grad(nu * listener.loss_out(listener.model.\n sample_inputs_others[k], listener.model.sample_target_others[k]\n ))) for nu, (listener, j, speaker, k) in zip(self.options.\n rsa_nu, self.dyads())])\n if self.options.verbosity >= 4:\n print(id_tag + 'grad: mu co-training')\n all_subgrads.extend([('grad_mu_co/%s_%s' % (listener.id, speaker.id\n ), mean_grad(mu * speaker.loss_out(speaker.model.\n sample_inputs_others[j], speaker.model.sample_target_others[j])\n )) for mu, (listener, j, speaker, k) in zip(self.options.rsa_mu,\n self.dyads())])\n if not layer_by_layer:\n if self.options.verbosity >= 4:\n print(id_tag + 'grad: mu regularizer')\n all_subgrads.extend([('grad_mu_reg/%s_%s' % (listener.id,\n speaker.id), mean_weighted_grad(mu * (1 + listener.\n log_joint_emp(listener.model.sample_inputs_self, listener.\n model.sample_target_self) - speaker.log_joint_smooth(\n speaker.model.sample_inputs_others[j], speaker.model.\n sample_target_others[j])), listener.loss_out(listener.model\n .sample_inputs_self, listener.model.sample_target_self))) for\n mu, (listener, j, speaker, k) in zip(self.options.rsa_mu,\n self.dyads())])\n if self.options.verbosity >= 4:\n print(id_tag + 'grad: nu regularizer')\n all_subgrads.extend([('grad_nu_reg/%s_%s' % (listener.id,\n speaker.id), mean_weighted_grad(nu * (1 + speaker.\n log_joint_emp(speaker.model.sample_inputs_self, speaker.\n model.sample_target_self) - listener.log_joint_smooth(\n listener.model.sample_inputs_others[k], listener.model.\n sample_target_others[k])), speaker.loss_out(speaker.model.\n sample_inputs_self, speaker.model.sample_target_self))) for\n nu, (listener, j, speaker, k) in zip(self.options.rsa_nu,\n self.dyads())])\n est_grad = t_sum([grads for tag, grads in all_subgrads], nested=True)\n monitored = OrderedDict()\n if self.options.monitor_grads:\n monitored.update([('grad/' + param.name, grad) for param, grad in\n zip(params, est_grad)])\n if self.options.monitor_subgrads:\n monitored.update([(tag + '/' + param.name, grad) for tag, grads in\n all_subgrads for param, grad in zip(params, grads)])\n return est_grad, monitored\n\n def get_grad_of_est(self, monitored, params):\n grad_of_est = T.grad(monitored['loss'], params)\n monitored_grads = OrderedDict()\n if self.options.monitor_grads:\n monitored_grads.update([('grad/' + param.name, grad) for param,\n grad in zip(params, grad_of_est)])\n if self.options.monitor_subgrads:\n monitored_grads.update([(tag + '/' + param.name, grad) for tag,\n subloss in monitored.iteritems() if tag != 'loss' for param,\n grad in zip(params, T.grad(subloss, params,\n disconnected_inputs='ignore'))])\n return grad_of_est, monitored_grads\n\n def dyads(self):\n for j, listener in enumerate(self.listeners):\n for k, speaker in enumerate(self.speakers):\n yield listener, j, speaker, k\n\n def minibatches(self, inputs, targets, batch_size, shuffle=False):\n agents = self.listeners + 
self.speakers\n batches = super(RSAGraphModel, self).minibatches(inputs, targets,\n batch_size, shuffle=shuffle)\n for dataset_inputs, dataset_targets, _synth in batches:\n inputs_batch = []\n targets_batch = []\n synth_batch = []\n filtered = self.filter_arrays(dataset_inputs, dataset_targets)\n for agent, (agent_inputs, agent_targets) in zip(agents, filtered):\n inputs_batch.extend(agent_inputs)\n targets_batch.extend(agent_targets)\n input_types = [a.shape for a in agent_inputs]\n target_types = [a.shape for a in agent_targets]\n if self.options.verbosity >= 8:\n print('%s: %s -> %s' % (agent.id, input_types,\n target_types))\n listener_samples = [(listener.sample_joint_smooth(self.options.\n listener_samples) if self.options.listener_sample_smoothed else\n listener.sample_joint_emp(self.options.listener_samples)) for\n listener in self.listeners]\n speaker_samples = [(speaker.sample_joint_smooth(self.options.\n speaker_samples) if self.options.speaker_sample_smoothed else\n speaker.sample_joint_emp(self.options.listener_samples)) for\n speaker in self.speakers]\n for listener, samples in zip(self.listeners, listener_samples):\n arrays = listener.model.data_to_synth_arrays(listener,\n samples, speaker_samples)\n synth_batch.extend(arrays)\n synth_types = [a.shape for a in arrays]\n if self.options.verbosity >= 8:\n print('%s synth: %s' % (listener.id, synth_types))\n for speaker, samples in zip(self.speakers, speaker_samples):\n arrays = speaker.model.data_to_synth_arrays(speaker,\n samples, listener_samples)\n synth_batch.extend(arrays)\n synth_types = [a.shape for a in arrays]\n if self.options.verbosity >= 8:\n print('%s synth: %s' % (speaker.id, synth_types))\n yield inputs_batch, targets_batch, synth_batch\n\n def filter_arrays(self, inputs, targets):\n result = []\n input_idx = 0\n for agent, target in zip(self.listeners + self.speakers, targets):\n assert input_idx + len(agent.model.input_vars) <= len(inputs), (\n input_idx, len(agent.model.input_vars), len(inputs))\n agent_inputs = inputs[input_idx:input_idx + len(agent.model.\n input_vars)]\n agent_targets = [target]\n result.append((agent_inputs, agent_targets))\n input_idx += len(agent.model.input_vars)\n return result\n\n\nclass RSALearner(NeuralLearner):\n\n def __init__(self, id=None):\n self.get_options()\n self.init_submodels(id)\n super(RSALearner, self).__init__(id=id)\n color_resolution = (self.options.listener_color_resolution if self.\n options.listener else self.options.speaker_color_resolution)\n self.seq_vec = SequenceVectorizer()\n self.color_vec = BucketsVectorizer(color_resolution, hsv=self.\n options.speaker_hsv)\n\n def init_submodels(self, id=None):\n id_tag = id + '/' if id else ''\n self.get_options()\n listener_classes = self.options.listener_class\n speaker_classes = self.options.speaker_class\n if len(listener_classes) != self.options.rsa_listeners:\n assert len(listener_classes) == 1, len(listener_classes)\n listener_classes = listener_classes * self.options.rsa_listeners\n if len(speaker_classes) != self.options.rsa_speakers:\n assert len(speaker_classes) == 1, len(speaker_classes)\n speaker_classes = speaker_classes * self.options.rsa_speakers\n self.listeners = [LISTENERS[listener_classes[j]](id='%sL%d' % (\n id_tag, j)) for j in range(self.options.rsa_listeners)]\n self.speakers = [SPEAKERS[speaker_classes[k]](id='%sS%d' % (id_tag,\n k)) for k in range(self.options.rsa_speakers)]\n agents = self.listeners if self.options.listener else self.speakers\n self.eval_agent = 
agents[self.options.eval_agent]\n\n def predict(self, eval_instances, verbosity=0):\n return self.eval_agent.predict(eval_instances, verbosity=verbosity)\n\n def score(self, eval_instances, verbosity=0):\n return self.eval_agent.score(eval_instances, verbosity=verbosity)\n\n def predict_and_score(self, eval_instances, verbosity=0):\n return self.eval_agent.predict_and_score(eval_instances, verbosity=\n verbosity)\n\n def on_iter_end(self, step, writer):\n for agent in (self.speakers + self.listeners):\n agent.on_iter_end(step, writer)\n\n def sample_joint_smooth(self, num_samples):\n return self.eval_agent.sample_joint_smooth(num_samples)\n\n def _data_to_arrays(self, training_instances, init_vectorizer=False,\n test=False, inverted=False):\n input_arrays = []\n target_arrays = []\n if self.options.listener != inverted:\n listener_dataset = training_instances\n speaker_dataset = [inst.inverted() for inst in training_instances]\n else:\n listener_dataset = [inst.inverted() for inst in training_instances]\n speaker_dataset = training_instances\n for listener in self.listeners:\n if not test:\n listener.dataset = listener_dataset\n inputs, targets = listener._data_to_arrays(listener_dataset,\n test=test, init_vectorizer=init_vectorizer)\n input_arrays.extend(inputs)\n target_arrays.extend(targets)\n for speaker in self.speakers:\n if not test:\n speaker.dataset = speaker_dataset\n inputs, targets = speaker._data_to_arrays(speaker_dataset, test\n =test, init_vectorizer=init_vectorizer)\n input_arrays.extend(inputs)\n target_arrays.extend(targets)\n return input_arrays, target_arrays\n\n def _build_model(self):\n for agent in (self.listeners + self.speakers):\n agent._build_model(RSASubModel)\n self.build_aggregate_model()\n\n def train_priors(self, training_instances, listener_data=False):\n prior_class = LISTENER_PRIORS[self.options.listener_prior\n ] if self.options.listener else SPEAKER_PRIORS[self.options.\n speaker_prior]\n self.prior_emp = prior_class()\n self.prior_smooth = prior_class()\n self.prior_emp.train(training_instances, listener_data=listener_data)\n self.prior_smooth.train(training_instances, listener_data=listener_data\n )\n for agent in (self.listeners + self.speakers):\n agent.train_priors(training_instances, listener_data=listener_data)\n\n def build_aggregate_model(self):\n self.model = RSAGraphModel(self.listeners, self.speakers, self.\n eval_agent)\n self.prior_emp = AggregatePrior(self.listeners, self.speakers,\n 'prior_emp')\n self.prior_smooth = AggregatePrior(self.listeners, self.speakers,\n 'prior_smooth')\n\n def __getstate__(self):\n return self.seq_vec, self.color_vec, [agent.__getstate__() for\n agent in self.listeners + self.speakers]\n\n def __setstate__(self, state):\n self.seq_vec, self.color_vec, submodels = state\n self.init_submodels()\n for agent, substate in zip(self.listeners + self.speakers, submodels):\n agent.unpickle(substate, RSASubModel)\n self.build_aggregate_model()\n\n\ndef t_sum(seq, start=None, nested=False):\n \"\"\"A version of sum that doesn't start with 0, for constructing\n Theano graphs without superfluous TensorConstants.\n\n If `nested` is True, sum expressions embedded within lists,\n elementwise (for use with the output for T.jacobian).\n\n >>> t_sum([1, 2, 3])\n 6\n >>> t_sum(xrange(1, 4), start=4)\n 10\n >>> t_sum([[1, 2], [3, 4], [5, 6]], nested=True)\n [9, 12]\n >>> t_sum([[1, 2], [3, 4], [5, 6]], start=[-1, -2], nested=True)\n [8, 10]\n \"\"\"\n if nested:\n if not isinstance(seq, list):\n seq = list(seq)\n if start:\n 
return [t_sum(subseq, start_elem) for subseq, start_elem in zip\n (zip(*seq), start)]\n else:\n return [t_sum(subseq) for subseq in zip(*seq)]\n seq_list = list(seq)\n if seq_list:\n reduced = reduce(operator.add, seq_list)\n if start:\n reduced = start + reduced\n return reduced\n elif start:\n return start\n else:\n return 0\n",
"step-5": "import operator\nimport theano.tensor as T\nfrom collections import OrderedDict\nfrom lasagne.layers import get_output\n\nfrom stanza.research import config\nfrom neural import SimpleLasagneModel, NeuralLearner\nfrom vectorizers import SequenceVectorizer, BucketsVectorizer\nfrom neural import OPTIMIZERS, get_named_layers\nfrom listener import LISTENERS, PRIORS as LISTENER_PRIORS\nfrom speaker import SPEAKERS, PRIORS as SPEAKER_PRIORS\n\nparser = config.get_options_parser()\nparser.add_argument('--rsa_listeners', type=int, default=1,\n help='Number of listeners to use in RSA cooperative nets graph')\nparser.add_argument('--rsa_speakers', type=int, default=1,\n help='Number of speakers to use in RSA cooperative nets graph')\nparser.add_argument('--listener_class', default=['Listener'], choices=LISTENERS.keys(), nargs='+',\n help='The name of the listener model to use in the RSA network.')\nparser.add_argument('--speaker_class', default=['Speaker'], choices=SPEAKERS.keys(), nargs='+',\n help='The name of the speaker model to use in the RSA network.')\nparser.add_argument('--eval_agent', type=int, default=0,\n help='Index of the agent (listener/speaker) to use as the primary object '\n 'of evaluation. Whether this agent is a listener or speaker will be '\n 'inferred from the --listener flag.')\nparser.add_argument('--rsa_optimizer', choices=OPTIMIZERS.keys(), default='rmsprop',\n help='The optimization (update) algorithm to use for RSA training.')\nparser.add_argument('--rsa_learning_rate', type=float, default=0.1,\n help='The learning rate to use for RSA training.')\n\nparser.add_argument('--rsa_alpha', type=float, nargs='*', default=[1.0],\n help='Weights for the log-likelihood of the dataset according to the '\n 'listeners. Provide as many values as there are listeners.')\nparser.add_argument('--rsa_beta', type=float, nargs='*', default=[1.0],\n help='Weights for the log-likelihood of the dataset according to the '\n 'speakers. Provide as many values as there are speakers.')\nparser.add_argument('--rsa_mu', type=float, nargs='*', default=[1.0],\n help='Weights for KL(L_j||S_k). Provide values to fill a '\n 'rsa_listeners x rsa_speakers matrix, in row-major order '\n '(i.e. all speakers for first listener, then all speakers for second '\n 'listener, etc.).')\nparser.add_argument('--rsa_nu', type=float, nargs='*', default=[1.0],\n help='Weights for KL(S_k||L_j). Provide values to fill a '\n 'rsa_listeners x rsa_speakers matrix, in row-major order '\n '(i.e. all speakers for first listener, then all speakers for second '\n 'listener, etc.).')\n\nparser.add_argument('--listener_samples', type=int, default=128,\n help='Number of samples to draw from the listener per minibatch.')\nparser.add_argument('--speaker_samples', type=int, default=128,\n help='Number of samples to draw from the speaker per minibatch.')\n\nparser.add_argument('--monitor_sublosses', type=config.boolean, default=False,\n help='If `True`, return sub-losses for monitoring and write them to the '\n 'TensorBoard events file. This will likely increase compilation time.')\nparser.add_argument('--monitor_subgrads', type=config.boolean, default=False,\n help='If `True`, return sub-gradients for monitoring and write them to the '\n 'TensorBoard events file. 
This will likely increase compilation time.')\nparser.add_argument('--grad_of_est', type=config.boolean, default=False,\n help='If `True`, optimize using the gradient of the estimated loss; '\n 'otherwise, use the manually-derived estimate of the gradient of '\n 'the true loss.')\nparser.add_argument('--layer_by_layer', type=config.boolean, default=False,\n help='If `True`, train RSA agents layer-by-layer (only use the log-likelihood '\n 'sub-gradients, equivalent to training each agent on data generated from '\n 'the other agents); otherwise, use the gradient of the full RSA '\n 'objective.')\nparser.add_argument('--listener_sample_smoothed', type=config.boolean, default=False,\n help='If `True`, take samples from the smoothed utterance prior; otherwise, '\n 'sample from the empirical utterance prior.')\nparser.add_argument('--speaker_sample_smoothed', type=config.boolean, default=False,\n help='If `True`, take samples from the smoothed world prior; otherwise, '\n 'sample from the empirical world prior.')\n\n\nclass AggregatePrior(object):\n def __init__(self, listeners, speakers, prior_name='prior_emp'):\n self.listeners = listeners\n self.speakers = speakers\n self.prior_name = prior_name\n\n def train(self, training_instances, listener=False):\n for agent in self.listeners:\n getattr(agent, self.prior_name).train(training_instances, listener=listener)\n for agent in self.speakers:\n getattr(agent, self.prior_name).train(training_instances, listener=listener)\n\n def apply(self, input_vars):\n assert False, (\"AggregatePrior.apply shouldn't be called; \"\n \"only individual model priors are used in RSA coop nets model\")\n\n\nclass RSASubModel(SimpleLasagneModel):\n '''\n A SimpleLasagneModel for a subcomponent of an RSA graph.\n '''\n def __init__(self, input_vars, target_vars, l_out, loss, optimizer,\n learning_rate=0.001, id=None):\n super(RSASubModel, self).__init__(input_vars, target_vars, l_out, loss, optimizer,\n learning_rate=learning_rate, id=id)\n if len(target_vars) != 1:\n raise ValueError('target_vars should be a sequence of length 1, instead got %s' %\n (target_vars,))\n self.target_var = target_vars[0]\n\n def build_sample_vars(self, num_other_agents):\n self.sample_inputs_self = [v.type('%s_sample_self' % (v.name,))\n for v in self.input_vars]\n self.sample_inputs_others = [[v.type('%s_sample_other%d' % (v.name, i))\n for v in self.input_vars]\n for i in range(num_other_agents)]\n t = self.target_var\n self.sample_target_self = t.type('%s_sample_self' % (t.name,))\n self.sample_target_others = [t.type('%s_sample_other%d' % (t.name, i))\n for i in range(num_other_agents)]\n\n self.all_synth_vars = (self.sample_inputs_self +\n [self.sample_target_self] +\n [v\n for o_inputs, o_target in zip(self.sample_inputs_others,\n self.sample_target_others)\n for v in o_inputs + [o_target]])\n\n def data_to_synth_arrays(self, agent, samples_self, samples_others):\n def flatten(arrays):\n inputs, targets = arrays\n return inputs + targets\n\n return [arr\n for i, samples in enumerate([samples_self] + samples_others)\n for arr in flatten(agent._data_to_arrays(samples, inverted=(i != 0)))]\n\n\nclass RSAGraphModel(SimpleLasagneModel):\n def __init__(self, listeners, speakers, eval_agent, id=None):\n self.get_options()\n\n self.listeners = listeners\n self.speakers = speakers\n self.eval_agent = eval_agent\n input_vars = ([v for listener in listeners for v in listener.model.input_vars] +\n [v for speaker in speakers for v in speaker.model.input_vars])\n target_vars = 
([listener.model.target_var for listener in listeners] +\n [speaker.model.target_var for speaker in speakers])\n super(RSAGraphModel, self).__init__(input_vars, target_vars,\n l_out=eval_agent.model.l_out, loss=None,\n optimizer=OPTIMIZERS[self.options.rsa_optimizer],\n learning_rate=self.options.rsa_learning_rate,\n id=id)\n\n def params(self):\n result = []\n for listener in self.listeners:\n result.extend(listener.params())\n for speaker in self.speakers:\n result.extend(speaker.params())\n return result\n\n def get_train_loss(self, target_vars, params):\n for agent in self.speakers:\n agent.model.build_sample_vars(len(self.listeners))\n for agent in self.listeners:\n agent.model.build_sample_vars(len(self.speakers))\n\n monitored = self.get_est_loss(layer_by_layer=self.options.layer_by_layer)\n if self.options.grad_of_est:\n est_grad, monitored_grads = self.get_grad_of_est(monitored, params)\n else:\n est_grad, monitored_grads = self.get_est_grad(\n params, layer_by_layer=self.options.layer_by_layer)\n monitored.update(monitored_grads)\n synth_vars = [v\n for agent in self.listeners + self.speakers\n for v in agent.model.all_synth_vars]\n\n return monitored, est_grad, synth_vars\n\n def get_est_loss(self, layer_by_layer=False):\n def kl(agent_p, agent_q, other_idx):\n if layer_by_layer:\n return agent_q.loss_out(agent_q.model.sample_inputs_others[other_idx],\n agent_q.model.sample_target_others[other_idx]).mean()\n else:\n return (\n agent_p.log_joint_emp(agent_p.model.sample_inputs_self,\n agent_p.model.sample_target_self) -\n agent_q.log_joint_smooth(agent_q.model.sample_inputs_others[other_idx],\n agent_q.model.sample_target_others[other_idx])\n ).mean()\n\n id_tag_log = (self.id + ': ') if self.id else ''\n id_tag = (self.id + '/') if self.id else ''\n # \\alpha * KL(dataset || L) = \\alpha * log L(dataset) + C\n if self.options.verbosity >= 4:\n print(id_tag_log + 'loss: KL(dataset || L)')\n alpha_losses = [\n ('%salpha_%s' % (id_tag, listener.id), alpha * listener.loss_out().mean())\n for alpha, listener in zip(self.options.rsa_alpha, self.listeners)\n ]\n # \\beta * KL(dataset || S) = \\beta * log S(dataset) + C\n if self.options.verbosity >= 4:\n print(id_tag_log + 'loss: KL(dataset || S)')\n beta_losses = [\n ('%sbeta_%s' % (id_tag, speaker.id), beta * speaker.loss_out().mean())\n for beta, speaker in zip(self.options.rsa_beta, self.speakers)\n ]\n\n # \\mu * KL(L || S)\n if self.options.verbosity >= 4:\n print(id_tag_log + 'loss: KL(L || S)')\n mu_losses = [\n ('%smu_%s_%s' % (id_tag, listener.id, speaker.id), mu * kl(listener, speaker, j))\n for mu, (listener, j, speaker, k) in zip(self.options.rsa_mu, self.dyads())\n ]\n # \\nu * KL(S || L)\n if self.options.verbosity >= 4:\n print(id_tag_log + 'loss: KL(S || L)')\n nu_losses = [\n ('%snu_%s_%s' % (id_tag, speaker.id, listener.id), nu * kl(speaker, listener, k))\n for nu, (listener, j, speaker, k) in zip(self.options.rsa_nu, self.dyads())\n ]\n\n all_sublosses = alpha_losses + beta_losses + mu_losses + nu_losses\n est_loss = t_sum(loss for tag, loss in all_sublosses)\n\n monitored = OrderedDict([('loss', est_loss)])\n if self.options.monitor_sublosses:\n monitored.update(all_sublosses)\n if self.options.monitor_activations:\n for agent in self.listeners + self.speakers:\n for name, layer in get_named_layers(agent.l_out).iteritems():\n monitored['activation/' + name] = get_output(layer)\n return monitored\n\n def get_est_grad(self, params, layer_by_layer=False):\n def mean_weighted_grad(weights, loss):\n # Lop to the rescue! 
Here I was calling T.jacobian and trying to\n # broadcast things and elementwise-multiply through the resulting lists,\n # when a function already existed to do all of that for me...\n return T.Lop(loss, params, weights / T.cast(weights.shape[0], 'float32'),\n disconnected_inputs='ignore')\n # TODO: control variates?\n\n def mean_grad(loss):\n return T.grad(loss.mean(), params, disconnected_inputs='ignore')\n\n id_tag = (self.id + ': ') if self.id else ''\n # alpha and beta: train the agents directly against the dataset.\n # \\alpha_j E_D [-d/d\\theta_j log L(c | m; \\theta_j)]\n if self.options.verbosity >= 4:\n print(id_tag + 'grad: alpha')\n all_subgrads = [\n ('grad_alpha/%s' % (listener.id,),\n mean_grad(alpha * listener.loss_out()))\n for alpha, listener in zip(self.options.rsa_alpha, self.listeners)\n ]\n # \\beta_k E_D [-d/d\\phi_k log S(m | c; \\phi_k)]\n if self.options.verbosity >= 4:\n print(id_tag + 'grad: beta')\n all_subgrads.extend([\n ('grad_beta/%s' % (speaker.id,),\n mean_grad(beta * speaker.loss_out()))\n for beta, speaker in zip(self.options.rsa_beta, self.speakers)\n ])\n\n # The \"simple\" mu and nu terms: train the agents directly against each other.\n # These are still ordinary log-likelihood terms; the complexity comes from\n # identifying the right input variables and iterating over the m x n dyads.\n # sum_k \\nu_jk E_{G_S(\\phi_k)} [-d/d\\theta_j log L(c | m; \\theta_j)]\n if self.options.verbosity >= 4:\n print(id_tag + 'grad: nu co-training')\n all_subgrads.extend([\n ('grad_nu_co/%s_%s' % (listener.id, speaker.id),\n mean_grad(nu * listener.loss_out(listener.model.sample_inputs_others[k],\n listener.model.sample_target_others[k])))\n for nu, (listener, j, speaker, k) in zip(self.options.rsa_nu, self.dyads())\n ])\n # sum_j \\nu_jk E_{G_L(\\theta_j)} [-d/d\\phi_k log S(m | c; \\phi_k)]\n if self.options.verbosity >= 4:\n print(id_tag + 'grad: mu co-training')\n all_subgrads.extend([\n ('grad_mu_co/%s_%s' % (listener.id, speaker.id),\n mean_grad(mu * speaker.loss_out(speaker.model.sample_inputs_others[j],\n speaker.model.sample_target_others[j])))\n for mu, (listener, j, speaker, k) in zip(self.options.rsa_mu, self.dyads())\n ])\n\n # The \"hard\" mu and nu terms: regularize the agents with maximum entropy and\n # accommodating other agents' priors.\n #\n # Zero out these subgradients if we're doing layer-by-layer training.\n if not layer_by_layer:\n # sum_k \\mu_jk E_{G_L(\\theta_j)}\n # [(1 + log G_L(c, m; \\theta_j) - log H_S(c, m; \\phi_k)) *\n # d/d\\theta_j log L(c | m; \\theta_j)]\n if self.options.verbosity >= 4:\n print(id_tag + 'grad: mu regularizer')\n all_subgrads.extend([\n ('grad_mu_reg/%s_%s' % (listener.id, speaker.id),\n mean_weighted_grad(\n mu *\n (1 + listener.log_joint_emp(listener.model.sample_inputs_self,\n listener.model.sample_target_self) -\n speaker.log_joint_smooth(speaker.model.sample_inputs_others[j],\n speaker.model.sample_target_others[j])),\n listener.loss_out(listener.model.sample_inputs_self,\n listener.model.sample_target_self)))\n for mu, (listener, j, speaker, k) in zip(self.options.rsa_mu, self.dyads())\n ])\n # sum_j \\nu_jk E_{G_S(\\phi_k)}\n # [(1 + log G_S(c, m; \\phi_k) - log H_L(c, m; \\theta_j)) *\n # d/d\\phi_k log S(m | c; \\phi_k)]\n if self.options.verbosity >= 4:\n print(id_tag + 'grad: nu regularizer')\n all_subgrads.extend([\n ('grad_nu_reg/%s_%s' % (listener.id, speaker.id),\n mean_weighted_grad(\n nu *\n (1 + speaker.log_joint_emp(speaker.model.sample_inputs_self,\n speaker.model.sample_target_self) -\n 
listener.log_joint_smooth(listener.model.sample_inputs_others[k],\n listener.model.sample_target_others[k])),\n speaker.loss_out(speaker.model.sample_inputs_self,\n speaker.model.sample_target_self)))\n for nu, (listener, j, speaker, k) in zip(self.options.rsa_nu, self.dyads())\n ])\n\n est_grad = t_sum([grads for tag, grads in all_subgrads], nested=True)\n\n monitored = OrderedDict()\n if self.options.monitor_grads:\n monitored.update([\n ('grad/' + param.name, grad)\n for param, grad in zip(params, est_grad)\n ])\n if self.options.monitor_subgrads:\n monitored.update([\n (tag + '/' + param.name, grad)\n for tag, grads in all_subgrads\n for param, grad in zip(params, grads)\n ])\n return est_grad, monitored\n\n def get_grad_of_est(self, monitored, params):\n grad_of_est = T.grad(monitored['loss'], params)\n\n monitored_grads = OrderedDict()\n if self.options.monitor_grads:\n monitored_grads.update([\n ('grad/' + param.name, grad)\n for param, grad in zip(params, grad_of_est)\n ])\n if self.options.monitor_subgrads:\n monitored_grads.update([\n (tag + '/' + param.name, grad)\n for tag, subloss in monitored.iteritems() if tag != 'loss'\n for param, grad in zip(params, T.grad(subloss, params,\n disconnected_inputs='ignore'))\n ])\n\n return grad_of_est, monitored_grads\n\n def dyads(self):\n for j, listener in enumerate(self.listeners):\n for k, speaker in enumerate(self.speakers):\n yield (listener, j, speaker, k)\n\n def minibatches(self, inputs, targets, batch_size, shuffle=False):\n agents = self.listeners + self.speakers\n batches = super(RSAGraphModel, self).minibatches(inputs, targets, batch_size,\n shuffle=shuffle)\n for dataset_inputs, dataset_targets, _synth in batches:\n inputs_batch = []\n targets_batch = []\n synth_batch = []\n\n filtered = self.filter_arrays(dataset_inputs, dataset_targets)\n for agent, (agent_inputs, agent_targets) in zip(agents, filtered):\n inputs_batch.extend(agent_inputs)\n targets_batch.extend(agent_targets)\n input_types = [a.shape for a in agent_inputs]\n target_types = [a.shape for a in agent_targets]\n if self.options.verbosity >= 8:\n print('%s: %s -> %s' % (agent.id, input_types, target_types))\n\n listener_samples = [listener.sample_joint_smooth(self.options.listener_samples)\n if self.options.listener_sample_smoothed else\n listener.sample_joint_emp(self.options.listener_samples)\n for listener in self.listeners]\n speaker_samples = [speaker.sample_joint_smooth(self.options.speaker_samples)\n if self.options.speaker_sample_smoothed else\n speaker.sample_joint_emp(self.options.listener_samples)\n for speaker in self.speakers]\n\n for listener, samples in zip(self.listeners, listener_samples):\n arrays = listener.model.data_to_synth_arrays(listener, samples,\n speaker_samples)\n synth_batch.extend(arrays)\n synth_types = [a.shape for a in arrays]\n if self.options.verbosity >= 8:\n print('%s synth: %s' % (listener.id, synth_types))\n for speaker, samples in zip(self.speakers, speaker_samples):\n arrays = speaker.model.data_to_synth_arrays(speaker, samples,\n listener_samples)\n synth_batch.extend(arrays)\n synth_types = [a.shape for a in arrays]\n if self.options.verbosity >= 8:\n print('%s synth: %s' % (speaker.id, synth_types))\n yield inputs_batch, targets_batch, synth_batch\n\n def filter_arrays(self, inputs, targets):\n result = []\n input_idx = 0\n for agent, target in zip(self.listeners + self.speakers, targets):\n assert input_idx + len(agent.model.input_vars) <= len(inputs), \\\n (input_idx, len(agent.model.input_vars), len(inputs))\n 
agent_inputs = inputs[input_idx:input_idx + len(agent.model.input_vars)]\n agent_targets = [target]\n result.append((agent_inputs, agent_targets))\n input_idx += len(agent.model.input_vars)\n return result\n\n\nclass RSALearner(NeuralLearner):\n def __init__(self, id=None):\n self.get_options()\n self.init_submodels(id)\n super(RSALearner, self).__init__(id=id)\n\n color_resolution = (self.options.listener_color_resolution\n if self.options.listener else\n self.options.speaker_color_resolution)\n self.seq_vec = SequenceVectorizer()\n self.color_vec = BucketsVectorizer(color_resolution, hsv=self.options.speaker_hsv)\n\n def init_submodels(self, id=None):\n id_tag = (id + '/') if id else ''\n self.get_options()\n\n listener_classes = self.options.listener_class\n speaker_classes = self.options.speaker_class\n if len(listener_classes) != self.options.rsa_listeners:\n assert len(listener_classes) == 1, len(listener_classes)\n listener_classes = listener_classes * self.options.rsa_listeners\n if len(speaker_classes) != self.options.rsa_speakers:\n assert len(speaker_classes) == 1, len(speaker_classes)\n speaker_classes = speaker_classes * self.options.rsa_speakers\n self.listeners = [LISTENERS[listener_classes[j]](id='%sL%d' % (id_tag, j))\n for j in range(self.options.rsa_listeners)]\n self.speakers = [SPEAKERS[speaker_classes[k]](id='%sS%d' % (id_tag, k))\n for k in range(self.options.rsa_speakers)]\n\n agents = self.listeners if self.options.listener else self.speakers\n self.eval_agent = agents[self.options.eval_agent]\n\n def predict(self, eval_instances, verbosity=0):\n return self.eval_agent.predict(eval_instances, verbosity=verbosity)\n\n def score(self, eval_instances, verbosity=0):\n return self.eval_agent.score(eval_instances, verbosity=verbosity)\n\n def predict_and_score(self, eval_instances, verbosity=0):\n return self.eval_agent.predict_and_score(eval_instances, verbosity=verbosity)\n\n def on_iter_end(self, step, writer):\n for agent in self.speakers + self.listeners:\n agent.on_iter_end(step, writer)\n\n def sample_joint_smooth(self, num_samples):\n return self.eval_agent.sample_joint_smooth(num_samples)\n\n def _data_to_arrays(self, training_instances,\n init_vectorizer=False, test=False, inverted=False):\n input_arrays = []\n target_arrays = []\n\n if self.options.listener != inverted:\n listener_dataset = training_instances\n speaker_dataset = [inst.inverted() for inst in training_instances]\n else:\n listener_dataset = [inst.inverted() for inst in training_instances]\n speaker_dataset = training_instances\n\n for listener in self.listeners:\n if not test:\n listener.dataset = listener_dataset\n inputs, targets = listener._data_to_arrays(listener_dataset, test=test,\n init_vectorizer=init_vectorizer)\n input_arrays.extend(inputs)\n target_arrays.extend(targets)\n for speaker in self.speakers:\n if not test:\n speaker.dataset = speaker_dataset\n inputs, targets = speaker._data_to_arrays(speaker_dataset, test=test,\n init_vectorizer=init_vectorizer)\n input_arrays.extend(inputs)\n target_arrays.extend(targets)\n\n return input_arrays, target_arrays\n\n def _build_model(self):\n for agent in self.listeners + self.speakers:\n agent._build_model(RSASubModel)\n self.build_aggregate_model()\n\n def train_priors(self, training_instances, listener_data=False):\n prior_class = (LISTENER_PRIORS[self.options.listener_prior]\n if self.options.listener else\n SPEAKER_PRIORS[self.options.speaker_prior])\n self.prior_emp = prior_class()\n self.prior_smooth = prior_class()\n\n 
self.prior_emp.train(training_instances, listener_data=listener_data)\n self.prior_smooth.train(training_instances, listener_data=listener_data)\n\n for agent in self.listeners + self.speakers:\n agent.train_priors(training_instances, listener_data=listener_data)\n\n def build_aggregate_model(self):\n self.model = RSAGraphModel(self.listeners, self.speakers, self.eval_agent)\n self.prior_emp = AggregatePrior(self.listeners, self.speakers, 'prior_emp')\n self.prior_smooth = AggregatePrior(self.listeners, self.speakers, 'prior_smooth')\n\n def __getstate__(self):\n return (self.seq_vec, self.color_vec,\n [agent.__getstate__() for agent in self.listeners + self.speakers])\n\n def __setstate__(self, state):\n self.seq_vec, self.color_vec, submodels = state\n self.init_submodels()\n for agent, substate in zip(self.listeners + self.speakers, submodels):\n agent.unpickle(substate, RSASubModel)\n self.build_aggregate_model()\n\n\ndef t_sum(seq, start=None, nested=False):\n '''A version of sum that doesn't start with 0, for constructing\n Theano graphs without superfluous TensorConstants.\n\n If `nested` is True, sum expressions embedded within lists,\n elementwise (for use with the output for T.jacobian).\n\n >>> t_sum([1, 2, 3])\n 6\n >>> t_sum(xrange(1, 4), start=4)\n 10\n >>> t_sum([[1, 2], [3, 4], [5, 6]], nested=True)\n [9, 12]\n >>> t_sum([[1, 2], [3, 4], [5, 6]], start=[-1, -2], nested=True)\n [8, 10]\n '''\n if nested:\n if not isinstance(seq, list):\n seq = list(seq)\n if start:\n return [t_sum(subseq, start_elem) for subseq, start_elem in zip(zip(*seq), start)]\n else:\n return [t_sum(subseq) for subseq in zip(*seq)]\n\n seq_list = list(seq)\n if seq_list:\n reduced = reduce(operator.add, seq_list)\n if start:\n reduced = start + reduced\n return reduced\n elif start:\n return start\n else:\n return 0\n",
"step-ids": [
15,
21,
23,
36,
38
]
}
|
[
15,
21,
23,
36,
38
] |
# -*- coding: utf-8 -*-
import json
import argparse
def parse_args():
"""
Parse input arguments.
    :return: parsed argument namespace
"""
    parser = argparse.ArgumentParser(description='Reverse image search API test')
parser.add_argument('--ak', dest='access_key', help='access_key for qiniu account',
type=str)
parser.add_argument('--sk', dest='secret_key', help='secret_key for qiniu account',
type=str)
    parser.add_argument('--in', dest='json_file', help='path to results file, one JSON object per line',
type=str)
return parser.parse_args()
if __name__ == '__main__':
args = parse_args()
    f = open(args.json_file, 'r')
res = []
a = 0
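    # each input line is one JSON response: {"url": ..., "result": [{"uri": ...}, ...]} or {"url": ..., "error": ...}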
    for line in f.readlines():
dic = json.loads(line)
img_url = dic["url"]
t = {"url": img_url, "true":0, "simialr_uri":[]}
if not "error" in dic.keys():
a += 1
            # image id is the file stem of the query url
            im_num = img_url.split('.')[-2].split('/')[-1]
print(im_num)
for i in dic["result"]:
print((i["uri"].split('/'))[4])
if ((i["uri"].split('/'))[4].split('__')[0]=="eval") and (im_num in (i["uri"].split('/'))[4].split('-')[0]):
t["simialr_uri"].append(i)
t["true"] += 1
res.append(t)
    # sum hits over every query; entries recorded for error responses contribute 0
    r = sum(item["true"] for item in res)
    correct = r / (float(a) * 15)  # assumes 15 candidates are returned per query
    print("The top-5 correct percentage is %f" % correct)
|
normal
|
{
"blob_id": "c7147741784b37b42200869002d4df5ddc900675",
"index": 2001,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef parse_args():\n \"\"\"\n Parse input arguments.\n :return:\n \"\"\"\n parser = argparse.ArgumentParser(description='以图搜图API测试')\n parser.add_argument('--ak', dest='access_key', help=\n 'access_key for qiniu account', type=str)\n parser.add_argument('--sk', dest='secret_key', help=\n 'secret_key for qiniu account', type=str)\n parser.add_argument('--in', dest='json_file', help='json file', type=str)\n return parser.parse_args()\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef parse_args():\n \"\"\"\n Parse input arguments.\n :return:\n \"\"\"\n parser = argparse.ArgumentParser(description='以图搜图API测试')\n parser.add_argument('--ak', dest='access_key', help=\n 'access_key for qiniu account', type=str)\n parser.add_argument('--sk', dest='secret_key', help=\n 'secret_key for qiniu account', type=str)\n parser.add_argument('--in', dest='json_file', help='json file', type=str)\n return parser.parse_args()\n\n\nif __name__ == '__main__':\n args = parse_args()\n file = open(args.json_file, 'r')\n res = []\n a = 0\n for line in file.readlines():\n dic = json.loads(line)\n img_url = dic['url']\n t = {'url': img_url, 'true': 0, 'simialr_uri': []}\n if not 'error' in dic.keys():\n a += 1\n im_num = img_url.split('.')[-2].split('/')[-1]\n print(im_num)\n for i in dic['result']:\n uri = []\n print(i['uri'].split('/')[4])\n if i['uri'].split('/')[4].split('__')[0\n ] == 'eval' and im_num in i['uri'].split('/')[4].split('-'\n )[0]:\n t['simialr_uri'].append(i)\n t['true'] += 1\n res.append(t)\n r = 0\n for i in range(a):\n r += res[i]['true']\n correct = r / (float(a) * 15)\n print('The top-5 correct percentage is %f' % correct)\n",
"step-4": "import json\nimport argparse\n\n\ndef parse_args():\n \"\"\"\n Parse input arguments.\n :return:\n \"\"\"\n parser = argparse.ArgumentParser(description='以图搜图API测试')\n parser.add_argument('--ak', dest='access_key', help=\n 'access_key for qiniu account', type=str)\n parser.add_argument('--sk', dest='secret_key', help=\n 'secret_key for qiniu account', type=str)\n parser.add_argument('--in', dest='json_file', help='json file', type=str)\n return parser.parse_args()\n\n\nif __name__ == '__main__':\n args = parse_args()\n file = open(args.json_file, 'r')\n res = []\n a = 0\n for line in file.readlines():\n dic = json.loads(line)\n img_url = dic['url']\n t = {'url': img_url, 'true': 0, 'simialr_uri': []}\n if not 'error' in dic.keys():\n a += 1\n im_num = img_url.split('.')[-2].split('/')[-1]\n print(im_num)\n for i in dic['result']:\n uri = []\n print(i['uri'].split('/')[4])\n if i['uri'].split('/')[4].split('__')[0\n ] == 'eval' and im_num in i['uri'].split('/')[4].split('-'\n )[0]:\n t['simialr_uri'].append(i)\n t['true'] += 1\n res.append(t)\n r = 0\n for i in range(a):\n r += res[i]['true']\n correct = r / (float(a) * 15)\n print('The top-5 correct percentage is %f' % correct)\n",
"step-5": "# -*- coding: utf-8 -*-\nimport json\nimport argparse\n\ndef parse_args():\n \"\"\"\n Parse input arguments.\n :return:\n \"\"\"\n parser = argparse.ArgumentParser(description='以图搜图API测试')\n parser.add_argument('--ak', dest='access_key', help='access_key for qiniu account',\n type=str)\n\n parser.add_argument('--sk', dest='secret_key', help='secret_key for qiniu account',\n type=str)\n\n parser.add_argument('--in', dest='json_file', help='json file',\n type=str)\n\n return parser.parse_args()\n\n\nif __name__ == '__main__':\n args = parse_args()\n\n file = open(args.json_file,'r')\n res = []\n a = 0\n\n for line in file.readlines():\n dic = json.loads(line)\n img_url = dic[\"url\"]\n t = {\"url\": img_url, \"true\":0, \"simialr_uri\":[]}\n if not \"error\" in dic.keys():\n a += 1\n #im_num = img_url.split('.')[-2].split('/')[-1].lstrip('image_group_test_')\n im_num = img_url.split('.')[-2].split('/')[-1]#.lstrip('image_group_test_')\n print(im_num)\n for i in dic[\"result\"]:\n uri = []\n #print((i[\"uri\"].split('/'))[4].split('__')[0]==\"eval\",(i[\"uri\"].split('/'))[4].split('-')[0])\n print((i[\"uri\"].split('/'))[4])\n if ((i[\"uri\"].split('/'))[4].split('__')[0]==\"eval\") and (im_num in (i[\"uri\"].split('/'))[4].split('-')[0]):\n t[\"simialr_uri\"].append(i)\n t[\"true\"] += 1\n res.append(t)\n\n r = 0\n for i in range(a):\n r += res[i][\"true\"]\n\n correct = r/(float(a)*15)\n print (\"The top-5 correct percentage is %f\" % correct)\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
from .submit import *
from .fck import *
|
flexible
|
{
"blob_id": "9a5ba88a61f5c27c0bc7b980fa9d865b52cbbb20",
"index": 7266,
"step-1": "<mask token>\n",
"step-2": "from .submit import *\nfrom .fck import *\n",
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0,
1
]
}
|
[
0,
1
] |
<|reserved_special_token_0|>
class BasicTestSuite(unittest.TestCase):
<|reserved_special_token_0|>
def test_hello_world(self):
self.assertEqual(hello_world(), 'hello world')
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class BasicTestSuite(unittest.TestCase):
"""Basic test cases."""
def test_hello_world(self):
self.assertEqual(hello_world(), 'hello world')
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class BasicTestSuite(unittest.TestCase):
"""Basic test cases."""
def test_hello_world(self):
self.assertEqual(hello_world(), 'hello world')
if __name__ == '__main__':
unittest.main()
<|reserved_special_token_1|>
import unittest
from .context import *
class BasicTestSuite(unittest.TestCase):
"""Basic test cases."""
def test_hello_world(self):
self.assertEqual(hello_world(), 'hello world')
if __name__ == '__main__':
unittest.main()
|
flexible
|
{
"blob_id": "6420d1b9da7ff205e1e138f72b194f63d1011012",
"index": 4554,
"step-1": "<mask token>\n\n\nclass BasicTestSuite(unittest.TestCase):\n <mask token>\n\n def test_hello_world(self):\n self.assertEqual(hello_world(), 'hello world')\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass BasicTestSuite(unittest.TestCase):\n \"\"\"Basic test cases.\"\"\"\n\n def test_hello_world(self):\n self.assertEqual(hello_world(), 'hello world')\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass BasicTestSuite(unittest.TestCase):\n \"\"\"Basic test cases.\"\"\"\n\n def test_hello_world(self):\n self.assertEqual(hello_world(), 'hello world')\n\n\nif __name__ == '__main__':\n unittest.main()\n",
"step-4": "import unittest\nfrom .context import *\n\n\nclass BasicTestSuite(unittest.TestCase):\n \"\"\"Basic test cases.\"\"\"\n\n def test_hello_world(self):\n self.assertEqual(hello_world(), 'hello world')\n\n\nif __name__ == '__main__':\n unittest.main()\n",
"step-5": null,
"step-ids": [
2,
3,
4,
5
]
}
|
[
2,
3,
4,
5
] |
<|reserved_special_token_0|>
class TestRailAPI(Session):
<|reserved_special_token_0|>
@property
def attachments(self) ->_category.Attachments:
"""
https://www.gurock.com/testrail/docs/api/reference/attachments
Use the following API methods to upload, retrieve and delete attachments.
"""
return _category.Attachments(self)
@property
def cases(self) ->_category.Cases:
"""
https://www.gurock.com/testrail/docs/api/reference/cases
Use the following API methods to request details about test cases and
to create or modify test cases.
"""
return _category.Cases(self)
@property
def case_fields(self) ->_category.CaseFields:
"""
https://www.gurock.com/testrail/docs/api/reference/case-fields
Use the following API methods to request details about custom fields
for test cases.
"""
return _category.CaseFields(self)
@property
def case_types(self) ->_category.CaseTypes:
"""
https://www.gurock.com/testrail/docs/api/reference/case-types
Use the following API methods to request details about case type.
"""
return _category.CaseTypes(self)
<|reserved_special_token_0|>
@property
def milestones(self) ->_category.Milestones:
"""
https://www.gurock.com/testrail/docs/api/reference/milestones
Use the following API methods to request details about milestones and
to create or modify milestones.
"""
return _category.Milestones(self)
@property
def plans(self) ->_category.Plans:
"""
https://www.gurock.com/testrail/docs/api/reference/plans
Use the following API methods to request details about test plans and
to create or modify test plans.
"""
return _category.Plans(self)
@property
def priorities(self) ->_category.Priorities:
"""
https://www.gurock.com/testrail/docs/api/reference/priorities
Use the following API methods to request details about priorities.
"""
return _category.Priorities(self)
@property
def projects(self) ->_category.Projects:
"""
https://www.gurock.com/testrail/docs/api/reference/projects
Use the following API methods to request details about projects and
to create or modify projects
"""
return _category.Projects(self)
@property
def reports(self) ->_category.Reports:
"""
https://www.gurock.com/testrail/docs/api/reference/reports
Use the following methods to get and run reports that have been
made accessible to the API.
"""
return _category.Reports(self)
<|reserved_special_token_0|>
<|reserved_special_token_0|>
@property
def runs(self) ->_category.Runs:
"""
https://www.gurock.com/testrail/docs/api/reference/runs
Use the following API methods to request details about test runs and
to create or modify test runs.
"""
return _category.Runs(self)
@property
def sections(self) ->_category.Sections:
"""
https://www.gurock.com/testrail/docs/api/reference/sections
Use the following API methods to request details about sections and
to create or modify sections.
Sections are used to group and organize test cases in test suites.
"""
return _category.Sections(self)
@property
def shared_steps(self) ->_category.SharedSteps:
"""
https://www.gurock.com/testrail/docs/api/reference/api-shared-steps
Use the following API methods to request details about shared steps.
"""
return _category.SharedSteps(self)
@property
def statuses(self) ->_category.Statuses:
"""
https://www.gurock.com/testrail/docs/api/reference/statuses
Use the following API methods to request details about test statuses.
"""
return _category.Statuses(self)
@property
def suites(self) ->_category.Suites:
"""
https://www.gurock.com/testrail/docs/api/reference/suites
Use the following API methods to request details about test suites and
to create or modify test suites.
"""
return _category.Suites(self)
@property
def templates(self) ->_category.Template:
"""
https://www.gurock.com/testrail/docs/api/reference/templates
Use the following API methods to request details about templates
(field layouts for cases/results)
"""
return _category.Template(self)
@property
def tests(self) ->_category.Tests:
"""
https://www.gurock.com/testrail/docs/api/reference/tests
Use the following API methods to request details about tests.
"""
return _category.Tests(self)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class TestRailAPI(Session):
<|reserved_special_token_0|>
@property
def attachments(self) ->_category.Attachments:
"""
https://www.gurock.com/testrail/docs/api/reference/attachments
Use the following API methods to upload, retrieve and delete attachments.
"""
return _category.Attachments(self)
@property
def cases(self) ->_category.Cases:
"""
https://www.gurock.com/testrail/docs/api/reference/cases
Use the following API methods to request details about test cases and
to create or modify test cases.
"""
return _category.Cases(self)
@property
def case_fields(self) ->_category.CaseFields:
"""
https://www.gurock.com/testrail/docs/api/reference/case-fields
Use the following API methods to request details about custom fields
for test cases.
"""
return _category.CaseFields(self)
@property
def case_types(self) ->_category.CaseTypes:
"""
https://www.gurock.com/testrail/docs/api/reference/case-types
Use the following API methods to request details about case type.
"""
return _category.CaseTypes(self)
@property
def configurations(self) ->_category.Configurations:
"""
https://www.gurock.com/testrail/docs/api/reference/configurations
Use the following API methods to request details about configurations and
to create or modify configurations.
"""
return _category.Configurations(self)
@property
def milestones(self) ->_category.Milestones:
"""
https://www.gurock.com/testrail/docs/api/reference/milestones
Use the following API methods to request details about milestones and
to create or modify milestones.
"""
return _category.Milestones(self)
@property
def plans(self) ->_category.Plans:
"""
https://www.gurock.com/testrail/docs/api/reference/plans
Use the following API methods to request details about test plans and
to create or modify test plans.
"""
return _category.Plans(self)
@property
def priorities(self) ->_category.Priorities:
"""
https://www.gurock.com/testrail/docs/api/reference/priorities
Use the following API methods to request details about priorities.
"""
return _category.Priorities(self)
@property
def projects(self) ->_category.Projects:
"""
https://www.gurock.com/testrail/docs/api/reference/projects
Use the following API methods to request details about projects and
to create or modify projects
"""
return _category.Projects(self)
@property
def reports(self) ->_category.Reports:
"""
https://www.gurock.com/testrail/docs/api/reference/reports
Use the following methods to get and run reports that have been
made accessible to the API.
"""
return _category.Reports(self)
@property
def results(self) ->_category.Results:
"""
https://www.gurock.com/testrail/docs/api/reference/results
Use the following API methods to request details about test results and
to add new test results.
"""
return _category.Results(self)
@property
def result_fields(self) ->_category.ResultFields:
"""
https://www.gurock.com/testrail/docs/api/reference/result-fields
Use the following API methods to request details about custom fields
for test results.
"""
return _category.ResultFields(self)
@property
def runs(self) ->_category.Runs:
"""
https://www.gurock.com/testrail/docs/api/reference/runs
Use the following API methods to request details about test runs and
to create or modify test runs.
"""
return _category.Runs(self)
@property
def sections(self) ->_category.Sections:
"""
https://www.gurock.com/testrail/docs/api/reference/sections
Use the following API methods to request details about sections and
to create or modify sections.
Sections are used to group and organize test cases in test suites.
"""
return _category.Sections(self)
@property
def shared_steps(self) ->_category.SharedSteps:
"""
https://www.gurock.com/testrail/docs/api/reference/api-shared-steps
Use the following API methods to request details about shared steps.
"""
return _category.SharedSteps(self)
@property
def statuses(self) ->_category.Statuses:
"""
https://www.gurock.com/testrail/docs/api/reference/statuses
Use the following API methods to request details about test statuses.
"""
return _category.Statuses(self)
@property
def suites(self) ->_category.Suites:
"""
https://www.gurock.com/testrail/docs/api/reference/suites
Use the following API methods to request details about test suites and
to create or modify test suites.
"""
return _category.Suites(self)
@property
def templates(self) ->_category.Template:
"""
https://www.gurock.com/testrail/docs/api/reference/templates
Use the following API methods to request details about templates
(field layouts for cases/results)
"""
return _category.Template(self)
@property
def tests(self) ->_category.Tests:
"""
https://www.gurock.com/testrail/docs/api/reference/tests
Use the following API methods to request details about tests.
"""
return _category.Tests(self)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class TestRailAPI(Session):
<|reserved_special_token_0|>
@property
def attachments(self) ->_category.Attachments:
"""
https://www.gurock.com/testrail/docs/api/reference/attachments
Use the following API methods to upload, retrieve and delete attachments.
"""
return _category.Attachments(self)
@property
def cases(self) ->_category.Cases:
"""
https://www.gurock.com/testrail/docs/api/reference/cases
Use the following API methods to request details about test cases and
to create or modify test cases.
"""
return _category.Cases(self)
@property
def case_fields(self) ->_category.CaseFields:
"""
https://www.gurock.com/testrail/docs/api/reference/case-fields
Use the following API methods to request details about custom fields
for test cases.
"""
return _category.CaseFields(self)
@property
def case_types(self) ->_category.CaseTypes:
"""
https://www.gurock.com/testrail/docs/api/reference/case-types
Use the following API methods to request details about case type.
"""
return _category.CaseTypes(self)
@property
def configurations(self) ->_category.Configurations:
"""
https://www.gurock.com/testrail/docs/api/reference/configurations
Use the following API methods to request details about configurations and
to create or modify configurations.
"""
return _category.Configurations(self)
@property
def milestones(self) ->_category.Milestones:
"""
https://www.gurock.com/testrail/docs/api/reference/milestones
Use the following API methods to request details about milestones and
to create or modify milestones.
"""
return _category.Milestones(self)
@property
def plans(self) ->_category.Plans:
"""
https://www.gurock.com/testrail/docs/api/reference/plans
Use the following API methods to request details about test plans and
to create or modify test plans.
"""
return _category.Plans(self)
@property
def priorities(self) ->_category.Priorities:
"""
https://www.gurock.com/testrail/docs/api/reference/priorities
Use the following API methods to request details about priorities.
"""
return _category.Priorities(self)
@property
def projects(self) ->_category.Projects:
"""
https://www.gurock.com/testrail/docs/api/reference/projects
Use the following API methods to request details about projects and
to create or modify projects
"""
return _category.Projects(self)
@property
def reports(self) ->_category.Reports:
"""
https://www.gurock.com/testrail/docs/api/reference/reports
Use the following methods to get and run reports that have been
made accessible to the API.
"""
return _category.Reports(self)
@property
def results(self) ->_category.Results:
"""
https://www.gurock.com/testrail/docs/api/reference/results
Use the following API methods to request details about test results and
to add new test results.
"""
return _category.Results(self)
@property
def result_fields(self) ->_category.ResultFields:
"""
https://www.gurock.com/testrail/docs/api/reference/result-fields
Use the following API methods to request details about custom fields
for test results.
"""
return _category.ResultFields(self)
@property
def runs(self) ->_category.Runs:
"""
https://www.gurock.com/testrail/docs/api/reference/runs
Use the following API methods to request details about test runs and
to create or modify test runs.
"""
return _category.Runs(self)
@property
def sections(self) ->_category.Sections:
"""
https://www.gurock.com/testrail/docs/api/reference/sections
Use the following API methods to request details about sections and
to create or modify sections.
Sections are used to group and organize test cases in test suites.
"""
return _category.Sections(self)
@property
def shared_steps(self) ->_category.SharedSteps:
"""
https://www.gurock.com/testrail/docs/api/reference/api-shared-steps
Use the following API methods to request details about shared steps.
"""
return _category.SharedSteps(self)
@property
def statuses(self) ->_category.Statuses:
"""
https://www.gurock.com/testrail/docs/api/reference/statuses
Use the following API methods to request details about test statuses.
"""
return _category.Statuses(self)
@property
def suites(self) ->_category.Suites:
"""
https://www.gurock.com/testrail/docs/api/reference/suites
Use the following API methods to request details about test suites and
to create or modify test suites.
"""
return _category.Suites(self)
@property
def templates(self) ->_category.Template:
"""
https://www.gurock.com/testrail/docs/api/reference/templates
Use the following API methods to request details about templates
(field layouts for cases/results)
"""
return _category.Template(self)
@property
def tests(self) ->_category.Tests:
"""
https://www.gurock.com/testrail/docs/api/reference/tests
Use the following API methods to request details about tests.
"""
return _category.Tests(self)
@property
def users(self) ->_category.Users:
"""
https://www.gurock.com/testrail/docs/api/reference/users
Use the following API methods to request details about users.
"""
return _category.Users(self)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class TestRailAPI(Session):
"""Categories"""
@property
def attachments(self) ->_category.Attachments:
"""
https://www.gurock.com/testrail/docs/api/reference/attachments
Use the following API methods to upload, retrieve and delete attachments.
"""
return _category.Attachments(self)
@property
def cases(self) ->_category.Cases:
"""
https://www.gurock.com/testrail/docs/api/reference/cases
Use the following API methods to request details about test cases and
to create or modify test cases.
"""
return _category.Cases(self)
@property
def case_fields(self) ->_category.CaseFields:
"""
https://www.gurock.com/testrail/docs/api/reference/case-fields
Use the following API methods to request details about custom fields
for test cases.
"""
return _category.CaseFields(self)
@property
def case_types(self) ->_category.CaseTypes:
"""
https://www.gurock.com/testrail/docs/api/reference/case-types
Use the following API methods to request details about case type.
"""
return _category.CaseTypes(self)
@property
def configurations(self) ->_category.Configurations:
"""
https://www.gurock.com/testrail/docs/api/reference/configurations
Use the following API methods to request details about configurations and
to create or modify configurations.
"""
return _category.Configurations(self)
@property
def milestones(self) ->_category.Milestones:
"""
https://www.gurock.com/testrail/docs/api/reference/milestones
Use the following API methods to request details about milestones and
to create or modify milestones.
"""
return _category.Milestones(self)
@property
def plans(self) ->_category.Plans:
"""
https://www.gurock.com/testrail/docs/api/reference/plans
Use the following API methods to request details about test plans and
to create or modify test plans.
"""
return _category.Plans(self)
@property
def priorities(self) ->_category.Priorities:
"""
https://www.gurock.com/testrail/docs/api/reference/priorities
Use the following API methods to request details about priorities.
"""
return _category.Priorities(self)
@property
def projects(self) ->_category.Projects:
"""
https://www.gurock.com/testrail/docs/api/reference/projects
Use the following API methods to request details about projects and
to create or modify projects
"""
return _category.Projects(self)
@property
def reports(self) ->_category.Reports:
"""
https://www.gurock.com/testrail/docs/api/reference/reports
Use the following methods to get and run reports that have been
made accessible to the API.
"""
return _category.Reports(self)
@property
def results(self) ->_category.Results:
"""
https://www.gurock.com/testrail/docs/api/reference/results
Use the following API methods to request details about test results and
to add new test results.
"""
return _category.Results(self)
@property
def result_fields(self) ->_category.ResultFields:
"""
https://www.gurock.com/testrail/docs/api/reference/result-fields
Use the following API methods to request details about custom fields
for test results.
"""
return _category.ResultFields(self)
@property
def runs(self) ->_category.Runs:
"""
https://www.gurock.com/testrail/docs/api/reference/runs
Use the following API methods to request details about test runs and
to create or modify test runs.
"""
return _category.Runs(self)
@property
def sections(self) ->_category.Sections:
"""
https://www.gurock.com/testrail/docs/api/reference/sections
Use the following API methods to request details about sections and
to create or modify sections.
Sections are used to group and organize test cases in test suites.
"""
return _category.Sections(self)
@property
def shared_steps(self) ->_category.SharedSteps:
"""
https://www.gurock.com/testrail/docs/api/reference/api-shared-steps
Use the following API methods to request details about shared steps.
"""
return _category.SharedSteps(self)
@property
def statuses(self) ->_category.Statuses:
"""
https://www.gurock.com/testrail/docs/api/reference/statuses
Use the following API methods to request details about test statuses.
"""
return _category.Statuses(self)
@property
def suites(self) ->_category.Suites:
"""
https://www.gurock.com/testrail/docs/api/reference/suites
Use the following API methods to request details about test suites and
to create or modify test suites.
"""
return _category.Suites(self)
@property
def templates(self) ->_category.Template:
"""
https://www.gurock.com/testrail/docs/api/reference/templates
Use the following API methods to request details about templates
(field layouts for cases/results)
"""
return _category.Template(self)
@property
def tests(self) ->_category.Tests:
"""
https://www.gurock.com/testrail/docs/api/reference/tests
Use the following API methods to request details about tests.
"""
return _category.Tests(self)
@property
def users(self) ->_category.Users:
"""
https://www.gurock.com/testrail/docs/api/reference/users
Use the following API methods to request details about users.
"""
return _category.Users(self)
<|reserved_special_token_1|>
"""
TestRail API Categories
"""
from . import _category
from ._session import Session
class TestRailAPI(Session):
"""Categories"""
@property
def attachments(self) -> _category.Attachments:
"""
https://www.gurock.com/testrail/docs/api/reference/attachments
Use the following API methods to upload, retrieve and delete attachments.
"""
return _category.Attachments(self)
@property
def cases(self) -> _category.Cases:
"""
https://www.gurock.com/testrail/docs/api/reference/cases
Use the following API methods to request details about test cases and
to create or modify test cases.
"""
return _category.Cases(self)
@property
def case_fields(self) -> _category.CaseFields:
"""
https://www.gurock.com/testrail/docs/api/reference/case-fields
Use the following API methods to request details about custom fields
for test cases.
"""
return _category.CaseFields(self)
@property
def case_types(self) -> _category.CaseTypes:
"""
https://www.gurock.com/testrail/docs/api/reference/case-types
Use the following API methods to request details about case type.
"""
return _category.CaseTypes(self)
@property
def configurations(self) -> _category.Configurations:
"""
https://www.gurock.com/testrail/docs/api/reference/configurations
Use the following API methods to request details about configurations and
to create or modify configurations.
"""
return _category.Configurations(self)
@property
def milestones(self) -> _category.Milestones:
"""
https://www.gurock.com/testrail/docs/api/reference/milestones
Use the following API methods to request details about milestones and
to create or modify milestones.
"""
return _category.Milestones(self)
@property
def plans(self) -> _category.Plans:
"""
https://www.gurock.com/testrail/docs/api/reference/plans
Use the following API methods to request details about test plans and
to create or modify test plans.
"""
return _category.Plans(self)
@property
def priorities(self) -> _category.Priorities:
"""
https://www.gurock.com/testrail/docs/api/reference/priorities
Use the following API methods to request details about priorities.
"""
return _category.Priorities(self)
@property
def projects(self) -> _category.Projects:
"""
https://www.gurock.com/testrail/docs/api/reference/projects
Use the following API methods to request details about projects and
to create or modify projects
"""
return _category.Projects(self)
@property
def reports(self) -> _category.Reports:
"""
https://www.gurock.com/testrail/docs/api/reference/reports
Use the following methods to get and run reports that have been
made accessible to the API.
"""
return _category.Reports(self)
@property
def results(self) -> _category.Results:
"""
https://www.gurock.com/testrail/docs/api/reference/results
Use the following API methods to request details about test results and
to add new test results.
"""
return _category.Results(self)
@property
def result_fields(self) -> _category.ResultFields:
"""
https://www.gurock.com/testrail/docs/api/reference/result-fields
Use the following API methods to request details about custom fields
for test results.
"""
return _category.ResultFields(self)
@property
def runs(self) -> _category.Runs:
"""
https://www.gurock.com/testrail/docs/api/reference/runs
Use the following API methods to request details about test runs and
to create or modify test runs.
"""
return _category.Runs(self)
@property
def sections(self) -> _category.Sections:
"""
https://www.gurock.com/testrail/docs/api/reference/sections
Use the following API methods to request details about sections and
to create or modify sections.
Sections are used to group and organize test cases in test suites.
"""
return _category.Sections(self)
@property
def shared_steps(self) -> _category.SharedSteps:
"""
https://www.gurock.com/testrail/docs/api/reference/api-shared-steps
Use the following API methods to request details about shared steps.
"""
return _category.SharedSteps(self)
@property
def statuses(self) -> _category.Statuses:
"""
https://www.gurock.com/testrail/docs/api/reference/statuses
Use the following API methods to request details about test statuses.
"""
return _category.Statuses(self)
@property
def suites(self) -> _category.Suites:
"""
https://www.gurock.com/testrail/docs/api/reference/suites
Use the following API methods to request details about test suites and
to create or modify test suites.
"""
return _category.Suites(self)
@property
def templates(self) -> _category.Template:
"""
https://www.gurock.com/testrail/docs/api/reference/templates
Use the following API methods to request details about templates
(field layouts for cases/results)
"""
return _category.Template(self)
@property
def tests(self) -> _category.Tests:
"""
https://www.gurock.com/testrail/docs/api/reference/tests
Use the following API methods to request details about tests.
"""
return _category.Tests(self)
@property
def users(self) -> _category.Users:
"""
https://www.gurock.com/testrail/docs/api/reference/users
Use the following API methods to request details about users.
"""
return _category.Users(self)
|
flexible
|
{
"blob_id": "c2467e94a2ad474f0413e7ee3863aa134bf9c51f",
"index": 3399,
"step-1": "<mask token>\n\n\nclass TestRailAPI(Session):\n <mask token>\n\n @property\n def attachments(self) ->_category.Attachments:\n \"\"\"\n https://www.gurock.com/testrail/docs/api/reference/attachments\n Use the following API methods to upload, retrieve and delete attachments.\n \"\"\"\n return _category.Attachments(self)\n\n @property\n def cases(self) ->_category.Cases:\n \"\"\"\n https://www.gurock.com/testrail/docs/api/reference/cases\n Use the following API methods to request details about test cases and\n to create or modify test cases.\n \"\"\"\n return _category.Cases(self)\n\n @property\n def case_fields(self) ->_category.CaseFields:\n \"\"\"\n https://www.gurock.com/testrail/docs/api/reference/case-fields\n Use the following API methods to request details about custom fields\n for test cases.\n \"\"\"\n return _category.CaseFields(self)\n\n @property\n def case_types(self) ->_category.CaseTypes:\n \"\"\"\n https://www.gurock.com/testrail/docs/api/reference/case-types\n Use the following API methods to request details about case type.\n \"\"\"\n return _category.CaseTypes(self)\n <mask token>\n\n @property\n def milestones(self) ->_category.Milestones:\n \"\"\"\n https://www.gurock.com/testrail/docs/api/reference/milestones\n Use the following API methods to request details about milestones and\n to create or modify milestones.\n \"\"\"\n return _category.Milestones(self)\n\n @property\n def plans(self) ->_category.Plans:\n \"\"\"\n https://www.gurock.com/testrail/docs/api/reference/plans\n Use the following API methods to request details about test plans and\n to create or modify test plans.\n \"\"\"\n return _category.Plans(self)\n\n @property\n def priorities(self) ->_category.Priorities:\n \"\"\"\n https://www.gurock.com/testrail/docs/api/reference/priorities\n Use the following API methods to request details about priorities.\n \"\"\"\n return _category.Priorities(self)\n\n @property\n def projects(self) ->_category.Projects:\n \"\"\"\n https://www.gurock.com/testrail/docs/api/reference/projects\n Use the following API methods to request details about projects and\n to create or modify projects\n \"\"\"\n return _category.Projects(self)\n\n @property\n def reports(self) ->_category.Reports:\n \"\"\"\n https://www.gurock.com/testrail/docs/api/reference/reports\n Use the following methods to get and run reports that have been\n made accessible to the API.\n \"\"\"\n return _category.Reports(self)\n <mask token>\n <mask token>\n\n @property\n def runs(self) ->_category.Runs:\n \"\"\"\n https://www.gurock.com/testrail/docs/api/reference/runs\n Use the following API methods to request details about test runs and\n to create or modify test runs.\n \"\"\"\n return _category.Runs(self)\n\n @property\n def sections(self) ->_category.Sections:\n \"\"\"\n https://www.gurock.com/testrail/docs/api/reference/sections\n Use the following API methods to request details about sections and\n to create or modify sections.\n Sections are used to group and organize test cases in test suites.\n \"\"\"\n return _category.Sections(self)\n\n @property\n def shared_steps(self) ->_category.SharedSteps:\n \"\"\"\n https://www.gurock.com/testrail/docs/api/reference/api-shared-steps\n Use the following API methods to request details about shared steps.\n \"\"\"\n return _category.SharedSteps(self)\n\n @property\n def statuses(self) ->_category.Statuses:\n \"\"\"\n https://www.gurock.com/testrail/docs/api/reference/statuses\n Use the following API methods to request details about test statuses.\n 
\"\"\"\n return _category.Statuses(self)\n\n @property\n def suites(self) ->_category.Suites:\n \"\"\"\n https://www.gurock.com/testrail/docs/api/reference/suites\n Use the following API methods to request details about test suites and\n to create or modify test suites.\n \"\"\"\n return _category.Suites(self)\n\n @property\n def templates(self) ->_category.Template:\n \"\"\"\n https://www.gurock.com/testrail/docs/api/reference/templates\n Use the following API methods to request details about templates\n (field layouts for cases/results)\n \"\"\"\n return _category.Template(self)\n\n @property\n def tests(self) ->_category.Tests:\n \"\"\"\n https://www.gurock.com/testrail/docs/api/reference/tests\n Use the following API methods to request details about tests.\n \"\"\"\n return _category.Tests(self)\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass TestRailAPI(Session):\n <mask token>\n\n @property\n def attachments(self) ->_category.Attachments:\n \"\"\"\n https://www.gurock.com/testrail/docs/api/reference/attachments\n Use the following API methods to upload, retrieve and delete attachments.\n \"\"\"\n return _category.Attachments(self)\n\n @property\n def cases(self) ->_category.Cases:\n \"\"\"\n https://www.gurock.com/testrail/docs/api/reference/cases\n Use the following API methods to request details about test cases and\n to create or modify test cases.\n \"\"\"\n return _category.Cases(self)\n\n @property\n def case_fields(self) ->_category.CaseFields:\n \"\"\"\n https://www.gurock.com/testrail/docs/api/reference/case-fields\n Use the following API methods to request details about custom fields\n for test cases.\n \"\"\"\n return _category.CaseFields(self)\n\n @property\n def case_types(self) ->_category.CaseTypes:\n \"\"\"\n https://www.gurock.com/testrail/docs/api/reference/case-types\n Use the following API methods to request details about case type.\n \"\"\"\n return _category.CaseTypes(self)\n\n @property\n def configurations(self) ->_category.Configurations:\n \"\"\"\n https://www.gurock.com/testrail/docs/api/reference/configurations\n Use the following API methods to request details about configurations and\n to create or modify configurations.\n \"\"\"\n return _category.Configurations(self)\n\n @property\n def milestones(self) ->_category.Milestones:\n \"\"\"\n https://www.gurock.com/testrail/docs/api/reference/milestones\n Use the following API methods to request details about milestones and\n to create or modify milestones.\n \"\"\"\n return _category.Milestones(self)\n\n @property\n def plans(self) ->_category.Plans:\n \"\"\"\n https://www.gurock.com/testrail/docs/api/reference/plans\n Use the following API methods to request details about test plans and\n to create or modify test plans.\n \"\"\"\n return _category.Plans(self)\n\n @property\n def priorities(self) ->_category.Priorities:\n \"\"\"\n https://www.gurock.com/testrail/docs/api/reference/priorities\n Use the following API methods to request details about priorities.\n \"\"\"\n return _category.Priorities(self)\n\n @property\n def projects(self) ->_category.Projects:\n \"\"\"\n https://www.gurock.com/testrail/docs/api/reference/projects\n Use the following API methods to request details about projects and\n to create or modify projects\n \"\"\"\n return _category.Projects(self)\n\n @property\n def reports(self) ->_category.Reports:\n \"\"\"\n https://www.gurock.com/testrail/docs/api/reference/reports\n Use the following methods to get and run reports that have been\n made accessible to the API.\n \"\"\"\n return _category.Reports(self)\n\n @property\n def results(self) ->_category.Results:\n \"\"\"\n https://www.gurock.com/testrail/docs/api/reference/results\n Use the following API methods to request details about test results and\n to add new test results.\n \"\"\"\n return _category.Results(self)\n\n @property\n def result_fields(self) ->_category.ResultFields:\n \"\"\"\n https://www.gurock.com/testrail/docs/api/reference/result-fields\n Use the following API methods to request details about custom fields\n for test results.\n \"\"\"\n return _category.ResultFields(self)\n\n @property\n def runs(self) ->_category.Runs:\n \"\"\"\n https://www.gurock.com/testrail/docs/api/reference/runs\n Use the following API methods to request details about test runs and\n to create or modify test runs.\n \"\"\"\n return 
_category.Runs(self)\n\n @property\n def sections(self) ->_category.Sections:\n \"\"\"\n https://www.gurock.com/testrail/docs/api/reference/sections\n Use the following API methods to request details about sections and\n to create or modify sections.\n Sections are used to group and organize test cases in test suites.\n \"\"\"\n return _category.Sections(self)\n\n @property\n def shared_steps(self) ->_category.SharedSteps:\n \"\"\"\n https://www.gurock.com/testrail/docs/api/reference/api-shared-steps\n Use the following API methods to request details about shared steps.\n \"\"\"\n return _category.SharedSteps(self)\n\n @property\n def statuses(self) ->_category.Statuses:\n \"\"\"\n https://www.gurock.com/testrail/docs/api/reference/statuses\n Use the following API methods to request details about test statuses.\n \"\"\"\n return _category.Statuses(self)\n\n @property\n def suites(self) ->_category.Suites:\n \"\"\"\n https://www.gurock.com/testrail/docs/api/reference/suites\n Use the following API methods to request details about test suites and\n to create or modify test suites.\n \"\"\"\n return _category.Suites(self)\n\n @property\n def templates(self) ->_category.Template:\n \"\"\"\n https://www.gurock.com/testrail/docs/api/reference/templates\n Use the following API methods to request details about templates\n (field layouts for cases/results)\n \"\"\"\n return _category.Template(self)\n\n @property\n def tests(self) ->_category.Tests:\n \"\"\"\n https://www.gurock.com/testrail/docs/api/reference/tests\n Use the following API methods to request details about tests.\n \"\"\"\n return _category.Tests(self)\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass TestRailAPI(Session):\n <mask token>\n\n @property\n def attachments(self) ->_category.Attachments:\n \"\"\"\n https://www.gurock.com/testrail/docs/api/reference/attachments\n Use the following API methods to upload, retrieve and delete attachments.\n \"\"\"\n return _category.Attachments(self)\n\n @property\n def cases(self) ->_category.Cases:\n \"\"\"\n https://www.gurock.com/testrail/docs/api/reference/cases\n Use the following API methods to request details about test cases and\n to create or modify test cases.\n \"\"\"\n return _category.Cases(self)\n\n @property\n def case_fields(self) ->_category.CaseFields:\n \"\"\"\n https://www.gurock.com/testrail/docs/api/reference/case-fields\n Use the following API methods to request details about custom fields\n for test cases.\n \"\"\"\n return _category.CaseFields(self)\n\n @property\n def case_types(self) ->_category.CaseTypes:\n \"\"\"\n https://www.gurock.com/testrail/docs/api/reference/case-types\n Use the following API methods to request details about case type.\n \"\"\"\n return _category.CaseTypes(self)\n\n @property\n def configurations(self) ->_category.Configurations:\n \"\"\"\n https://www.gurock.com/testrail/docs/api/reference/configurations\n Use the following API methods to request details about configurations and\n to create or modify configurations.\n \"\"\"\n return _category.Configurations(self)\n\n @property\n def milestones(self) ->_category.Milestones:\n \"\"\"\n https://www.gurock.com/testrail/docs/api/reference/milestones\n Use the following API methods to request details about milestones and\n to create or modify milestones.\n \"\"\"\n return _category.Milestones(self)\n\n @property\n def plans(self) ->_category.Plans:\n \"\"\"\n https://www.gurock.com/testrail/docs/api/reference/plans\n Use the following API methods to request details about test plans and\n to create or modify test plans.\n \"\"\"\n return _category.Plans(self)\n\n @property\n def priorities(self) ->_category.Priorities:\n \"\"\"\n https://www.gurock.com/testrail/docs/api/reference/priorities\n Use the following API methods to request details about priorities.\n \"\"\"\n return _category.Priorities(self)\n\n @property\n def projects(self) ->_category.Projects:\n \"\"\"\n https://www.gurock.com/testrail/docs/api/reference/projects\n Use the following API methods to request details about projects and\n to create or modify projects\n \"\"\"\n return _category.Projects(self)\n\n @property\n def reports(self) ->_category.Reports:\n \"\"\"\n https://www.gurock.com/testrail/docs/api/reference/reports\n Use the following methods to get and run reports that have been\n made accessible to the API.\n \"\"\"\n return _category.Reports(self)\n\n @property\n def results(self) ->_category.Results:\n \"\"\"\n https://www.gurock.com/testrail/docs/api/reference/results\n Use the following API methods to request details about test results and\n to add new test results.\n \"\"\"\n return _category.Results(self)\n\n @property\n def result_fields(self) ->_category.ResultFields:\n \"\"\"\n https://www.gurock.com/testrail/docs/api/reference/result-fields\n Use the following API methods to request details about custom fields\n for test results.\n \"\"\"\n return _category.ResultFields(self)\n\n @property\n def runs(self) ->_category.Runs:\n \"\"\"\n https://www.gurock.com/testrail/docs/api/reference/runs\n Use the following API methods to request details about test runs and\n to create or modify test runs.\n \"\"\"\n return 
_category.Runs(self)\n\n @property\n def sections(self) ->_category.Sections:\n \"\"\"\n https://www.gurock.com/testrail/docs/api/reference/sections\n Use the following API methods to request details about sections and\n to create or modify sections.\n Sections are used to group and organize test cases in test suites.\n \"\"\"\n return _category.Sections(self)\n\n @property\n def shared_steps(self) ->_category.SharedSteps:\n \"\"\"\n https://www.gurock.com/testrail/docs/api/reference/api-shared-steps\n Use the following API methods to request details about shared steps.\n \"\"\"\n return _category.SharedSteps(self)\n\n @property\n def statuses(self) ->_category.Statuses:\n \"\"\"\n https://www.gurock.com/testrail/docs/api/reference/statuses\n Use the following API methods to request details about test statuses.\n \"\"\"\n return _category.Statuses(self)\n\n @property\n def suites(self) ->_category.Suites:\n \"\"\"\n https://www.gurock.com/testrail/docs/api/reference/suites\n Use the following API methods to request details about test suites and\n to create or modify test suites.\n \"\"\"\n return _category.Suites(self)\n\n @property\n def templates(self) ->_category.Template:\n \"\"\"\n https://www.gurock.com/testrail/docs/api/reference/templates\n Use the following API methods to request details about templates\n (field layouts for cases/results)\n \"\"\"\n return _category.Template(self)\n\n @property\n def tests(self) ->_category.Tests:\n \"\"\"\n https://www.gurock.com/testrail/docs/api/reference/tests\n Use the following API methods to request details about tests.\n \"\"\"\n return _category.Tests(self)\n\n @property\n def users(self) ->_category.Users:\n \"\"\"\n https://www.gurock.com/testrail/docs/api/reference/users\n Use the following API methods to request details about users.\n \"\"\"\n return _category.Users(self)\n",
"step-4": "<mask token>\n\n\nclass TestRailAPI(Session):\n \"\"\"Categories\"\"\"\n\n @property\n def attachments(self) ->_category.Attachments:\n \"\"\"\n https://www.gurock.com/testrail/docs/api/reference/attachments\n Use the following API methods to upload, retrieve and delete attachments.\n \"\"\"\n return _category.Attachments(self)\n\n @property\n def cases(self) ->_category.Cases:\n \"\"\"\n https://www.gurock.com/testrail/docs/api/reference/cases\n Use the following API methods to request details about test cases and\n to create or modify test cases.\n \"\"\"\n return _category.Cases(self)\n\n @property\n def case_fields(self) ->_category.CaseFields:\n \"\"\"\n https://www.gurock.com/testrail/docs/api/reference/case-fields\n Use the following API methods to request details about custom fields\n for test cases.\n \"\"\"\n return _category.CaseFields(self)\n\n @property\n def case_types(self) ->_category.CaseTypes:\n \"\"\"\n https://www.gurock.com/testrail/docs/api/reference/case-types\n Use the following API methods to request details about case type.\n \"\"\"\n return _category.CaseTypes(self)\n\n @property\n def configurations(self) ->_category.Configurations:\n \"\"\"\n https://www.gurock.com/testrail/docs/api/reference/configurations\n Use the following API methods to request details about configurations and\n to create or modify configurations.\n \"\"\"\n return _category.Configurations(self)\n\n @property\n def milestones(self) ->_category.Milestones:\n \"\"\"\n https://www.gurock.com/testrail/docs/api/reference/milestones\n Use the following API methods to request details about milestones and\n to create or modify milestones.\n \"\"\"\n return _category.Milestones(self)\n\n @property\n def plans(self) ->_category.Plans:\n \"\"\"\n https://www.gurock.com/testrail/docs/api/reference/plans\n Use the following API methods to request details about test plans and\n to create or modify test plans.\n \"\"\"\n return _category.Plans(self)\n\n @property\n def priorities(self) ->_category.Priorities:\n \"\"\"\n https://www.gurock.com/testrail/docs/api/reference/priorities\n Use the following API methods to request details about priorities.\n \"\"\"\n return _category.Priorities(self)\n\n @property\n def projects(self) ->_category.Projects:\n \"\"\"\n https://www.gurock.com/testrail/docs/api/reference/projects\n Use the following API methods to request details about projects and\n to create or modify projects\n \"\"\"\n return _category.Projects(self)\n\n @property\n def reports(self) ->_category.Reports:\n \"\"\"\n https://www.gurock.com/testrail/docs/api/reference/reports\n Use the following methods to get and run reports that have been\n made accessible to the API.\n \"\"\"\n return _category.Reports(self)\n\n @property\n def results(self) ->_category.Results:\n \"\"\"\n https://www.gurock.com/testrail/docs/api/reference/results\n Use the following API methods to request details about test results and\n to add new test results.\n \"\"\"\n return _category.Results(self)\n\n @property\n def result_fields(self) ->_category.ResultFields:\n \"\"\"\n https://www.gurock.com/testrail/docs/api/reference/result-fields\n Use the following API methods to request details about custom fields\n for test results.\n \"\"\"\n return _category.ResultFields(self)\n\n @property\n def runs(self) ->_category.Runs:\n \"\"\"\n https://www.gurock.com/testrail/docs/api/reference/runs\n Use the following API methods to request details about test runs and\n to create or modify test runs.\n \"\"\"\n return 
_category.Runs(self)\n\n @property\n def sections(self) ->_category.Sections:\n \"\"\"\n https://www.gurock.com/testrail/docs/api/reference/sections\n Use the following API methods to request details about sections and\n to create or modify sections.\n Sections are used to group and organize test cases in test suites.\n \"\"\"\n return _category.Sections(self)\n\n @property\n def shared_steps(self) ->_category.SharedSteps:\n \"\"\"\n https://www.gurock.com/testrail/docs/api/reference/api-shared-steps\n Use the following API methods to request details about shared steps.\n \"\"\"\n return _category.SharedSteps(self)\n\n @property\n def statuses(self) ->_category.Statuses:\n \"\"\"\n https://www.gurock.com/testrail/docs/api/reference/statuses\n Use the following API methods to request details about test statuses.\n \"\"\"\n return _category.Statuses(self)\n\n @property\n def suites(self) ->_category.Suites:\n \"\"\"\n https://www.gurock.com/testrail/docs/api/reference/suites\n Use the following API methods to request details about test suites and\n to create or modify test suites.\n \"\"\"\n return _category.Suites(self)\n\n @property\n def templates(self) ->_category.Template:\n \"\"\"\n https://www.gurock.com/testrail/docs/api/reference/templates\n Use the following API methods to request details about templates\n (field layouts for cases/results)\n \"\"\"\n return _category.Template(self)\n\n @property\n def tests(self) ->_category.Tests:\n \"\"\"\n https://www.gurock.com/testrail/docs/api/reference/tests\n Use the following API methods to request details about tests.\n \"\"\"\n return _category.Tests(self)\n\n @property\n def users(self) ->_category.Users:\n \"\"\"\n https://www.gurock.com/testrail/docs/api/reference/users\n Use the following API methods to request details about users.\n \"\"\"\n return _category.Users(self)\n",
"step-5": "\"\"\"\nTestRail API Categories\n\"\"\"\n\nfrom . import _category\nfrom ._session import Session\n\n\nclass TestRailAPI(Session):\n \"\"\"Categories\"\"\"\n\n @property\n def attachments(self) -> _category.Attachments:\n \"\"\"\n https://www.gurock.com/testrail/docs/api/reference/attachments\n Use the following API methods to upload, retrieve and delete attachments.\n \"\"\"\n return _category.Attachments(self)\n\n @property\n def cases(self) -> _category.Cases:\n \"\"\"\n https://www.gurock.com/testrail/docs/api/reference/cases\n Use the following API methods to request details about test cases and\n to create or modify test cases.\n \"\"\"\n return _category.Cases(self)\n\n @property\n def case_fields(self) -> _category.CaseFields:\n \"\"\"\n https://www.gurock.com/testrail/docs/api/reference/case-fields\n Use the following API methods to request details about custom fields\n for test cases.\n \"\"\"\n return _category.CaseFields(self)\n\n @property\n def case_types(self) -> _category.CaseTypes:\n \"\"\"\n https://www.gurock.com/testrail/docs/api/reference/case-types\n Use the following API methods to request details about case type.\n \"\"\"\n return _category.CaseTypes(self)\n\n @property\n def configurations(self) -> _category.Configurations:\n \"\"\"\n https://www.gurock.com/testrail/docs/api/reference/configurations\n Use the following API methods to request details about configurations and\n to create or modify configurations.\n \"\"\"\n return _category.Configurations(self)\n\n @property\n def milestones(self) -> _category.Milestones:\n \"\"\"\n https://www.gurock.com/testrail/docs/api/reference/milestones\n Use the following API methods to request details about milestones and\n to create or modify milestones.\n \"\"\"\n return _category.Milestones(self)\n\n @property\n def plans(self) -> _category.Plans:\n \"\"\"\n https://www.gurock.com/testrail/docs/api/reference/plans\n Use the following API methods to request details about test plans and\n to create or modify test plans.\n \"\"\"\n return _category.Plans(self)\n\n @property\n def priorities(self) -> _category.Priorities:\n \"\"\"\n https://www.gurock.com/testrail/docs/api/reference/priorities\n Use the following API methods to request details about priorities.\n \"\"\"\n return _category.Priorities(self)\n\n @property\n def projects(self) -> _category.Projects:\n \"\"\"\n https://www.gurock.com/testrail/docs/api/reference/projects\n Use the following API methods to request details about projects and\n to create or modify projects\n \"\"\"\n return _category.Projects(self)\n\n @property\n def reports(self) -> _category.Reports:\n \"\"\"\n https://www.gurock.com/testrail/docs/api/reference/reports\n Use the following methods to get and run reports that have been\n made accessible to the API.\n \"\"\"\n return _category.Reports(self)\n\n @property\n def results(self) -> _category.Results:\n \"\"\"\n https://www.gurock.com/testrail/docs/api/reference/results\n Use the following API methods to request details about test results and\n to add new test results.\n \"\"\"\n return _category.Results(self)\n\n @property\n def result_fields(self) -> _category.ResultFields:\n \"\"\"\n https://www.gurock.com/testrail/docs/api/reference/result-fields\n Use the following API methods to request details about custom fields\n for test results.\n \"\"\"\n return _category.ResultFields(self)\n\n @property\n def runs(self) -> _category.Runs:\n \"\"\"\n https://www.gurock.com/testrail/docs/api/reference/runs\n Use the following API 
methods to request details about test runs and\n to create or modify test runs.\n \"\"\"\n return _category.Runs(self)\n\n @property\n def sections(self) -> _category.Sections:\n \"\"\"\n https://www.gurock.com/testrail/docs/api/reference/sections\n Use the following API methods to request details about sections and\n to create or modify sections.\n Sections are used to group and organize test cases in test suites.\n \"\"\"\n return _category.Sections(self)\n\n @property\n def shared_steps(self) -> _category.SharedSteps:\n \"\"\"\n https://www.gurock.com/testrail/docs/api/reference/api-shared-steps\n Use the following API methods to request details about shared steps.\n \"\"\"\n return _category.SharedSteps(self)\n\n @property\n def statuses(self) -> _category.Statuses:\n \"\"\"\n https://www.gurock.com/testrail/docs/api/reference/statuses\n Use the following API methods to request details about test statuses.\n \"\"\"\n return _category.Statuses(self)\n\n @property\n def suites(self) -> _category.Suites:\n \"\"\"\n https://www.gurock.com/testrail/docs/api/reference/suites\n Use the following API methods to request details about test suites and\n to create or modify test suites.\n \"\"\"\n return _category.Suites(self)\n\n @property\n def templates(self) -> _category.Template:\n \"\"\"\n https://www.gurock.com/testrail/docs/api/reference/templates\n Use the following API methods to request details about templates\n (field layouts for cases/results)\n \"\"\"\n return _category.Template(self)\n\n @property\n def tests(self) -> _category.Tests:\n \"\"\"\n https://www.gurock.com/testrail/docs/api/reference/tests\n Use the following API methods to request details about tests.\n \"\"\"\n return _category.Tests(self)\n\n @property\n def users(self) -> _category.Users:\n \"\"\"\n https://www.gurock.com/testrail/docs/api/reference/users\n Use the following API methods to request details about users.\n \"\"\"\n return _category.Users(self)\n",
"step-ids": [
17,
20,
21,
22,
24
]
}
|
[
17,
20,
21,
22,
24
] |
import org.cogroo.gc.cmdline
import typing
class __module_protocol__(typing.Protocol):
# A module protocol which reflects the result of ``jp.JPackage("org.cogroo.gc")``.
cmdline: org.cogroo.gc.cmdline.__module_protocol__
|
normal
|
{
"blob_id": "f615e7bbfa9179d0bfb321242cd8df4ae7b48993",
"index": 3181,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass __module_protocol__(typing.Protocol):\n cmdline: org.cogroo.gc.cmdline.__module_protocol__\n",
"step-3": "import org.cogroo.gc.cmdline\nimport typing\n\n\nclass __module_protocol__(typing.Protocol):\n cmdline: org.cogroo.gc.cmdline.__module_protocol__\n",
"step-4": "import org.cogroo.gc.cmdline\nimport typing\n\n\nclass __module_protocol__(typing.Protocol):\n # A module protocol which reflects the result of ``jp.JPackage(\"org.cogroo.gc\")``.\n\n cmdline: org.cogroo.gc.cmdline.__module_protocol__\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
class Odbserver(models.Model):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
class Meta:
ordering = ['name']
verbose_name = '服务器信息'
verbose_name_plural = verbose_name
<|reserved_special_token_0|>
<|reserved_special_token_0|>
class Ousers(models.Model):
dbserver = models.ForeignKey(Odbserver, null=True, blank=True,
verbose_name='服务器')
user = models.CharField(max_length=20, verbose_name='用户名')
passwd = models.CharField(max_length=20, verbose_name='密码')
tablespace = models.CharField(max_length=20, null=True, blank=True,
verbose_name='表空间')
status = models.IntegerField(choices=USER_STATUS_CHOISE, verbose_name='状态')
business = models.CharField(null=True, blank=True, max_length=100,
verbose_name='业务')
created = models.DateField(null=True, blank=True, verbose_name='创建时间')
comment = models.TextField(null=True, blank=True, verbose_name='备注')
class Meta:
ordering = ['user']
verbose_name = '数据库用户信息'
verbose_name_plural = verbose_name
def __unicode__(self):
return u'%s' % self.business
def __str__(self):
return u'%s' % self.business
class Osysusers(models.Model):
dbserver = models.ForeignKey(Odbserver, null=True, blank=True,
verbose_name='服务器')
name = models.CharField(max_length=20, verbose_name='名称')
user = models.CharField(max_length=20, verbose_name='用户')
passwd = models.CharField(max_length=20, verbose_name='密码')
class Meta:
ordering = ['dbserver']
verbose_name = '系统用户信息'
verbose_name_plural = verbose_name
def __unicode__(self):
return u'%s' % self.name
def __str__(self):
return u'%s' % self.name
class Omysqluser(models.Model):
dbserver = models.ForeignKey(Odbserver, verbose_name='服务器')
name = models.CharField(max_length=20, verbose_name='用户名')
passwd = models.CharField(max_length=20, verbose_name='密码')
dbname = models.CharField(max_length=20, verbose_name='数据库名')
business = models.CharField(null=True, blank=True, max_length=100,
verbose_name='业务')
comment = models.TextField(null=True, blank=True, verbose_name='备注')
class Meta:
ordering = ['dbserver']
verbose_name = 'MYSQL用户信息'
verbose_name_plural = verbose_name
def __unicode__(self):
return u'%s' % self.business
def __str__(self):
return u'%s' % self.business
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Odbserver(models.Model):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
class Meta:
ordering = ['name']
verbose_name = '服务器信息'
verbose_name_plural = verbose_name
<|reserved_special_token_0|>
def __str__(self):
return u'%s' % self.name
class Ousers(models.Model):
dbserver = models.ForeignKey(Odbserver, null=True, blank=True,
verbose_name='服务器')
user = models.CharField(max_length=20, verbose_name='用户名')
passwd = models.CharField(max_length=20, verbose_name='密码')
tablespace = models.CharField(max_length=20, null=True, blank=True,
verbose_name='表空间')
status = models.IntegerField(choices=USER_STATUS_CHOISE, verbose_name='状态')
business = models.CharField(null=True, blank=True, max_length=100,
verbose_name='业务')
created = models.DateField(null=True, blank=True, verbose_name='创建时间')
comment = models.TextField(null=True, blank=True, verbose_name='备注')
class Meta:
ordering = ['user']
verbose_name = '数据库用户信息'
verbose_name_plural = verbose_name
def __unicode__(self):
return u'%s' % self.business
def __str__(self):
return u'%s' % self.business
class Osysusers(models.Model):
dbserver = models.ForeignKey(Odbserver, null=True, blank=True,
verbose_name='服务器')
name = models.CharField(max_length=20, verbose_name='名称')
user = models.CharField(max_length=20, verbose_name='用户')
passwd = models.CharField(max_length=20, verbose_name='密码')
class Meta:
ordering = ['dbserver']
verbose_name = '系统用户信息'
verbose_name_plural = verbose_name
def __unicode__(self):
return u'%s' % self.name
def __str__(self):
return u'%s' % self.name
class Omysqluser(models.Model):
dbserver = models.ForeignKey(Odbserver, verbose_name='服务器')
name = models.CharField(max_length=20, verbose_name='用户名')
passwd = models.CharField(max_length=20, verbose_name='密码')
dbname = models.CharField(max_length=20, verbose_name='数据库名')
business = models.CharField(null=True, blank=True, max_length=100,
verbose_name='业务')
comment = models.TextField(null=True, blank=True, verbose_name='备注')
class Meta:
ordering = ['dbserver']
verbose_name = 'MYSQL用户信息'
verbose_name_plural = verbose_name
def __unicode__(self):
return u'%s' % self.business
def __str__(self):
return u'%s' % self.business
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Odbserver(models.Model):
name = models.CharField(max_length=30, verbose_name='名称')
ip = models.GenericIPAddressField(verbose_name='IP')
pos = models.IntegerField(default=1, choices=DBSERVER_POS_CHOISE,
verbose_name='位置')
sn = models.CharField(null=True, blank=True, max_length=50,
verbose_name='序列号')
sid = models.CharField(null=True, blank=True, max_length=50,
verbose_name='快速服务代码')
firm = models.IntegerField(default=1, choices=FIRM_CHOISE, verbose_name
='厂商')
model = models.CharField(null=True, blank=True, max_length=30,
verbose_name='型号')
feature = models.TextField(null=True, blank=True, verbose_name='配置')
buy_time = models.DateField(null=True, blank=True, verbose_name='购买时间')
service_range = models.IntegerField(default=1, choices=
SERVICE_RANGE_CHOISE, verbose_name='服务年限')
comment = models.TextField(null=True, blank=True, verbose_name='备注')
class Meta:
ordering = ['name']
verbose_name = '服务器信息'
verbose_name_plural = verbose_name
def __unicode__(self):
return u'%s' % self.name
def __str__(self):
return u'%s' % self.name
class Ousers(models.Model):
dbserver = models.ForeignKey(Odbserver, null=True, blank=True,
verbose_name='服务器')
user = models.CharField(max_length=20, verbose_name='用户名')
passwd = models.CharField(max_length=20, verbose_name='密码')
tablespace = models.CharField(max_length=20, null=True, blank=True,
verbose_name='表空间')
status = models.IntegerField(choices=USER_STATUS_CHOISE, verbose_name='状态')
business = models.CharField(null=True, blank=True, max_length=100,
verbose_name='业务')
created = models.DateField(null=True, blank=True, verbose_name='创建时间')
comment = models.TextField(null=True, blank=True, verbose_name='备注')
class Meta:
ordering = ['user']
verbose_name = '数据库用户信息'
verbose_name_plural = verbose_name
def __unicode__(self):
return u'%s' % self.business
def __str__(self):
return u'%s' % self.business
class Osysusers(models.Model):
dbserver = models.ForeignKey(Odbserver, null=True, blank=True,
verbose_name='服务器')
name = models.CharField(max_length=20, verbose_name='名称')
user = models.CharField(max_length=20, verbose_name='用户')
passwd = models.CharField(max_length=20, verbose_name='密码')
class Meta:
ordering = ['dbserver']
verbose_name = '系统用户信息'
verbose_name_plural = verbose_name
def __unicode__(self):
return u'%s' % self.name
def __str__(self):
return u'%s' % self.name
class Omysqluser(models.Model):
dbserver = models.ForeignKey(Odbserver, verbose_name='服务器')
name = models.CharField(max_length=20, verbose_name='用户名')
passwd = models.CharField(max_length=20, verbose_name='密码')
dbname = models.CharField(max_length=20, verbose_name='数据库名')
business = models.CharField(null=True, blank=True, max_length=100,
verbose_name='业务')
comment = models.TextField(null=True, blank=True, verbose_name='备注')
class Meta:
ordering = ['dbserver']
verbose_name = 'MYSQL用户信息'
verbose_name_plural = verbose_name
def __unicode__(self):
return u'%s' % self.business
def __str__(self):
return u'%s' % self.business
<|reserved_special_token_1|>
<|reserved_special_token_0|>
SERVICE_RANGE_CHOISE = {(1, '1年'), (2, '2年'), (3, '3年'), (4, '4年'), (5,
'5年'), (6, '6年'), (7, '7年'), (8, '8年'), (0, '长期')}
USER_STATUS_CHOISE = {(1, '停用'), (2, '正常'), (3, '锁定')}
DBSERVER_POS_CHOISE = {(1, '8层机房'), (2, '11层机房')}
FIRM_CHOISE = {(1, 'DELL'), (2, 'IBM'), (3, 'EMC')}
class Odbserver(models.Model):
name = models.CharField(max_length=30, verbose_name='名称')
ip = models.GenericIPAddressField(verbose_name='IP')
pos = models.IntegerField(default=1, choices=DBSERVER_POS_CHOISE,
verbose_name='位置')
sn = models.CharField(null=True, blank=True, max_length=50,
verbose_name='序列号')
sid = models.CharField(null=True, blank=True, max_length=50,
verbose_name='快速服务代码')
firm = models.IntegerField(default=1, choices=FIRM_CHOISE, verbose_name
='厂商')
model = models.CharField(null=True, blank=True, max_length=30,
verbose_name='型号')
feature = models.TextField(null=True, blank=True, verbose_name='配置')
buy_time = models.DateField(null=True, blank=True, verbose_name='购买时间')
service_range = models.IntegerField(default=1, choices=
SERVICE_RANGE_CHOISE, verbose_name='服务年限')
comment = models.TextField(null=True, blank=True, verbose_name='备注')
class Meta:
ordering = ['name']
verbose_name = '服务器信息'
verbose_name_plural = verbose_name
def __unicode__(self):
return u'%s' % self.name
def __str__(self):
return u'%s' % self.name
class Ousers(models.Model):
dbserver = models.ForeignKey(Odbserver, null=True, blank=True,
verbose_name='服务器')
user = models.CharField(max_length=20, verbose_name='用户名')
passwd = models.CharField(max_length=20, verbose_name='密码')
tablespace = models.CharField(max_length=20, null=True, blank=True,
verbose_name='表空间')
status = models.IntegerField(choices=USER_STATUS_CHOISE, verbose_name='状态')
business = models.CharField(null=True, blank=True, max_length=100,
verbose_name='业务')
created = models.DateField(null=True, blank=True, verbose_name='创建时间')
comment = models.TextField(null=True, blank=True, verbose_name='备注')
class Meta:
ordering = ['user']
verbose_name = '数据库用户信息'
verbose_name_plural = verbose_name
def __unicode__(self):
return u'%s' % self.business
def __str__(self):
return u'%s' % self.business
class Osysusers(models.Model):
dbserver = models.ForeignKey(Odbserver, null=True, blank=True,
verbose_name='服务器')
name = models.CharField(max_length=20, verbose_name='名称')
user = models.CharField(max_length=20, verbose_name='用户')
passwd = models.CharField(max_length=20, verbose_name='密码')
class Meta:
ordering = ['dbserver']
verbose_name = '系统用户信息'
verbose_name_plural = verbose_name
def __unicode__(self):
return u'%s' % self.name
def __str__(self):
return u'%s' % self.name
class Omysqluser(models.Model):
dbserver = models.ForeignKey(Odbserver, verbose_name='服务器')
name = models.CharField(max_length=20, verbose_name='用户名')
passwd = models.CharField(max_length=20, verbose_name='密码')
dbname = models.CharField(max_length=20, verbose_name='数据库名')
business = models.CharField(null=True, blank=True, max_length=100,
verbose_name='业务')
comment = models.TextField(null=True, blank=True, verbose_name='备注')
class Meta:
ordering = ['dbserver']
verbose_name = 'MYSQL用户信息'
verbose_name_plural = verbose_name
def __unicode__(self):
return u'%s' % self.business
def __str__(self):
return u'%s' % self.business
<|reserved_special_token_1|>
# -*- coding:utf-8 -*-
from __future__ import unicode_literals
from django.db import models
SERVICE_RANGE_CHOISE = {(1, '1年'), (2, '2年'), (3, '3年'), (4, '4年'), (5, '5年'), (6, '6年'), (7, '7年'), (8, '8年'), (0, '长期')}
USER_STATUS_CHOISE = {(1, '停用'), (2, '正常'), (3, '锁定')}
DBSERVER_POS_CHOISE = {(1, '8层机房'), (2, '11层机房')}
FIRM_CHOISE = {(1, 'DELL'), (2, 'IBM'), (3, 'EMC')}
class Odbserver(models.Model):
name = models.CharField(max_length=30, verbose_name='名称')
ip = models.GenericIPAddressField(verbose_name='IP')
pos = models.IntegerField(default=1, choices=DBSERVER_POS_CHOISE, verbose_name='位置')
sn = models.CharField(null=True, blank=True, max_length=50, verbose_name='序列号')
sid = models.CharField(null=True, blank=True, max_length=50, verbose_name='快速服务代码')
firm = models.IntegerField(default=1, choices=FIRM_CHOISE, verbose_name='厂商')
model = models.CharField(null=True, blank=True, max_length=30, verbose_name='型号')
feature = models.TextField(null=True, blank=True, verbose_name='配置')
buy_time = models.DateField(null=True, blank=True, verbose_name='购买时间')
service_range = models.IntegerField(default=1, choices=SERVICE_RANGE_CHOISE, verbose_name='服务年限')
comment = models.TextField(null=True, blank=True, verbose_name='备注')
class Meta:
ordering = ["name"]
verbose_name = '服务器信息'
verbose_name_plural = verbose_name
def __unicode__(self):
return u'%s' % self.name
def __str__(self):
return u'%s' % self.name
class Ousers(models.Model):
dbserver = models.ForeignKey(Odbserver, null=True, blank=True, verbose_name='服务器')
user = models.CharField(max_length=20, verbose_name='用户名')
passwd = models.CharField(max_length=20, verbose_name='密码')
tablespace = models.CharField(max_length=20, null=True, blank=True, verbose_name='表空间')
status = models.IntegerField(choices=USER_STATUS_CHOISE, verbose_name='状态')
business = models.CharField(null=True, blank=True, max_length=100, verbose_name='业务')
created = models.DateField(null=True, blank=True, verbose_name='创建时间')
comment = models.TextField(null=True, blank=True, verbose_name='备注')
class Meta:
ordering = ["user"]
verbose_name = '数据库用户信息'
verbose_name_plural = verbose_name
def __unicode__(self):
return u'%s' % self.business
def __str__(self):
return u'%s' % self.business
class Osysusers(models.Model):
dbserver = models.ForeignKey(Odbserver, null=True, blank=True, verbose_name='服务器')
name = models.CharField(max_length=20, verbose_name='名称')
user = models.CharField(max_length=20, verbose_name='用户')
passwd = models.CharField(max_length=20, verbose_name='密码')
class Meta:
ordering = ["dbserver"]
verbose_name = '系统用户信息'
verbose_name_plural = verbose_name
def __unicode__(self):
return u'%s' % self.name
def __str__(self):
return u'%s' % self.name
class Omysqluser(models.Model):
dbserver = models.ForeignKey(Odbserver, verbose_name='服务器')
name = models.CharField(max_length=20, verbose_name='用户名')
passwd = models.CharField(max_length=20, verbose_name='密码')
dbname = models.CharField(max_length=20, verbose_name='数据库名')
business = models.CharField(null=True, blank=True, max_length=100, verbose_name='业务')
comment = models.TextField(null=True, blank=True, verbose_name='备注')
class Meta:
ordering = ["dbserver"]
verbose_name = 'MYSQL用户信息'
verbose_name_plural = verbose_name
def __unicode__(self):
return u'%s' % self.business
def __str__(self):
return u'%s' % self.business
|
flexible
|
{
"blob_id": "c2490c3aacfa3ce22c3f47a69dbc44b695c2a2e5",
"index": 9509,
"step-1": "<mask token>\n\n\nclass Odbserver(models.Model):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n\n class Meta:\n ordering = ['name']\n verbose_name = '服务器信息'\n verbose_name_plural = verbose_name\n <mask token>\n <mask token>\n\n\nclass Ousers(models.Model):\n dbserver = models.ForeignKey(Odbserver, null=True, blank=True,\n verbose_name='服务器')\n user = models.CharField(max_length=20, verbose_name='用户名')\n passwd = models.CharField(max_length=20, verbose_name='密码')\n tablespace = models.CharField(max_length=20, null=True, blank=True,\n verbose_name='表空间')\n status = models.IntegerField(choices=USER_STATUS_CHOISE, verbose_name='状态')\n business = models.CharField(null=True, blank=True, max_length=100,\n verbose_name='业务')\n created = models.DateField(null=True, blank=True, verbose_name='创建时间')\n comment = models.TextField(null=True, blank=True, verbose_name='备注')\n\n\n class Meta:\n ordering = ['user']\n verbose_name = '数据库用户信息'\n verbose_name_plural = verbose_name\n\n def __unicode__(self):\n return u'%s' % self.business\n\n def __str__(self):\n return u'%s' % self.business\n\n\nclass Osysusers(models.Model):\n dbserver = models.ForeignKey(Odbserver, null=True, blank=True,\n verbose_name='服务器')\n name = models.CharField(max_length=20, verbose_name='名称')\n user = models.CharField(max_length=20, verbose_name='用户')\n passwd = models.CharField(max_length=20, verbose_name='密码')\n\n\n class Meta:\n ordering = ['dbserver']\n verbose_name = '系统用户信息'\n verbose_name_plural = verbose_name\n\n def __unicode__(self):\n return u'%s' % self.name\n\n def __str__(self):\n return u'%s' % self.name\n\n\nclass Omysqluser(models.Model):\n dbserver = models.ForeignKey(Odbserver, verbose_name='服务器')\n name = models.CharField(max_length=20, verbose_name='用户名')\n passwd = models.CharField(max_length=20, verbose_name='密码')\n dbname = models.CharField(max_length=20, verbose_name='数据库名')\n business = models.CharField(null=True, blank=True, max_length=100,\n verbose_name='业务')\n comment = models.TextField(null=True, blank=True, verbose_name='备注')\n\n\n class Meta:\n ordering = ['dbserver']\n verbose_name = 'MYSQL用户信息'\n verbose_name_plural = verbose_name\n\n def __unicode__(self):\n return u'%s' % self.business\n\n def __str__(self):\n return u'%s' % self.business\n",
"step-2": "<mask token>\n\n\nclass Odbserver(models.Model):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n\n class Meta:\n ordering = ['name']\n verbose_name = '服务器信息'\n verbose_name_plural = verbose_name\n <mask token>\n\n def __str__(self):\n return u'%s' % self.name\n\n\nclass Ousers(models.Model):\n dbserver = models.ForeignKey(Odbserver, null=True, blank=True,\n verbose_name='服务器')\n user = models.CharField(max_length=20, verbose_name='用户名')\n passwd = models.CharField(max_length=20, verbose_name='密码')\n tablespace = models.CharField(max_length=20, null=True, blank=True,\n verbose_name='表空间')\n status = models.IntegerField(choices=USER_STATUS_CHOISE, verbose_name='状态')\n business = models.CharField(null=True, blank=True, max_length=100,\n verbose_name='业务')\n created = models.DateField(null=True, blank=True, verbose_name='创建时间')\n comment = models.TextField(null=True, blank=True, verbose_name='备注')\n\n\n class Meta:\n ordering = ['user']\n verbose_name = '数据库用户信息'\n verbose_name_plural = verbose_name\n\n def __unicode__(self):\n return u'%s' % self.business\n\n def __str__(self):\n return u'%s' % self.business\n\n\nclass Osysusers(models.Model):\n dbserver = models.ForeignKey(Odbserver, null=True, blank=True,\n verbose_name='服务器')\n name = models.CharField(max_length=20, verbose_name='名称')\n user = models.CharField(max_length=20, verbose_name='用户')\n passwd = models.CharField(max_length=20, verbose_name='密码')\n\n\n class Meta:\n ordering = ['dbserver']\n verbose_name = '系统用户信息'\n verbose_name_plural = verbose_name\n\n def __unicode__(self):\n return u'%s' % self.name\n\n def __str__(self):\n return u'%s' % self.name\n\n\nclass Omysqluser(models.Model):\n dbserver = models.ForeignKey(Odbserver, verbose_name='服务器')\n name = models.CharField(max_length=20, verbose_name='用户名')\n passwd = models.CharField(max_length=20, verbose_name='密码')\n dbname = models.CharField(max_length=20, verbose_name='数据库名')\n business = models.CharField(null=True, blank=True, max_length=100,\n verbose_name='业务')\n comment = models.TextField(null=True, blank=True, verbose_name='备注')\n\n\n class Meta:\n ordering = ['dbserver']\n verbose_name = 'MYSQL用户信息'\n verbose_name_plural = verbose_name\n\n def __unicode__(self):\n return u'%s' % self.business\n\n def __str__(self):\n return u'%s' % self.business\n",
"step-3": "<mask token>\n\n\nclass Odbserver(models.Model):\n name = models.CharField(max_length=30, verbose_name='名称')\n ip = models.GenericIPAddressField(verbose_name='IP')\n pos = models.IntegerField(default=1, choices=DBSERVER_POS_CHOISE,\n verbose_name='位置')\n sn = models.CharField(null=True, blank=True, max_length=50,\n verbose_name='序列号')\n sid = models.CharField(null=True, blank=True, max_length=50,\n verbose_name='快速服务代码')\n firm = models.IntegerField(default=1, choices=FIRM_CHOISE, verbose_name\n ='厂商')\n model = models.CharField(null=True, blank=True, max_length=30,\n verbose_name='型号')\n feature = models.TextField(null=True, blank=True, verbose_name='配置')\n buy_time = models.DateField(null=True, blank=True, verbose_name='购买时间')\n service_range = models.IntegerField(default=1, choices=\n SERVICE_RANGE_CHOISE, verbose_name='服务年限')\n comment = models.TextField(null=True, blank=True, verbose_name='备注')\n\n\n class Meta:\n ordering = ['name']\n verbose_name = '服务器信息'\n verbose_name_plural = verbose_name\n\n def __unicode__(self):\n return u'%s' % self.name\n\n def __str__(self):\n return u'%s' % self.name\n\n\nclass Ousers(models.Model):\n dbserver = models.ForeignKey(Odbserver, null=True, blank=True,\n verbose_name='服务器')\n user = models.CharField(max_length=20, verbose_name='用户名')\n passwd = models.CharField(max_length=20, verbose_name='密码')\n tablespace = models.CharField(max_length=20, null=True, blank=True,\n verbose_name='表空间')\n status = models.IntegerField(choices=USER_STATUS_CHOISE, verbose_name='状态')\n business = models.CharField(null=True, blank=True, max_length=100,\n verbose_name='业务')\n created = models.DateField(null=True, blank=True, verbose_name='创建时间')\n comment = models.TextField(null=True, blank=True, verbose_name='备注')\n\n\n class Meta:\n ordering = ['user']\n verbose_name = '数据库用户信息'\n verbose_name_plural = verbose_name\n\n def __unicode__(self):\n return u'%s' % self.business\n\n def __str__(self):\n return u'%s' % self.business\n\n\nclass Osysusers(models.Model):\n dbserver = models.ForeignKey(Odbserver, null=True, blank=True,\n verbose_name='服务器')\n name = models.CharField(max_length=20, verbose_name='名称')\n user = models.CharField(max_length=20, verbose_name='用户')\n passwd = models.CharField(max_length=20, verbose_name='密码')\n\n\n class Meta:\n ordering = ['dbserver']\n verbose_name = '系统用户信息'\n verbose_name_plural = verbose_name\n\n def __unicode__(self):\n return u'%s' % self.name\n\n def __str__(self):\n return u'%s' % self.name\n\n\nclass Omysqluser(models.Model):\n dbserver = models.ForeignKey(Odbserver, verbose_name='服务器')\n name = models.CharField(max_length=20, verbose_name='用户名')\n passwd = models.CharField(max_length=20, verbose_name='密码')\n dbname = models.CharField(max_length=20, verbose_name='数据库名')\n business = models.CharField(null=True, blank=True, max_length=100,\n verbose_name='业务')\n comment = models.TextField(null=True, blank=True, verbose_name='备注')\n\n\n class Meta:\n ordering = ['dbserver']\n verbose_name = 'MYSQL用户信息'\n verbose_name_plural = verbose_name\n\n def __unicode__(self):\n return u'%s' % self.business\n\n def __str__(self):\n return u'%s' % self.business\n",
"step-4": "<mask token>\nSERVICE_RANGE_CHOISE = {(1, '1年'), (2, '2年'), (3, '3年'), (4, '4年'), (5,\n '5年'), (6, '6年'), (7, '7年'), (8, '8年'), (0, '长期')}\nUSER_STATUS_CHOISE = {(1, '停用'), (2, '正常'), (3, '锁定')}\nDBSERVER_POS_CHOISE = {(1, '8层机房'), (2, '11层机房')}\nFIRM_CHOISE = {(1, 'DELL'), (2, 'IBM'), (3, 'EMC')}\n\n\nclass Odbserver(models.Model):\n name = models.CharField(max_length=30, verbose_name='名称')\n ip = models.GenericIPAddressField(verbose_name='IP')\n pos = models.IntegerField(default=1, choices=DBSERVER_POS_CHOISE,\n verbose_name='位置')\n sn = models.CharField(null=True, blank=True, max_length=50,\n verbose_name='序列号')\n sid = models.CharField(null=True, blank=True, max_length=50,\n verbose_name='快速服务代码')\n firm = models.IntegerField(default=1, choices=FIRM_CHOISE, verbose_name\n ='厂商')\n model = models.CharField(null=True, blank=True, max_length=30,\n verbose_name='型号')\n feature = models.TextField(null=True, blank=True, verbose_name='配置')\n buy_time = models.DateField(null=True, blank=True, verbose_name='购买时间')\n service_range = models.IntegerField(default=1, choices=\n SERVICE_RANGE_CHOISE, verbose_name='服务年限')\n comment = models.TextField(null=True, blank=True, verbose_name='备注')\n\n\n class Meta:\n ordering = ['name']\n verbose_name = '服务器信息'\n verbose_name_plural = verbose_name\n\n def __unicode__(self):\n return u'%s' % self.name\n\n def __str__(self):\n return u'%s' % self.name\n\n\nclass Ousers(models.Model):\n dbserver = models.ForeignKey(Odbserver, null=True, blank=True,\n verbose_name='服务器')\n user = models.CharField(max_length=20, verbose_name='用户名')\n passwd = models.CharField(max_length=20, verbose_name='密码')\n tablespace = models.CharField(max_length=20, null=True, blank=True,\n verbose_name='表空间')\n status = models.IntegerField(choices=USER_STATUS_CHOISE, verbose_name='状态')\n business = models.CharField(null=True, blank=True, max_length=100,\n verbose_name='业务')\n created = models.DateField(null=True, blank=True, verbose_name='创建时间')\n comment = models.TextField(null=True, blank=True, verbose_name='备注')\n\n\n class Meta:\n ordering = ['user']\n verbose_name = '数据库用户信息'\n verbose_name_plural = verbose_name\n\n def __unicode__(self):\n return u'%s' % self.business\n\n def __str__(self):\n return u'%s' % self.business\n\n\nclass Osysusers(models.Model):\n dbserver = models.ForeignKey(Odbserver, null=True, blank=True,\n verbose_name='服务器')\n name = models.CharField(max_length=20, verbose_name='名称')\n user = models.CharField(max_length=20, verbose_name='用户')\n passwd = models.CharField(max_length=20, verbose_name='密码')\n\n\n class Meta:\n ordering = ['dbserver']\n verbose_name = '系统用户信息'\n verbose_name_plural = verbose_name\n\n def __unicode__(self):\n return u'%s' % self.name\n\n def __str__(self):\n return u'%s' % self.name\n\n\nclass Omysqluser(models.Model):\n dbserver = models.ForeignKey(Odbserver, verbose_name='服务器')\n name = models.CharField(max_length=20, verbose_name='用户名')\n passwd = models.CharField(max_length=20, verbose_name='密码')\n dbname = models.CharField(max_length=20, verbose_name='数据库名')\n business = models.CharField(null=True, blank=True, max_length=100,\n verbose_name='业务')\n comment = models.TextField(null=True, blank=True, verbose_name='备注')\n\n\n class Meta:\n ordering = ['dbserver']\n verbose_name = 'MYSQL用户信息'\n verbose_name_plural = verbose_name\n\n def __unicode__(self):\n return u'%s' % self.business\n\n def __str__(self):\n return u'%s' % self.business\n",
"step-5": "# -*- coding:utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models\n\nSERVICE_RANGE_CHOISE = {(1, '1年'), (2, '2年'), (3, '3年'), (4, '4年'), (5, '5年'), (6, '6年'), (7, '7年'), (8, '8年'), (0, '长期')}\nUSER_STATUS_CHOISE = {(1, '停用'), (2, '正常'), (3, '锁定')}\nDBSERVER_POS_CHOISE = {(1, '8层机房'), (2, '11层机房')}\nFIRM_CHOISE = {(1, 'DELL'), (2, 'IBM'), (3, 'EMC')}\n\n\nclass Odbserver(models.Model):\n name = models.CharField(max_length=30, verbose_name='名称')\n ip = models.GenericIPAddressField(verbose_name='IP')\n pos = models.IntegerField(default=1, choices=DBSERVER_POS_CHOISE, verbose_name='位置')\n sn = models.CharField(null=True, blank=True, max_length=50, verbose_name='序列号')\n sid = models.CharField(null=True, blank=True, max_length=50, verbose_name='快速服务代码')\n firm = models.IntegerField(default=1, choices=FIRM_CHOISE, verbose_name='厂商')\n model = models.CharField(null=True, blank=True, max_length=30, verbose_name='型号')\n feature = models.TextField(null=True, blank=True, verbose_name='配置')\n buy_time = models.DateField(null=True, blank=True, verbose_name='购买时间')\n service_range = models.IntegerField(default=1, choices=SERVICE_RANGE_CHOISE, verbose_name='服务年限')\n comment = models.TextField(null=True, blank=True, verbose_name='备注')\n\n class Meta:\n ordering = [\"name\"]\n verbose_name = '服务器信息'\n verbose_name_plural = verbose_name\n\n def __unicode__(self):\n return u'%s' % self.name\n\n def __str__(self):\n return u'%s' % self.name\n\n\nclass Ousers(models.Model):\n dbserver = models.ForeignKey(Odbserver, null=True, blank=True, verbose_name='服务器')\n user = models.CharField(max_length=20, verbose_name='用户名')\n passwd = models.CharField(max_length=20, verbose_name='密码')\n tablespace = models.CharField(max_length=20, null=True, blank=True, verbose_name='表空间')\n status = models.IntegerField(choices=USER_STATUS_CHOISE, verbose_name='状态')\n business = models.CharField(null=True, blank=True, max_length=100, verbose_name='业务')\n created = models.DateField(null=True, blank=True, verbose_name='创建时间')\n comment = models.TextField(null=True, blank=True, verbose_name='备注')\n\n class Meta:\n ordering = [\"user\"]\n verbose_name = '数据库用户信息'\n verbose_name_plural = verbose_name\n\n def __unicode__(self):\n return u'%s' % self.business\n\n def __str__(self):\n return u'%s' % self.business\n\n\nclass Osysusers(models.Model):\n dbserver = models.ForeignKey(Odbserver, null=True, blank=True, verbose_name='服务器')\n name = models.CharField(max_length=20, verbose_name='名称')\n user = models.CharField(max_length=20, verbose_name='用户')\n passwd = models.CharField(max_length=20, verbose_name='密码')\n\n class Meta:\n ordering = [\"dbserver\"]\n verbose_name = '系统用户信息'\n verbose_name_plural = verbose_name\n\n def __unicode__(self):\n return u'%s' % self.name\n\n def __str__(self):\n return u'%s' % self.name\n\n\nclass Omysqluser(models.Model):\n dbserver = models.ForeignKey(Odbserver, verbose_name='服务器')\n name = models.CharField(max_length=20, verbose_name='用户名')\n passwd = models.CharField(max_length=20, verbose_name='密码')\n dbname = models.CharField(max_length=20, verbose_name='数据库名')\n business = models.CharField(null=True, blank=True, max_length=100, verbose_name='业务')\n comment = models.TextField(null=True, blank=True, verbose_name='备注')\n\n class Meta:\n ordering = [\"dbserver\"]\n verbose_name = 'MYSQL用户信息'\n verbose_name_plural = verbose_name\n\n def __unicode__(self):\n return u'%s' % self.business\n\n def __str__(self):\n return u'%s' % self.business\n",
"step-ids": [
13,
14,
16,
17,
19
]
}
|
[
13,
14,
16,
17,
19
] |
<|reserved_special_token_0|>
def read_file(string_object):
""" Opens and reads through a file, returning none if it isnt found """
try:
return open(string_object, 'r')
except FileNotFoundError:
return None
<|reserved_special_token_0|>
def populate_weight_tuple_list(list_object):
""" Takes elements from a list containing course part names and their weights and returns a list of tuples containing those elements """
tuple_list = []
for i in range(len(list_object[0])):
weight_tuple = list_object[0][i], float(list_object[1][i])
tuple_list.append(weight_tuple)
return tuple_list
def populate_grades_tuple_list(list_object1, list_object2):
""" Takes elements from a list containing student emails and a list containing grades and returns a list of corresponding emails and grades in tuples """
tuple_list = []
for i in range(len(list_object1)):
grades_tuple = list_object1[i], list_object2[i]
tuple_list.append(grades_tuple)
return tuple_list
def calculate_final_grade(list_object1, list_object2):
""" Takes lists containing information about grades and course weights and calculates the final grade from the course """
list_object1 = [list(element) for element in list_object1]
for i in range(len(list_object1)):
final_grade = 0.0
for j in range(len(list_object1[i][1])):
final_grade += list_object1[i][1][j] * list_object2[j][1]
list_object1[i].append(final_grade)
list_object1 = [tuple(element) for element in list_object1]
return list_object1
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def read_file(string_object):
""" Opens and reads through a file, returning none if it isnt found """
try:
return open(string_object, 'r')
except FileNotFoundError:
return None
<|reserved_special_token_0|>
def populate_weight_tuple_list(list_object):
""" Takes elements from a list containing course part names and their weights and returns a list of tuples containing those elements """
tuple_list = []
for i in range(len(list_object[0])):
weight_tuple = list_object[0][i], float(list_object[1][i])
tuple_list.append(weight_tuple)
return tuple_list
def populate_grades_tuple_list(list_object1, list_object2):
""" Takes elements from a list containing student emails and a list containing grades and returns a list of corresponding emails and grades in tuples """
tuple_list = []
for i in range(len(list_object1)):
grades_tuple = list_object1[i], list_object2[i]
tuple_list.append(grades_tuple)
return tuple_list
def calculate_final_grade(list_object1, list_object2):
""" Takes lists containing information about grades and course weights and calculates the final grade from the course """
list_object1 = [list(element) for element in list_object1]
for i in range(len(list_object1)):
final_grade = 0.0
for j in range(len(list_object1[i][1])):
final_grade += list_object1[i][1][j] * list_object2[j][1]
list_object1[i].append(final_grade)
list_object1 = [tuple(element) for element in list_object1]
return list_object1
def print_results(list_object1, list_object2):
""" Takes lists containing information about course parts and student grades and prints them in a formatted menu """
STUDENT_COLUMN = 16
GENERAL_COLUMN = 14
print()
print('{:>{}}'.format('Student ID', STUDENT_COLUMN), end='')
for i in range(len(list_object1)):
print('{:>{}}'.format(list_object1[i][0], GENERAL_COLUMN), end='')
print('{:>{}}'.format('Course grade', GENERAL_COLUMN))
for tuple_element in list_object2:
print('{:>{}}'.format(tuple_element[0], STUDENT_COLUMN), end='')
for i, value in enumerate(tuple_element[1]):
print('{:>{}}'.format(value, GENERAL_COLUMN), end='')
print('{:>{}}'.format(round(tuple_element[-1], 2), GENERAL_COLUMN))
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def read_file(string_object):
""" Opens and reads through a file, returning none if it isnt found """
try:
return open(string_object, 'r')
except FileNotFoundError:
return None
<|reserved_special_token_0|>
def populate_grades_list(file_object):
""" Takes information from a file containing student emails and grades and puts each in seperate lists """
email_list = []
grade_list = []
for line in file_object:
tmp_list = line.split()
email_list.append(tmp_list[0])
grade_list.append(tmp_list[1:])
for value_list in grade_list:
for i, value in enumerate(value_list):
value_list[i] = float(value)
return email_list, grade_list
def populate_weight_tuple_list(list_object):
""" Takes elements from a list containing course part names and their weights and returns a list of tuples containing those elements """
tuple_list = []
for i in range(len(list_object[0])):
weight_tuple = list_object[0][i], float(list_object[1][i])
tuple_list.append(weight_tuple)
return tuple_list
def populate_grades_tuple_list(list_object1, list_object2):
""" Takes elements from a list containing student emails and a list containing grades and returns a list of corresponding emails and grades in tuples """
tuple_list = []
for i in range(len(list_object1)):
grades_tuple = list_object1[i], list_object2[i]
tuple_list.append(grades_tuple)
return tuple_list
def calculate_final_grade(list_object1, list_object2):
""" Takes lists containing information about grades and course weights and calculates the final grade from the course """
list_object1 = [list(element) for element in list_object1]
for i in range(len(list_object1)):
final_grade = 0.0
for j in range(len(list_object1[i][1])):
final_grade += list_object1[i][1][j] * list_object2[j][1]
list_object1[i].append(final_grade)
list_object1 = [tuple(element) for element in list_object1]
return list_object1
def print_results(list_object1, list_object2):
""" Takes lists containing information about course parts and student grades and prints them in a formatted menu """
STUDENT_COLUMN = 16
GENERAL_COLUMN = 14
print()
print('{:>{}}'.format('Student ID', STUDENT_COLUMN), end='')
for i in range(len(list_object1)):
print('{:>{}}'.format(list_object1[i][0], GENERAL_COLUMN), end='')
print('{:>{}}'.format('Course grade', GENERAL_COLUMN))
for tuple_element in list_object2:
print('{:>{}}'.format(tuple_element[0], STUDENT_COLUMN), end='')
for i, value in enumerate(tuple_element[1]):
print('{:>{}}'.format(value, GENERAL_COLUMN), end='')
print('{:>{}}'.format(round(tuple_element[-1], 2), GENERAL_COLUMN))
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def read_file(string_object):
""" Opens and reads through a file, returning none if it isnt found """
try:
return open(string_object, 'r')
except FileNotFoundError:
return None
def populate_weight_list(file_object):
""" Takes information from a file object containing course weights and puts it into a list """
new_list = []
for line in file_object:
new_list.append(line.split())
return new_list
def populate_grades_list(file_object):
""" Takes information from a file containing student emails and grades and puts each in seperate lists """
email_list = []
grade_list = []
for line in file_object:
tmp_list = line.split()
email_list.append(tmp_list[0])
grade_list.append(tmp_list[1:])
for value_list in grade_list:
for i, value in enumerate(value_list):
value_list[i] = float(value)
return email_list, grade_list
def populate_weight_tuple_list(list_object):
""" Takes elements from a list containing course part names and their weights and returns a list of tuples containing those elements """
tuple_list = []
for i in range(len(list_object[0])):
weight_tuple = list_object[0][i], float(list_object[1][i])
tuple_list.append(weight_tuple)
return tuple_list
def populate_grades_tuple_list(list_object1, list_object2):
""" Takes elements from a list containing student emails and a list containing grades and returns a list of corresponding emails and grades in tuples """
tuple_list = []
for i in range(len(list_object1)):
grades_tuple = list_object1[i], list_object2[i]
tuple_list.append(grades_tuple)
return tuple_list
def calculate_final_grade(list_object1, list_object2):
""" Takes lists containing information about grades and course weights and calculates the final grade from the course """
list_object1 = [list(element) for element in list_object1]
for i in range(len(list_object1)):
final_grade = 0.0
for j in range(len(list_object1[i][1])):
final_grade += list_object1[i][1][j] * list_object2[j][1]
list_object1[i].append(final_grade)
list_object1 = [tuple(element) for element in list_object1]
return list_object1
def print_results(list_object1, list_object2):
""" Takes lists containing information about course parts and student grades and prints them in a formatted menu """
STUDENT_COLUMN = 16
GENERAL_COLUMN = 14
print()
print('{:>{}}'.format('Student ID', STUDENT_COLUMN), end='')
for i in range(len(list_object1)):
print('{:>{}}'.format(list_object1[i][0], GENERAL_COLUMN), end='')
print('{:>{}}'.format('Course grade', GENERAL_COLUMN))
for tuple_element in list_object2:
print('{:>{}}'.format(tuple_element[0], STUDENT_COLUMN), end='')
for i, value in enumerate(tuple_element[1]):
print('{:>{}}'.format(value, GENERAL_COLUMN), end='')
print('{:>{}}'.format(round(tuple_element[-1], 2), GENERAL_COLUMN))
def main_func():
""" Main function """
parts_file_name = input('Enter filename for parts: ')
parts_file = read_file(parts_file_name)
if parts_file == None:
print('File {} not found'.format(parts_file_name))
else:
parts_list = populate_weight_list(parts_file)
weight_tuples_list = populate_weight_tuple_list(parts_list)
print(weight_tuples_list)
grades_file_name = input('Enter filename for grades: ')
grade_file = read_file(grades_file_name)
if grade_file == None:
print('File {} not found'.format(grades_file_name))
else:
email_list, grades_list = populate_grades_list(grade_file)
grades_tuple_list = populate_grades_tuple_list(email_list,
grades_list)
print(grades_tuple_list)
modified_grade_tuple_list = calculate_final_grade(grades_tuple_list
, weight_tuples_list)
print(modified_grade_tuple_list)
print_results(weight_tuples_list, modified_grade_tuple_list)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
"""
This program takes information about students and their coursework and calculates their final grades based on the weight of each course factor
"""
def read_file(string_object):
""" Opens and reads through a file, returning none if it isnt found """
try:
return open(string_object,"r")
except FileNotFoundError:
return None
def populate_weight_list(file_object):
""" Takes information from a file object containing course weights and puts it into a list """
new_list = []
for line in file_object:
new_list.append(line.split())
return new_list
def populate_grades_list(file_object):
""" Takes information from a file containing student emails and grades and puts each in seperate lists """
email_list = []
grade_list = []
for line in file_object:
tmp_list = line.split()
email_list.append(tmp_list[0])
grade_list.append(tmp_list[1::])
for value_list in grade_list:
for i, value in enumerate(value_list):
value_list[i] = float(value)
return email_list, grade_list
def populate_weight_tuple_list(list_object):
""" Takes elements from a list containing course part names and their weights and returns a list of tuples containing those elements """
tuple_list = []
for i in range(len(list_object[0])):
weight_tuple = (list_object[0][i], float(list_object[1][i]))
tuple_list.append(weight_tuple)
return tuple_list
def populate_grades_tuple_list(list_object1, list_object2):
""" Takes elements from a list containing student emails and a list containing grades and returns a list of corresponding emails and grades in tuples """
tuple_list = []
for i in range(len(list_object1)):
grades_tuple = (list_object1[i], list_object2[i])
tuple_list.append(grades_tuple)
return tuple_list
def calculate_final_grade(list_object1, list_object2):
""" Takes lists containing information about grades and course weights and calculates the final grade from the course """
list_object1 = [list(element) for element in list_object1] #Have to turn the tuples in the list to lists so that we can add the final grade to the list
for i in range(len(list_object1)):
final_grade = 0.0
for j in range(len(list_object1[i][1])):
final_grade += (list_object1[i][1][j] * list_object2[j][1])
list_object1[i].append(final_grade)
list_object1 = [tuple(element) for element in list_object1] #Turn the lists in the list into tuples again
return list_object1
def print_results(list_object1, list_object2):
""" Takes lists containing information about course parts and student grades and prints them in a formatted menu """
STUDENT_COLUMN = 16
GENERAL_COLUMN = 14
print()
print("{:>{}}".format("Student ID",STUDENT_COLUMN),end="")
for i in range(len(list_object1)):
print("{:>{}}".format(list_object1[i][0],GENERAL_COLUMN),end="")
print("{:>{}}".format("Course grade",GENERAL_COLUMN))
for tuple_element in list_object2:
print("{:>{}}".format(tuple_element[0],STUDENT_COLUMN),end="")
for i, value in enumerate(tuple_element[1]):
print("{:>{}}".format(value,GENERAL_COLUMN),end="")
print("{:>{}}".format(round(tuple_element[-1],2),GENERAL_COLUMN))
def main_func():
""" Main function """
parts_file_name = input("Enter filename for parts: ")
parts_file = read_file(parts_file_name)
if parts_file == None:
print("File {} not found".format(parts_file_name))
else:
parts_list = populate_weight_list(parts_file)
weight_tuples_list = populate_weight_tuple_list(parts_list)
print(weight_tuples_list)
grades_file_name = input("Enter filename for grades: ")
grade_file = read_file(grades_file_name)
if grade_file == None:
print("File {} not found".format(grades_file_name))
else:
email_list, grades_list = populate_grades_list(grade_file)
grades_tuple_list = populate_grades_tuple_list(email_list, grades_list)
print(grades_tuple_list)
modified_grade_tuple_list = calculate_final_grade(grades_tuple_list, weight_tuples_list)
print(modified_grade_tuple_list)
print_results(weight_tuples_list,modified_grade_tuple_list)
main_func()
|
flexible
|
{
"blob_id": "d8af8e36bd00fbfc966ef1c4dd0c6385cbb019ee",
"index": 2064,
"step-1": "<mask token>\n\n\ndef read_file(string_object):\n \"\"\" Opens and reads through a file, returning none if it isnt found \"\"\"\n try:\n return open(string_object, 'r')\n except FileNotFoundError:\n return None\n\n\n<mask token>\n\n\ndef populate_weight_tuple_list(list_object):\n \"\"\" Takes elements from a list containing course part names and their weights and returns a list of tuples containing those elements \"\"\"\n tuple_list = []\n for i in range(len(list_object[0])):\n weight_tuple = list_object[0][i], float(list_object[1][i])\n tuple_list.append(weight_tuple)\n return tuple_list\n\n\ndef populate_grades_tuple_list(list_object1, list_object2):\n \"\"\" Takes elements from a list containing student emails and a list containing grades and returns a list of corresponding emails and grades in tuples \"\"\"\n tuple_list = []\n for i in range(len(list_object1)):\n grades_tuple = list_object1[i], list_object2[i]\n tuple_list.append(grades_tuple)\n return tuple_list\n\n\ndef calculate_final_grade(list_object1, list_object2):\n \"\"\" Takes lists containing information about grades and course weights and calculates the final grade from the course \"\"\"\n list_object1 = [list(element) for element in list_object1]\n for i in range(len(list_object1)):\n final_grade = 0.0\n for j in range(len(list_object1[i][1])):\n final_grade += list_object1[i][1][j] * list_object2[j][1]\n list_object1[i].append(final_grade)\n list_object1 = [tuple(element) for element in list_object1]\n return list_object1\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef read_file(string_object):\n \"\"\" Opens and reads through a file, returning none if it isnt found \"\"\"\n try:\n return open(string_object, 'r')\n except FileNotFoundError:\n return None\n\n\n<mask token>\n\n\ndef populate_weight_tuple_list(list_object):\n \"\"\" Takes elements from a list containing course part names and their weights and returns a list of tuples containing those elements \"\"\"\n tuple_list = []\n for i in range(len(list_object[0])):\n weight_tuple = list_object[0][i], float(list_object[1][i])\n tuple_list.append(weight_tuple)\n return tuple_list\n\n\ndef populate_grades_tuple_list(list_object1, list_object2):\n \"\"\" Takes elements from a list containing student emails and a list containing grades and returns a list of corresponding emails and grades in tuples \"\"\"\n tuple_list = []\n for i in range(len(list_object1)):\n grades_tuple = list_object1[i], list_object2[i]\n tuple_list.append(grades_tuple)\n return tuple_list\n\n\ndef calculate_final_grade(list_object1, list_object2):\n \"\"\" Takes lists containing information about grades and course weights and calculates the final grade from the course \"\"\"\n list_object1 = [list(element) for element in list_object1]\n for i in range(len(list_object1)):\n final_grade = 0.0\n for j in range(len(list_object1[i][1])):\n final_grade += list_object1[i][1][j] * list_object2[j][1]\n list_object1[i].append(final_grade)\n list_object1 = [tuple(element) for element in list_object1]\n return list_object1\n\n\ndef print_results(list_object1, list_object2):\n \"\"\" Takes lists containing information about course parts and student grades and prints them in a formatted menu \"\"\"\n STUDENT_COLUMN = 16\n GENERAL_COLUMN = 14\n print()\n print('{:>{}}'.format('Student ID', STUDENT_COLUMN), end='')\n for i in range(len(list_object1)):\n print('{:>{}}'.format(list_object1[i][0], GENERAL_COLUMN), end='')\n print('{:>{}}'.format('Course grade', GENERAL_COLUMN))\n for tuple_element in list_object2:\n print('{:>{}}'.format(tuple_element[0], STUDENT_COLUMN), end='')\n for i, value in enumerate(tuple_element[1]):\n print('{:>{}}'.format(value, GENERAL_COLUMN), end='')\n print('{:>{}}'.format(round(tuple_element[-1], 2), GENERAL_COLUMN))\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef read_file(string_object):\n \"\"\" Opens and reads through a file, returning none if it isnt found \"\"\"\n try:\n return open(string_object, 'r')\n except FileNotFoundError:\n return None\n\n\n<mask token>\n\n\ndef populate_grades_list(file_object):\n \"\"\" Takes information from a file containing student emails and grades and puts each in seperate lists \"\"\"\n email_list = []\n grade_list = []\n for line in file_object:\n tmp_list = line.split()\n email_list.append(tmp_list[0])\n grade_list.append(tmp_list[1:])\n for value_list in grade_list:\n for i, value in enumerate(value_list):\n value_list[i] = float(value)\n return email_list, grade_list\n\n\ndef populate_weight_tuple_list(list_object):\n \"\"\" Takes elements from a list containing course part names and their weights and returns a list of tuples containing those elements \"\"\"\n tuple_list = []\n for i in range(len(list_object[0])):\n weight_tuple = list_object[0][i], float(list_object[1][i])\n tuple_list.append(weight_tuple)\n return tuple_list\n\n\ndef populate_grades_tuple_list(list_object1, list_object2):\n \"\"\" Takes elements from a list containing student emails and a list containing grades and returns a list of corresponding emails and grades in tuples \"\"\"\n tuple_list = []\n for i in range(len(list_object1)):\n grades_tuple = list_object1[i], list_object2[i]\n tuple_list.append(grades_tuple)\n return tuple_list\n\n\ndef calculate_final_grade(list_object1, list_object2):\n \"\"\" Takes lists containing information about grades and course weights and calculates the final grade from the course \"\"\"\n list_object1 = [list(element) for element in list_object1]\n for i in range(len(list_object1)):\n final_grade = 0.0\n for j in range(len(list_object1[i][1])):\n final_grade += list_object1[i][1][j] * list_object2[j][1]\n list_object1[i].append(final_grade)\n list_object1 = [tuple(element) for element in list_object1]\n return list_object1\n\n\ndef print_results(list_object1, list_object2):\n \"\"\" Takes lists containing information about course parts and student grades and prints them in a formatted menu \"\"\"\n STUDENT_COLUMN = 16\n GENERAL_COLUMN = 14\n print()\n print('{:>{}}'.format('Student ID', STUDENT_COLUMN), end='')\n for i in range(len(list_object1)):\n print('{:>{}}'.format(list_object1[i][0], GENERAL_COLUMN), end='')\n print('{:>{}}'.format('Course grade', GENERAL_COLUMN))\n for tuple_element in list_object2:\n print('{:>{}}'.format(tuple_element[0], STUDENT_COLUMN), end='')\n for i, value in enumerate(tuple_element[1]):\n print('{:>{}}'.format(value, GENERAL_COLUMN), end='')\n print('{:>{}}'.format(round(tuple_element[-1], 2), GENERAL_COLUMN))\n\n\n<mask token>\n",
"step-4": "<mask token>\n\n\ndef read_file(string_object):\n \"\"\" Opens and reads through a file, returning none if it isnt found \"\"\"\n try:\n return open(string_object, 'r')\n except FileNotFoundError:\n return None\n\n\ndef populate_weight_list(file_object):\n \"\"\" Takes information from a file object containing course weights and puts it into a list \"\"\"\n new_list = []\n for line in file_object:\n new_list.append(line.split())\n return new_list\n\n\ndef populate_grades_list(file_object):\n \"\"\" Takes information from a file containing student emails and grades and puts each in seperate lists \"\"\"\n email_list = []\n grade_list = []\n for line in file_object:\n tmp_list = line.split()\n email_list.append(tmp_list[0])\n grade_list.append(tmp_list[1:])\n for value_list in grade_list:\n for i, value in enumerate(value_list):\n value_list[i] = float(value)\n return email_list, grade_list\n\n\ndef populate_weight_tuple_list(list_object):\n \"\"\" Takes elements from a list containing course part names and their weights and returns a list of tuples containing those elements \"\"\"\n tuple_list = []\n for i in range(len(list_object[0])):\n weight_tuple = list_object[0][i], float(list_object[1][i])\n tuple_list.append(weight_tuple)\n return tuple_list\n\n\ndef populate_grades_tuple_list(list_object1, list_object2):\n \"\"\" Takes elements from a list containing student emails and a list containing grades and returns a list of corresponding emails and grades in tuples \"\"\"\n tuple_list = []\n for i in range(len(list_object1)):\n grades_tuple = list_object1[i], list_object2[i]\n tuple_list.append(grades_tuple)\n return tuple_list\n\n\ndef calculate_final_grade(list_object1, list_object2):\n \"\"\" Takes lists containing information about grades and course weights and calculates the final grade from the course \"\"\"\n list_object1 = [list(element) for element in list_object1]\n for i in range(len(list_object1)):\n final_grade = 0.0\n for j in range(len(list_object1[i][1])):\n final_grade += list_object1[i][1][j] * list_object2[j][1]\n list_object1[i].append(final_grade)\n list_object1 = [tuple(element) for element in list_object1]\n return list_object1\n\n\ndef print_results(list_object1, list_object2):\n \"\"\" Takes lists containing information about course parts and student grades and prints them in a formatted menu \"\"\"\n STUDENT_COLUMN = 16\n GENERAL_COLUMN = 14\n print()\n print('{:>{}}'.format('Student ID', STUDENT_COLUMN), end='')\n for i in range(len(list_object1)):\n print('{:>{}}'.format(list_object1[i][0], GENERAL_COLUMN), end='')\n print('{:>{}}'.format('Course grade', GENERAL_COLUMN))\n for tuple_element in list_object2:\n print('{:>{}}'.format(tuple_element[0], STUDENT_COLUMN), end='')\n for i, value in enumerate(tuple_element[1]):\n print('{:>{}}'.format(value, GENERAL_COLUMN), end='')\n print('{:>{}}'.format(round(tuple_element[-1], 2), GENERAL_COLUMN))\n\n\ndef main_func():\n \"\"\" Main function \"\"\"\n parts_file_name = input('Enter filename for parts: ')\n parts_file = read_file(parts_file_name)\n if parts_file == None:\n print('File {} not found'.format(parts_file_name))\n else:\n parts_list = populate_weight_list(parts_file)\n weight_tuples_list = populate_weight_tuple_list(parts_list)\n print(weight_tuples_list)\n grades_file_name = input('Enter filename for grades: ')\n grade_file = read_file(grades_file_name)\n if grade_file == None:\n print('File {} not found'.format(grades_file_name))\n else:\n email_list, grades_list = 
populate_grades_list(grade_file)\n grades_tuple_list = populate_grades_tuple_list(email_list,\n grades_list)\n print(grades_tuple_list)\n modified_grade_tuple_list = calculate_final_grade(grades_tuple_list\n , weight_tuples_list)\n print(modified_grade_tuple_list)\n print_results(weight_tuples_list, modified_grade_tuple_list)\n\n\n<mask token>\n",
"step-5": "\"\"\"\nThis program takes information about students and their coursework and calculates their final grades based on the weight of each course factor\n\"\"\"\n\ndef read_file(string_object):\n \"\"\" Opens and reads through a file, returning none if it isnt found \"\"\"\n try:\n return open(string_object,\"r\")\n except FileNotFoundError:\n return None\n\ndef populate_weight_list(file_object):\n \"\"\" Takes information from a file object containing course weights and puts it into a list \"\"\"\n new_list = []\n\n for line in file_object:\n new_list.append(line.split())\n \n return new_list\n\ndef populate_grades_list(file_object):\n \"\"\" Takes information from a file containing student emails and grades and puts each in seperate lists \"\"\"\n email_list = []\n grade_list = []\n\n for line in file_object:\n tmp_list = line.split()\n email_list.append(tmp_list[0])\n grade_list.append(tmp_list[1::])\n\n for value_list in grade_list:\n for i, value in enumerate(value_list):\n value_list[i] = float(value)\n\n return email_list, grade_list\n\ndef populate_weight_tuple_list(list_object):\n \"\"\" Takes elements from a list containing course part names and their weights and returns a list of tuples containing those elements \"\"\"\n tuple_list = []\n\n for i in range(len(list_object[0])):\n weight_tuple = (list_object[0][i], float(list_object[1][i]))\n tuple_list.append(weight_tuple)\n \n return tuple_list\n\ndef populate_grades_tuple_list(list_object1, list_object2):\n \"\"\" Takes elements from a list containing student emails and a list containing grades and returns a list of corresponding emails and grades in tuples \"\"\"\n tuple_list = []\n\n for i in range(len(list_object1)):\n grades_tuple = (list_object1[i], list_object2[i])\n tuple_list.append(grades_tuple)\n \n return tuple_list\n\ndef calculate_final_grade(list_object1, list_object2):\n \"\"\" Takes lists containing information about grades and course weights and calculates the final grade from the course \"\"\"\n\n list_object1 = [list(element) for element in list_object1] #Have to turn the tuples in the list to lists so that we can add the final grade to the list\n\n for i in range(len(list_object1)):\n final_grade = 0.0\n for j in range(len(list_object1[i][1])):\n final_grade += (list_object1[i][1][j] * list_object2[j][1])\n list_object1[i].append(final_grade)\n \n list_object1 = [tuple(element) for element in list_object1] #Turn the lists in the list into tuples again\n\n return list_object1\n\ndef print_results(list_object1, list_object2):\n \"\"\" Takes lists containing information about course parts and student grades and prints them in a formatted menu \"\"\"\n STUDENT_COLUMN = 16\n GENERAL_COLUMN = 14\n\n print()\n print(\"{:>{}}\".format(\"Student ID\",STUDENT_COLUMN),end=\"\")\n\n for i in range(len(list_object1)):\n print(\"{:>{}}\".format(list_object1[i][0],GENERAL_COLUMN),end=\"\")\n \n print(\"{:>{}}\".format(\"Course grade\",GENERAL_COLUMN))\n\n for tuple_element in list_object2:\n\n print(\"{:>{}}\".format(tuple_element[0],STUDENT_COLUMN),end=\"\")\n\n for i, value in enumerate(tuple_element[1]):\n print(\"{:>{}}\".format(value,GENERAL_COLUMN),end=\"\")\n \n print(\"{:>{}}\".format(round(tuple_element[-1],2),GENERAL_COLUMN))\n\n\ndef main_func():\n \"\"\" Main function \"\"\"\n\n parts_file_name = input(\"Enter filename for parts: \")\n parts_file = read_file(parts_file_name)\n\n if parts_file == None:\n print(\"File {} not found\".format(parts_file_name))\n else:\n parts_list = 
populate_weight_list(parts_file)\n weight_tuples_list = populate_weight_tuple_list(parts_list)\n print(weight_tuples_list)\n\n grades_file_name = input(\"Enter filename for grades: \")\n grade_file = read_file(grades_file_name)\n if grade_file == None:\n print(\"File {} not found\".format(grades_file_name))\n else:\n email_list, grades_list = populate_grades_list(grade_file)\n grades_tuple_list = populate_grades_tuple_list(email_list, grades_list)\n print(grades_tuple_list)\n\n modified_grade_tuple_list = calculate_final_grade(grades_tuple_list, weight_tuples_list)\n print(modified_grade_tuple_list)\n\n print_results(weight_tuples_list,modified_grade_tuple_list)\n\nmain_func() \n",
"step-ids": [
4,
5,
6,
8,
10
]
}
|
[
4,
5,
6,
8,
10
] |
from scrapera.image.duckduckgo import DuckDuckGoScraper
scraper = DuckDuckGoScraper()
scraper.scrape('spongebob squarepants', 1, r'path/to/output/directory')
|
normal
|
{
"blob_id": "d234034f7f232e842d0b4e465ea6ec314af6964d",
"index": 4209,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nscraper.scrape('spongebob squarepants', 1, 'path/to/output/directory')\n",
"step-3": "<mask token>\nscraper = DuckDuckGoScraper()\nscraper.scrape('spongebob squarepants', 1, 'path/to/output/directory')\n",
"step-4": "from scrapera.image.duckduckgo import DuckDuckGoScraper\nscraper = DuckDuckGoScraper()\nscraper.scrape('spongebob squarepants', 1, 'path/to/output/directory')\n",
"step-5": "from scrapera.image.duckduckgo import DuckDuckGoScraper\n\nscraper = DuckDuckGoScraper()\nscraper.scrape('spongebob squarepants', 1, r'path/to/output/directory')\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
"""For logging training information to files."""
import os
def delete_log(file_path):
"""Delete a log file.
Args:
file_path: String, the full path to the log file.
Raises:
ValueError: if file not found.
"""
if os.path.exists(file_path):
print('Deleting log %s...' % file_path)
os.remove(file_path)
else:
raise ValueError("File %r doesn't exists - cannot delete." % file_path)
class Logger:
"""For logging information to file."""
def __init__(self, file_path, print_too=True, override=False):
"""Create a new Logger.
Args:
file_path: String, the full path to the target file.
print_too: Bool, whether or not to also print logger info to terminal.
override: Bool, whether or not to delete any old files.
"""
self.file_path = file_path
self.print_too = print_too
if override:
if os.path.exists(file_path):
print('Overriding - deleting previous log...')
os.remove(file_path)
os.makedirs(os.path.dirname(file_path), exist_ok=True)
def log(self, info):
with open(self.file_path, 'a') as file:
file.write('\n' + info)
if self.print_too:
print(info)
|
normal
|
{
"blob_id": "1355c3abfd2683f6dc869703fdb79a04e264099c",
"index": 3421,
"step-1": "<mask token>\n\n\nclass Logger:\n <mask token>\n\n def __init__(self, file_path, print_too=True, override=False):\n \"\"\"Create a new Logger.\n\n Args:\n file_path: String, the full path to the target file.\n print_too: Bool, whether or not to also print logger info to terminal.\n override: Bool, whether or not to delete any old files.\n \"\"\"\n self.file_path = file_path\n self.print_too = print_too\n if override:\n if os.path.exists(file_path):\n print('Overriding - deleting previous log...')\n os.remove(file_path)\n os.makedirs(os.path.dirname(file_path), exist_ok=True)\n\n def log(self, info):\n with open(self.file_path, 'a') as file:\n file.write('\\n' + info)\n if self.print_too:\n print(info)\n",
"step-2": "<mask token>\n\n\nclass Logger:\n \"\"\"For logging information to file.\"\"\"\n\n def __init__(self, file_path, print_too=True, override=False):\n \"\"\"Create a new Logger.\n\n Args:\n file_path: String, the full path to the target file.\n print_too: Bool, whether or not to also print logger info to terminal.\n override: Bool, whether or not to delete any old files.\n \"\"\"\n self.file_path = file_path\n self.print_too = print_too\n if override:\n if os.path.exists(file_path):\n print('Overriding - deleting previous log...')\n os.remove(file_path)\n os.makedirs(os.path.dirname(file_path), exist_ok=True)\n\n def log(self, info):\n with open(self.file_path, 'a') as file:\n file.write('\\n' + info)\n if self.print_too:\n print(info)\n",
"step-3": "<mask token>\n\n\ndef delete_log(file_path):\n \"\"\"Delete a log file.\n\n Args:\n file_path: String, the full path to the log file.\n\n Raises:\n ValueError: if file not found.\n \"\"\"\n if os.path.exists(file_path):\n print('Deleting log %s...' % file_path)\n os.remove(file_path)\n else:\n raise ValueError(\"File %r doesn't exists - cannot delete.\" % file_path\n )\n\n\nclass Logger:\n \"\"\"For logging information to file.\"\"\"\n\n def __init__(self, file_path, print_too=True, override=False):\n \"\"\"Create a new Logger.\n\n Args:\n file_path: String, the full path to the target file.\n print_too: Bool, whether or not to also print logger info to terminal.\n override: Bool, whether or not to delete any old files.\n \"\"\"\n self.file_path = file_path\n self.print_too = print_too\n if override:\n if os.path.exists(file_path):\n print('Overriding - deleting previous log...')\n os.remove(file_path)\n os.makedirs(os.path.dirname(file_path), exist_ok=True)\n\n def log(self, info):\n with open(self.file_path, 'a') as file:\n file.write('\\n' + info)\n if self.print_too:\n print(info)\n",
"step-4": "<mask token>\nimport os\n\n\ndef delete_log(file_path):\n \"\"\"Delete a log file.\n\n Args:\n file_path: String, the full path to the log file.\n\n Raises:\n ValueError: if file not found.\n \"\"\"\n if os.path.exists(file_path):\n print('Deleting log %s...' % file_path)\n os.remove(file_path)\n else:\n raise ValueError(\"File %r doesn't exists - cannot delete.\" % file_path\n )\n\n\nclass Logger:\n \"\"\"For logging information to file.\"\"\"\n\n def __init__(self, file_path, print_too=True, override=False):\n \"\"\"Create a new Logger.\n\n Args:\n file_path: String, the full path to the target file.\n print_too: Bool, whether or not to also print logger info to terminal.\n override: Bool, whether or not to delete any old files.\n \"\"\"\n self.file_path = file_path\n self.print_too = print_too\n if override:\n if os.path.exists(file_path):\n print('Overriding - deleting previous log...')\n os.remove(file_path)\n os.makedirs(os.path.dirname(file_path), exist_ok=True)\n\n def log(self, info):\n with open(self.file_path, 'a') as file:\n file.write('\\n' + info)\n if self.print_too:\n print(info)\n",
"step-5": "\"\"\"For logging training information to files.\"\"\"\nimport os\n\n\ndef delete_log(file_path):\n \"\"\"Delete a log file.\n\n Args:\n file_path: String, the full path to the log file.\n\n Raises:\n ValueError: if file not found.\n \"\"\"\n if os.path.exists(file_path):\n print('Deleting log %s...' % file_path)\n os.remove(file_path)\n else:\n raise ValueError(\"File %r doesn't exists - cannot delete.\" % file_path)\n\n\nclass Logger:\n \"\"\"For logging information to file.\"\"\"\n\n def __init__(self, file_path, print_too=True, override=False):\n \"\"\"Create a new Logger.\n\n Args:\n file_path: String, the full path to the target file.\n print_too: Bool, whether or not to also print logger info to terminal.\n override: Bool, whether or not to delete any old files.\n \"\"\"\n self.file_path = file_path\n self.print_too = print_too\n if override:\n if os.path.exists(file_path):\n print('Overriding - deleting previous log...')\n os.remove(file_path)\n os.makedirs(os.path.dirname(file_path), exist_ok=True)\n\n def log(self, info):\n with open(self.file_path, 'a') as file:\n file.write('\\n' + info)\n if self.print_too:\n print(info)\n",
"step-ids": [
3,
4,
5,
6,
7
]
}
|
[
3,
4,
5,
6,
7
] |
<|reserved_special_token_0|>
class Gobang:
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def new(self):
"""新局"""
self.__init__()
def printcb(self):
"""打印棋盘"""
print('\x1b[7;32;40m+ ', end='')
for c in range(65, 80):
print(chr(c), end=' ')
print('\x1b[0m\n')
for row in range(len(self.chessboard)):
print('\x1b[7;32;40m' + chr(row + 97), end='\x1b[0m ')
for i in self.chessboard[row]:
if i == 0:
print(i, end=' ')
elif i == 1:
print('\x1b[31m{}\x1b[0m'.format(i), end=' ')
elif i == 2:
print('\x1b[34m{}\x1b[0m'.format(i), end=' ')
print('\n')
def player(self):
"""获取玩家ID"""
return len(self.step) % 2 + 1
def sortstep(self):
"""将总步表分配给黑白子"""
self.white, self.black = {}, {}
for s in self.step.items():
if s[0] % 2 == 1:
self.black.update({s[0]: s[1]})
else:
self.white.update({s[0]: s[1]})
<|reserved_special_token_0|>
def recall(self, s=-1):
""" 悔棋
"""
if s == -1:
try:
if len(self.max_step) < len(self.step):
self.max_step = self.step.copy()
if len(self.step) == 0:
raise KeyError
except KeyError:
return False
else:
self.step.popitem()
return self.loadstep()
elif s == 1:
if len(self.max_step) > len(self.step):
self.step.update({(len(self.step) + 1): self.max_step[len(
self.step) + 1]})
return self.loadstep()
else:
return False
<|reserved_special_token_0|>
def iswin(self):
"""判断是否结束
"""
step_set_ls = []
cb = self.chessboard
for s in self.step.values():
step_set_ls.append((ord(s[0]) - 97, ord(s[1]) - 97))
for r, c in step_set_ls:
try:
if cb[r][c - 2] == cb[r][c - 1] == cb[r][c] == cb[r][c + 1
] == cb[r][c + 2] in (1, 2):
return True, cb[r][c]
except IndexError:
pass
try:
if cb[r - 2][c] == cb[r - 1][c] == cb[r][c] == cb[r + 1][c
] == cb[r + 2][c] in (1, 2):
return True, cb[r][c]
except IndexError:
pass
try:
if cb[r - 2][c - 2] == cb[r - 1][c - 1] == cb[r][c] == cb[r + 1
][c + 1] == cb[r + 2][c + 2] in (1, 2):
return True, cb[r][c]
except IndexError:
pass
try:
if cb[r + 2][c - 2] == cb[r + 1][c - 1] == cb[r][c] == cb[r - 1
][c + 1] == cb[r - 2][c + 2] in (1, 2):
return True, cb[r][c]
except IndexError:
pass
return False, 0
def __init__(self):
self.chessboard = [[(0) for i in range(self.SIDE)] for j in range(
self.SIDE)]
self.step = {}
self.max_step = {}
self.black = {}
self.white = {}
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Gobang:
<|reserved_special_token_0|>
SIDE = 15
def new(self):
"""新局"""
self.__init__()
def printcb(self):
"""打印棋盘"""
print('\x1b[7;32;40m+ ', end='')
for c in range(65, 80):
print(chr(c), end=' ')
print('\x1b[0m\n')
for row in range(len(self.chessboard)):
print('\x1b[7;32;40m' + chr(row + 97), end='\x1b[0m ')
for i in self.chessboard[row]:
if i == 0:
print(i, end=' ')
elif i == 1:
print('\x1b[31m{}\x1b[0m'.format(i), end=' ')
elif i == 2:
print('\x1b[34m{}\x1b[0m'.format(i), end=' ')
print('\n')
def player(self):
"""获取玩家ID"""
return len(self.step) % 2 + 1
def sortstep(self):
"""将总步表分配给黑白子"""
self.white, self.black = {}, {}
for s in self.step.items():
if s[0] % 2 == 1:
self.black.update({s[0]: s[1]})
else:
self.white.update({s[0]: s[1]})
def loadstep(self):
""" 载入步表
将 self.step 载入到棋盘上
"""
try:
self.chessboard = [[(0) for i in range(self.SIDE)] for j in
range(self.SIDE)]
step_list = list(self.step.values()).copy()
for i in range(len(step_list)):
self.chessboard[ord(step_list[i][0]) - 97][ord(step_list[i]
[1]) - 97] = i % 2 + 1
self.sortstep()
return True
except TypeError:
return False
def recall(self, s=-1):
""" 悔棋
"""
if s == -1:
try:
if len(self.max_step) < len(self.step):
self.max_step = self.step.copy()
if len(self.step) == 0:
raise KeyError
except KeyError:
return False
else:
self.step.popitem()
return self.loadstep()
elif s == 1:
if len(self.max_step) > len(self.step):
self.step.update({(len(self.step) + 1): self.max_step[len(
self.step) + 1]})
return self.loadstep()
else:
return False
def move(self, row: int=7, column: int=7, **kwgs):
"""移動棋盘
row: 棋盘的行号
column: 棋盘的列号
"""
if 's' in kwgs:
row = ord(kwgs['s'][0].lower()) - 97
column = ord(kwgs['s'][1].lower()) - 97
if 0 <= row < self.SIDE and 0 <= column < self.SIDE:
if self.chessboard[row][column] == 0:
self.chessboard[row][column] = self.player()
self.step[len(self.step) + 1] = chr(row + 97) + chr(column + 97
)
self.sortstep()
return True
return False
def iswin(self):
"""判断是否结束
"""
step_set_ls = []
cb = self.chessboard
for s in self.step.values():
step_set_ls.append((ord(s[0]) - 97, ord(s[1]) - 97))
for r, c in step_set_ls:
try:
if cb[r][c - 2] == cb[r][c - 1] == cb[r][c] == cb[r][c + 1
] == cb[r][c + 2] in (1, 2):
return True, cb[r][c]
except IndexError:
pass
try:
if cb[r - 2][c] == cb[r - 1][c] == cb[r][c] == cb[r + 1][c
] == cb[r + 2][c] in (1, 2):
return True, cb[r][c]
except IndexError:
pass
try:
if cb[r - 2][c - 2] == cb[r - 1][c - 1] == cb[r][c] == cb[r + 1
][c + 1] == cb[r + 2][c + 2] in (1, 2):
return True, cb[r][c]
except IndexError:
pass
try:
if cb[r + 2][c - 2] == cb[r + 1][c - 1] == cb[r][c] == cb[r - 1
][c + 1] == cb[r - 2][c + 2] in (1, 2):
return True, cb[r][c]
except IndexError:
pass
return False, 0
def __init__(self):
self.chessboard = [[(0) for i in range(self.SIDE)] for j in range(
self.SIDE)]
self.step = {}
self.max_step = {}
self.black = {}
self.white = {}
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Gobang:
"""
五子棋
=====
一个简单的五子棋类, 可以在控制台下五子棋. 提供以下函数 :
new(): 新局
printcb(): 打印棋盘
player(): 获取当前应落子 ID (轮走方)
sortstep(): 处理总步表
loadstep(): 将 step 步表的内容载入棋盘
recall(): 前进后退的操作
move(): 落子
iswin(): 判断是否获胜
"""
SIDE = 15
def new(self):
"""新局"""
self.__init__()
def printcb(self):
"""打印棋盘"""
print('\x1b[7;32;40m+ ', end='')
for c in range(65, 80):
print(chr(c), end=' ')
print('\x1b[0m\n')
for row in range(len(self.chessboard)):
print('\x1b[7;32;40m' + chr(row + 97), end='\x1b[0m ')
for i in self.chessboard[row]:
if i == 0:
print(i, end=' ')
elif i == 1:
print('\x1b[31m{}\x1b[0m'.format(i), end=' ')
elif i == 2:
print('\x1b[34m{}\x1b[0m'.format(i), end=' ')
print('\n')
def player(self):
"""获取玩家ID"""
return len(self.step) % 2 + 1
def sortstep(self):
"""将总步表分配给黑白子"""
self.white, self.black = {}, {}
for s in self.step.items():
if s[0] % 2 == 1:
self.black.update({s[0]: s[1]})
else:
self.white.update({s[0]: s[1]})
def loadstep(self):
""" 载入步表
将 self.step 载入到棋盘上
"""
try:
self.chessboard = [[(0) for i in range(self.SIDE)] for j in
range(self.SIDE)]
step_list = list(self.step.values()).copy()
for i in range(len(step_list)):
self.chessboard[ord(step_list[i][0]) - 97][ord(step_list[i]
[1]) - 97] = i % 2 + 1
self.sortstep()
return True
except TypeError:
return False
def recall(self, s=-1):
""" 悔棋
"""
if s == -1:
try:
if len(self.max_step) < len(self.step):
self.max_step = self.step.copy()
if len(self.step) == 0:
raise KeyError
except KeyError:
return False
else:
self.step.popitem()
return self.loadstep()
elif s == 1:
if len(self.max_step) > len(self.step):
self.step.update({(len(self.step) + 1): self.max_step[len(
self.step) + 1]})
return self.loadstep()
else:
return False
def move(self, row: int=7, column: int=7, **kwgs):
"""移動棋盘
row: 棋盘的行号
column: 棋盘的列号
"""
if 's' in kwgs:
row = ord(kwgs['s'][0].lower()) - 97
column = ord(kwgs['s'][1].lower()) - 97
if 0 <= row < self.SIDE and 0 <= column < self.SIDE:
if self.chessboard[row][column] == 0:
self.chessboard[row][column] = self.player()
self.step[len(self.step) + 1] = chr(row + 97) + chr(column + 97
)
self.sortstep()
return True
return False
def iswin(self):
"""判断是否结束
"""
step_set_ls = []
cb = self.chessboard
for s in self.step.values():
step_set_ls.append((ord(s[0]) - 97, ord(s[1]) - 97))
for r, c in step_set_ls:
try:
if cb[r][c - 2] == cb[r][c - 1] == cb[r][c] == cb[r][c + 1
] == cb[r][c + 2] in (1, 2):
return True, cb[r][c]
except IndexError:
pass
try:
if cb[r - 2][c] == cb[r - 1][c] == cb[r][c] == cb[r + 1][c
] == cb[r + 2][c] in (1, 2):
return True, cb[r][c]
except IndexError:
pass
try:
if cb[r - 2][c - 2] == cb[r - 1][c - 1] == cb[r][c] == cb[r + 1
][c + 1] == cb[r + 2][c + 2] in (1, 2):
return True, cb[r][c]
except IndexError:
pass
try:
if cb[r + 2][c - 2] == cb[r + 1][c - 1] == cb[r][c] == cb[r - 1
][c + 1] == cb[r - 2][c + 2] in (1, 2):
return True, cb[r][c]
except IndexError:
pass
return False, 0
def __init__(self):
self.chessboard = [[(0) for i in range(self.SIDE)] for j in range(
self.SIDE)]
self.step = {}
self.max_step = {}
self.black = {}
self.white = {}
def _test():
a = Gobang()
a.step = {(1): 'no', (2): 'oo', (3): 'mn', (4): 'nn', (5): 'lm', (6):
'mm', (7): 'kl', (8): 'll'}
a.loadstep()
a.move(9, 10)
a.printcb()
print(a.iswin())
a.new()
a.printcb()
if __name__ == '__main__':
_test()
<|reserved_special_token_1|>
__version__ = '0.2.2'
__author__ = 'Anton Vanke <[email protected]>'
class Gobang:
"""
五子棋
=====
一个简单的五子棋类, 可以在控制台下五子棋. 提供以下函数 :
new(): 新局
printcb(): 打印棋盘
player(): 获取当前应落子 ID (轮走方)
sortstep(): 处理总步表
loadstep(): 将 step 步表的内容载入棋盘
recall(): 前进后退的操作
move(): 落子
iswin(): 判断是否获胜
"""
SIDE = 15
def new(self):
"""新局"""
self.__init__()
def printcb(self):
"""打印棋盘"""
print('\x1b[7;32;40m+ ', end='')
for c in range(65, 80):
print(chr(c), end=' ')
print('\x1b[0m\n')
for row in range(len(self.chessboard)):
print('\x1b[7;32;40m' + chr(row + 97), end='\x1b[0m ')
for i in self.chessboard[row]:
if i == 0:
print(i, end=' ')
elif i == 1:
print('\x1b[31m{}\x1b[0m'.format(i), end=' ')
elif i == 2:
print('\x1b[34m{}\x1b[0m'.format(i), end=' ')
print('\n')
def player(self):
"""获取玩家ID"""
return len(self.step) % 2 + 1
def sortstep(self):
"""将总步表分配给黑白子"""
self.white, self.black = {}, {}
for s in self.step.items():
if s[0] % 2 == 1:
self.black.update({s[0]: s[1]})
else:
self.white.update({s[0]: s[1]})
def loadstep(self):
""" 载入步表
将 self.step 载入到棋盘上
"""
try:
self.chessboard = [[(0) for i in range(self.SIDE)] for j in
range(self.SIDE)]
step_list = list(self.step.values()).copy()
for i in range(len(step_list)):
self.chessboard[ord(step_list[i][0]) - 97][ord(step_list[i]
[1]) - 97] = i % 2 + 1
self.sortstep()
return True
except TypeError:
return False
def recall(self, s=-1):
""" 悔棋
"""
if s == -1:
try:
if len(self.max_step) < len(self.step):
self.max_step = self.step.copy()
if len(self.step) == 0:
raise KeyError
except KeyError:
return False
else:
self.step.popitem()
return self.loadstep()
elif s == 1:
if len(self.max_step) > len(self.step):
self.step.update({(len(self.step) + 1): self.max_step[len(
self.step) + 1]})
return self.loadstep()
else:
return False
def move(self, row: int=7, column: int=7, **kwgs):
"""移動棋盘
row: 棋盘的行号
column: 棋盘的列号
"""
if 's' in kwgs:
row = ord(kwgs['s'][0].lower()) - 97
column = ord(kwgs['s'][1].lower()) - 97
if 0 <= row < self.SIDE and 0 <= column < self.SIDE:
if self.chessboard[row][column] == 0:
self.chessboard[row][column] = self.player()
self.step[len(self.step) + 1] = chr(row + 97) + chr(column + 97
)
self.sortstep()
return True
return False
def iswin(self):
"""判断是否结束
"""
step_set_ls = []
cb = self.chessboard
for s in self.step.values():
step_set_ls.append((ord(s[0]) - 97, ord(s[1]) - 97))
for r, c in step_set_ls:
try:
if cb[r][c - 2] == cb[r][c - 1] == cb[r][c] == cb[r][c + 1
] == cb[r][c + 2] in (1, 2):
return True, cb[r][c]
except IndexError:
pass
try:
if cb[r - 2][c] == cb[r - 1][c] == cb[r][c] == cb[r + 1][c
] == cb[r + 2][c] in (1, 2):
return True, cb[r][c]
except IndexError:
pass
try:
if cb[r - 2][c - 2] == cb[r - 1][c - 1] == cb[r][c] == cb[r + 1
][c + 1] == cb[r + 2][c + 2] in (1, 2):
return True, cb[r][c]
except IndexError:
pass
try:
if cb[r + 2][c - 2] == cb[r + 1][c - 1] == cb[r][c] == cb[r - 1
][c + 1] == cb[r - 2][c + 2] in (1, 2):
return True, cb[r][c]
except IndexError:
pass
return False, 0
def __init__(self):
self.chessboard = [[(0) for i in range(self.SIDE)] for j in range(
self.SIDE)]
self.step = {}
self.max_step = {}
self.black = {}
self.white = {}
def _test():
a = Gobang()
a.step = {(1): 'no', (2): 'oo', (3): 'mn', (4): 'nn', (5): 'lm', (6):
'mm', (7): 'kl', (8): 'll'}
a.loadstep()
a.move(9, 10)
a.printcb()
print(a.iswin())
a.new()
a.printcb()
if __name__ == '__main__':
_test()
<|reserved_special_token_1|>
#!/usr/bin/python3.8
# -*- coding: utf-8 -*-
__version__ = "0.2.2"
__author__ = 'Anton Vanke <[email protected]>'
class Gobang:
"""
五子棋
=====
一个简单的五子棋类, 可以在控制台下五子棋. 提供以下函数 :
new(): 新局
printcb(): 打印棋盘
player(): 获取当前应落子 ID (轮走方)
sortstep(): 处理总步表
loadstep(): 将 step 步表的内容载入棋盘
recall(): 前进后退的操作
move(): 落子
iswin(): 判断是否获胜
"""
# 棋盘的边长
SIDE = 15
def new(self):
"""新局"""
self.__init__()
def printcb(self):
"""打印棋盘"""
print("\033[7;32;40m+ ", end="")
for c in range(65, 80):
print(chr(c), end=" ")
print("\033[0m\n")
for row in range(len(self.chessboard)):
print("\033[7;32;40m" + chr(row + 97), end="\033[0m ")
for i in self.chessboard[row]:
if i == 0:
print(i, end=" ")
elif i == 1:
print("\033[31m{}\033[0m".format(i), end=" ")
elif i == 2:
print("\033[34m{}\033[0m".format(i), end=" ")
print("\n")
def player(self):
"""获取玩家ID"""
return (len(self.step) % 2) + 1
def sortstep(self):
"""将总步表分配给黑白子"""
self.white, self.black = {}, {}
for s in self.step.items():
if s[0] % 2 == 1:
self.black.update({s[0]: s[1]})
else:
self.white.update({s[0]: s[1]})
def loadstep(self):
""" 载入步表
将 self.step 载入到棋盘上
"""
try:
self.chessboard = [[0 for i in range(self.SIDE)]
for j in range(self.SIDE)]
step_list = list(self.step.values()).copy()
for i in range(len(step_list)):
self.chessboard[ord(step_list[i][0]) -
97][ord(step_list[i][1]) - 97] = (i % 2) + 1
self.sortstep()
return True
except TypeError:
return False
def recall(self, s=-1):
""" 悔棋
"""
if s == -1:
try:
if len(self.max_step) < len(self.step):
self.max_step = self.step.copy()
if len(self.step) == 0:
raise KeyError
except KeyError:
return False
else:
self.step.popitem()
return self.loadstep()
# 重下
elif s == 1:
if len(self.max_step) > len(self.step):
self.step.update(
{len(self.step) + 1: self.max_step[len(self.step) + 1]})
return self.loadstep()
else:
return False
def move(self, row: int = 7, column: int = 7, **kwgs):
"""移動棋盘
row: 棋盘的行号
column: 棋盘的列号
"""
if 's' in kwgs:
row = ord(kwgs['s'][0].lower()) - 97
column = ord(kwgs['s'][1].lower()) - 97
# 判斷是否在棋盤上
if 0 <= row < self.SIDE and 0 <= column < self.SIDE:
# 判斷該位置上是否有子落過
if self.chessboard[row][column] == 0:
self.chessboard[row][column] = self.player()
self.step[len(self.step) +
1] = chr(row + 97) + chr(column + 97)
self.sortstep()
return True
return False
def iswin(self):
"""判断是否结束
"""
step_set_ls = []
cb = self.chessboard
# 将步表转换为列表
for s in self.step.values():
step_set_ls.append((ord(s[0]) - 97, ord(s[1]) - 97))
# print(step_set_ls)
for r, c in step_set_ls:
try:
# 判断 -- 行有 5 子
if cb[r][c - 2] == cb[r][c - 1] == cb[r][c] == cb[r][
c + 1] == cb[r][c + 2] in (1, 2):
return True, cb[r][c]
except IndexError:
pass
try:
# 判断 | 有 5 子
if cb[r - 2][c] == cb[r - 1][c] == cb[r][c] == cb[
r + 1][c] == cb[r + 2][c] in (1, 2):
return True, cb[r][c]
except IndexError:
pass
try:
# 判断 \ 有 5 子
if cb[r - 2][c - 2] == cb[r - 1][c - 1] == cb[r][c] == cb[
r + 1][c + 1] == cb[r + 2][c + 2] in (1, 2):
return True, cb[r][c]
except IndexError:
pass
try:
# 判断 / 列有 5 子
if cb[r + 2][c - 2] == cb[r + 1][c - 1] == cb[r][c] == cb[
r - 1][c + 1] == cb[r - 2][c + 2] in (1, 2):
return True, cb[r][c]
except IndexError:
pass
return False, 0
def __init__(self):
# 棋盤
self.chessboard = [[0 for i in range(self.SIDE)]
for j in range(self.SIDE)]
# 總步表
self.step = {}
# 单局最长步表
self.max_step = {}
# 黑子步表
self.black = {}
# 白子步表
self.white = {}
def _test():
a = Gobang()
# 输入步表
a.step = {
1: 'no',
2: 'oo',
3: 'mn',
4: 'nn',
5: 'lm',
6: 'mm',
7: 'kl',
8: 'll',
}
# 加载
a.loadstep()
# 落子
a.move(9, 10)
# 打印棋盘
a.printcb()
# 输出输赢
print(a.iswin())
a.new()
a.printcb()
if __name__ == "__main__":
_test()
|
flexible
|
{
"blob_id": "e0394bfed51cd0af9bca06867e9b556b226f37d1",
"index": 1720,
"step-1": "<mask token>\n\n\nclass Gobang:\n <mask token>\n <mask token>\n\n def new(self):\n \"\"\"新局\"\"\"\n self.__init__()\n\n def printcb(self):\n \"\"\"打印棋盘\"\"\"\n print('\\x1b[7;32;40m+ ', end='')\n for c in range(65, 80):\n print(chr(c), end=' ')\n print('\\x1b[0m\\n')\n for row in range(len(self.chessboard)):\n print('\\x1b[7;32;40m' + chr(row + 97), end='\\x1b[0m ')\n for i in self.chessboard[row]:\n if i == 0:\n print(i, end=' ')\n elif i == 1:\n print('\\x1b[31m{}\\x1b[0m'.format(i), end=' ')\n elif i == 2:\n print('\\x1b[34m{}\\x1b[0m'.format(i), end=' ')\n print('\\n')\n\n def player(self):\n \"\"\"获取玩家ID\"\"\"\n return len(self.step) % 2 + 1\n\n def sortstep(self):\n \"\"\"将总步表分配给黑白子\"\"\"\n self.white, self.black = {}, {}\n for s in self.step.items():\n if s[0] % 2 == 1:\n self.black.update({s[0]: s[1]})\n else:\n self.white.update({s[0]: s[1]})\n <mask token>\n\n def recall(self, s=-1):\n \"\"\" 悔棋\n \"\"\"\n if s == -1:\n try:\n if len(self.max_step) < len(self.step):\n self.max_step = self.step.copy()\n if len(self.step) == 0:\n raise KeyError\n except KeyError:\n return False\n else:\n self.step.popitem()\n return self.loadstep()\n elif s == 1:\n if len(self.max_step) > len(self.step):\n self.step.update({(len(self.step) + 1): self.max_step[len(\n self.step) + 1]})\n return self.loadstep()\n else:\n return False\n <mask token>\n\n def iswin(self):\n \"\"\"判断是否结束\n \"\"\"\n step_set_ls = []\n cb = self.chessboard\n for s in self.step.values():\n step_set_ls.append((ord(s[0]) - 97, ord(s[1]) - 97))\n for r, c in step_set_ls:\n try:\n if cb[r][c - 2] == cb[r][c - 1] == cb[r][c] == cb[r][c + 1\n ] == cb[r][c + 2] in (1, 2):\n return True, cb[r][c]\n except IndexError:\n pass\n try:\n if cb[r - 2][c] == cb[r - 1][c] == cb[r][c] == cb[r + 1][c\n ] == cb[r + 2][c] in (1, 2):\n return True, cb[r][c]\n except IndexError:\n pass\n try:\n if cb[r - 2][c - 2] == cb[r - 1][c - 1] == cb[r][c] == cb[r + 1\n ][c + 1] == cb[r + 2][c + 2] in (1, 2):\n return True, cb[r][c]\n except IndexError:\n pass\n try:\n if cb[r + 2][c - 2] == cb[r + 1][c - 1] == cb[r][c] == cb[r - 1\n ][c + 1] == cb[r - 2][c + 2] in (1, 2):\n return True, cb[r][c]\n except IndexError:\n pass\n return False, 0\n\n def __init__(self):\n self.chessboard = [[(0) for i in range(self.SIDE)] for j in range(\n self.SIDE)]\n self.step = {}\n self.max_step = {}\n self.black = {}\n self.white = {}\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass Gobang:\n <mask token>\n SIDE = 15\n\n def new(self):\n \"\"\"新局\"\"\"\n self.__init__()\n\n def printcb(self):\n \"\"\"打印棋盘\"\"\"\n print('\\x1b[7;32;40m+ ', end='')\n for c in range(65, 80):\n print(chr(c), end=' ')\n print('\\x1b[0m\\n')\n for row in range(len(self.chessboard)):\n print('\\x1b[7;32;40m' + chr(row + 97), end='\\x1b[0m ')\n for i in self.chessboard[row]:\n if i == 0:\n print(i, end=' ')\n elif i == 1:\n print('\\x1b[31m{}\\x1b[0m'.format(i), end=' ')\n elif i == 2:\n print('\\x1b[34m{}\\x1b[0m'.format(i), end=' ')\n print('\\n')\n\n def player(self):\n \"\"\"获取玩家ID\"\"\"\n return len(self.step) % 2 + 1\n\n def sortstep(self):\n \"\"\"将总步表分配给黑白子\"\"\"\n self.white, self.black = {}, {}\n for s in self.step.items():\n if s[0] % 2 == 1:\n self.black.update({s[0]: s[1]})\n else:\n self.white.update({s[0]: s[1]})\n\n def loadstep(self):\n \"\"\" 载入步表\n 将 self.step 载入到棋盘上\n \"\"\"\n try:\n self.chessboard = [[(0) for i in range(self.SIDE)] for j in\n range(self.SIDE)]\n step_list = list(self.step.values()).copy()\n for i in range(len(step_list)):\n self.chessboard[ord(step_list[i][0]) - 97][ord(step_list[i]\n [1]) - 97] = i % 2 + 1\n self.sortstep()\n return True\n except TypeError:\n return False\n\n def recall(self, s=-1):\n \"\"\" 悔棋\n \"\"\"\n if s == -1:\n try:\n if len(self.max_step) < len(self.step):\n self.max_step = self.step.copy()\n if len(self.step) == 0:\n raise KeyError\n except KeyError:\n return False\n else:\n self.step.popitem()\n return self.loadstep()\n elif s == 1:\n if len(self.max_step) > len(self.step):\n self.step.update({(len(self.step) + 1): self.max_step[len(\n self.step) + 1]})\n return self.loadstep()\n else:\n return False\n\n def move(self, row: int=7, column: int=7, **kwgs):\n \"\"\"移動棋盘\n row: 棋盘的行号\n column: 棋盘的列号\n \"\"\"\n if 's' in kwgs:\n row = ord(kwgs['s'][0].lower()) - 97\n column = ord(kwgs['s'][1].lower()) - 97\n if 0 <= row < self.SIDE and 0 <= column < self.SIDE:\n if self.chessboard[row][column] == 0:\n self.chessboard[row][column] = self.player()\n self.step[len(self.step) + 1] = chr(row + 97) + chr(column + 97\n )\n self.sortstep()\n return True\n return False\n\n def iswin(self):\n \"\"\"判断是否结束\n \"\"\"\n step_set_ls = []\n cb = self.chessboard\n for s in self.step.values():\n step_set_ls.append((ord(s[0]) - 97, ord(s[1]) - 97))\n for r, c in step_set_ls:\n try:\n if cb[r][c - 2] == cb[r][c - 1] == cb[r][c] == cb[r][c + 1\n ] == cb[r][c + 2] in (1, 2):\n return True, cb[r][c]\n except IndexError:\n pass\n try:\n if cb[r - 2][c] == cb[r - 1][c] == cb[r][c] == cb[r + 1][c\n ] == cb[r + 2][c] in (1, 2):\n return True, cb[r][c]\n except IndexError:\n pass\n try:\n if cb[r - 2][c - 2] == cb[r - 1][c - 1] == cb[r][c] == cb[r + 1\n ][c + 1] == cb[r + 2][c + 2] in (1, 2):\n return True, cb[r][c]\n except IndexError:\n pass\n try:\n if cb[r + 2][c - 2] == cb[r + 1][c - 1] == cb[r][c] == cb[r - 1\n ][c + 1] == cb[r - 2][c + 2] in (1, 2):\n return True, cb[r][c]\n except IndexError:\n pass\n return False, 0\n\n def __init__(self):\n self.chessboard = [[(0) for i in range(self.SIDE)] for j in range(\n self.SIDE)]\n self.step = {}\n self.max_step = {}\n self.black = {}\n self.white = {}\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass Gobang:\n \"\"\"\n 五子棋\n =====\n 一个简单的五子棋类, 可以在控制台下五子棋. 提供以下函数 :\n\n new(): 新局\n printcb(): 打印棋盘\n player(): 获取当前应落子 ID (轮走方)\n sortstep(): 处理总步表\n loadstep(): 将 step 步表的内容载入棋盘\n recall(): 前进后退的操作\n move(): 落子\n iswin(): 判断是否获胜\n \"\"\"\n SIDE = 15\n\n def new(self):\n \"\"\"新局\"\"\"\n self.__init__()\n\n def printcb(self):\n \"\"\"打印棋盘\"\"\"\n print('\\x1b[7;32;40m+ ', end='')\n for c in range(65, 80):\n print(chr(c), end=' ')\n print('\\x1b[0m\\n')\n for row in range(len(self.chessboard)):\n print('\\x1b[7;32;40m' + chr(row + 97), end='\\x1b[0m ')\n for i in self.chessboard[row]:\n if i == 0:\n print(i, end=' ')\n elif i == 1:\n print('\\x1b[31m{}\\x1b[0m'.format(i), end=' ')\n elif i == 2:\n print('\\x1b[34m{}\\x1b[0m'.format(i), end=' ')\n print('\\n')\n\n def player(self):\n \"\"\"获取玩家ID\"\"\"\n return len(self.step) % 2 + 1\n\n def sortstep(self):\n \"\"\"将总步表分配给黑白子\"\"\"\n self.white, self.black = {}, {}\n for s in self.step.items():\n if s[0] % 2 == 1:\n self.black.update({s[0]: s[1]})\n else:\n self.white.update({s[0]: s[1]})\n\n def loadstep(self):\n \"\"\" 载入步表\n 将 self.step 载入到棋盘上\n \"\"\"\n try:\n self.chessboard = [[(0) for i in range(self.SIDE)] for j in\n range(self.SIDE)]\n step_list = list(self.step.values()).copy()\n for i in range(len(step_list)):\n self.chessboard[ord(step_list[i][0]) - 97][ord(step_list[i]\n [1]) - 97] = i % 2 + 1\n self.sortstep()\n return True\n except TypeError:\n return False\n\n def recall(self, s=-1):\n \"\"\" 悔棋\n \"\"\"\n if s == -1:\n try:\n if len(self.max_step) < len(self.step):\n self.max_step = self.step.copy()\n if len(self.step) == 0:\n raise KeyError\n except KeyError:\n return False\n else:\n self.step.popitem()\n return self.loadstep()\n elif s == 1:\n if len(self.max_step) > len(self.step):\n self.step.update({(len(self.step) + 1): self.max_step[len(\n self.step) + 1]})\n return self.loadstep()\n else:\n return False\n\n def move(self, row: int=7, column: int=7, **kwgs):\n \"\"\"移動棋盘\n row: 棋盘的行号\n column: 棋盘的列号\n \"\"\"\n if 's' in kwgs:\n row = ord(kwgs['s'][0].lower()) - 97\n column = ord(kwgs['s'][1].lower()) - 97\n if 0 <= row < self.SIDE and 0 <= column < self.SIDE:\n if self.chessboard[row][column] == 0:\n self.chessboard[row][column] = self.player()\n self.step[len(self.step) + 1] = chr(row + 97) + chr(column + 97\n )\n self.sortstep()\n return True\n return False\n\n def iswin(self):\n \"\"\"判断是否结束\n \"\"\"\n step_set_ls = []\n cb = self.chessboard\n for s in self.step.values():\n step_set_ls.append((ord(s[0]) - 97, ord(s[1]) - 97))\n for r, c in step_set_ls:\n try:\n if cb[r][c - 2] == cb[r][c - 1] == cb[r][c] == cb[r][c + 1\n ] == cb[r][c + 2] in (1, 2):\n return True, cb[r][c]\n except IndexError:\n pass\n try:\n if cb[r - 2][c] == cb[r - 1][c] == cb[r][c] == cb[r + 1][c\n ] == cb[r + 2][c] in (1, 2):\n return True, cb[r][c]\n except IndexError:\n pass\n try:\n if cb[r - 2][c - 2] == cb[r - 1][c - 1] == cb[r][c] == cb[r + 1\n ][c + 1] == cb[r + 2][c + 2] in (1, 2):\n return True, cb[r][c]\n except IndexError:\n pass\n try:\n if cb[r + 2][c - 2] == cb[r + 1][c - 1] == cb[r][c] == cb[r - 1\n ][c + 1] == cb[r - 2][c + 2] in (1, 2):\n return True, cb[r][c]\n except IndexError:\n pass\n return False, 0\n\n def __init__(self):\n self.chessboard = [[(0) for i in range(self.SIDE)] for j in range(\n self.SIDE)]\n self.step = {}\n self.max_step = {}\n self.black = {}\n self.white = {}\n\n\ndef _test():\n a = Gobang()\n a.step = {(1): 'no', (2): 'oo', (3): 'mn', (4): 'nn', (5): 'lm', (6):\n 'mm', 
(7): 'kl', (8): 'll'}\n a.loadstep()\n a.move(9, 10)\n a.printcb()\n print(a.iswin())\n a.new()\n a.printcb()\n\n\nif __name__ == '__main__':\n _test()\n",
"step-4": "__version__ = '0.2.2'\n__author__ = 'Anton Vanke <[email protected]>'\n\n\nclass Gobang:\n \"\"\"\n 五子棋\n =====\n 一个简单的五子棋类, 可以在控制台下五子棋. 提供以下函数 :\n\n new(): 新局\n printcb(): 打印棋盘\n player(): 获取当前应落子 ID (轮走方)\n sortstep(): 处理总步表\n loadstep(): 将 step 步表的内容载入棋盘\n recall(): 前进后退的操作\n move(): 落子\n iswin(): 判断是否获胜\n \"\"\"\n SIDE = 15\n\n def new(self):\n \"\"\"新局\"\"\"\n self.__init__()\n\n def printcb(self):\n \"\"\"打印棋盘\"\"\"\n print('\\x1b[7;32;40m+ ', end='')\n for c in range(65, 80):\n print(chr(c), end=' ')\n print('\\x1b[0m\\n')\n for row in range(len(self.chessboard)):\n print('\\x1b[7;32;40m' + chr(row + 97), end='\\x1b[0m ')\n for i in self.chessboard[row]:\n if i == 0:\n print(i, end=' ')\n elif i == 1:\n print('\\x1b[31m{}\\x1b[0m'.format(i), end=' ')\n elif i == 2:\n print('\\x1b[34m{}\\x1b[0m'.format(i), end=' ')\n print('\\n')\n\n def player(self):\n \"\"\"获取玩家ID\"\"\"\n return len(self.step) % 2 + 1\n\n def sortstep(self):\n \"\"\"将总步表分配给黑白子\"\"\"\n self.white, self.black = {}, {}\n for s in self.step.items():\n if s[0] % 2 == 1:\n self.black.update({s[0]: s[1]})\n else:\n self.white.update({s[0]: s[1]})\n\n def loadstep(self):\n \"\"\" 载入步表\n 将 self.step 载入到棋盘上\n \"\"\"\n try:\n self.chessboard = [[(0) for i in range(self.SIDE)] for j in\n range(self.SIDE)]\n step_list = list(self.step.values()).copy()\n for i in range(len(step_list)):\n self.chessboard[ord(step_list[i][0]) - 97][ord(step_list[i]\n [1]) - 97] = i % 2 + 1\n self.sortstep()\n return True\n except TypeError:\n return False\n\n def recall(self, s=-1):\n \"\"\" 悔棋\n \"\"\"\n if s == -1:\n try:\n if len(self.max_step) < len(self.step):\n self.max_step = self.step.copy()\n if len(self.step) == 0:\n raise KeyError\n except KeyError:\n return False\n else:\n self.step.popitem()\n return self.loadstep()\n elif s == 1:\n if len(self.max_step) > len(self.step):\n self.step.update({(len(self.step) + 1): self.max_step[len(\n self.step) + 1]})\n return self.loadstep()\n else:\n return False\n\n def move(self, row: int=7, column: int=7, **kwgs):\n \"\"\"移動棋盘\n row: 棋盘的行号\n column: 棋盘的列号\n \"\"\"\n if 's' in kwgs:\n row = ord(kwgs['s'][0].lower()) - 97\n column = ord(kwgs['s'][1].lower()) - 97\n if 0 <= row < self.SIDE and 0 <= column < self.SIDE:\n if self.chessboard[row][column] == 0:\n self.chessboard[row][column] = self.player()\n self.step[len(self.step) + 1] = chr(row + 97) + chr(column + 97\n )\n self.sortstep()\n return True\n return False\n\n def iswin(self):\n \"\"\"判断是否结束\n \"\"\"\n step_set_ls = []\n cb = self.chessboard\n for s in self.step.values():\n step_set_ls.append((ord(s[0]) - 97, ord(s[1]) - 97))\n for r, c in step_set_ls:\n try:\n if cb[r][c - 2] == cb[r][c - 1] == cb[r][c] == cb[r][c + 1\n ] == cb[r][c + 2] in (1, 2):\n return True, cb[r][c]\n except IndexError:\n pass\n try:\n if cb[r - 2][c] == cb[r - 1][c] == cb[r][c] == cb[r + 1][c\n ] == cb[r + 2][c] in (1, 2):\n return True, cb[r][c]\n except IndexError:\n pass\n try:\n if cb[r - 2][c - 2] == cb[r - 1][c - 1] == cb[r][c] == cb[r + 1\n ][c + 1] == cb[r + 2][c + 2] in (1, 2):\n return True, cb[r][c]\n except IndexError:\n pass\n try:\n if cb[r + 2][c - 2] == cb[r + 1][c - 1] == cb[r][c] == cb[r - 1\n ][c + 1] == cb[r - 2][c + 2] in (1, 2):\n return True, cb[r][c]\n except IndexError:\n pass\n return False, 0\n\n def __init__(self):\n self.chessboard = [[(0) for i in range(self.SIDE)] for j in range(\n self.SIDE)]\n self.step = {}\n self.max_step = {}\n self.black = {}\n self.white = {}\n\n\ndef _test():\n a = Gobang()\n a.step = {(1): 'no', 
(2): 'oo', (3): 'mn', (4): 'nn', (5): 'lm', (6):\n 'mm', (7): 'kl', (8): 'll'}\n a.loadstep()\n a.move(9, 10)\n a.printcb()\n print(a.iswin())\n a.new()\n a.printcb()\n\n\nif __name__ == '__main__':\n _test()\n",
"step-5": "#!/usr/bin/python3.8\n# -*- coding: utf-8 -*-\n__version__ = \"0.2.2\"\n__author__ = 'Anton Vanke <[email protected]>'\n\n\nclass Gobang:\n \"\"\"\n 五子棋\n =====\n 一个简单的五子棋类, 可以在控制台下五子棋. 提供以下函数 :\n\n new(): 新局\n printcb(): 打印棋盘\n player(): 获取当前应落子 ID (轮走方)\n sortstep(): 处理总步表\n loadstep(): 将 step 步表的内容载入棋盘\n recall(): 前进后退的操作\n move(): 落子\n iswin(): 判断是否获胜\n \"\"\"\n # 棋盘的边长\n SIDE = 15\n\n def new(self):\n \"\"\"新局\"\"\"\n self.__init__()\n\n def printcb(self):\n \"\"\"打印棋盘\"\"\"\n print(\"\\033[7;32;40m+ \", end=\"\")\n for c in range(65, 80):\n print(chr(c), end=\" \")\n print(\"\\033[0m\\n\")\n for row in range(len(self.chessboard)):\n print(\"\\033[7;32;40m\" + chr(row + 97), end=\"\\033[0m \")\n for i in self.chessboard[row]:\n if i == 0:\n print(i, end=\" \")\n elif i == 1:\n print(\"\\033[31m{}\\033[0m\".format(i), end=\" \")\n elif i == 2:\n print(\"\\033[34m{}\\033[0m\".format(i), end=\" \")\n print(\"\\n\")\n\n def player(self):\n \"\"\"获取玩家ID\"\"\"\n return (len(self.step) % 2) + 1\n\n def sortstep(self):\n \"\"\"将总步表分配给黑白子\"\"\"\n self.white, self.black = {}, {}\n for s in self.step.items():\n if s[0] % 2 == 1:\n self.black.update({s[0]: s[1]})\n else:\n self.white.update({s[0]: s[1]})\n\n def loadstep(self):\n \"\"\" 载入步表\n 将 self.step 载入到棋盘上\n \"\"\"\n try:\n self.chessboard = [[0 for i in range(self.SIDE)]\n for j in range(self.SIDE)]\n step_list = list(self.step.values()).copy()\n for i in range(len(step_list)):\n self.chessboard[ord(step_list[i][0]) -\n 97][ord(step_list[i][1]) - 97] = (i % 2) + 1\n self.sortstep()\n return True\n except TypeError:\n return False\n\n def recall(self, s=-1):\n \"\"\" 悔棋\n \"\"\"\n if s == -1:\n try:\n if len(self.max_step) < len(self.step):\n self.max_step = self.step.copy()\n if len(self.step) == 0:\n raise KeyError\n except KeyError:\n return False\n else:\n self.step.popitem()\n return self.loadstep()\n # 重下\n elif s == 1:\n if len(self.max_step) > len(self.step):\n self.step.update(\n {len(self.step) + 1: self.max_step[len(self.step) + 1]})\n return self.loadstep()\n else:\n return False\n\n def move(self, row: int = 7, column: int = 7, **kwgs):\n \"\"\"移動棋盘\n row: 棋盘的行号\n column: 棋盘的列号\n \"\"\"\n if 's' in kwgs:\n row = ord(kwgs['s'][0].lower()) - 97\n column = ord(kwgs['s'][1].lower()) - 97\n # 判斷是否在棋盤上\n if 0 <= row < self.SIDE and 0 <= column < self.SIDE:\n # 判斷該位置上是否有子落過\n if self.chessboard[row][column] == 0:\n self.chessboard[row][column] = self.player()\n self.step[len(self.step) +\n 1] = chr(row + 97) + chr(column + 97)\n self.sortstep()\n return True\n return False\n\n def iswin(self):\n \"\"\"判断是否结束\n \"\"\"\n step_set_ls = []\n cb = self.chessboard\n # 将步表转换为列表\n for s in self.step.values():\n step_set_ls.append((ord(s[0]) - 97, ord(s[1]) - 97))\n # print(step_set_ls)\n for r, c in step_set_ls:\n try:\n # 判断 -- 行有 5 子\n if cb[r][c - 2] == cb[r][c - 1] == cb[r][c] == cb[r][\n c + 1] == cb[r][c + 2] in (1, 2):\n return True, cb[r][c]\n except IndexError:\n pass\n try:\n # 判断 | 有 5 子\n if cb[r - 2][c] == cb[r - 1][c] == cb[r][c] == cb[\n r + 1][c] == cb[r + 2][c] in (1, 2):\n return True, cb[r][c]\n except IndexError:\n pass\n try:\n # 判断 \\ 有 5 子\n if cb[r - 2][c - 2] == cb[r - 1][c - 1] == cb[r][c] == cb[\n r + 1][c + 1] == cb[r + 2][c + 2] in (1, 2):\n return True, cb[r][c]\n except IndexError:\n pass\n try:\n # 判断 / 列有 5 子\n if cb[r + 2][c - 2] == cb[r + 1][c - 1] == cb[r][c] == cb[\n r - 1][c + 1] == cb[r - 2][c + 2] in (1, 2):\n return True, cb[r][c]\n except IndexError:\n pass\n return False, 0\n\n def 
__init__(self):\n # 棋盤\n self.chessboard = [[0 for i in range(self.SIDE)]\n for j in range(self.SIDE)]\n # 總步表\n self.step = {}\n # 单局最长步表\n self.max_step = {}\n # 黑子步表\n self.black = {}\n # 白子步表\n self.white = {}\n\n\ndef _test():\n a = Gobang()\n # 输入步表\n a.step = {\n 1: 'no',\n 2: 'oo',\n 3: 'mn',\n 4: 'nn',\n 5: 'lm',\n 6: 'mm',\n 7: 'kl',\n 8: 'll',\n }\n # 加载\n a.loadstep()\n # 落子\n a.move(9, 10)\n # 打印棋盘\n a.printcb()\n # 输出输赢\n print(a.iswin())\n a.new()\n a.printcb()\n\n\nif __name__ == \"__main__\":\n _test()\n",
"step-ids": [
8,
11,
14,
15,
16
]
}
|
[
8,
11,
14,
15,
16
] |
Xeval[[1,2],:]
# *** Spyder Python Console History Log ***
Xeval[:,:]
optfunc.P(Xeval[:,:])
optfunc.P(Xeval)
optfunc.P(Xeval[[0,1,2,3,4],:])
optfunc.P(Xeval[[0,1,],:])
optfunc.P(Xeval[[0,1],:])
optfunc.P(Xeval[[0,1,2,3],:])
optfunc.P(Xeval[[0,1,2,3,4],:])
optfunc.P(Xeval[[0,1,2],:])
Xeval[[0,1,2,3,4],:]
Xeval[[0,1,2,3],:]
Xeval[[0,1,2],:]
optfunc.gp_list[0]
optfunc.gp_list[0](Xeval)
optfunc.gp_list[0].preduct(Xeval)
optfunc.gp_list[0].predict(Xeval)
optfunc.gp_list[0].predict(Xeval[[0,1,2,3,4],:])
optfunc.gp_list[0].predict(Xeval[[0,1,2,3],:])
optfunc.gp_list[0].predict(Xeval[[0,1,2],:])
optfunc.P(Xeval[[0,1,2,3,4],:])
optfunc.ypred
optfunc.P(Xeval[[0,1,2],:])
optfunc.ypred
optfunc.P(Xeval[[0,1,2,3,4],:])
optfunc.MSE
optfunc.sigma
optfunc.P(Xeval[[0,1,2],:])
optfunc.sigma
optfunc.gp_list[0].predict(Xeval[[0,1,2],:],eval_MSE=True)
optfunc.gp_list[0].predict(Xeval[[0,1,2,3],:],eval_MSE=True)
optfunc.gp_list[0].predict(Xeval[[0,1,2],:],eval_MSE=True)
optfunc.gp_list[0].predict(Xeval[[0,1,2,0],:],eval_MSE=True)
optfunc.gp_list[0].predict(Xeval[[0,0,0,0],:],eval_MSE=True)
optfunc.gp_list[0].predict(Xeval[[0,0,0],:],eval_MSE=True)
optfunc.gp_list[0].predict(Xeval[[0,0],:],eval_MSE=True)
optfunc.gp_list[0].predict(Xeval[[0],:],eval_MSE=True)
optfunc.gp_list[0].predict(Xeval[[0,1],:],eval_MSE=True)
optfunc.gp_list[0].predict(Xeval[[0,1,1],:],eval_MSE=True)
optfunc.gp_list[0].predict(Xeval[[0,1,1,1],:],eval_MSE=True)
optfunc.gp_list[0].predict(Xeval[[zeros(1,5)],:],eval_MSE=True)
optfunc.gp_list[0].predict(Xeval[[np.zeros(1,5)],:],eval_MSE=True)
optfunc.gp_list[0].predict(Xeval[np.zeros(1,5),:],eval_MSE=True)
np.zeros(1,5)
np.zeros(5)
optfunc.gp_list[0].predict(Xeval[np.zeros(15),:],eval_MSE=True)
optfunc.gp_list[0].predict(Xeval[np.zeros(5),:],eval_MSE=True)
optfunc.gp_list[0].predict(Xeval[[np.zeros(5)],:],eval_MSE=True)
optfunc.gp_list[0].predict(Xeval[[0,0,0,0,0,0,0,0,0,0,0],:],eval_MSE=True)
optfunc.gp_list[0].predict(Xeval[[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],:],eval_MSE=True)
Xeval[[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],:]
optfunc.gp_list[0].predict(Xeval[[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],:],eval_MSE=True)
optfunc.gp_list[0].predict(Xeval[[0,1,2,3],:],eval_MSE=True)
optfunc.gp_list[0].predict(Xeval[[0,1,2],:],eval_MSE=True)
Xeval[[0,1,2,3]]
Xeval[[0,1,2]]
Xeval[[0,1,2],:]
optfunc.gp_list[0].predict(Xeval[[0,0],:],eval_MSE=True)
optfunc.gp_list[0].predict(Xeval[[0,0,0],:],eval_MSE=True)
optfunc.gp_list[0].predict(Xeval[[0,0,0,0],:],eval_MSE=True)
optfunc.gp_list[0].predict([0.5,0.5],:],eval_MSE=True)
optfunc.gp_list[0].predict([0.5,0.5],eval_MSE=True)
optfunc.gp_list[0].predict([[0.5,0.5]],eval_MSE=True)
optfunc.gp_list[0].predict([[0.5,0.49]],eval_MSE=True)
optfunc.gp_list[0].predict([[0.5,0.48]],eval_MSE=True)
optfunc.gp_list[0].predict([[0.5,0.495]],eval_MSE=True)
optfunc.gp_list[0].predict([[0.5,0.499]],eval_MSE=True)
optfunc.gp_list[0].predict([[0.5,0.4999]],eval_MSE=True)
optfunc.gp_list[0].predict([[0.5,0.49999]],eval_MSE=True)
optfunc.gp_list[0].predict([[0.5,0.5]],eval_MSE=True)
optfunc.gp_list[0].predict([[0.5,0.5001]],eval_MSE=True)
for i in range(0,100)
for i in range(0,100): y[i],s[i] = optfunc.gp_list[0].predict([[0.5,i*0.01]],eval_MSE=True)
y = []
s = []
for i in range(0,100): y[i],s[i] = optfunc.gp_list[0].predict([[0.5,i*0.01]],eval_MSE=True)
for i in range(0,100): optfunc.gp_list[0].predict([[0.5,i*0.01]],eval_MSE=True)
for i in range(0,100): a, b = optfunc.gp_list[0].predict([[0.5,i*0.01]],eval_MSE=True) y = np.r_[y,a] s = np.r_[s,b]
y
optfunc.gp_list[0]
runfile('C:/Users/b4/.spyder2-py3/PEIOPT.py', wdir='C:/Users/b4/.spyder2-py3')
y = []
s = []
for i in range(0,100): a, b = optfunc.gp_list[0].predict([[0.5,i*0.01]],eval_MSE=True) y = np.r_[y,a] s = np.r_[s,b]
y = []
s = []
for i in range(0,200): a, b = optfunc.gp_list[0].predict([[0.5,i*0.01]],eval_MSE=True) y = np.r_[y,a] s = np.r_[s,b]
y = []
s = []
for i in range(0,200): a, b = optfunc.gp_list[0].predict([[1.,i*0.01]],eval_MSE=True) y = np.r_[y,a] s = np.r_[s,b]
runfile('C:/Users/b4/.spyder2-py3/PEIOPT.py', wdir='C:/Users/b4/.spyder2-py3')
y = []
s = []
for i in range(0,200): a, b = optfunc.gp_list[0].predict([[1.,i*0.01]],eval_MSE=True) y = np.r_[y,a] s = np.r_[s,b]
runfile('C:/Users/b4/.spyder2-py3/PEIOPT.py', wdir='C:/Users/b4/.spyder2-py3')
y = []
s = []
for i in range(0,200): a, b = optfunc.gp_list[0].predict([[1.,i*0.01]],eval_MSE=True) y = np.r_[y,a] s = np.r_[s,b]
runfile('C:/Users/b4/.spyder2-py3/PEIOPT.py', wdir='C:/Users/b4/.spyder2-py3')
##---(Wed Mar 23 11:14:55 2016)---
runfile('C:/Users/b4/.spyder2-py3/PEIOPT.py', wdir='C:/Users/b4/.spyder2-py3')
|
normal
|
{
"blob_id": "02b20c3f5941873dfd22a7fbedb825e66c613ace",
"index": 2278,
"step-1": "Xeval[[1,2],:]\r\n# *** Spyder Python Console History Log ***\r\nXeval[:,:]\r\noptfunc.P(Xeval[:,:])\r\noptfunc.P(Xeval)\r\noptfunc.P(Xeval[[0,1,2,3,4],:])\r\noptfunc.P(Xeval[[0,1,],:])\r\noptfunc.P(Xeval[[0,1],:])\r\noptfunc.P(Xeval[[0,1,2,3],:])\r\noptfunc.P(Xeval[[0,1,2,3,4],:])\r\noptfunc.P(Xeval[[0,1,2],:])\r\nXeval[[0,1,2,3,4],:]\r\nXeval[[0,1,2,3],:]\r\nXeval[[0,1,2],:]\r\noptfunc.gp_list[0]\r\noptfunc.gp_list[0](Xeval)\r\noptfunc.gp_list[0].preduct(Xeval)\r\noptfunc.gp_list[0].predict(Xeval)\r\noptfunc.gp_list[0].predict(Xeval[[0,1,2,3,4],:])\r\noptfunc.gp_list[0].predict(Xeval[[0,1,2,3],:])\r\noptfunc.gp_list[0].predict(Xeval[[0,1,2],:])\r\noptfunc.P(Xeval[[0,1,2,3,4],:])\r\noptfunc.ypred\r\noptfunc.P(Xeval[[0,1,2],:])\r\noptfunc.ypred\r\noptfunc.P(Xeval[[0,1,2,3,4],:])\r\noptfunc.MSE\r\noptfunc.sigma\r\noptfunc.P(Xeval[[0,1,2],:])\r\noptfunc.sigma\r\noptfunc.gp_list[0].predict(Xeval[[0,1,2],:],eval_MSE=True)\r\noptfunc.gp_list[0].predict(Xeval[[0,1,2,3],:],eval_MSE=True)\r\noptfunc.gp_list[0].predict(Xeval[[0,1,2],:],eval_MSE=True)\r\noptfunc.gp_list[0].predict(Xeval[[0,1,2,0],:],eval_MSE=True)\r\noptfunc.gp_list[0].predict(Xeval[[0,0,0,0],:],eval_MSE=True)\r\noptfunc.gp_list[0].predict(Xeval[[0,0,0],:],eval_MSE=True)\r\noptfunc.gp_list[0].predict(Xeval[[0,0],:],eval_MSE=True)\r\noptfunc.gp_list[0].predict(Xeval[[0],:],eval_MSE=True)\r\noptfunc.gp_list[0].predict(Xeval[[0,1],:],eval_MSE=True)\r\noptfunc.gp_list[0].predict(Xeval[[0,1,1],:],eval_MSE=True)\r\noptfunc.gp_list[0].predict(Xeval[[0,1,1,1],:],eval_MSE=True)\r\noptfunc.gp_list[0].predict(Xeval[[zeros(1,5)],:],eval_MSE=True)\r\noptfunc.gp_list[0].predict(Xeval[[np.zeros(1,5)],:],eval_MSE=True)\r\noptfunc.gp_list[0].predict(Xeval[np.zeros(1,5),:],eval_MSE=True)\r\nnp.zeros(1,5)\r\nnp.zeros(5)\r\noptfunc.gp_list[0].predict(Xeval[np.zeros(15),:],eval_MSE=True)\r\noptfunc.gp_list[0].predict(Xeval[np.zeros(5),:],eval_MSE=True)\r\noptfunc.gp_list[0].predict(Xeval[[np.zeros(5)],:],eval_MSE=True)\r\noptfunc.gp_list[0].predict(Xeval[[0,0,0,0,0,0,0,0,0,0,0],:],eval_MSE=True)\r\noptfunc.gp_list[0].predict(Xeval[[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],:],eval_MSE=True)\r\nXeval[[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],:]\r\noptfunc.gp_list[0].predict(Xeval[[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],:],eval_MSE=True)\r\noptfunc.gp_list[0].predict(Xeval[[0,1,2,3],:],eval_MSE=True)\r\noptfunc.gp_list[0].predict(Xeval[[0,1,2],:],eval_MSE=True)\r\nXeval[[0,1,2,3]]\r\nXeval[[0,1,2]]\r\nXeval[[0,1,2],:]\r\noptfunc.gp_list[0].predict(Xeval[[0,0],:],eval_MSE=True)\r\noptfunc.gp_list[0].predict(Xeval[[0,0,0],:],eval_MSE=True)\r\noptfunc.gp_list[0].predict(Xeval[[0,0,0,0],:],eval_MSE=True)\r\noptfunc.gp_list[0].predict([0.5,0.5],:],eval_MSE=True)\r\noptfunc.gp_list[0].predict([0.5,0.5],eval_MSE=True)\r\noptfunc.gp_list[0].predict([[0.5,0.5]],eval_MSE=True)\r\noptfunc.gp_list[0].predict([[0.5,0.49]],eval_MSE=True)\r\noptfunc.gp_list[0].predict([[0.5,0.48]],eval_MSE=True)\r\noptfunc.gp_list[0].predict([[0.5,0.495]],eval_MSE=True)\r\noptfunc.gp_list[0].predict([[0.5,0.499]],eval_MSE=True)\r\noptfunc.gp_list[0].predict([[0.5,0.4999]],eval_MSE=True)\r\noptfunc.gp_list[0].predict([[0.5,0.49999]],eval_MSE=True)\r\noptfunc.gp_list[0].predict([[0.5,0.5]],eval_MSE=True)\r\noptfunc.gp_list[0].predict([[0.5,0.5001]],eval_MSE=True)\r\nfor i in range(0,100)\r\nfor i in range(0,100): y[i],s[i] = optfunc.gp_list[0].predict([[0.5,i*0.01]],eval_MSE=True)\r\ny = []\r\ns = []\r\nfor i in range(0,100): y[i],s[i] = optfunc.gp_list[0].predict([[0.5,i*0.01]],eval_MSE=True)\r\nfor i in 
range(0,100): optfunc.gp_list[0].predict([[0.5,i*0.01]],eval_MSE=True)\r\nfor i in range(0,100): a, b = optfunc.gp_list[0].predict([[0.5,i*0.01]],eval_MSE=True) y = np.r_[y,a] s = np.r_[s,b]\r\ny\r\noptfunc.gp_list[0]\r\nrunfile('C:/Users/b4/.spyder2-py3/PEIOPT.py', wdir='C:/Users/b4/.spyder2-py3')\r\ny = []\r\ns = []\r\nfor i in range(0,100): a, b = optfunc.gp_list[0].predict([[0.5,i*0.01]],eval_MSE=True) y = np.r_[y,a] s = np.r_[s,b]\r\ny = []\r\ns = []\r\nfor i in range(0,200): a, b = optfunc.gp_list[0].predict([[0.5,i*0.01]],eval_MSE=True) y = np.r_[y,a] s = np.r_[s,b]\r\ny = []\r\ns = []\r\nfor i in range(0,200): a, b = optfunc.gp_list[0].predict([[1.,i*0.01]],eval_MSE=True) y = np.r_[y,a] s = np.r_[s,b]\r\nrunfile('C:/Users/b4/.spyder2-py3/PEIOPT.py', wdir='C:/Users/b4/.spyder2-py3')\r\ny = []\r\ns = []\r\nfor i in range(0,200): a, b = optfunc.gp_list[0].predict([[1.,i*0.01]],eval_MSE=True) y = np.r_[y,a] s = np.r_[s,b]\r\nrunfile('C:/Users/b4/.spyder2-py3/PEIOPT.py', wdir='C:/Users/b4/.spyder2-py3')\r\ny = []\r\ns = []\r\nfor i in range(0,200): a, b = optfunc.gp_list[0].predict([[1.,i*0.01]],eval_MSE=True) y = np.r_[y,a] s = np.r_[s,b]\r\nrunfile('C:/Users/b4/.spyder2-py3/PEIOPT.py', wdir='C:/Users/b4/.spyder2-py3')\r\n\r\n##---(Wed Mar 23 11:14:55 2016)---\r\nrunfile('C:/Users/b4/.spyder2-py3/PEIOPT.py', wdir='C:/Users/b4/.spyder2-py3')",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
class Persona:
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def hola(self):
print('Hola Mundo')
class Empleado(Persona):
def __init__(self, salario, antiguedad, nombre_empleado, edad_empleado,
residencia_empleado):
super().__init__(nombre_empleado, edad_empleado, residencia_empleado)
self.salario = salario
self.antiguedad_persona = antiguedad
super().hola()
def descripcion(self):
super().descripcion()
print('Salario: ', self.salario, 'Antiguedad: ', self.
antiguedad_persona)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
class Persona:
def __init__(self, nombre, edad, lugar_residencia):
self.nombre = nombre
self.edad = edad
self.residencia = lugar_residencia
<|reserved_special_token_0|>
def hola(self):
print('Hola Mundo')
class Empleado(Persona):
def __init__(self, salario, antiguedad, nombre_empleado, edad_empleado,
residencia_empleado):
super().__init__(nombre_empleado, edad_empleado, residencia_empleado)
self.salario = salario
self.antiguedad_persona = antiguedad
super().hola()
def descripcion(self):
super().descripcion()
print('Salario: ', self.salario, 'Antiguedad: ', self.
antiguedad_persona)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
class Persona:
def __init__(self, nombre, edad, lugar_residencia):
self.nombre = nombre
self.edad = edad
self.residencia = lugar_residencia
def descripcion(self):
print('Nombre: ', self.nombre, ' Edad: ', self.edad,
' Lugar de residencia: ', self.residencia)
def hola(self):
print('Hola Mundo')
class Empleado(Persona):
def __init__(self, salario, antiguedad, nombre_empleado, edad_empleado,
residencia_empleado):
super().__init__(nombre_empleado, edad_empleado, residencia_empleado)
self.salario = salario
self.antiguedad_persona = antiguedad
super().hola()
def descripcion(self):
super().descripcion()
print('Salario: ', self.salario, 'Antiguedad: ', self.
antiguedad_persona)
<|reserved_special_token_0|>
Antonio.descripcion()
print(isinstance(Antonio, Empleado))
<|reserved_special_token_1|>
class Persona:
def __init__(self, nombre, edad, lugar_residencia):
self.nombre = nombre
self.edad = edad
self.residencia = lugar_residencia
def descripcion(self):
print('Nombre: ', self.nombre, ' Edad: ', self.edad,
' Lugar de residencia: ', self.residencia)
def hola(self):
print('Hola Mundo')
class Empleado(Persona):
def __init__(self, salario, antiguedad, nombre_empleado, edad_empleado,
residencia_empleado):
super().__init__(nombre_empleado, edad_empleado, residencia_empleado)
self.salario = salario
self.antiguedad_persona = antiguedad
super().hola()
def descripcion(self):
super().descripcion()
print('Salario: ', self.salario, 'Antiguedad: ', self.
antiguedad_persona)
Antonio = Persona('Alex', 23, 'Merida')
Antonio.descripcion()
print(isinstance(Antonio, Empleado))
<|reserved_special_token_1|>
#Application of the super() function
class Persona():
def __init__(self,nombre,edad,lugar_residencia):
self.nombre = nombre
self.edad = edad
self.residencia = lugar_residencia
def descripcion(self):
print("Nombre: ",self.nombre," Edad: ", self.edad," Lugar de residencia: ",self.residencia)
def hola(self):
print("Hola Mundo")
class Empleado(Persona):
def __init__(self,salario,antiguedad,nombre_empleado,edad_empleado,residencia_empleado):
super().__init__(nombre_empleado,edad_empleado,residencia_empleado)#Hace la llamada al constructor de la clase padre que esta heredando
self.salario = salario
self.antiguedad_persona=antiguedad
super().hola()
def descripcion(self):
super().descripcion()
print("Salario: " ,self.salario, "Antiguedad: ",self.antiguedad_persona)
Antonio = Persona("Alex",23,"Merida")
Antonio.descripcion()
print(isinstance(Antonio,Empleado))
#Principio de sustitucion
#consiste en plantearse las siguientes preguntas:
#es siempre un o una
#funcion isinstance()--> nos informa si un objeto es instancia de una clase determinada devuelve verdadero o falso
|
flexible
|
{
"blob_id": "92a50bcdbb4c03d1a4813a93c2e0986250516f14",
"index": 1117,
"step-1": "class Persona:\n <mask token>\n <mask token>\n\n def hola(self):\n print('Hola Mundo')\n\n\nclass Empleado(Persona):\n\n def __init__(self, salario, antiguedad, nombre_empleado, edad_empleado,\n residencia_empleado):\n super().__init__(nombre_empleado, edad_empleado, residencia_empleado)\n self.salario = salario\n self.antiguedad_persona = antiguedad\n super().hola()\n\n def descripcion(self):\n super().descripcion()\n print('Salario: ', self.salario, 'Antiguedad: ', self.\n antiguedad_persona)\n\n\n<mask token>\n",
"step-2": "class Persona:\n\n def __init__(self, nombre, edad, lugar_residencia):\n self.nombre = nombre\n self.edad = edad\n self.residencia = lugar_residencia\n <mask token>\n\n def hola(self):\n print('Hola Mundo')\n\n\nclass Empleado(Persona):\n\n def __init__(self, salario, antiguedad, nombre_empleado, edad_empleado,\n residencia_empleado):\n super().__init__(nombre_empleado, edad_empleado, residencia_empleado)\n self.salario = salario\n self.antiguedad_persona = antiguedad\n super().hola()\n\n def descripcion(self):\n super().descripcion()\n print('Salario: ', self.salario, 'Antiguedad: ', self.\n antiguedad_persona)\n\n\n<mask token>\n",
"step-3": "class Persona:\n\n def __init__(self, nombre, edad, lugar_residencia):\n self.nombre = nombre\n self.edad = edad\n self.residencia = lugar_residencia\n\n def descripcion(self):\n print('Nombre: ', self.nombre, ' Edad: ', self.edad,\n ' Lugar de residencia: ', self.residencia)\n\n def hola(self):\n print('Hola Mundo')\n\n\nclass Empleado(Persona):\n\n def __init__(self, salario, antiguedad, nombre_empleado, edad_empleado,\n residencia_empleado):\n super().__init__(nombre_empleado, edad_empleado, residencia_empleado)\n self.salario = salario\n self.antiguedad_persona = antiguedad\n super().hola()\n\n def descripcion(self):\n super().descripcion()\n print('Salario: ', self.salario, 'Antiguedad: ', self.\n antiguedad_persona)\n\n\n<mask token>\nAntonio.descripcion()\nprint(isinstance(Antonio, Empleado))\n",
"step-4": "class Persona:\n\n def __init__(self, nombre, edad, lugar_residencia):\n self.nombre = nombre\n self.edad = edad\n self.residencia = lugar_residencia\n\n def descripcion(self):\n print('Nombre: ', self.nombre, ' Edad: ', self.edad,\n ' Lugar de residencia: ', self.residencia)\n\n def hola(self):\n print('Hola Mundo')\n\n\nclass Empleado(Persona):\n\n def __init__(self, salario, antiguedad, nombre_empleado, edad_empleado,\n residencia_empleado):\n super().__init__(nombre_empleado, edad_empleado, residencia_empleado)\n self.salario = salario\n self.antiguedad_persona = antiguedad\n super().hola()\n\n def descripcion(self):\n super().descripcion()\n print('Salario: ', self.salario, 'Antiguedad: ', self.\n antiguedad_persona)\n\n\nAntonio = Persona('Alex', 23, 'Merida')\nAntonio.descripcion()\nprint(isinstance(Antonio, Empleado))\n",
"step-5": "\n\n#Aplicacion de la funcion super()\n\nclass Persona():\n def __init__(self,nombre,edad,lugar_residencia):\n self.nombre = nombre\n self.edad = edad\n self.residencia = lugar_residencia\n \n def descripcion(self):\n print(\"Nombre: \",self.nombre,\" Edad: \", self.edad,\" Lugar de residencia: \",self.residencia)\n \n def hola(self):\n print(\"Hola Mundo\")\n\nclass Empleado(Persona):\n\n def __init__(self,salario,antiguedad,nombre_empleado,edad_empleado,residencia_empleado):\n\n super().__init__(nombre_empleado,edad_empleado,residencia_empleado)#Hace la llamada al constructor de la clase padre que esta heredando\n self.salario = salario\n self.antiguedad_persona=antiguedad\n\n super().hola()\n \n def descripcion(self):\n super().descripcion()\n print(\"Salario: \" ,self.salario, \"Antiguedad: \",self.antiguedad_persona)\n\n\nAntonio = Persona(\"Alex\",23,\"Merida\")\nAntonio.descripcion()\n\nprint(isinstance(Antonio,Empleado))\n\n\n#Principio de sustitucion\n#consiste en plantearse las siguientes preguntas:\n\n#es siempre un o una\n\n#funcion isinstance()--> nos informa si un objeto es instancia de una clase determinada devuelve verdadero o falso\n\n\n\n",
"step-ids": [
5,
6,
8,
9,
10
]
}
|
[
5,
6,
8,
9,
10
] |
n = eval(input("Entrez valeur: "))
res = 0
while n > 0:
res += n%10
n //= 10
print(res, n)
print(res)
|
normal
|
{
"blob_id": "391ecb2f23cc0ce59bd9fac6f97bd4c1788444b9",
"index": 4416,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nwhile n > 0:\n res += n % 10\n n //= 10\n print(res, n)\nprint(res)\n",
"step-3": "n = eval(input('Entrez valeur: '))\nres = 0\nwhile n > 0:\n res += n % 10\n n //= 10\n print(res, n)\nprint(res)\n",
"step-4": "n = eval(input(\"Entrez valeur: \"))\nres = 0\n\nwhile n > 0:\n res += n%10\n n //= 10\n print(res, n)\n\nprint(res)\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
def CreateCGCS2000prj(shpPath):
body = (
'GEOGCS["CGCS_2000",DATUM["D_2000",SPHEROID["S_2000",6378137.0,298.2572221010041]],PRIMEM["Greenwich",0.0],UNIT["Degree",0.0174532925199433]]'
)
writePrj(shpPath, body)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def writePrj(shpPath, test):
prj = open(shpPath.split('.')[0] + '.prj', 'w')
prj.write(test)
prj.close()
def CreateCGCS2000prj(shpPath):
body = (
'GEOGCS["CGCS_2000",DATUM["D_2000",SPHEROID["S_2000",6378137.0,298.2572221010041]],PRIMEM["Greenwich",0.0],UNIT["Degree",0.0174532925199433]]'
)
writePrj(shpPath, body)
def CreateWGS84(shpPath):
body = (
'GEOGCS["GCS_WGS_1984",DATUM["D_WGS_1984",SPHEROID["WGS_1984",6378137.0,298.257223563]],PRIMEM["Greenwich",0.0],UNIT["Degree",0.0174532925199433]]'
)
writePrj(shpPath, body)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def writePrj(shpPath, test):
prj = open(shpPath.split('.')[0] + '.prj', 'w')
prj.write(test)
prj.close()
def CreateCGCS2000prj(shpPath):
body = (
'GEOGCS["CGCS_2000",DATUM["D_2000",SPHEROID["S_2000",6378137.0,298.2572221010041]],PRIMEM["Greenwich",0.0],UNIT["Degree",0.0174532925199433]]'
)
writePrj(shpPath, body)
def CreateWGS84(shpPath):
body = (
'GEOGCS["GCS_WGS_1984",DATUM["D_WGS_1984",SPHEROID["WGS_1984",6378137.0,298.257223563]],PRIMEM["Greenwich",0.0],UNIT["Degree",0.0174532925199433]]'
)
writePrj(shpPath, body)
def CreateBeijing54(shpPath):
body = (
'GEOGCS["GCS_Beijing_1954",DATUM["D_Beijing_1954",SPHEROID["Krasovsky_1940",6378245.0,298.3]],PRIMEM["Greenwich",0.0],UNIT["Degree",0.0174532925199433]]'
)
writePrj(shpPath, body)
def CreateXian54(shpPath):
body = (
'GEOGCS["GCS_Xian_1980",DATUM["D_Xian_1980",SPHEROID["Xian_1980",6378140.0,298.257]],PRIMEM["Greenwich",0.0],UNIT["Degree",0.0174532925199433]]'
)
writePrj(shpPath, body)
def CreatePoint(shpPath, pointList):
point = arcpy.Point()
pointGeoms = []
for pt in pointList:
point.X = pt[0]
point.Y = pt[1]
pointGeoms.append(arcpy.PointGeometry(point))
arcpy.CopyFeatures_management(pointGeoms, shpPath)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
import sys
import os
arcpy_path = ['D:\\software\\ArcGIS\\python 27\\ArcGIS10.2\\Lib\\site-packages'
, 'D:\\software\\ArcGIS\\Desktop 10.2\\Desktop10.2\\arcpy',
'D:\\software\\ArcGIS\\Desktop 10.2\\Desktop10.2\\bin',
'D:\\software\\ArcGIS\\Desktop 10.2\\Desktop10.2\\ArcToolbox\\Scripts']
sys.path.extend(arcpy_path)
import arcpy
arcpy.gp.overweiteOutput = 1
def writePrj(shpPath, test):
prj = open(shpPath.split('.')[0] + '.prj', 'w')
prj.write(test)
prj.close()
def CreateCGCS2000prj(shpPath):
body = (
'GEOGCS["CGCS_2000",DATUM["D_2000",SPHEROID["S_2000",6378137.0,298.2572221010041]],PRIMEM["Greenwich",0.0],UNIT["Degree",0.0174532925199433]]'
)
writePrj(shpPath, body)
def CreateWGS84(shpPath):
body = (
'GEOGCS["GCS_WGS_1984",DATUM["D_WGS_1984",SPHEROID["WGS_1984",6378137.0,298.257223563]],PRIMEM["Greenwich",0.0],UNIT["Degree",0.0174532925199433]]'
)
writePrj(shpPath, body)
def CreateBeijing54(shpPath):
body = (
'GEOGCS["GCS_Beijing_1954",DATUM["D_Beijing_1954",SPHEROID["Krasovsky_1940",6378245.0,298.3]],PRIMEM["Greenwich",0.0],UNIT["Degree",0.0174532925199433]]'
)
writePrj(shpPath, body)
def CreateXian54(shpPath):
body = (
'GEOGCS["GCS_Xian_1980",DATUM["D_Xian_1980",SPHEROID["Xian_1980",6378140.0,298.257]],PRIMEM["Greenwich",0.0],UNIT["Degree",0.0174532925199433]]'
)
writePrj(shpPath, body)
def CreatePoint(shpPath, pointList):
point = arcpy.Point()
pointGeoms = []
for pt in pointList:
point.X = pt[0]
point.Y = pt[1]
pointGeoms.append(arcpy.PointGeometry(point))
arcpy.CopyFeatures_management(pointGeoms, shpPath)
ptList = [[20.0, 43.0], [25.5, 45.085], [26.574, 46.025], [28.131, 48.124]]
shpPath = 'D:\\geodata\\test\\point.shp'
CreatePoint(shpPath, ptList)
CreateCGCS2000prj(shpPath)
<|reserved_special_token_1|>
import sys
import os
arcpy_path = [r'D:\software\ArcGIS\python 27\ArcGIS10.2\Lib\site-packages',
r'D:\software\ArcGIS\Desktop 10.2\Desktop10.2\arcpy',
r'D:\software\ArcGIS\Desktop 10.2\Desktop10.2\bin',
r'D:\software\ArcGIS\Desktop 10.2\Desktop10.2\ArcToolbox\Scripts']
sys.path.extend(arcpy_path)
import arcpy
arcpy.gp.overweiteOutput = 1
def writePrj(shpPath, test):
prj = open(shpPath.split('.')[0] + '.prj', 'w')
prj.write(test)
prj.close()
def CreateCGCS2000prj(shpPath):
body = 'GEOGCS["CGCS_2000",DATUM["D_2000",SPHEROID["S_2000",6378137.0,298.2572221010041]],PRIMEM["Greenwich",0.0],UNIT["Degree",0.0174532925199433]]'
writePrj(shpPath, body)
def CreateWGS84(shpPath):
body = 'GEOGCS["GCS_WGS_1984",DATUM["D_WGS_1984",SPHEROID["WGS_1984",6378137.0,298.257223563]],PRIMEM["Greenwich",0.0],UNIT["Degree",0.0174532925199433]]'
writePrj(shpPath, body)
def CreateBeijing54(shpPath):
body = 'GEOGCS["GCS_Beijing_1954",DATUM["D_Beijing_1954",SPHEROID["Krasovsky_1940",6378245.0,298.3]],PRIMEM["Greenwich",0.0],UNIT["Degree",0.0174532925199433]]'
writePrj(shpPath, body)
def CreateXian54(shpPath):
body = 'GEOGCS["GCS_Xian_1980",DATUM["D_Xian_1980",SPHEROID["Xian_1980",6378140.0,298.257]],PRIMEM["Greenwich",0.0],UNIT["Degree",0.0174532925199433]]'
writePrj(shpPath, body)
def CreatePoint(shpPath, pointList):
point = arcpy.Point()
pointGeoms = []
for pt in pointList:
point.X = pt[0]
point.Y = pt[1]
pointGeoms.append(arcpy.PointGeometry(point))
arcpy.CopyFeatures_management(pointGeoms, shpPath)
ptList =[[20.000,43.000],[25.500, 45.085],[26.574, 46.025], [28.131, 48.124]]
shpPath = r'D:\geodata\test\point.shp'
CreatePoint(shpPath, ptList)
CreateCGCS2000prj(shpPath)
|
flexible
|
{
"blob_id": "eab2cdd92d3be5760f13e747b05ca902eaf9aca8",
"index": 8287,
"step-1": "<mask token>\n\n\ndef CreateCGCS2000prj(shpPath):\n body = (\n 'GEOGCS[\"CGCS_2000\",DATUM[\"D_2000\",SPHEROID[\"S_2000\",6378137.0,298.2572221010041]],PRIMEM[\"Greenwich\",0.0],UNIT[\"Degree\",0.0174532925199433]]'\n )\n writePrj(shpPath, body)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef writePrj(shpPath, test):\n prj = open(shpPath.split('.')[0] + '.prj', 'w')\n prj.write(test)\n prj.close()\n\n\ndef CreateCGCS2000prj(shpPath):\n body = (\n 'GEOGCS[\"CGCS_2000\",DATUM[\"D_2000\",SPHEROID[\"S_2000\",6378137.0,298.2572221010041]],PRIMEM[\"Greenwich\",0.0],UNIT[\"Degree\",0.0174532925199433]]'\n )\n writePrj(shpPath, body)\n\n\ndef CreateWGS84(shpPath):\n body = (\n 'GEOGCS[\"GCS_WGS_1984\",DATUM[\"D_WGS_1984\",SPHEROID[\"WGS_1984\",6378137.0,298.257223563]],PRIMEM[\"Greenwich\",0.0],UNIT[\"Degree\",0.0174532925199433]]'\n )\n writePrj(shpPath, body)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef writePrj(shpPath, test):\n prj = open(shpPath.split('.')[0] + '.prj', 'w')\n prj.write(test)\n prj.close()\n\n\ndef CreateCGCS2000prj(shpPath):\n body = (\n 'GEOGCS[\"CGCS_2000\",DATUM[\"D_2000\",SPHEROID[\"S_2000\",6378137.0,298.2572221010041]],PRIMEM[\"Greenwich\",0.0],UNIT[\"Degree\",0.0174532925199433]]'\n )\n writePrj(shpPath, body)\n\n\ndef CreateWGS84(shpPath):\n body = (\n 'GEOGCS[\"GCS_WGS_1984\",DATUM[\"D_WGS_1984\",SPHEROID[\"WGS_1984\",6378137.0,298.257223563]],PRIMEM[\"Greenwich\",0.0],UNIT[\"Degree\",0.0174532925199433]]'\n )\n writePrj(shpPath, body)\n\n\ndef CreateBeijing54(shpPath):\n body = (\n 'GEOGCS[\"GCS_Beijing_1954\",DATUM[\"D_Beijing_1954\",SPHEROID[\"Krasovsky_1940\",6378245.0,298.3]],PRIMEM[\"Greenwich\",0.0],UNIT[\"Degree\",0.0174532925199433]]'\n )\n writePrj(shpPath, body)\n\n\ndef CreateXian54(shpPath):\n body = (\n 'GEOGCS[\"GCS_Xian_1980\",DATUM[\"D_Xian_1980\",SPHEROID[\"Xian_1980\",6378140.0,298.257]],PRIMEM[\"Greenwich\",0.0],UNIT[\"Degree\",0.0174532925199433]]'\n )\n writePrj(shpPath, body)\n\n\ndef CreatePoint(shpPath, pointList):\n point = arcpy.Point()\n pointGeoms = []\n for pt in pointList:\n point.X = pt[0]\n point.Y = pt[1]\n pointGeoms.append(arcpy.PointGeometry(point))\n arcpy.CopyFeatures_management(pointGeoms, shpPath)\n\n\n<mask token>\n",
"step-4": "import sys\nimport os\narcpy_path = ['D:\\\\software\\\\ArcGIS\\\\python 27\\\\ArcGIS10.2\\\\Lib\\\\site-packages'\n , 'D:\\\\software\\\\ArcGIS\\\\Desktop 10.2\\\\Desktop10.2\\\\arcpy',\n 'D:\\\\software\\\\ArcGIS\\\\Desktop 10.2\\\\Desktop10.2\\\\bin',\n 'D:\\\\software\\\\ArcGIS\\\\Desktop 10.2\\\\Desktop10.2\\\\ArcToolbox\\\\Scripts']\nsys.path.extend(arcpy_path)\nimport arcpy\narcpy.gp.overweiteOutput = 1\n\n\ndef writePrj(shpPath, test):\n prj = open(shpPath.split('.')[0] + '.prj', 'w')\n prj.write(test)\n prj.close()\n\n\ndef CreateCGCS2000prj(shpPath):\n body = (\n 'GEOGCS[\"CGCS_2000\",DATUM[\"D_2000\",SPHEROID[\"S_2000\",6378137.0,298.2572221010041]],PRIMEM[\"Greenwich\",0.0],UNIT[\"Degree\",0.0174532925199433]]'\n )\n writePrj(shpPath, body)\n\n\ndef CreateWGS84(shpPath):\n body = (\n 'GEOGCS[\"GCS_WGS_1984\",DATUM[\"D_WGS_1984\",SPHEROID[\"WGS_1984\",6378137.0,298.257223563]],PRIMEM[\"Greenwich\",0.0],UNIT[\"Degree\",0.0174532925199433]]'\n )\n writePrj(shpPath, body)\n\n\ndef CreateBeijing54(shpPath):\n body = (\n 'GEOGCS[\"GCS_Beijing_1954\",DATUM[\"D_Beijing_1954\",SPHEROID[\"Krasovsky_1940\",6378245.0,298.3]],PRIMEM[\"Greenwich\",0.0],UNIT[\"Degree\",0.0174532925199433]]'\n )\n writePrj(shpPath, body)\n\n\ndef CreateXian54(shpPath):\n body = (\n 'GEOGCS[\"GCS_Xian_1980\",DATUM[\"D_Xian_1980\",SPHEROID[\"Xian_1980\",6378140.0,298.257]],PRIMEM[\"Greenwich\",0.0],UNIT[\"Degree\",0.0174532925199433]]'\n )\n writePrj(shpPath, body)\n\n\ndef CreatePoint(shpPath, pointList):\n point = arcpy.Point()\n pointGeoms = []\n for pt in pointList:\n point.X = pt[0]\n point.Y = pt[1]\n pointGeoms.append(arcpy.PointGeometry(point))\n arcpy.CopyFeatures_management(pointGeoms, shpPath)\n\n\nptList = [[20.0, 43.0], [25.5, 45.085], [26.574, 46.025], [28.131, 48.124]]\nshpPath = 'D:\\\\geodata\\\\test\\\\point.shp'\nCreatePoint(shpPath, ptList)\nCreateCGCS2000prj(shpPath)\n",
"step-5": "import sys\nimport os\n\narcpy_path = [r'D:\\software\\ArcGIS\\python 27\\ArcGIS10.2\\Lib\\site-packages',\n r'D:\\software\\ArcGIS\\Desktop 10.2\\Desktop10.2\\arcpy',\n r'D:\\software\\ArcGIS\\Desktop 10.2\\Desktop10.2\\bin',\n r'D:\\software\\ArcGIS\\Desktop 10.2\\Desktop10.2\\ArcToolbox\\Scripts']\n\nsys.path.extend(arcpy_path)\n\nimport arcpy\narcpy.gp.overweiteOutput = 1\n\ndef writePrj(shpPath, test):\n prj = open(shpPath.split('.')[0] + '.prj', 'w')\n prj.write(test)\n prj.close()\n\ndef CreateCGCS2000prj(shpPath):\n body = 'GEOGCS[\"CGCS_2000\",DATUM[\"D_2000\",SPHEROID[\"S_2000\",6378137.0,298.2572221010041]],PRIMEM[\"Greenwich\",0.0],UNIT[\"Degree\",0.0174532925199433]]'\n writePrj(shpPath, body)\ndef CreateWGS84(shpPath):\n body = 'GEOGCS[\"GCS_WGS_1984\",DATUM[\"D_WGS_1984\",SPHEROID[\"WGS_1984\",6378137.0,298.257223563]],PRIMEM[\"Greenwich\",0.0],UNIT[\"Degree\",0.0174532925199433]]'\n writePrj(shpPath, body)\ndef CreateBeijing54(shpPath):\n body = 'GEOGCS[\"GCS_Beijing_1954\",DATUM[\"D_Beijing_1954\",SPHEROID[\"Krasovsky_1940\",6378245.0,298.3]],PRIMEM[\"Greenwich\",0.0],UNIT[\"Degree\",0.0174532925199433]]'\n writePrj(shpPath, body)\ndef CreateXian54(shpPath):\n body = 'GEOGCS[\"GCS_Xian_1980\",DATUM[\"D_Xian_1980\",SPHEROID[\"Xian_1980\",6378140.0,298.257]],PRIMEM[\"Greenwich\",0.0],UNIT[\"Degree\",0.0174532925199433]]'\n writePrj(shpPath, body)\n \n \ndef CreatePoint(shpPath, pointList):\n point = arcpy.Point()\n pointGeoms = []\n for pt in pointList:\n point.X = pt[0]\n point.Y = pt[1]\n pointGeoms.append(arcpy.PointGeometry(point))\n arcpy.CopyFeatures_management(pointGeoms, shpPath)\n\nptList =[[20.000,43.000],[25.500, 45.085],[26.574, 46.025], [28.131, 48.124]]\nshpPath = r'D:\\geodata\\test\\point.shp'\nCreatePoint(shpPath, ptList)\nCreateCGCS2000prj(shpPath)",
"step-ids": [
1,
3,
6,
9,
10
]
}
|
[
1,
3,
6,
9,
10
] |
# -*- coding: utf-8 -*-
from __future__ import absolute_import
import sh
import reqwire.helpers.cli
log_methods = (
'echo',
'error',
'fatal',
'info',
'warn',
'warning',
)
def test_emojize_win32(mocker):
mocker.patch('sys.platform', 'win32')
assert reqwire.helpers.cli.emojize(
':thumbs_up_sign: foo').encode('utf-8') == b'foo'
def test_emojize_linux(mocker):
mocker.patch('sys.platform', 'linux')
mocker.patch('io.open', mocker.mock_open(
read_data='Linux version 4.4.0-31-generic (gcc version 5.3.1)'))
assert reqwire.helpers.cli.emojize(
':thumbs_up_sign:').encode('utf-8') == b'\xf0\x9f\x91\x8d'
def test_emojize_linux_ioerror(mocker):
mocker.patch('sys.platform', 'linux')
mocker.patch('io.open', side_effect=IOError)
assert reqwire.helpers.cli.emojize(
':thumbs_up_sign:').encode('utf-8') == b'\xf0\x9f\x91\x8d'
def test_emojize_wsl(mocker):
mocker.patch('sys.platform', 'linux')
mocker.patch('io.open', mocker.mock_open(
read_data='Linux version 3.4.0-Microsoft ([email protected])'))
assert reqwire.helpers.cli.emojize(
':thumbs_up_sign: foo').encode('utf-8') == b'foo'
def test_console_writer_quiet(mocker):
click_echo = mocker.patch('click.echo')
console = reqwire.helpers.cli.ConsoleWriter(verbose=False)
for method in log_methods:
getattr(console, method)('test')
click_echo.assert_not_called()
def test_console_writer_verbose(mocker):
mocker.patch('sys.platform', 'linux')
mocker.patch('io.open', mocker.mock_open(
read_data='Linux version 4.4.0-31-generic (gcc version 5.3.1)'))
click_echo = mocker.patch('click.echo')
console = reqwire.helpers.cli.ConsoleWriter(verbose=True)
for method in log_methods:
getattr(console, method)('test')
fmt = console.format_strings.get(method, '{msg}')
message = reqwire.helpers.cli.emojize(fmt.format(msg='test'))
click_echo.assert_called_once_with(message)
click_echo.reset_mock()
def test_build_with_pip_compile_options(cli_runner, mocker):
from reqwire.cli import main
pip_compile = mocker.patch.object(sh, 'pip_compile')
result = cli_runner.invoke(main, ['build', '-t', 'main', '--',
'--no-header'])
assert result.exit_code == 0, result.output
assert pip_compile.call_args[0][2] == '--no-header'
def test_main_remove(cli_runner):
from reqwire.cli import main
result = cli_runner.invoke(main, ['remove', 'Flask'])
assert result.exit_code == 0, result.output
|
normal
|
{
"blob_id": "1a7a2c2cfb2aa94401defd7a7a500f7dd2e7e0aa",
"index": 9680,
"step-1": "<mask token>\n\n\ndef test_emojize_win32(mocker):\n mocker.patch('sys.platform', 'win32')\n assert reqwire.helpers.cli.emojize(':thumbs_up_sign: foo').encode('utf-8'\n ) == b'foo'\n\n\ndef test_emojize_linux(mocker):\n mocker.patch('sys.platform', 'linux')\n mocker.patch('io.open', mocker.mock_open(read_data=\n 'Linux version 4.4.0-31-generic (gcc version 5.3.1)'))\n assert reqwire.helpers.cli.emojize(':thumbs_up_sign:').encode('utf-8'\n ) == b'\\xf0\\x9f\\x91\\x8d'\n\n\ndef test_emojize_linux_ioerror(mocker):\n mocker.patch('sys.platform', 'linux')\n mocker.patch('io.open', side_effect=IOError)\n assert reqwire.helpers.cli.emojize(':thumbs_up_sign:').encode('utf-8'\n ) == b'\\xf0\\x9f\\x91\\x8d'\n\n\ndef test_emojize_wsl(mocker):\n mocker.patch('sys.platform', 'linux')\n mocker.patch('io.open', mocker.mock_open(read_data=\n 'Linux version 3.4.0-Microsoft ([email protected])'))\n assert reqwire.helpers.cli.emojize(':thumbs_up_sign: foo').encode('utf-8'\n ) == b'foo'\n\n\ndef test_console_writer_quiet(mocker):\n click_echo = mocker.patch('click.echo')\n console = reqwire.helpers.cli.ConsoleWriter(verbose=False)\n for method in log_methods:\n getattr(console, method)('test')\n click_echo.assert_not_called()\n\n\ndef test_console_writer_verbose(mocker):\n mocker.patch('sys.platform', 'linux')\n mocker.patch('io.open', mocker.mock_open(read_data=\n 'Linux version 4.4.0-31-generic (gcc version 5.3.1)'))\n click_echo = mocker.patch('click.echo')\n console = reqwire.helpers.cli.ConsoleWriter(verbose=True)\n for method in log_methods:\n getattr(console, method)('test')\n fmt = console.format_strings.get(method, '{msg}')\n message = reqwire.helpers.cli.emojize(fmt.format(msg='test'))\n click_echo.assert_called_once_with(message)\n click_echo.reset_mock()\n\n\n<mask token>\n\n\ndef test_main_remove(cli_runner):\n from reqwire.cli import main\n result = cli_runner.invoke(main, ['remove', 'Flask'])\n assert result.exit_code == 0, result.output\n",
"step-2": "<mask token>\n\n\ndef test_emojize_win32(mocker):\n mocker.patch('sys.platform', 'win32')\n assert reqwire.helpers.cli.emojize(':thumbs_up_sign: foo').encode('utf-8'\n ) == b'foo'\n\n\ndef test_emojize_linux(mocker):\n mocker.patch('sys.platform', 'linux')\n mocker.patch('io.open', mocker.mock_open(read_data=\n 'Linux version 4.4.0-31-generic (gcc version 5.3.1)'))\n assert reqwire.helpers.cli.emojize(':thumbs_up_sign:').encode('utf-8'\n ) == b'\\xf0\\x9f\\x91\\x8d'\n\n\ndef test_emojize_linux_ioerror(mocker):\n mocker.patch('sys.platform', 'linux')\n mocker.patch('io.open', side_effect=IOError)\n assert reqwire.helpers.cli.emojize(':thumbs_up_sign:').encode('utf-8'\n ) == b'\\xf0\\x9f\\x91\\x8d'\n\n\ndef test_emojize_wsl(mocker):\n mocker.patch('sys.platform', 'linux')\n mocker.patch('io.open', mocker.mock_open(read_data=\n 'Linux version 3.4.0-Microsoft ([email protected])'))\n assert reqwire.helpers.cli.emojize(':thumbs_up_sign: foo').encode('utf-8'\n ) == b'foo'\n\n\ndef test_console_writer_quiet(mocker):\n click_echo = mocker.patch('click.echo')\n console = reqwire.helpers.cli.ConsoleWriter(verbose=False)\n for method in log_methods:\n getattr(console, method)('test')\n click_echo.assert_not_called()\n\n\ndef test_console_writer_verbose(mocker):\n mocker.patch('sys.platform', 'linux')\n mocker.patch('io.open', mocker.mock_open(read_data=\n 'Linux version 4.4.0-31-generic (gcc version 5.3.1)'))\n click_echo = mocker.patch('click.echo')\n console = reqwire.helpers.cli.ConsoleWriter(verbose=True)\n for method in log_methods:\n getattr(console, method)('test')\n fmt = console.format_strings.get(method, '{msg}')\n message = reqwire.helpers.cli.emojize(fmt.format(msg='test'))\n click_echo.assert_called_once_with(message)\n click_echo.reset_mock()\n\n\ndef test_build_with_pip_compile_options(cli_runner, mocker):\n from reqwire.cli import main\n pip_compile = mocker.patch.object(sh, 'pip_compile')\n result = cli_runner.invoke(main, ['build', '-t', 'main', '--',\n '--no-header'])\n assert result.exit_code == 0, result.output\n assert pip_compile.call_args[0][2] == '--no-header'\n\n\ndef test_main_remove(cli_runner):\n from reqwire.cli import main\n result = cli_runner.invoke(main, ['remove', 'Flask'])\n assert result.exit_code == 0, result.output\n",
"step-3": "<mask token>\nlog_methods = 'echo', 'error', 'fatal', 'info', 'warn', 'warning'\n\n\ndef test_emojize_win32(mocker):\n mocker.patch('sys.platform', 'win32')\n assert reqwire.helpers.cli.emojize(':thumbs_up_sign: foo').encode('utf-8'\n ) == b'foo'\n\n\ndef test_emojize_linux(mocker):\n mocker.patch('sys.platform', 'linux')\n mocker.patch('io.open', mocker.mock_open(read_data=\n 'Linux version 4.4.0-31-generic (gcc version 5.3.1)'))\n assert reqwire.helpers.cli.emojize(':thumbs_up_sign:').encode('utf-8'\n ) == b'\\xf0\\x9f\\x91\\x8d'\n\n\ndef test_emojize_linux_ioerror(mocker):\n mocker.patch('sys.platform', 'linux')\n mocker.patch('io.open', side_effect=IOError)\n assert reqwire.helpers.cli.emojize(':thumbs_up_sign:').encode('utf-8'\n ) == b'\\xf0\\x9f\\x91\\x8d'\n\n\ndef test_emojize_wsl(mocker):\n mocker.patch('sys.platform', 'linux')\n mocker.patch('io.open', mocker.mock_open(read_data=\n 'Linux version 3.4.0-Microsoft ([email protected])'))\n assert reqwire.helpers.cli.emojize(':thumbs_up_sign: foo').encode('utf-8'\n ) == b'foo'\n\n\ndef test_console_writer_quiet(mocker):\n click_echo = mocker.patch('click.echo')\n console = reqwire.helpers.cli.ConsoleWriter(verbose=False)\n for method in log_methods:\n getattr(console, method)('test')\n click_echo.assert_not_called()\n\n\ndef test_console_writer_verbose(mocker):\n mocker.patch('sys.platform', 'linux')\n mocker.patch('io.open', mocker.mock_open(read_data=\n 'Linux version 4.4.0-31-generic (gcc version 5.3.1)'))\n click_echo = mocker.patch('click.echo')\n console = reqwire.helpers.cli.ConsoleWriter(verbose=True)\n for method in log_methods:\n getattr(console, method)('test')\n fmt = console.format_strings.get(method, '{msg}')\n message = reqwire.helpers.cli.emojize(fmt.format(msg='test'))\n click_echo.assert_called_once_with(message)\n click_echo.reset_mock()\n\n\ndef test_build_with_pip_compile_options(cli_runner, mocker):\n from reqwire.cli import main\n pip_compile = mocker.patch.object(sh, 'pip_compile')\n result = cli_runner.invoke(main, ['build', '-t', 'main', '--',\n '--no-header'])\n assert result.exit_code == 0, result.output\n assert pip_compile.call_args[0][2] == '--no-header'\n\n\ndef test_main_remove(cli_runner):\n from reqwire.cli import main\n result = cli_runner.invoke(main, ['remove', 'Flask'])\n assert result.exit_code == 0, result.output\n",
"step-4": "from __future__ import absolute_import\nimport sh\nimport reqwire.helpers.cli\nlog_methods = 'echo', 'error', 'fatal', 'info', 'warn', 'warning'\n\n\ndef test_emojize_win32(mocker):\n mocker.patch('sys.platform', 'win32')\n assert reqwire.helpers.cli.emojize(':thumbs_up_sign: foo').encode('utf-8'\n ) == b'foo'\n\n\ndef test_emojize_linux(mocker):\n mocker.patch('sys.platform', 'linux')\n mocker.patch('io.open', mocker.mock_open(read_data=\n 'Linux version 4.4.0-31-generic (gcc version 5.3.1)'))\n assert reqwire.helpers.cli.emojize(':thumbs_up_sign:').encode('utf-8'\n ) == b'\\xf0\\x9f\\x91\\x8d'\n\n\ndef test_emojize_linux_ioerror(mocker):\n mocker.patch('sys.platform', 'linux')\n mocker.patch('io.open', side_effect=IOError)\n assert reqwire.helpers.cli.emojize(':thumbs_up_sign:').encode('utf-8'\n ) == b'\\xf0\\x9f\\x91\\x8d'\n\n\ndef test_emojize_wsl(mocker):\n mocker.patch('sys.platform', 'linux')\n mocker.patch('io.open', mocker.mock_open(read_data=\n 'Linux version 3.4.0-Microsoft ([email protected])'))\n assert reqwire.helpers.cli.emojize(':thumbs_up_sign: foo').encode('utf-8'\n ) == b'foo'\n\n\ndef test_console_writer_quiet(mocker):\n click_echo = mocker.patch('click.echo')\n console = reqwire.helpers.cli.ConsoleWriter(verbose=False)\n for method in log_methods:\n getattr(console, method)('test')\n click_echo.assert_not_called()\n\n\ndef test_console_writer_verbose(mocker):\n mocker.patch('sys.platform', 'linux')\n mocker.patch('io.open', mocker.mock_open(read_data=\n 'Linux version 4.4.0-31-generic (gcc version 5.3.1)'))\n click_echo = mocker.patch('click.echo')\n console = reqwire.helpers.cli.ConsoleWriter(verbose=True)\n for method in log_methods:\n getattr(console, method)('test')\n fmt = console.format_strings.get(method, '{msg}')\n message = reqwire.helpers.cli.emojize(fmt.format(msg='test'))\n click_echo.assert_called_once_with(message)\n click_echo.reset_mock()\n\n\ndef test_build_with_pip_compile_options(cli_runner, mocker):\n from reqwire.cli import main\n pip_compile = mocker.patch.object(sh, 'pip_compile')\n result = cli_runner.invoke(main, ['build', '-t', 'main', '--',\n '--no-header'])\n assert result.exit_code == 0, result.output\n assert pip_compile.call_args[0][2] == '--no-header'\n\n\ndef test_main_remove(cli_runner):\n from reqwire.cli import main\n result = cli_runner.invoke(main, ['remove', 'Flask'])\n assert result.exit_code == 0, result.output\n",
"step-5": "# -*- coding: utf-8 -*-\nfrom __future__ import absolute_import\n\nimport sh\n\nimport reqwire.helpers.cli\n\n\nlog_methods = (\n 'echo',\n 'error',\n 'fatal',\n 'info',\n 'warn',\n 'warning',\n)\n\n\ndef test_emojize_win32(mocker):\n mocker.patch('sys.platform', 'win32')\n assert reqwire.helpers.cli.emojize(\n ':thumbs_up_sign: foo').encode('utf-8') == b'foo'\n\n\ndef test_emojize_linux(mocker):\n mocker.patch('sys.platform', 'linux')\n mocker.patch('io.open', mocker.mock_open(\n read_data='Linux version 4.4.0-31-generic (gcc version 5.3.1)'))\n assert reqwire.helpers.cli.emojize(\n ':thumbs_up_sign:').encode('utf-8') == b'\\xf0\\x9f\\x91\\x8d'\n\n\ndef test_emojize_linux_ioerror(mocker):\n mocker.patch('sys.platform', 'linux')\n mocker.patch('io.open', side_effect=IOError)\n assert reqwire.helpers.cli.emojize(\n ':thumbs_up_sign:').encode('utf-8') == b'\\xf0\\x9f\\x91\\x8d'\n\n\ndef test_emojize_wsl(mocker):\n mocker.patch('sys.platform', 'linux')\n mocker.patch('io.open', mocker.mock_open(\n read_data='Linux version 3.4.0-Microsoft ([email protected])'))\n assert reqwire.helpers.cli.emojize(\n ':thumbs_up_sign: foo').encode('utf-8') == b'foo'\n\n\ndef test_console_writer_quiet(mocker):\n click_echo = mocker.patch('click.echo')\n console = reqwire.helpers.cli.ConsoleWriter(verbose=False)\n for method in log_methods:\n getattr(console, method)('test')\n click_echo.assert_not_called()\n\n\ndef test_console_writer_verbose(mocker):\n mocker.patch('sys.platform', 'linux')\n mocker.patch('io.open', mocker.mock_open(\n read_data='Linux version 4.4.0-31-generic (gcc version 5.3.1)'))\n click_echo = mocker.patch('click.echo')\n console = reqwire.helpers.cli.ConsoleWriter(verbose=True)\n for method in log_methods:\n getattr(console, method)('test')\n fmt = console.format_strings.get(method, '{msg}')\n message = reqwire.helpers.cli.emojize(fmt.format(msg='test'))\n click_echo.assert_called_once_with(message)\n click_echo.reset_mock()\n\n\ndef test_build_with_pip_compile_options(cli_runner, mocker):\n from reqwire.cli import main\n pip_compile = mocker.patch.object(sh, 'pip_compile')\n result = cli_runner.invoke(main, ['build', '-t', 'main', '--',\n '--no-header'])\n assert result.exit_code == 0, result.output\n assert pip_compile.call_args[0][2] == '--no-header'\n\n\ndef test_main_remove(cli_runner):\n from reqwire.cli import main\n result = cli_runner.invoke(main, ['remove', 'Flask'])\n assert result.exit_code == 0, result.output\n",
"step-ids": [
7,
8,
9,
10,
11
]
}
|
[
7,
8,
9,
10,
11
] |