import os
import unittest
from mock import Mock
from tfsnippet.utils import *
class HumanizeDurationTestCase(unittest.TestCase):
cases = [
(0.0, '0 sec'),
(1e-8, '1e-08 sec'),
(0.1, '0.1 sec'),
(1.0, '1 sec'),
(1, '1 sec'),
(1.1, '1.1 secs'),
(59, '59 secs'),
(59.9, '59.9 secs'),
(60, '1 min'),
(61, '1 min 1 sec'),
(62, '1 min 2 secs'),
(119, '1 min 59 secs'),
(120, '2 mins'),
(121, '2 mins 1 sec'),
(122, '2 mins 2 secs'),
(3599, '59 mins 59 secs'),
(3600, '1 hr'),
(3601, '1 hr 1 sec'),
(3661, '1 hr 1 min 1 sec'),
(86399, '23 hrs 59 mins 59 secs'),
(86400, '1 day'),
(86401, '1 day 1 sec'),
(172799, '1 day 23 hrs 59 mins 59 secs'),
(259199, '2 days 23 hrs 59 mins 59 secs'),
]
def test_positive(self):
for seconds, answer in self.cases:
result = humanize_duration(seconds)
self.assertEqual(
result, answer,
                msg='humanize_duration({!r}) is expected to be {!r}, '
                    'but got {!r}.'.format(seconds, answer, result)
)
def test_negative(self):
for seconds, answer in self.cases[1:]:
seconds = -seconds
answer = answer + ' ago'
result = humanize_duration(seconds)
self.assertEqual(
result, answer,
                msg='humanize_duration({!r}) is expected to be {!r}, '
                    'but got {!r}.'.format(seconds, answer, result)
)
class CamelToUnderscoreTestCase(unittest.TestCase):
def assert_convert(self, camel, underscore):
self.assertEqual(
camel_to_underscore(camel),
underscore,
msg='{!r} should be converted to {!r}'.format(camel, underscore)
)
def test_camel_to_underscore(self):
examples = [
('simpleTest', 'simple_test'),
('easy', 'easy'),
('HTML', 'html'),
('simpleXML', 'simple_xml'),
('PDFLoad', 'pdf_load'),
('startMIDDLELast', 'start_middle_last'),
('AString', 'a_string'),
('Some4Numbers234', 'some4_numbers234'),
('TEST123String', 'test123_string'),
]
for camel, underscore in examples:
self.assert_convert(camel, underscore)
self.assert_convert(underscore, underscore)
self.assert_convert('_{}_'.format(camel),
'_{}_'.format(underscore))
self.assert_convert('_{}_'.format(underscore),
'_{}_'.format(underscore))
self.assert_convert('__{}__'.format(camel),
'__{}__'.format(underscore))
self.assert_convert('__{}__'.format(underscore),
'__{}__'.format(underscore))
self.assert_convert(
'_'.join([s.capitalize() for s in underscore.split('_')]),
underscore
)
self.assert_convert(
'_'.join([s.upper() for s in underscore.split('_')]),
underscore
)
class NotSetTestCase(unittest.TestCase):
def test_repr(self):
self.assertEqual(repr(NOT_SET), 'NOT_SET')
class _CachedPropertyHelper(object):
def __init__(self, value):
self.value = value
@cached_property('_cached_value')
def cached_value(self):
return self.value
class CachedPropertyTestCase(unittest.TestCase):
def test_cached_property(self):
o = _CachedPropertyHelper(0)
self.assertFalse(hasattr(o, '_cached_value'))
o.value = 123
self.assertEqual(o.cached_value, 123)
self.assertTrue(hasattr(o, '_cached_value'))
self.assertEqual(o._cached_value, 123)
o.value = 456
self.assertEqual(o.cached_value, 123)
self.assertEqual(o._cached_value, 123)
def test_clear_cached_property(self):
o = _CachedPropertyHelper(123)
_ = o.cached_value
clear_cached_property(o, '_cached_value')
o.value = 456
self.assertFalse(hasattr(o, '_cached_value'))
self.assertEqual(o.cached_value, 456)
self.assertEqual(o._cached_value, 456)
class MaybeCloseTestCase(unittest.TestCase):
def test_maybe_close(self):
# test having `close()`
f = Mock(close=Mock(return_value=None))
with maybe_close(f):
self.assertFalse(f.close.called)
self.assertTrue(f.close.called)
        # test an object without `close()`
with maybe_close(1):
pass
class IterFilesTestCase(unittest.TestCase):
def test_iter_files(self):
names = ['a/1.txt', 'a/2.txt', 'a/b/1.txt', 'a/b/2.txt',
'b/1.txt', 'b/2.txt', 'c.txt']
with TemporaryDirectory() as tempdir:
for name in names:
f_path = os.path.join(tempdir, name)
f_dir = os.path.split(f_path)[0]
makedirs(f_dir, exist_ok=True)
with open(f_path, 'wb') as f:
f.write(b'')
self.assertListEqual(names, sorted(iter_files(tempdir)))
self.assertListEqual(names, sorted(iter_files(tempdir + '/a/../')))
if __name__ == '__main__':
unittest.main()
# uncompyle6 version 3.2.3
# Python bytecode 3.6 (3379)
# Decompiled from: Python 2.7.5 (default, Jul 13 2018, 13:06:57)
# [GCC 4.8.5 20150623 (Red Hat 4.8.5-28)]
# Embedded file name: ./authx/migrations/0001_initial.py
# Compiled at: 2018-08-23 19:33:14
# Size of source mod 2**32: 2715 bytes
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion, uuid
class Migration(migrations.Migration):
initial = True
dependencies = [
('auth', '0008_alter_user_username_max_length')]
operations = [
migrations.CreateModel(name='User',
fields=[
(
'password', models.CharField(max_length=128, verbose_name='password')),
(
'last_login', models.DateTimeField(blank=True, null=True, verbose_name='last login')),
(
'is_superuser', models.BooleanField(default=False, help_text='Designates that this user has all permissions without explicitly assigning them.', verbose_name='superuser status')),
(
'id', models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False)),
(
'created_at', models.DateTimeField(auto_now_add=True)),
(
'updated_at', models.DateTimeField(auto_now=True)),
(
'username', models.CharField(max_length=11, unique=True, verbose_name='')),
(
'fullname', models.CharField(blank=True, max_length=80, verbose_name='')),
(
'thumbnail', models.ImageField(blank=True, null=True, upload_to='thumbnail', verbose_name='')),
(
'is_active', models.BooleanField(default=True)),
(
'is_admin', models.BooleanField(default=False)),
(
'is_staff', models.BooleanField(default=False)),
(
'phone_number', models.CharField(blank=True, max_length=30, null=True)),
(
'email', models.CharField(blank=True, max_length=30, null=True)),
(
'groups', models.ManyToManyField(blank=True, help_text='The groups this user belongs to. A user will get all permissions granted to each of their groups.', related_name='user_set', related_query_name='user', to='auth.Group', verbose_name='groups')),
(
'owner', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
(
'user_permissions', models.ManyToManyField(blank=True, help_text='Specific permissions for this user.', related_name='user_set', related_query_name='user', to='auth.Permission', verbose_name='user permissions'))],
options={'verbose_name':'',
'verbose_name_plural':'',
'db_table':'auth_user',
'permissions':(('view_user', 'Can drive'), )})]
# okay decompiling ./restful/hawkeye/authx/migrations/0001_initial.pyc
# -*- coding: utf-8 -*-
"""Chatbot learning
The vocab dictionary files generated during training must be loaded from the same
paths when the Cindy UI is run, so that it can generate coherent sentences.
"""
from tensorflow.keras import models
from tensorflow.keras import layers
from tensorflow.keras import optimizers, losses, metrics
from tensorflow.keras import preprocessing
import numpy as np
import pandas as pd
#import matplotlib.pyplot as plt
import os
import re
from konlpy.tag import Okt
import pickle
import tensorflow as tf
tf.__version__
# Tag tokens
PAD = "<PADDING>"  # padding
STA = "<START>"    # start of sentence
END = "<END>"      # end of sentence
OOV = "<OOV>"      # out-of-vocabulary word
# Tag indices
PAD_INDEX = 0
STA_INDEX = 1
END_INDEX = 2
OOV_INDEX = 3
# Data types
ENCODER_INPUT = 0
DECODER_INPUT = 1
DECODER_TARGET = 2
# Maximum number of word tokens in one sentence
max_sequences = 30
# Embedding vector dimension
embedding_dim = 100
# LSTM hidden layer dimension
lstm_hidden_dim = 128
# Regular expression filter (punctuation to strip)
RE_FILTER = re.compile("[.,!?\"':;~()]")
# Load the chatbot data
chatbot_data = pd.read_csv('./seq2seq/ChatbotData_Cindy.csv', encoding='utf-8')
question, answer = list(chatbot_data['Q']), list(chatbot_data['A'])
chatbot_data.head()
len(chatbot_data['Q'].unique())
# Number of samples
len(question)
# Morphological analysis function
def pos_tag(sentences):
    # Set up the KoNLPy morphological analyzer
    tagger = Okt()
    # Initialize the list of tokenized sentences
    sentences_pos = []
    # Iterate over all sentences
    for sentence in sentences:
        # Remove special characters
        sentence = re.sub(RE_FILTER, "", sentence)
        #print(sentence)
        # Join the morpheme list into a space-separated string
        sentence = " ".join(tagger.morphs(sentence))
        sentences_pos.append(sentence)
    return sentences_pos
# Run morphological analysis
question = pos_tag(question)
answer = pos_tag(answer)
# Combine the question and answer sentences into one list
sentences = []
sentences.extend(question)
sentences.extend(answer)
words = []
# Build the list of words
for sentence in sentences:
    for word in sentence.split():
        words.append(word)
# Drop zero-length words
words = [word for word in words if len(word) > 0]
# Remove duplicate words
words = list(set(words))
# Insert the tag tokens at the front
words[:0] = [PAD, STA, END, OOV]
# Number of words
len(words)
# Build the word/index dictionaries
word_to_index = {word: index for index, word in enumerate(words)}
index_to_word = {index: word for index, word in enumerate(words)}
# Save the word-index vocab dictionaries
with open('./seq2seq/vocab_dict/word_to_index_final.pickle', 'wb') as f:
    pickle.dump(word_to_index, f, pickle.HIGHEST_PROTOCOL)
with open('./seq2seq/vocab_dict/index_to_word_final.pickle', 'wb') as f:
    pickle.dump(index_to_word, f, pickle.HIGHEST_PROTOCOL)
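# A minimal sketch of how the Cindy UI side could read these dictionaries back,
# assuming it is run with the same relative paths (kept as a comment so it does
# not affect the training flow):
#
#     with open('./seq2seq/vocab_dict/word_to_index_final.pickle', 'rb') as f:
#         word_to_index = pickle.load(f)
#     with open('./seq2seq/vocab_dict/index_to_word_final.pickle', 'rb') as f:
#         index_to_word = pickle.load(f)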
# Word -> index
# Sentences are converted to indices and used as model input
print(dict(list(word_to_index.items())[:20]))
# Index -> word
# Used to convert predicted indices back into a sentence
print(dict(list(index_to_word.items())[:20]))
# Convert sentences to index sequences
def convert_text_to_index(sentences, vocabulary, type):
    sentences_index = []
    # Iterate over all sentences
    for sentence in sentences:
        sentence_index = []
        # For decoder input, prepend the START tag
        if type == DECODER_INPUT:
            sentence_index.extend([vocabulary[STA]])
        # Split the sentence into words on whitespace
        for word in sentence.split():
            if vocabulary.get(word) is not None:
                # Word is in the vocabulary: add its index
                sentence_index.extend([vocabulary[word]])
            else:
                # Word is not in the vocabulary: add the OOV index
                sentence_index.extend([vocabulary[OOV]])
        # Enforce the maximum length
        if type == DECODER_TARGET:
            # For decoder targets, append the END tag at the end
            if len(sentence_index) >= max_sequences:
                sentence_index = sentence_index[:max_sequences-1] + [vocabulary[END]]
            else:
                sentence_index += [vocabulary[END]]
        else:
            if len(sentence_index) > max_sequences:
                sentence_index = sentence_index[:max_sequences]
        # Fill the remaining positions with the padding index
        sentence_index += (max_sequences - len(sentence_index)) * [vocabulary[PAD]]
        # Add this sentence's index list
        sentences_index.append(sentence_index)
    return np.asarray(sentences_index)
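# Note: the array returned above has shape (number of sentences, max_sequences);
# every row is padded or truncated to exactly max_sequences indices.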
# Convert the encoder inputs to indices
x_encoder = convert_text_to_index(question, word_to_index, ENCODER_INPUT)
# First encoder input ("12시 땡")
x_encoder[0]
# Convert the decoder inputs to indices
x_decoder = convert_text_to_index(answer, word_to_index, DECODER_INPUT)
# First decoder input ("START 하루 가 또 가네요")
x_decoder[0]
len(x_decoder[0])
# Convert the decoder targets to indices
y_decoder = convert_text_to_index(answer, word_to_index, DECODER_TARGET)
# First decoder target ("하루 가 또 가네요 END")
print(y_decoder[0])
# Initialize the one-hot array
one_hot_data = np.zeros((len(y_decoder), max_sequences, len(words)))
# Convert the decoder targets to one-hot encoding
# During training the inputs are indices, but the outputs are one-hot vectors
for i, sequence in enumerate(y_decoder):
    for j, index in enumerate(sequence):
        one_hot_data[i, j, index] = 1
# Set the decoder targets
y_decoder = one_hot_data
# First decoder target
print(y_decoder[0])
#--------------------------------------------
# Training model: encoder
#--------------------------------------------
# Takes the index sequence of the input sentence
encoder_inputs = layers.Input(shape=(None,))
# Embedding layer
encoder_outputs = layers.Embedding(len(words), embedding_dim)(encoder_inputs)
# With return_state=True the layer also returns its states
# An LSTM has two states: state_h (hidden state) and state_c (cell state)
encoder_outputs, state_h, state_c = layers.LSTM(lstm_hidden_dim,
                                                dropout=0.1,
                                                recurrent_dropout=0.5,
                                                return_state=True)(encoder_outputs)
# Bundle the hidden state and cell state together
encoder_states = [state_h, state_c]
#--------------------------------------------
# Training model: decoder
#--------------------------------------------
# Takes the index sequence of the target sentence
decoder_inputs = layers.Input(shape=(None,))
# Embedding layer
decoder_embedding = layers.Embedding(len(words), embedding_dim)
decoder_outputs = decoder_embedding(decoder_inputs)
# Unlike the encoder, return_sequences=True so every time step's output is returned,
# because all time steps are fed into the following Dense() layer
decoder_lstm = layers.LSTM(lstm_hidden_dim,
                           dropout=0.1,
                           recurrent_dropout=0.5,
                           return_state=True,
                           return_sequences=True)
# Initialize initial_state with the encoder states
decoder_outputs, _, _ = decoder_lstm(decoder_outputs,
                                     initial_state=encoder_states)
# One node per vocabulary word, so each word index is emitted in one-hot form
decoder_dense = layers.Dense(len(words), activation='softmax')
decoder_outputs = decoder_dense(decoder_outputs)
#--------------------------------------------
# Training model
#--------------------------------------------
# Build the functional-API model from the inputs and output
model = models.Model([encoder_inputs, decoder_inputs], decoder_outputs)
# Configure training
model.compile(optimizer='rmsprop',
              loss='categorical_crossentropy',
              metrics=['accuracy'])
#--------------------------------------------
# Inference model: encoder
#--------------------------------------------
# The inference encoder reuses the training encoder's states
encoder_model = models.Model(encoder_inputs, encoder_states)
#--------------------------------------------
# Inference model: decoder
#--------------------------------------------
# At inference time, unlike training, decoding runs one time step at a time,
# feeding the previous decoder states back in at every step
decoder_state_input_h = layers.Input(shape=(lstm_hidden_dim,))
decoder_state_input_c = layers.Input(shape=(lstm_hidden_dim,))
decoder_states_inputs = [decoder_state_input_h, decoder_state_input_c]
# Embedding layer
decoder_outputs = decoder_embedding(decoder_inputs)
# LSTM layer
decoder_outputs, state_h, state_c = decoder_lstm(decoder_outputs,
                                                 initial_state=decoder_states_inputs)
# Bundle the hidden state and cell state together
decoder_states = [state_h, state_c]
# The Dense layer emits each word index in one-hot form
decoder_outputs = decoder_dense(decoder_outputs)
# Inference decoder model
decoder_model = models.Model([decoder_inputs] + decoder_states_inputs,
                             [decoder_outputs] + decoder_states)
# Convert indices to a sentence
def convert_index_to_text(indexs, vocabulary):
    sentence = ''
    # Iterate over all indices
    for index in indexs:
        if index == END_INDEX:
            # Stop at the END index
            break
        if vocabulary.get(index) is not None:
            # Index is in the vocabulary: append the corresponding word
            sentence += vocabulary[index]
        else:
            # Index is not in the vocabulary: append the OOV token
            sentence += vocabulary[OOV_INDEX]
        # Append a space
        sentence += ' '
    return sentence
# len(x_decoder)
#
# len(y_decoder)
#model.summary()
#encoder_model.summary()
#decoder_model.summary()
from tqdm import tqdm
# Epoch loop
for epoch in range(10):
    print('Total Epoch :', epoch + 1)
    history = model.fit([x_encoder, x_decoder], y_decoder, epochs=100, batch_size=64, verbose=1)
    model.summary()
    # Print accuracy and loss
    print('accuracy :', history.history['accuracy'][-1])
    print('loss :', history.history['loss'][-1])
    # Sentence prediction test
    # (3 박 4일 놀러 가고 싶다) -> (여행 은 언제나 좋죠)
    input_encoder = x_encoder[2].reshape(1, x_encoder[2].shape[0])
    input_decoder = x_decoder[2].reshape(1, x_decoder[2].shape[0])
    results = model.predict([input_encoder, input_decoder])
    # Convert the one-hot results back to indices
    # Take the position of the highest value along axis 1
    indexs = np.argmax(results[0], 1)
    # Convert the indices to a sentence
    sentence = convert_index_to_text(indexs, index_to_word)
# Save the model weights
model.save_weights('./seq2seq/seq2seq_model/seq2seq2_model_weights')
encoder_model.save_weights('./seq2seq/seq2seq_model/seq2seq2_encoder_model_weights')
decoder_model.save_weights('./seq2seq/seq2seq_model/seq2seq2_decoder_model_weights')
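# A minimal sketch of restoring these weights later, assuming the same model,
# encoder_model and decoder_model definitions have been rebuilt first (kept as a
# comment so it does not run during training):
#
#     model.load_weights('./seq2seq/seq2seq_model/seq2seq2_model_weights')
#     encoder_model.load_weights('./seq2seq/seq2seq_model/seq2seq2_encoder_model_weights')
#     decoder_model.load_weights('./seq2seq/seq2seq_model/seq2seq2_decoder_model_weights')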
# Build the input for prediction
def make_predict_input(sentence):
    sentences = []
    sentences.append(sentence)
    sentences = pos_tag(sentences)
    input_seq = convert_text_to_index(sentences, word_to_index, ENCODER_INPUT)
    return input_seq
# Generate text
def generate_text(input_seq):
    # Run the input through the encoder to get its final states
    states = encoder_model.predict(input_seq)
    # Initialize the target sequence
    target_seq = np.zeros((1, 1))
    # Put the <START> tag at the first position of the target sequence
    target_seq[0, 0] = STA_INDEX
    # Initialize the index list
    indexs = []
    # Decoder time-step loop
    while 1:
        # Get the current time step's output from the decoder
        # The states are the encoder states at first, then the previous decoder states
        decoder_outputs, state_h, state_c = decoder_model.predict(
            [target_seq] + states)
        # Convert the one-hot result to an index
        index = np.argmax(decoder_outputs[0, 0, :])
        indexs.append(index)
        # Check for termination
        if index == END_INDEX or len(indexs) >= max_sequences:
            break
        # Set the target sequence to the output just produced
        target_seq = np.zeros((1, 1))
        target_seq[0, 0] = index
        # Feed the previous decoder states into the next decoder prediction
        states = [state_h, state_c]
    # Convert the indices to a sentence
    sentence = convert_index_to_text(indexs, index_to_word)
    return sentence
# Convert a sentence to indices
input_seq = make_predict_input('3박4일 놀러가고 싶다')
input_seq
# Generate text with the inference model
sentence = generate_text(input_seq)
print(sentence)
# Convert a sentence to indices
input_seq = make_predict_input('3박4일 같이 놀러가고 싶다')
input_seq
# Generate text with the inference model
sentence = generate_text(input_seq)
print(sentence)
# Convert a sentence to indices
input_seq = make_predict_input('3박4일 놀러가려고')
print(sentence)
# Generate text with the inference model
sentence = generate_text(input_seq)
print(sentence)
input_seq = make_predict_input('SNS 시간낭비인데')
sentence = generate_text(input_seq)
print(sentence)
input_seq = make_predict_input('PPL 너무나 심하네')
sentence = generate_text(input_seq)
print(sentence)
input_seq = make_predict_input('가상화폐 망함')
sentence = generate_text(input_seq)
print(sentence)
input_seq = make_predict_input('가스불')
sentence = generate_text(input_seq)
print(sentence)
input_seq = make_predict_input('가스비')
sentence = generate_text(input_seq)
print(sentence)
input_seq = make_predict_input('가족 보고 싶어')
sentence = generate_text(input_seq)
print(sentence)
input_seq = make_predict_input('간식 먹고 싶어')
sentence = generate_text(input_seq)
print(sentence)
input_seq = make_predict_input('간접흡연 싫어')
sentence = generate_text(input_seq)
print(sentence)
input_seq = make_predict_input('감기 기운 잇어')
sentence = generate_text(input_seq)
print(sentence)
input_seq = make_predict_input('내일 날씨 어떄?')
sentence = generate_text(input_seq)
print(sentence)
# 최대 길이에 없는 공간은 패딩 인덱스로 채움\n sentence_index += (max_sequences - len(sentence_index)) * [vocabulary[PAD]]\n \n # 문장의 인덱스 배열을 추가\n sentences_index.append(sentence_index)\n\n return np.asarray(sentences_index)\n\n# 인코더 입력 인덱스 변환\nx_encoder = convert_text_to_index(question, word_to_index, ENCODER_INPUT)\n\n# 첫 번째 인코더 입력 출력 (12시 땡)\nx_encoder[0]\n\n# 디코더 입력 인덱스 변환\nx_decoder = convert_text_to_index(answer, word_to_index, DECODER_INPUT)\n\n# 첫 번째 디코더 입력 출력 (START 하루 가 또 가네요)\nx_decoder[0]\n\nlen(x_decoder[0])\n\n# 디코더 목표 인덱스 변환\ny_decoder = convert_text_to_index(answer, word_to_index, DECODER_TARGET)\n\n# 첫 번째 디코더 목표 출력 (하루 가 또 가네요 END)\nprint(y_decoder[0])\n\n# 원핫인코딩 초기화\none_hot_data = np.zeros((len(y_decoder), max_sequences, len(words)))\n\n# 디코더 목표를 원핫인코딩으로 변환\n# 학습시 입력은 인덱스이지만, 출력은 원핫인코딩 형식임\nfor i, sequence in enumerate(y_decoder):\n for j, index in enumerate(sequence):\n one_hot_data[i, j, index] = 1\n\n# 디코더 목표 설정\ny_decoder = one_hot_data\n\n# 첫 번째 디코더 목표 출력\nprint(y_decoder[0])\n\n#--------------------------------------------\n# 훈련 모델 인코더 정의\n#--------------------------------------------\n\n# 입력 문장의 인덱스 시퀀스를 입력으로 받음\nencoder_inputs = layers.Input(shape=(None,))\n\n# 임베딩 레이어\nencoder_outputs = layers.Embedding(len(words), embedding_dim)(encoder_inputs)\n\n# return_state가 True면 상태값 리턴\n# LSTM은 state_h(hidden state)와 state_c(cell state) 2개의 상태 존재\nencoder_outputs, state_h, state_c = layers.LSTM(lstm_hidden_dim,\n dropout=0.1,\n recurrent_dropout=0.5,\n return_state=True)(encoder_outputs)\n\n# 히든 상태와 셀 상태를 하나로 묶음\nencoder_states = [state_h, state_c]\n\n\n\n#--------------------------------------------\n# 훈련 모델 디코더 정의\n#--------------------------------------------\n\n# 목표 문장의 인덱스 시퀀스를 입력으로 받음\ndecoder_inputs = layers.Input(shape=(None,))\n\n# 임베딩 레이어\ndecoder_embedding = layers.Embedding(len(words), embedding_dim)\ndecoder_outputs = decoder_embedding(decoder_inputs)\n\n# 인코더와 달리 return_sequences를 True로 설정하여 모든 타임 스텝 출력값 리턴\n# 모든 타임 스텝의 출력값들을 다음 레이어의 Dense()로 처리하기 위함\ndecoder_lstm = layers.LSTM(lstm_hidden_dim,\n dropout=0.1,\n recurrent_dropout=0.5,\n return_state=True,\n return_sequences=True)\n\n# initial_state를 인코더의 상태로 초기화\ndecoder_outputs, _, _ = decoder_lstm(decoder_outputs,\n initial_state=encoder_states)\n\n# 단어의 개수만큼 노드의 개수를 설정하여 원핫 형식으로 각 단어 인덱스를 출력\ndecoder_dense = layers.Dense(len(words), activation='softmax')\ndecoder_outputs = decoder_dense(decoder_outputs)\n\n\n\n#--------------------------------------------\n# 훈련 모델 정의\n#--------------------------------------------\n\n# 입력과 출력으로 함수형 API 모델 생성\nmodel = models.Model([encoder_inputs, decoder_inputs], decoder_outputs)\n\n# 학습 방법 설정\nmodel.compile(optimizer='rmsprop',\n loss='categorical_crossentropy',\n metrics=['accuracy'])\n\n#--------------------------------------------\n# 예측 모델 인코더 정의\n#--------------------------------------------\n\n# 훈련 모델의 인코더 상태를 사용하여 예측 모델 인코더 설정\nencoder_model = models.Model(encoder_inputs, encoder_states)\n\n\n\n#--------------------------------------------\n# 예측 모델 디코더 정의\n#--------------------------------------------\n\n# 예측시에는 훈련시와 달리 타임 스텝을 한 단계씩 수행\n# 매번 이전 디코더 상태를 입력으로 받아서 새로 설정\ndecoder_state_input_h = layers.Input(shape=(lstm_hidden_dim,))\ndecoder_state_input_c = layers.Input(shape=(lstm_hidden_dim,))\ndecoder_states_inputs = [decoder_state_input_h, decoder_state_input_c] \n\n# 임베딩 레이어\ndecoder_outputs = decoder_embedding(decoder_inputs)\n\n# LSTM 레이어\ndecoder_outputs, state_h, state_c = decoder_lstm(decoder_outputs,\n initial_state=decoder_states_inputs)\n\n# 히든 상태와 셀 상태를 하나로 
묶음\ndecoder_states = [state_h, state_c]\n\n# Dense 레이어를 통해 원핫 형식으로 각 단어 인덱스를 출력\ndecoder_outputs = decoder_dense(decoder_outputs)\n\n# 예측 모델 디코더 설정\ndecoder_model = models.Model([decoder_inputs] + decoder_states_inputs,\n [decoder_outputs] + decoder_states)\n\n# 인덱스를 문장으로 변환\ndef convert_index_to_text(indexs, vocabulary): \n \n sentence = ''\n \n # 모든 문장에 대해서 반복\n for index in indexs:\n if index == END_INDEX:\n # 종료 인덱스면 중지\n break;\n if vocabulary.get(index) is not None:\n # 사전에 있는 인덱스면 해당 단어를 추가\n sentence += vocabulary[index]\n else:\n # 사전에 없는 인덱스면 OOV 단어를 추가\n sentence.extend([vocabulary[OOV_INDEX]])\n \n # 빈칸 추가\n sentence += ' '\n\n return sentence\n\n# len(x_decoder)\n#\n# len(y_decoder)\n\n#model.summary()\n\n#encoder_model.summary()\n\n#decoder_model.summary()\n\nfrom tqdm import tqdm\n#에폭 반복\nfor epoch in range(10):\n print('Total Epoch :', epoch + 1)\n\n history = model.fit([x_encoder, x_decoder], y_decoder, epochs=100, batch_size=64, verbose=1)\n model.summary()\n\n # 정확도와 손실 출력\n print('accuracy :', history.history['accuracy'][-1])\n print('loss :', history.history['loss'][-1])\n\n # 문장 예측 테스트\n # (3 박 4일 놀러 가고 싶다) -> (여행 은 언제나 좋죠)\n input_encoder = x_encoder[2].reshape(1, x_encoder[2].shape[0])\n input_decoder = x_decoder[2].reshape(1, x_decoder[2].shape[0])\n results = model.predict([input_encoder, input_decoder])\n\n # 결과의 원핫인코딩 형식을 인덱스로 변환\n # 1축을 기준으로 가장 높은 값의 위치를 구함\n indexs = np.argmax(results[0], 1)\n\n # 인덱스를 문장으로 변환\n sentence = convert_index_to_text(indexs, index_to_word)\n\n\n\n#모델 가중치 저장\nmodel.save_weights('./seq2seq/seq2seq_model/seq2seq2_model_weights')\nencoder_model.save_weights('./seq2seq/seq2seq_model/seq2seq2_encoder_model_weights')\ndecoder_model.save_weights('./seq2seq/seq2seq_model/seq2seq2_decoder_model_weights')\n\n\n# 예측을 위한 입력 생성\ndef make_predict_input(sentence):\n\n sentences = []\n sentences.append(sentence)\n sentences = pos_tag(sentences)\n input_seq = convert_text_to_index(sentences, word_to_index, ENCODER_INPUT)\n \n return input_seq\n\n# 텍스트 생성\ndef generate_text(input_seq):\n \n # 입력을 인코더에 넣어 마지막 상태 구함\n states = encoder_model.predict(input_seq)\n\n # 목표 시퀀스 초기화\n target_seq = np.zeros((1, 1))\n \n # 목표 시퀀스의 첫 번째에 <START> 태그 추가\n target_seq[0, 0] = STA_INDEX\n \n # 인덱스 초기화\n indexs = []\n \n # 디코더 타임 스텝 반복\n while 1:\n # 디코더로 현재 타임 스텝 출력 구함\n # 처음에는 인코더 상태를, 다음부터 이전 디코더 상태로 초기화\n decoder_outputs, state_h, state_c = decoder_model.predict(\n [target_seq] + states)\n\n\n # 결과의 원핫인코딩 형식을 인덱스로 변환\n index = np.argmax(decoder_outputs[0, 0, :])\n indexs.append(index)\n \n # 종료 검사\n if index == END_INDEX or len(indexs) >= max_sequences:\n break\n\n # 목표 시퀀스를 바로 이전의 출력으로 설정\n target_seq = np.zeros((1, 1))\n target_seq[0, 0] = index\n \n # 디코더의 이전 상태를 다음 디코더 예측에 사용\n states = [state_h, state_c]\n\n # 인덱스를 문장으로 변환\n sentence = convert_index_to_text(indexs, index_to_word)\n \n return sentence\n\n# 문장을 인덱스로 변환\ninput_seq = make_predict_input('3박4일 놀러가고 싶다')\ninput_seq\n\n# 예측 모델로 텍스트 생성\nsentence = generate_text(input_seq)\nprint(sentence)\n\n# 문장을 인덱스로 변환\ninput_seq = make_predict_input('3박4일 같이 놀러가고 싶다')\ninput_seq\n\n# 예측 모델로 텍스트 생성\nsentence = generate_text(input_seq)\nprint(sentence)\n\n# 문장을 인덱스로 변환\ninput_seq = make_predict_input('3박4일 놀러가려고')\nprint(sentence)\n\n\n# 예측 모델로 텍스트 생성\nsentence = generate_text(input_seq)\nprint(sentence)\n\n\ninput_seq = make_predict_input('SNS 시간낭비인데')\nsentence = generate_text(input_seq)\nprint(sentence)\n\ninput_seq = make_predict_input('PPL 너무나 심하네')\nsentence = 
generate_text(input_seq)\nprint(sentence)\n\ninput_seq = make_predict_input('가상화폐 망함')\nsentence = generate_text(input_seq)\nprint(sentence)\n\ninput_seq = make_predict_input('가스불')\nsentence = generate_text(input_seq)\nprint(sentence)\n\ninput_seq = make_predict_input('가스비')\nsentence = generate_text(input_seq)\nprint(sentence)\n\ninput_seq = make_predict_input('가족 보고 싶어')\nsentence = generate_text(input_seq)\nprint(sentence)\n\ninput_seq = make_predict_input('간식 먹고 싶어')\nsentence = generate_text(input_seq)\nprint(sentence)\n\ninput_seq = make_predict_input('간접흡연 싫어')\nsentence = generate_text(input_seq)\nprint(sentence)\n\ninput_seq = make_predict_input('감기 기운 잇어')\nsentence = generate_text(input_seq)\nprint(sentence)\n\ninput_seq = make_predict_input('내일 날씨 어떄?')\nsentence = generate_text(input_seq)\nprint(sentence)\n\n\n",
"step-ids": [
3,
6,
7,
8,
9
]
}
|
[
3,
6,
7,
8,
9
] |
import argparse
import datetime
import importlib
import pprint
import time
import random
import numpy as np
import torch
from torch.utils.tensorboard import SummaryWriter
from utils import get_git_state, time_print, AverageMeter, ProgressMeter, save_checkpoint
def train(cfg, epoch, data_loader, model):
data_time = AverageMeter("Data", ":6.3f")
batch_time = AverageMeter("Time", ":6.3f")
losses = AverageMeter("Loss", ":.4e")
progress = ProgressMeter(
len(data_loader)-1,
[batch_time, data_time, losses],
prefix=f"Epoch: [{epoch}]\t")
model.train()
end = time.time()
for batch_nb, batch in enumerate(data_loader):
d_time = time.time() - end
data_time.update(d_time)
global_step = model.global_step
writer.add_scalar("time/data/train", d_time, global_step)
report = model.training_step(batch, batch_nb)
losses.update(report["loss"])
for k, v in report.items():
writer.add_scalar(f"{k}/train", v, global_step)
b_time = time.time() - end
batch_time.update(b_time)
writer.add_scalar("time/batch/train", b_time, global_step)
end = time.time()
if batch_nb % cfg.log.freq == 0 or batch_nb == len(data_loader) - 1:
progress.display(batch_nb, print_fn=lambda *x: time_print(*x, end="\r"))
def test(cfg, data_loader, model):
data_time = AverageMeter("Data", ":6.3f")
batch_time = AverageMeter("Time", ":6.3f")
losses = AverageMeter("Loss", ":.4e")
metrics = ["performance"]
metrics = {m: AverageMeter(m, ":.4e") for m in metrics}
progress = ProgressMeter(
len(data_loader)-1,
[batch_time, data_time, losses, *metrics.values()],
prefix="Test:\t")
model.eval()
global_step = model.global_step
end = time.time()
for batch_nb, batch in enumerate(data_loader):
data_time.update(time.time() - end)
with torch.no_grad():
report = model.test_step(batch, batch_nb)
losses.update(report["loss"])
for k, v in report.items():
if k not in metrics:
metrics[k] = AverageMeter(k, ":.3f")
metrics[k].update(v)
batch_time.update(time.time() - end)
end = time.time()
if batch_nb % cfg.log.freq == 0 or batch_nb == len(data_loader) - 1:
progress.display(batch_nb, print_fn=lambda *x: time_print(*x, end="\r"))
writer.add_scalar("loss/test", losses.avg, global_step)
writer.add_scalar("time/batch/test", batch_time.avg, global_step)
writer.add_scalar("time/data/test", data_time.avg, global_step)
for k,v in metrics.items():
writer.add_scalar(f"{k}/test", v.avg, global_step)
progress.display(len(data_loader) - 1, time_print)
def main(cfg, pool=None):
model = importlib.import_module(f"models.{cfg.model}").Model(cfg, pool=pool)
if getattr(cfg, "load_model", False):
model.load_ckpt()
if model.device != "cpu" and torch.cuda.device_count() > 1:
model = torch.nn.DataParallel(model)
model = model.to(model.device)
train_loader = model.get_train_loader()
test_loader = model.get_test_loader()
for epoch in range(cfg.num_epoch):
time_print(f"\nEpoch {epoch} Training")
train(cfg, epoch, train_loader, model)
filename = "checkpoint.pth.tar"
if not getattr(cfg.log, "overwrite_ckpt", True):
filename = "_".join([str(epoch), filename])
save_checkpoint(
state={
"epoch": epoch,
"global_step": model.global_step,
"state_dict": model.state_dict(),
"opt_state_dict": {k: v.state_dict() for k,v in model.optimizers.items()},
"cfg": cfg,
},
directory=cfg.log.misc_dir,
filename=filename)
time_print("\nTest")
test(cfg, test_loader, model)
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Run script")
    parser.add_argument("--config", "-c", type=str, required=False, default="config")
args = parser.parse_args()
git_state = get_git_state()
config = importlib.import_module(f"configs.{args.config}").config
config.log.exp_id = git_state[1][:7] + datetime.datetime.now().strftime("%Y-%m-%d_%H:%M:%S")
config.log.misc_dir = config.log.dir / "misc" / config.log.exp_id
config.log.tb_dir = config.log.dir / "tb" / config.log.exp_id
config.log.misc_dir.mkdir(exist_ok=True, parents=True)
config.log.tb_dir.mkdir(exist_ok=True, parents=True)
torch.manual_seed(config.rnd_seed)
np.random.seed(config.rnd_seed)
random.seed(config.rnd_seed)
if getattr(config, "anomaly_detection", False):
torch.autograd.set_detect_anomaly(True)
global writer
writer = SummaryWriter(
log_dir=config.log.tb_dir,
comment=f"{config.description}, {git_state}")
time_print(pprint.pformat(config))
time_print(f"Git head at state: {git_state}")
try:
if npp:=getattr(config, "n_process_pool", 0):
with torch.multiprocessing.Pool(npp) as pool:
main(config, pool=pool)
else:
main(config)
except KeyboardInterrupt:
        time_print("Keyboard interrupt")
exit(0)
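# Illustrative sketch of the config module this script imports as
# configs.<name>.config -- every name below is inferred from the attributes
# accessed above and is an assumption, not the project's actual config:
#
#   # configs/config.py
#   from pathlib import Path
#   from types import SimpleNamespace
#   config = SimpleNamespace(
#       model="baseline", num_epoch=10, rnd_seed=0, description="example run",
#       log=SimpleNamespace(dir=Path("runs"), freq=10, overwrite_ckpt=True))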
|
normal
|
{
"blob_id": "81688d51696156905736b5de7a4929387fd385ab",
"index": 91,
"step-1": "<mask token>\n\n\ndef train(cfg, epoch, data_loader, model):\n data_time = AverageMeter('Data', ':6.3f')\n batch_time = AverageMeter('Time', ':6.3f')\n losses = AverageMeter('Loss', ':.4e')\n progress = ProgressMeter(len(data_loader) - 1, [batch_time, data_time,\n losses], prefix=f'Epoch: [{epoch}]\\t')\n model.train()\n end = time.time()\n for batch_nb, batch in enumerate(data_loader):\n d_time = time.time() - end\n data_time.update(d_time)\n global_step = model.global_step\n writer.add_scalar('time/data/train', d_time, global_step)\n report = model.training_step(batch, batch_nb)\n losses.update(report['loss'])\n for k, v in report.items():\n writer.add_scalar(f'{k}/train', v, global_step)\n b_time = time.time() - end\n batch_time.update(b_time)\n writer.add_scalar('time/batch/train', b_time, global_step)\n end = time.time()\n if batch_nb % cfg.log.freq == 0 or batch_nb == len(data_loader) - 1:\n progress.display(batch_nb, print_fn=lambda *x: time_print(*x,\n end='\\r'))\n\n\n<mask token>\n\n\ndef main(cfg, pool=None):\n model = importlib.import_module(f'models.{cfg.model}').Model(cfg, pool=pool\n )\n if getattr(cfg, 'load_model', False):\n model.load_ckpt()\n if model.device != 'cpu' and torch.cuda.device_count() > 1:\n model = torch.nn.DataParallel(model)\n model = model.to(model.device)\n train_loader = model.get_train_loader()\n test_loader = model.get_test_loader()\n for epoch in range(cfg.num_epoch):\n time_print(f'\\nEpoch {epoch} Training')\n train(cfg, epoch, train_loader, model)\n filename = 'checkpoint.pth.tar'\n if not getattr(cfg.log, 'overwrite_ckpt', True):\n filename = '_'.join([str(epoch), filename])\n save_checkpoint(state={'epoch': epoch, 'global_step': model.\n global_step, 'state_dict': model.state_dict(), 'opt_state_dict':\n {k: v.state_dict() for k, v in model.optimizers.items()}, 'cfg':\n cfg}, directory=cfg.log.misc_dir, filename=filename)\n time_print('\\nTest')\n test(cfg, test_loader, model)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef train(cfg, epoch, data_loader, model):\n data_time = AverageMeter('Data', ':6.3f')\n batch_time = AverageMeter('Time', ':6.3f')\n losses = AverageMeter('Loss', ':.4e')\n progress = ProgressMeter(len(data_loader) - 1, [batch_time, data_time,\n losses], prefix=f'Epoch: [{epoch}]\\t')\n model.train()\n end = time.time()\n for batch_nb, batch in enumerate(data_loader):\n d_time = time.time() - end\n data_time.update(d_time)\n global_step = model.global_step\n writer.add_scalar('time/data/train', d_time, global_step)\n report = model.training_step(batch, batch_nb)\n losses.update(report['loss'])\n for k, v in report.items():\n writer.add_scalar(f'{k}/train', v, global_step)\n b_time = time.time() - end\n batch_time.update(b_time)\n writer.add_scalar('time/batch/train', b_time, global_step)\n end = time.time()\n if batch_nb % cfg.log.freq == 0 or batch_nb == len(data_loader) - 1:\n progress.display(batch_nb, print_fn=lambda *x: time_print(*x,\n end='\\r'))\n\n\ndef test(cfg, data_loader, model):\n data_time = AverageMeter('Data', ':6.3f')\n batch_time = AverageMeter('Time', ':6.3f')\n losses = AverageMeter('Loss', ':.4e')\n metrics = ['performance']\n metrics = {m: AverageMeter(m, ':.4e') for m in metrics}\n progress = ProgressMeter(len(data_loader) - 1, [batch_time, data_time,\n losses, *metrics.values()], prefix='Test:\\t')\n model.eval()\n global_step = model.global_step\n end = time.time()\n for batch_nb, batch in enumerate(data_loader):\n data_time.update(time.time() - end)\n with torch.no_grad():\n report = model.test_step(batch, batch_nb)\n losses.update(report['loss'])\n for k, v in report.items():\n if k not in metrics:\n metrics[k] = AverageMeter(k, ':.3f')\n metrics[k].update(v)\n batch_time.update(time.time() - end)\n end = time.time()\n if batch_nb % cfg.log.freq == 0 or batch_nb == len(data_loader) - 1:\n progress.display(batch_nb, print_fn=lambda *x: time_print(*x,\n end='\\r'))\n writer.add_scalar('loss/test', losses.avg, global_step)\n writer.add_scalar('time/batch/test', batch_time.avg, global_step)\n writer.add_scalar('time/data/test', data_time.avg, global_step)\n for k, v in metrics.items():\n writer.add_scalar(f'{k}/test', v.avg, global_step)\n progress.display(len(data_loader) - 1, time_print)\n\n\ndef main(cfg, pool=None):\n model = importlib.import_module(f'models.{cfg.model}').Model(cfg, pool=pool\n )\n if getattr(cfg, 'load_model', False):\n model.load_ckpt()\n if model.device != 'cpu' and torch.cuda.device_count() > 1:\n model = torch.nn.DataParallel(model)\n model = model.to(model.device)\n train_loader = model.get_train_loader()\n test_loader = model.get_test_loader()\n for epoch in range(cfg.num_epoch):\n time_print(f'\\nEpoch {epoch} Training')\n train(cfg, epoch, train_loader, model)\n filename = 'checkpoint.pth.tar'\n if not getattr(cfg.log, 'overwrite_ckpt', True):\n filename = '_'.join([str(epoch), filename])\n save_checkpoint(state={'epoch': epoch, 'global_step': model.\n global_step, 'state_dict': model.state_dict(), 'opt_state_dict':\n {k: v.state_dict() for k, v in model.optimizers.items()}, 'cfg':\n cfg}, directory=cfg.log.misc_dir, filename=filename)\n time_print('\\nTest')\n test(cfg, test_loader, model)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef train(cfg, epoch, data_loader, model):\n data_time = AverageMeter('Data', ':6.3f')\n batch_time = AverageMeter('Time', ':6.3f')\n losses = AverageMeter('Loss', ':.4e')\n progress = ProgressMeter(len(data_loader) - 1, [batch_time, data_time,\n losses], prefix=f'Epoch: [{epoch}]\\t')\n model.train()\n end = time.time()\n for batch_nb, batch in enumerate(data_loader):\n d_time = time.time() - end\n data_time.update(d_time)\n global_step = model.global_step\n writer.add_scalar('time/data/train', d_time, global_step)\n report = model.training_step(batch, batch_nb)\n losses.update(report['loss'])\n for k, v in report.items():\n writer.add_scalar(f'{k}/train', v, global_step)\n b_time = time.time() - end\n batch_time.update(b_time)\n writer.add_scalar('time/batch/train', b_time, global_step)\n end = time.time()\n if batch_nb % cfg.log.freq == 0 or batch_nb == len(data_loader) - 1:\n progress.display(batch_nb, print_fn=lambda *x: time_print(*x,\n end='\\r'))\n\n\ndef test(cfg, data_loader, model):\n data_time = AverageMeter('Data', ':6.3f')\n batch_time = AverageMeter('Time', ':6.3f')\n losses = AverageMeter('Loss', ':.4e')\n metrics = ['performance']\n metrics = {m: AverageMeter(m, ':.4e') for m in metrics}\n progress = ProgressMeter(len(data_loader) - 1, [batch_time, data_time,\n losses, *metrics.values()], prefix='Test:\\t')\n model.eval()\n global_step = model.global_step\n end = time.time()\n for batch_nb, batch in enumerate(data_loader):\n data_time.update(time.time() - end)\n with torch.no_grad():\n report = model.test_step(batch, batch_nb)\n losses.update(report['loss'])\n for k, v in report.items():\n if k not in metrics:\n metrics[k] = AverageMeter(k, ':.3f')\n metrics[k].update(v)\n batch_time.update(time.time() - end)\n end = time.time()\n if batch_nb % cfg.log.freq == 0 or batch_nb == len(data_loader) - 1:\n progress.display(batch_nb, print_fn=lambda *x: time_print(*x,\n end='\\r'))\n writer.add_scalar('loss/test', losses.avg, global_step)\n writer.add_scalar('time/batch/test', batch_time.avg, global_step)\n writer.add_scalar('time/data/test', data_time.avg, global_step)\n for k, v in metrics.items():\n writer.add_scalar(f'{k}/test', v.avg, global_step)\n progress.display(len(data_loader) - 1, time_print)\n\n\ndef main(cfg, pool=None):\n model = importlib.import_module(f'models.{cfg.model}').Model(cfg, pool=pool\n )\n if getattr(cfg, 'load_model', False):\n model.load_ckpt()\n if model.device != 'cpu' and torch.cuda.device_count() > 1:\n model = torch.nn.DataParallel(model)\n model = model.to(model.device)\n train_loader = model.get_train_loader()\n test_loader = model.get_test_loader()\n for epoch in range(cfg.num_epoch):\n time_print(f'\\nEpoch {epoch} Training')\n train(cfg, epoch, train_loader, model)\n filename = 'checkpoint.pth.tar'\n if not getattr(cfg.log, 'overwrite_ckpt', True):\n filename = '_'.join([str(epoch), filename])\n save_checkpoint(state={'epoch': epoch, 'global_step': model.\n global_step, 'state_dict': model.state_dict(), 'opt_state_dict':\n {k: v.state_dict() for k, v in model.optimizers.items()}, 'cfg':\n cfg}, directory=cfg.log.misc_dir, filename=filename)\n time_print('\\nTest')\n test(cfg, test_loader, model)\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(description='Run script')\n parser.add_argument('--config', '-c', type=str, required=False, default\n ='config')\n args = parser.parse_args()\n git_state = get_git_state()\n config = importlib.import_module(f'configs.{args.config}').config\n 
config.log.exp_id = git_state[1][:7] + datetime.datetime.now().strftime(\n '%Y-%m-%d_%H:%M:%S')\n config.log.misc_dir = config.log.dir / 'misc' / config.log.exp_id\n config.log.tb_dir = config.log.dir / 'tb' / config.log.exp_id\n config.log.misc_dir.mkdir(exist_ok=True, parents=True)\n config.log.tb_dir.mkdir(exist_ok=True, parents=True)\n torch.manual_seed(config.rnd_seed)\n np.random.seed(config.rnd_seed)\n random.seed(config.rnd_seed)\n if getattr(config, 'anomaly_detection', False):\n torch.autograd.set_detect_anomaly(True)\n global writer\n writer = SummaryWriter(log_dir=config.log.tb_dir, comment=\n f'{config.description}, {git_state}')\n time_print(pprint.pformat(config))\n time_print(f'Git head at state: {git_state}')\n try:\n if (npp := getattr(config, 'n_process_pool', 0)):\n with torch.multiprocessing.Pool(npp) as pool:\n main(config, pool=pool)\n else:\n main(config)\n except KeyboardInterrupt:\n time_print(f'Keyboard interrupt')\n exit(0)\n",
"step-4": "import argparse\nimport datetime\nimport importlib\nimport pprint\nimport time\nimport random\nimport numpy as np\nimport torch\nfrom torch.utils.tensorboard import SummaryWriter\nfrom utils import get_git_state, time_print, AverageMeter, ProgressMeter, save_checkpoint\n\n\ndef train(cfg, epoch, data_loader, model):\n data_time = AverageMeter('Data', ':6.3f')\n batch_time = AverageMeter('Time', ':6.3f')\n losses = AverageMeter('Loss', ':.4e')\n progress = ProgressMeter(len(data_loader) - 1, [batch_time, data_time,\n losses], prefix=f'Epoch: [{epoch}]\\t')\n model.train()\n end = time.time()\n for batch_nb, batch in enumerate(data_loader):\n d_time = time.time() - end\n data_time.update(d_time)\n global_step = model.global_step\n writer.add_scalar('time/data/train', d_time, global_step)\n report = model.training_step(batch, batch_nb)\n losses.update(report['loss'])\n for k, v in report.items():\n writer.add_scalar(f'{k}/train', v, global_step)\n b_time = time.time() - end\n batch_time.update(b_time)\n writer.add_scalar('time/batch/train', b_time, global_step)\n end = time.time()\n if batch_nb % cfg.log.freq == 0 or batch_nb == len(data_loader) - 1:\n progress.display(batch_nb, print_fn=lambda *x: time_print(*x,\n end='\\r'))\n\n\ndef test(cfg, data_loader, model):\n data_time = AverageMeter('Data', ':6.3f')\n batch_time = AverageMeter('Time', ':6.3f')\n losses = AverageMeter('Loss', ':.4e')\n metrics = ['performance']\n metrics = {m: AverageMeter(m, ':.4e') for m in metrics}\n progress = ProgressMeter(len(data_loader) - 1, [batch_time, data_time,\n losses, *metrics.values()], prefix='Test:\\t')\n model.eval()\n global_step = model.global_step\n end = time.time()\n for batch_nb, batch in enumerate(data_loader):\n data_time.update(time.time() - end)\n with torch.no_grad():\n report = model.test_step(batch, batch_nb)\n losses.update(report['loss'])\n for k, v in report.items():\n if k not in metrics:\n metrics[k] = AverageMeter(k, ':.3f')\n metrics[k].update(v)\n batch_time.update(time.time() - end)\n end = time.time()\n if batch_nb % cfg.log.freq == 0 or batch_nb == len(data_loader) - 1:\n progress.display(batch_nb, print_fn=lambda *x: time_print(*x,\n end='\\r'))\n writer.add_scalar('loss/test', losses.avg, global_step)\n writer.add_scalar('time/batch/test', batch_time.avg, global_step)\n writer.add_scalar('time/data/test', data_time.avg, global_step)\n for k, v in metrics.items():\n writer.add_scalar(f'{k}/test', v.avg, global_step)\n progress.display(len(data_loader) - 1, time_print)\n\n\ndef main(cfg, pool=None):\n model = importlib.import_module(f'models.{cfg.model}').Model(cfg, pool=pool\n )\n if getattr(cfg, 'load_model', False):\n model.load_ckpt()\n if model.device != 'cpu' and torch.cuda.device_count() > 1:\n model = torch.nn.DataParallel(model)\n model = model.to(model.device)\n train_loader = model.get_train_loader()\n test_loader = model.get_test_loader()\n for epoch in range(cfg.num_epoch):\n time_print(f'\\nEpoch {epoch} Training')\n train(cfg, epoch, train_loader, model)\n filename = 'checkpoint.pth.tar'\n if not getattr(cfg.log, 'overwrite_ckpt', True):\n filename = '_'.join([str(epoch), filename])\n save_checkpoint(state={'epoch': epoch, 'global_step': model.\n global_step, 'state_dict': model.state_dict(), 'opt_state_dict':\n {k: v.state_dict() for k, v in model.optimizers.items()}, 'cfg':\n cfg}, directory=cfg.log.misc_dir, filename=filename)\n time_print('\\nTest')\n test(cfg, test_loader, model)\n\n\nif __name__ == '__main__':\n parser = 
argparse.ArgumentParser(description='Run script')\n parser.add_argument('--config', '-c', type=str, required=False, default\n ='config')\n args = parser.parse_args()\n git_state = get_git_state()\n config = importlib.import_module(f'configs.{args.config}').config\n config.log.exp_id = git_state[1][:7] + datetime.datetime.now().strftime(\n '%Y-%m-%d_%H:%M:%S')\n config.log.misc_dir = config.log.dir / 'misc' / config.log.exp_id\n config.log.tb_dir = config.log.dir / 'tb' / config.log.exp_id\n config.log.misc_dir.mkdir(exist_ok=True, parents=True)\n config.log.tb_dir.mkdir(exist_ok=True, parents=True)\n torch.manual_seed(config.rnd_seed)\n np.random.seed(config.rnd_seed)\n random.seed(config.rnd_seed)\n if getattr(config, 'anomaly_detection', False):\n torch.autograd.set_detect_anomaly(True)\n global writer\n writer = SummaryWriter(log_dir=config.log.tb_dir, comment=\n f'{config.description}, {git_state}')\n time_print(pprint.pformat(config))\n time_print(f'Git head at state: {git_state}')\n try:\n if (npp := getattr(config, 'n_process_pool', 0)):\n with torch.multiprocessing.Pool(npp) as pool:\n main(config, pool=pool)\n else:\n main(config)\n except KeyboardInterrupt:\n time_print(f'Keyboard interrupt')\n exit(0)\n",
"step-5": "import argparse\nimport datetime\nimport importlib\nimport pprint\nimport time\nimport random\n\nimport numpy as np\nimport torch\nfrom torch.utils.tensorboard import SummaryWriter\n\nfrom utils import get_git_state, time_print, AverageMeter, ProgressMeter, save_checkpoint\n\n\ndef train(cfg, epoch, data_loader, model):\n data_time = AverageMeter(\"Data\", \":6.3f\")\n batch_time = AverageMeter(\"Time\", \":6.3f\")\n losses = AverageMeter(\"Loss\", \":.4e\")\n \n progress = ProgressMeter(\n len(data_loader)-1,\n [batch_time, data_time, losses],\n prefix=f\"Epoch: [{epoch}]\\t\")\n\n model.train()\n\n end = time.time()\n for batch_nb, batch in enumerate(data_loader):\n d_time = time.time() - end\n data_time.update(d_time)\n\n global_step = model.global_step\n writer.add_scalar(\"time/data/train\", d_time, global_step)\n\n report = model.training_step(batch, batch_nb)\n\n losses.update(report[\"loss\"])\n\n for k, v in report.items():\n writer.add_scalar(f\"{k}/train\", v, global_step)\n\n b_time = time.time() - end\n batch_time.update(b_time)\n writer.add_scalar(\"time/batch/train\", b_time, global_step)\n end = time.time()\n\n if batch_nb % cfg.log.freq == 0 or batch_nb == len(data_loader) - 1:\n progress.display(batch_nb, print_fn=lambda *x: time_print(*x, end=\"\\r\"))\n\ndef test(cfg, data_loader, model):\n data_time = AverageMeter(\"Data\", \":6.3f\")\n batch_time = AverageMeter(\"Time\", \":6.3f\")\n losses = AverageMeter(\"Loss\", \":.4e\")\n metrics = [\"performance\"]\n metrics = {m: AverageMeter(m, \":.4e\") for m in metrics}\n\n progress = ProgressMeter(\n len(data_loader)-1,\n [batch_time, data_time, losses, *metrics.values()],\n prefix=\"Test:\\t\")\n\n model.eval()\n\n global_step = model.global_step\n\n end = time.time()\n for batch_nb, batch in enumerate(data_loader):\n data_time.update(time.time() - end)\n with torch.no_grad():\n report = model.test_step(batch, batch_nb)\n \n losses.update(report[\"loss\"])\n\n for k, v in report.items():\n if k not in metrics:\n metrics[k] = AverageMeter(k, \":.3f\")\n metrics[k].update(v)\n \n batch_time.update(time.time() - end)\n end = time.time()\n\n if batch_nb % cfg.log.freq == 0 or batch_nb == len(data_loader) - 1:\n progress.display(batch_nb, print_fn=lambda *x: time_print(*x, end=\"\\r\"))\n\n writer.add_scalar(\"loss/test\", losses.avg, global_step)\n writer.add_scalar(\"time/batch/test\", batch_time.avg, global_step)\n writer.add_scalar(\"time/data/test\", data_time.avg, global_step)\n\n for k,v in metrics.items():\n writer.add_scalar(f\"{k}/test\", v.avg, global_step)\n\n progress.display(len(data_loader) - 1, time_print)\n\n\ndef main(cfg, pool=None):\n model = importlib.import_module(f\"models.{cfg.model}\").Model(cfg, pool=pool)\n\n if getattr(cfg, \"load_model\", False):\n model.load_ckpt()\n\n if model.device != \"cpu\" and torch.cuda.device_count() > 1:\n model = torch.nn.DataParallel(model)\n\n model = model.to(model.device)\n\n train_loader = model.get_train_loader()\n test_loader = model.get_test_loader()\n\n for epoch in range(cfg.num_epoch):\n time_print(f\"\\nEpoch {epoch} Training\")\n train(cfg, epoch, train_loader, model)\n \n filename = \"checkpoint.pth.tar\"\n if not getattr(cfg.log, \"overwrite_ckpt\", True):\n filename = \"_\".join([str(epoch), filename])\n\n save_checkpoint(\n state={\n \"epoch\": epoch,\n \"global_step\": model.global_step,\n \"state_dict\": model.state_dict(),\n \"opt_state_dict\": {k: v.state_dict() for k,v in model.optimizers.items()},\n \"cfg\": cfg,\n },\n 
directory=cfg.log.misc_dir,\n filename=filename)\n \n time_print(\"\\nTest\")\n test(cfg, test_loader, model)\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser(description=\"Run script\")\n parser.add_argument(\"--config\", \"-c\",type=str, required=False, default=\"config\")\n args = parser.parse_args()\n git_state = get_git_state()\n config = importlib.import_module(f\"configs.{args.config}\").config\n config.log.exp_id = git_state[1][:7] + datetime.datetime.now().strftime(\"%Y-%m-%d_%H:%M:%S\")\n config.log.misc_dir = config.log.dir / \"misc\" / config.log.exp_id\n config.log.tb_dir = config.log.dir / \"tb\" / config.log.exp_id\n config.log.misc_dir.mkdir(exist_ok=True, parents=True)\n config.log.tb_dir.mkdir(exist_ok=True, parents=True)\n\n torch.manual_seed(config.rnd_seed)\n np.random.seed(config.rnd_seed)\n random.seed(config.rnd_seed)\n\n if getattr(config, \"anomaly_detection\", False):\n torch.autograd.set_detect_anomaly(True)\n\n global writer\n writer = SummaryWriter(\n log_dir=config.log.tb_dir,\n comment=f\"{config.description}, {git_state}\")\n\n time_print(pprint.pformat(config))\n time_print(f\"Git head at state: {git_state}\")\n\n try:\n if npp:=getattr(config, \"n_process_pool\", 0):\n with torch.multiprocessing.Pool(npp) as pool:\n main(config, pool=pool)\n else:\n main(config)\n except KeyboardInterrupt:\n time_print(f\"Keyboard interrupt\")\n exit(0)",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
from django import forms
class TeacherForm(forms.Form):
    name = forms.CharField(label='Your Name', max_length=100,
                           widget=forms.TextInput(attrs={'class': 'form-control text-center w-75 mx-auto'}))
    email = forms.EmailField(
        widget=forms.TextInput(attrs={'class': 'form-control text-center w-75 mx-auto'}))
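# Minimal usage sketch (the view and template names are illustrative, not
# taken from the original project):
# from django.shortcuts import render
# def teacher_view(request):
#     form = TeacherForm(request.POST or None)
#     if form.is_valid():
#         name = form.cleaned_data['name']
#         email = form.cleaned_data['email']
#         ...  # handle the submission
#     return render(request, 'teacher.html', {'form': form})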
|
normal
|
{
"blob_id": "7c5877eea78c3fa8b7928219edd52e2502c16c09",
"index": 6392,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass TeacherForm(forms.Form):\n <mask token>\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass TeacherForm(forms.Form):\n name = forms.CharField(label='Your Name', max_length=100, widget=forms.\n TextInput(attrs={'class': 'form-control text-center w-75 mx-auto'}))\n email = forms.EmailField(widget=forms.TextInput(attrs={'class':\n 'form-control text-center w-75 mx-auto'}))\n",
"step-4": "from django import forms\n\n\nclass TeacherForm(forms.Form):\n name = forms.CharField(label='Your Name', max_length=100, widget=forms.\n TextInput(attrs={'class': 'form-control text-center w-75 mx-auto'}))\n email = forms.EmailField(widget=forms.TextInput(attrs={'class':\n 'form-control text-center w-75 mx-auto'}))\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
zi = ["L", "Ma", "Mi", "J", "Vi", "S", "D"]  # Romanian weekday abbreviations, Monday through Sunday
V=[]
for i in range(0,len(zi)):
    x = input("enter the salary for {}: ".format(zi[i]))
V.append(int(x))
print("Salariul in fiecare zi: {}".format(V))
print(sum(V))
print(round(sum(V)/7,2))
print(max(V))
vMax=[]
vMin=[]
for i in range(0,len(zi)):
if V[i]==max(V):
vMax.append(zi[i])
print(vMax)
for i in range(0,len(zi)):
if V[i]==min(V):
vMin.append(zi[i])
print(vMin)
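# Worked example with hypothetical input 10, 20, 30, 40, 50, 60, 70 for L..D:
# total 280, average 40.0, max 70 (printed days: ['D']), min 10 (['L']).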
|
normal
|
{
"blob_id": "6c91114e0c32628b64734000c82354105032b2fd",
"index": 7954,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nfor i in range(0, len(zi)):\n x = input('dati salariul de: {} '.format(zi[i]))\n V.append(int(x))\nprint('Salariul in fiecare zi: {}'.format(V))\nprint(sum(V))\nprint(round(sum(V) / 7, 2))\nprint(max(V))\n<mask token>\nfor i in range(0, len(zi)):\n if V[i] == max(V):\n vMax.append(zi[i])\nprint(vMax)\nfor i in range(0, len(zi)):\n if V[i] == min(V):\n vMin.append(zi[i])\nprint(vMin)\n",
"step-3": "zi = ['L', 'Ma', 'Mi', 'J', 'Vi', 'S', 'D']\nV = []\nfor i in range(0, len(zi)):\n x = input('dati salariul de: {} '.format(zi[i]))\n V.append(int(x))\nprint('Salariul in fiecare zi: {}'.format(V))\nprint(sum(V))\nprint(round(sum(V) / 7, 2))\nprint(max(V))\nvMax = []\nvMin = []\nfor i in range(0, len(zi)):\n if V[i] == max(V):\n vMax.append(zi[i])\nprint(vMax)\nfor i in range(0, len(zi)):\n if V[i] == min(V):\n vMin.append(zi[i])\nprint(vMin)\n",
"step-4": "zi=[\"L\",\"Ma\",\"Mi\",\"J\",\"Vi\",\"S\",\"D\"]\r\nV=[]\r\nfor i in range(0,len(zi)):\r\n x=input(\"dati salariul de: {} \".format(zi[i]))\r\n V.append(int(x))\r\nprint(\"Salariul in fiecare zi: {}\".format(V))\r\nprint(sum(V))\r\nprint(round(sum(V)/7,2))\r\nprint(max(V))\r\nvMax=[]\r\nvMin=[]\r\nfor i in range(0,len(zi)):\r\n if V[i]==max(V):\r\n vMax.append(zi[i])\r\nprint(vMax)\r\nfor i in range(0,len(zi)):\r\n if V[i]==min(V):\r\n vMin.append(zi[i])\r\nprint(vMin)\r\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
#https://codecombat.com/play/level/village-champion
# Incoming munchkins! Defend the town!
# Define your own function to fight the enemy!
# In the function, find an enemy, then cleave or attack it.
def attttaaaaacccckkkk():
enemy = hero.findNearest(hero.findEnemies())
#enemy = hero.findNearestEnemy()
if enemy:
        if hero.isReady('cleave'):
hero.cleave(enemy)
else:
hero.attack(enemy)
# Move between patrol points and call the function.
while True:
hero.moveXY(35, 34)
# Use whatever function name you defined above.
attttaaaaacccckkkk()
hero.moveXY(47, 27)
# Call the function again.
attttaaaaacccckkkk()
hero.moveXY(60, 31)
# Call the function again.
attttaaaaacccckkkk()
|
normal
|
{
"blob_id": "ce365e011d8cc88d9aa6b4df18ea3f4e70d48f5c",
"index": 4887,
"step-1": "<mask token>\n",
"step-2": "def attttaaaaacccckkkk():\n enemy = hero.findNearest(hero.findEnemies())\n if enemy:\n if enemy and hero.isReady('cleave'):\n hero.cleave(enemy)\n else:\n hero.attack(enemy)\n\n\n<mask token>\n",
"step-3": "def attttaaaaacccckkkk():\n enemy = hero.findNearest(hero.findEnemies())\n if enemy:\n if enemy and hero.isReady('cleave'):\n hero.cleave(enemy)\n else:\n hero.attack(enemy)\n\n\nwhile True:\n hero.moveXY(35, 34)\n attttaaaaacccckkkk()\n hero.moveXY(47, 27)\n attttaaaaacccckkkk()\n hero.moveXY(60, 31)\n attttaaaaacccckkkk()\n",
"step-4": "#https://codecombat.com/play/level/village-champion\n# Incoming munchkins! Defend the town!\n\n# Define your own function to fight the enemy!\n# In the function, find an enemy, then cleave or attack it.\ndef attttaaaaacccckkkk():\n enemy = hero.findNearest(hero.findEnemies())\n #enemy = hero.findNearestEnemy()\n if enemy:\n if enemy and hero.isReady('cleave'):\n hero.cleave(enemy)\n else:\n hero.attack(enemy)\n\n# Move between patrol points and call the function.\nwhile True:\n hero.moveXY(35, 34)\n # Use whatever function name you defined above.\n attttaaaaacccckkkk()\n hero.moveXY(47, 27)\n # Call the function again.\n attttaaaaacccckkkk()\n hero.moveXY(60, 31)\n # Call the function again.\n attttaaaaacccckkkk()\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
import numpy as np
catogory = np.array([50, 30, 40, 20])
data = np.array([[20, 50, 10, 15, 20], [30, 40, 20, 65, 35], [75, 30, 42,
70, 45], [40, 25, 35, 22, 55]])
print(catogory)
print(data)
print(catogory.dot(data))
print(data.T.dot(catogory))
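# Both prints show the same vector: v.dot(M) weights the rows of `data` by the
# first array, and M.T.dot(v) is the identical contraction written the other
# way round. With the numbers above the result is [5700 5400 3480 5940 4950]
# (first entry: 50*20 + 30*30 + 40*75 + 20*40 = 5700).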
|
normal
|
{
"blob_id": "e4b49faaad648c6e85274abb18f994083a74013d",
"index": 7160,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nprint(catogory)\nprint(data)\nprint(catogory.dot(data))\nprint(data.T.dot(catogory))\n",
"step-3": "<mask token>\ncatogory = np.array([50, 30, 40, 20])\ndata = np.array([[20, 50, 10, 15, 20], [30, 40, 20, 65, 35], [75, 30, 42, \n 70, 45], [40, 25, 35, 22, 55]])\nprint(catogory)\nprint(data)\nprint(catogory.dot(data))\nprint(data.T.dot(catogory))\n",
"step-4": "import numpy as np\ncatogory = np.array([50, 30, 40, 20])\ndata = np.array([[20, 50, 10, 15, 20], [30, 40, 20, 65, 35], [75, 30, 42, \n 70, 45], [40, 25, 35, 22, 55]])\nprint(catogory)\nprint(data)\nprint(catogory.dot(data))\nprint(data.T.dot(catogory))\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
import xlsxwriter
# 商品编码 = "product codes": copy the comma-separated text file into an .xlsx workbook
workbook = xlsxwriter.Workbook('商品编码.xlsx')
worksheet = workbook.add_worksheet()
with open('商品编码.txt', 'rt') as f:
data = f.read()
data = data.splitlines(True)
count = 1
row = 0
for x in data:
    # the first two lines of the file are header rows; skip them
    if count < 3:
        count += 1
        continue
    x = x.split(',')
    column = 0
    for e in x:
        if row == 0 and column == 0:
            # drop the first three characters of the very first cell
            # (most likely a stray prefix such as a byte-order mark)
            e = e[3:]
worksheet.write(row, column, e)
column += 1
row += 1
workbook.close()
|
normal
|
{
"blob_id": "59a8a4cf4b04a191bfb70fd07668141dbfeda790",
"index": 6822,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nwith open('商品编码.txt', 'rt') as f:\n data = f.read()\n<mask token>\nfor x in data:\n if count < 3:\n count += 1\n continue\n x = x.split(',')\n column = 0\n for e in x:\n if row == 0 and column == 0:\n e = e[3:]\n worksheet.write(row, column, e)\n column += 1\n row += 1\nworkbook.close()\n",
"step-3": "<mask token>\nworkbook = xlsxwriter.Workbook('商品编码.xlsx')\nworksheet = workbook.add_worksheet()\nwith open('商品编码.txt', 'rt') as f:\n data = f.read()\ndata = data.splitlines(True)\ncount = 1\nrow = 0\nfor x in data:\n if count < 3:\n count += 1\n continue\n x = x.split(',')\n column = 0\n for e in x:\n if row == 0 and column == 0:\n e = e[3:]\n worksheet.write(row, column, e)\n column += 1\n row += 1\nworkbook.close()\n",
"step-4": "import xlsxwriter\nworkbook = xlsxwriter.Workbook('商品编码.xlsx')\nworksheet = workbook.add_worksheet()\nwith open('商品编码.txt', 'rt') as f:\n data = f.read()\ndata = data.splitlines(True)\ncount = 1\nrow = 0\nfor x in data:\n if count < 3:\n count += 1\n continue\n x = x.split(',')\n column = 0\n for e in x:\n if row == 0 and column == 0:\n e = e[3:]\n worksheet.write(row, column, e)\n column += 1\n row += 1\nworkbook.close()\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
x = 1
while x <= 24:
if x % 5 == 0:
x = x + 1
continue
print(x)
x = x + 1
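# Prints the numbers 1 through 24, skipping the multiples of 5 (5, 10, 15, 20).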
|
normal
|
{
"blob_id": "61cfc583cd87ac0528cb07f4e051392167414920",
"index": 1960,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nwhile x <= 24:\n if x % 5 == 0:\n x = x + 1\n continue\n print(x)\n x = x + 1\n",
"step-3": "x = 1\nwhile x <= 24:\n if x % 5 == 0:\n x = x + 1\n continue\n print(x)\n x = x + 1\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
#os for file system
import os
from sys import platform as _platform
import fnmatch
import inspect
files = 0
lines = 0
extension0 = '.c'
extension1 = '.cpp'
extension2 = '.h'
extension3 = '.hpp'
filename = inspect.getframeinfo(inspect.currentframe()).filename
startPath = os.path.dirname(os.path.abspath(filename))
with open("files_with_extensions.txt", "w", encoding="utf-8") as filewrite:
for r, d, f in os.walk(startPath):
for file in f:
if file.endswith(extension0) or file.endswith(extension1) or file.endswith(extension2) or file.endswith(extension3):
if _platform == "linux" or _platform == "linux2":
ss = '/'
elif _platform == "win32" or _platform == "win64":
ss = '\\'
filePathAndName = r + ss + file
files += 1
filewrite.write(f"{filePathAndName}")
fi = open(filePathAndName, 'r')
pos = fi.tell()
fileLines = 0
while (True):
li = fi.readline()
                    # skip blank / whitespace-only lines so they are not counted
                    if li.isspace():
                        continue
newpos = fi.tell()
fileLines += 1
if newpos == pos: # stream position hasn't changed -> EOF
break
else:
pos = newpos
lines += fileLines
filewrite.write(f"{fileLines}\n")
print(file + " " + str(fileLines))
fi.close()
print(files)
print(lines)
filewrite.write(f"{files}\n")
filewrite.write(f"{lines}\n")
|
normal
|
{
"blob_id": "d287123acdbabdd5a223e774c89945ab888fcbcc",
"index": 5439,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nwith open('files_with_extensions.txt', 'w', encoding='utf-8') as filewrite:\n for r, d, f in os.walk(startPath):\n for file in f:\n if file.endswith(extension0) or file.endswith(extension1\n ) or file.endswith(extension2) or file.endswith(extension3):\n if _platform == 'linux' or _platform == 'linux2':\n ss = '/'\n elif _platform == 'win32' or _platform == 'win64':\n ss = '\\\\'\n filePathAndName = r + ss + file\n files += 1\n filewrite.write(f'{filePathAndName}')\n fi = open(filePathAndName, 'r')\n pos = fi.tell()\n fileLines = 0\n while True:\n li = fi.readline()\n if li.isspace():\n continue\n newpos = fi.tell()\n fileLines += 1\n if newpos == pos:\n break\n else:\n pos = newpos\n lines += fileLines\n filewrite.write(f'{fileLines}\\n')\n print(file + ' ' + str(fileLines))\n fi.close()\n print(files)\n print(lines)\n filewrite.write(f'{files}\\n')\n filewrite.write(f'{lines}\\n')\n",
"step-3": "<mask token>\nfiles = 0\nlines = 0\nextension0 = '.c'\nextension1 = '.cpp'\nextension2 = '.h'\nextension3 = '.hpp'\nfilename = inspect.getframeinfo(inspect.currentframe()).filename\nstartPath = os.path.dirname(os.path.abspath(filename))\nwith open('files_with_extensions.txt', 'w', encoding='utf-8') as filewrite:\n for r, d, f in os.walk(startPath):\n for file in f:\n if file.endswith(extension0) or file.endswith(extension1\n ) or file.endswith(extension2) or file.endswith(extension3):\n if _platform == 'linux' or _platform == 'linux2':\n ss = '/'\n elif _platform == 'win32' or _platform == 'win64':\n ss = '\\\\'\n filePathAndName = r + ss + file\n files += 1\n filewrite.write(f'{filePathAndName}')\n fi = open(filePathAndName, 'r')\n pos = fi.tell()\n fileLines = 0\n while True:\n li = fi.readline()\n if li.isspace():\n continue\n newpos = fi.tell()\n fileLines += 1\n if newpos == pos:\n break\n else:\n pos = newpos\n lines += fileLines\n filewrite.write(f'{fileLines}\\n')\n print(file + ' ' + str(fileLines))\n fi.close()\n print(files)\n print(lines)\n filewrite.write(f'{files}\\n')\n filewrite.write(f'{lines}\\n')\n",
"step-4": "import os\nfrom sys import platform as _platform\nimport fnmatch\nimport inspect\nfiles = 0\nlines = 0\nextension0 = '.c'\nextension1 = '.cpp'\nextension2 = '.h'\nextension3 = '.hpp'\nfilename = inspect.getframeinfo(inspect.currentframe()).filename\nstartPath = os.path.dirname(os.path.abspath(filename))\nwith open('files_with_extensions.txt', 'w', encoding='utf-8') as filewrite:\n for r, d, f in os.walk(startPath):\n for file in f:\n if file.endswith(extension0) or file.endswith(extension1\n ) or file.endswith(extension2) or file.endswith(extension3):\n if _platform == 'linux' or _platform == 'linux2':\n ss = '/'\n elif _platform == 'win32' or _platform == 'win64':\n ss = '\\\\'\n filePathAndName = r + ss + file\n files += 1\n filewrite.write(f'{filePathAndName}')\n fi = open(filePathAndName, 'r')\n pos = fi.tell()\n fileLines = 0\n while True:\n li = fi.readline()\n if li.isspace():\n continue\n newpos = fi.tell()\n fileLines += 1\n if newpos == pos:\n break\n else:\n pos = newpos\n lines += fileLines\n filewrite.write(f'{fileLines}\\n')\n print(file + ' ' + str(fileLines))\n fi.close()\n print(files)\n print(lines)\n filewrite.write(f'{files}\\n')\n filewrite.write(f'{lines}\\n')\n",
"step-5": "#os for file system\nimport os\n\nfrom sys import platform as _platform\n\nimport fnmatch\nimport inspect\n\nfiles = 0\nlines = 0 \n \nextension0 = '.c'\nextension1 = '.cpp'\nextension2 = '.h'\t\nextension3 = '.hpp'\t\n\nfilename = inspect.getframeinfo(inspect.currentframe()).filename\nstartPath = os.path.dirname(os.path.abspath(filename))\n\nwith open(\"files_with_extensions.txt\", \"w\", encoding=\"utf-8\") as filewrite:\n for r, d, f in os.walk(startPath):\n for file in f:\n if file.endswith(extension0) or file.endswith(extension1) or file.endswith(extension2) or file.endswith(extension3):\n\n if _platform == \"linux\" or _platform == \"linux2\":\n ss = '/'\n elif _platform == \"win32\" or _platform == \"win64\":\n ss = '\\\\'\n\n filePathAndName = r + ss + file\n\n files += 1\n\n filewrite.write(f\"{filePathAndName}\")\n \n fi = open(filePathAndName, 'r')\n pos = fi.tell()\n\n fileLines = 0\n while (True):\n li = fi.readline()\n\n # check for any hidden symbols\n if li.isspace():\n continue\n \n newpos = fi.tell()\n fileLines += 1\n if newpos == pos: # stream position hasn't changed -> EOF\n break\n else:\n pos = newpos\n\n lines += fileLines\n\n filewrite.write(f\"{fileLines}\\n\")\n print(file + \" \" + str(fileLines))\n\n fi.close()\n \n\n print(files)\n print(lines)\n\n filewrite.write(f\"{files}\\n\")\n filewrite.write(f\"{lines}\\n\")\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
# Definition for a Node.
class Node:
def __init__(self, val, children):
self.val = val
self.children = children
class Solution(object):
def postorder(self, root):
"""
:type root: Node
:rtype: List[int]
"""
if not root:
return([])
if not root.children:
return([root.val])
result = []
for child in root.children:
result += self.postorder(child)
result += [root.val]
return(result)
n5 = Node(5,None)
n6 = Node(6,None)
n3 = Node(2,None)
n4 = Node(4,None)
n2 = Node(3,[n5,n6])
n1 = Node(1,[n2,n3,n4])
s = Solution()
result = s.postorder(n1)
print(result)
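# The tree above is root 1 with children [3, 2, 4], where node 3 has children
# [5, 6]; post-order visits children before their parent, so this prints
# [5, 6, 3, 2, 4, 1].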
|
normal
|
{
"blob_id": "93ec15a37bd5f022e8f6e226e3bf0e91cc0457c6",
"index": 2178,
"step-1": "class Node:\n <mask token>\n\n\nclass Solution(object):\n\n def postorder(self, root):\n \"\"\"\n :type root: Node\n :rtype: List[int]\n \"\"\"\n if not root:\n return []\n if not root.children:\n return [root.val]\n result = []\n for child in root.children:\n result += self.postorder(child)\n result += [root.val]\n return result\n\n\n<mask token>\n",
"step-2": "class Node:\n\n def __init__(self, val, children):\n self.val = val\n self.children = children\n\n\nclass Solution(object):\n\n def postorder(self, root):\n \"\"\"\n :type root: Node\n :rtype: List[int]\n \"\"\"\n if not root:\n return []\n if not root.children:\n return [root.val]\n result = []\n for child in root.children:\n result += self.postorder(child)\n result += [root.val]\n return result\n\n\n<mask token>\n",
"step-3": "class Node:\n\n def __init__(self, val, children):\n self.val = val\n self.children = children\n\n\nclass Solution(object):\n\n def postorder(self, root):\n \"\"\"\n :type root: Node\n :rtype: List[int]\n \"\"\"\n if not root:\n return []\n if not root.children:\n return [root.val]\n result = []\n for child in root.children:\n result += self.postorder(child)\n result += [root.val]\n return result\n\n\n<mask token>\nprint(result)\n",
"step-4": "class Node:\n\n def __init__(self, val, children):\n self.val = val\n self.children = children\n\n\nclass Solution(object):\n\n def postorder(self, root):\n \"\"\"\n :type root: Node\n :rtype: List[int]\n \"\"\"\n if not root:\n return []\n if not root.children:\n return [root.val]\n result = []\n for child in root.children:\n result += self.postorder(child)\n result += [root.val]\n return result\n\n\nn5 = Node(5, None)\nn6 = Node(6, None)\nn3 = Node(2, None)\nn4 = Node(4, None)\nn2 = Node(3, [n5, n6])\nn1 = Node(1, [n2, n3, n4])\ns = Solution()\nresult = s.postorder(n1)\nprint(result)\n",
"step-5": "# Definition for a Node.\nclass Node:\n def __init__(self, val, children):\n self.val = val\n self.children = children\n\nclass Solution(object):\n def postorder(self, root):\n \"\"\"\n :type root: Node\n :rtype: List[int]\n \"\"\"\n if not root:\n return([])\n if not root.children:\n return([root.val])\n result = []\n for child in root.children:\n result += self.postorder(child)\n result += [root.val]\n return(result)\n\n \nn5 = Node(5,None)\nn6 = Node(6,None)\nn3 = Node(2,None)\nn4 = Node(4,None)\nn2 = Node(3,[n5,n6])\nn1 = Node(1,[n2,n3,n4])\n\ns = Solution()\nresult = s.postorder(n1)\nprint(result)\n",
"step-ids": [
3,
4,
5,
6,
7
]
}
|
[
3,
4,
5,
6,
7
] |
# coding: utf-8
import os, sys
import numpy as np
from math import exp, sqrt, pi
def factorial(n):
value = 1
for i in range(n,1,-1):
value *= i
return value
def double_factorial(n):
k = 1
for i in range(n, 1, -2):
k *= i
#print("n:", n, "double factorial:", k)
return k
"""\int_0^\infty r^m e^{-alpha * r^2} dr"""
def gaussian_integral(alpha, m):
if int(m/2)*2 == m: # even number
n = int(m/2)
value = double_factorial(2*n-1) * sqrt(pi) / pow(2, n+1) / pow(alpha, n+0.5)
else:
n = int((m-1)/2)
value = factorial(n) / 2 / pow(alpha, n+1)
return value
def overlap_s_gaussians(expo1, expo2, power_of_r):
norm1 = pow(2*expo1/pi, 0.75)
norm2 = pow(2*expo2/pi, 0.75)
value = norm1 * norm2 * 4 * pi * gaussian_integral(expo1+expo2, power_of_r+2)
return value
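# Small self-check added for illustration (not part of the original module):
# gaussian_integral(1.0, 0) should give sqrt(pi)/2, and the overlap of a
# normalized s-type Gaussian with itself (power_of_r = 0) should be 1.
if __name__ == "__main__":
    print(gaussian_integral(1.0, 0))         # ~0.8862, i.e. sqrt(pi)/2
    print(overlap_s_gaussians(0.5, 0.5, 0))  # ~1.0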
|
normal
|
{
"blob_id": "005650e2747c61b730960a29891b6ba6c8bd381b",
"index": 1334,
"step-1": "<mask token>\n\n\ndef double_factorial(n):\n k = 1\n for i in range(n, 1, -2):\n k *= i\n return k\n\n\n<mask token>\n\n\ndef gaussian_integral(alpha, m):\n if int(m / 2) * 2 == m:\n n = int(m / 2)\n value = double_factorial(2 * n - 1) * sqrt(pi) / pow(2, n + 1) / pow(\n alpha, n + 0.5)\n else:\n n = int((m - 1) / 2)\n value = factorial(n) / 2 / pow(alpha, n + 1)\n return value\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef factorial(n):\n value = 1\n for i in range(n, 1, -1):\n value *= i\n return value\n\n\ndef double_factorial(n):\n k = 1\n for i in range(n, 1, -2):\n k *= i\n return k\n\n\n<mask token>\n\n\ndef gaussian_integral(alpha, m):\n if int(m / 2) * 2 == m:\n n = int(m / 2)\n value = double_factorial(2 * n - 1) * sqrt(pi) / pow(2, n + 1) / pow(\n alpha, n + 0.5)\n else:\n n = int((m - 1) / 2)\n value = factorial(n) / 2 / pow(alpha, n + 1)\n return value\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef factorial(n):\n value = 1\n for i in range(n, 1, -1):\n value *= i\n return value\n\n\ndef double_factorial(n):\n k = 1\n for i in range(n, 1, -2):\n k *= i\n return k\n\n\n<mask token>\n\n\ndef gaussian_integral(alpha, m):\n if int(m / 2) * 2 == m:\n n = int(m / 2)\n value = double_factorial(2 * n - 1) * sqrt(pi) / pow(2, n + 1) / pow(\n alpha, n + 0.5)\n else:\n n = int((m - 1) / 2)\n value = factorial(n) / 2 / pow(alpha, n + 1)\n return value\n\n\ndef overlap_s_gaussians(expo1, expo2, power_of_r):\n norm1 = pow(2 * expo1 / pi, 0.75)\n norm2 = pow(2 * expo2 / pi, 0.75)\n value = norm1 * norm2 * 4 * pi * gaussian_integral(expo1 + expo2, \n power_of_r + 2)\n return value\n",
"step-4": "import os, sys\nimport numpy as np\nfrom math import exp, sqrt, pi\n\n\ndef factorial(n):\n value = 1\n for i in range(n, 1, -1):\n value *= i\n return value\n\n\ndef double_factorial(n):\n k = 1\n for i in range(n, 1, -2):\n k *= i\n return k\n\n\n<mask token>\n\n\ndef gaussian_integral(alpha, m):\n if int(m / 2) * 2 == m:\n n = int(m / 2)\n value = double_factorial(2 * n - 1) * sqrt(pi) / pow(2, n + 1) / pow(\n alpha, n + 0.5)\n else:\n n = int((m - 1) / 2)\n value = factorial(n) / 2 / pow(alpha, n + 1)\n return value\n\n\ndef overlap_s_gaussians(expo1, expo2, power_of_r):\n norm1 = pow(2 * expo1 / pi, 0.75)\n norm2 = pow(2 * expo2 / pi, 0.75)\n value = norm1 * norm2 * 4 * pi * gaussian_integral(expo1 + expo2, \n power_of_r + 2)\n return value\n",
"step-5": "# coding: utf-8\n\nimport os, sys\nimport numpy as np\nfrom math import exp, sqrt, pi\n\ndef factorial(n):\n value = 1\n for i in range(n,1,-1):\n value *= i\n return value\n \ndef double_factorial(n):\n k = 1\n for i in range(n, 1, -2):\n k *= i\n #print(\"n:\", n, \"double factorial:\", k)\n return k\n\n\"\"\"\\int_0^\\infty r^m e^{-alpha * r^2} dr\"\"\"\ndef gaussian_integral(alpha, m):\n if int(m/2)*2 == m: # even number\n n = int(m/2)\n value = double_factorial(2*n-1) * sqrt(pi) / pow(2, n+1) / pow(alpha, n+0.5)\n else:\n n = int((m-1)/2)\n value = factorial(n) / 2 / pow(alpha, n+1)\n return value\n\ndef overlap_s_gaussians(expo1, expo2, power_of_r):\n norm1 = pow(2*expo1/pi, 0.75)\n norm2 = pow(2*expo2/pi, 0.75)\n value = norm1 * norm2 * 4 * pi * gaussian_integral(expo1+expo2, power_of_r+2)\n return value\n\n",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
"""
默认查询所有
> db.test1000.find()
{ "_id" : ObjectId("5c3559ab648171cce9135dd6"), "name" : "zhangdapeng" }
{ "_id" : ObjectId("5c3559af648171cce9135dd7"), "name" : "zhangdapeng1" }
{ "_id" : ObjectId("5c3559b2648171cce9135dd8"), "name" : "zhangdapeng2" }
{ "_id" : ObjectId("5c3559b4648171cce9135dd9"), "name" : "zhangdapeng3" }
查询匹配参数
> db.test1000.find({'name':'zhangdapeng'})
{ "_id" : ObjectId("5c3559ab648171cce9135dd6"), "name" : "zhangdapeng" }
>
"""
"""
小于$lt
小于等于$lte
大于$gt
大于等于$gte
不等于$ne
查询年龄小于等于18岁的
> db.test1000.find({age:{$lte:18}})
{ "_id" : ObjectId("5c355a61648171cce9135dda"), "name" : "zhangdapeng3", "age" : 18 }
{ "_id" : ObjectId("5c355a69648171cce9135ddc"), "name" : "zhangdapeng3", "age" : 17 }
>
查询年龄大于等于18岁的
> db.test1000.find({age:{$gte:18}})
{ "_id" : ObjectId("5c355a61648171cce9135dda"), "name" : "zhangdapeng3", "age" : 18 }
{ "_id" : ObjectId("5c355a65648171cce9135ddb"), "name" : "zhangdapeng3", "age" : 19 }
范围 $in $nin不在某个范围类
> db.test1000.find({age:{$in:[17,18,19]}})
{ "_id" : ObjectId("5c355a61648171cce9135dda"), "name" : "zhangdapeng3", "age" : 18 }
{ "_id" : ObjectId("5c355a65648171cce9135ddb"), "name" : "zhangdapeng3", "age" : 19 }
{ "_id" : ObjectId("5c355a69648171cce9135ddc"), "name" : "zhangdapeng3", "age" : 17 }
逻辑查询
并且关系直接用,逗号
或关系$or
> db.test1000.find({$or:[{'age':18},{'age':19}]})
{ "_id" : ObjectId("5c355a61648171cce9135dda"), "name" : "zhangdapeng3", "age" : 18 }
{ "_id" : ObjectId("5c355a65648171cce9135ddb"), "name" : "zhangdapeng3", "age" : 19 }
>
正则表达式
直接用两个/正则表达式就行/
> db.test1000.find({'name':/zhangdapeng*/})
{ "_id" : ObjectId("5c3559ab648171cce9135dd6"), "name" : "zhangdapeng" }
{ "_id" : ObjectId("5c3559af648171cce9135dd7"), "name" : "zhangdapeng1" }
{ "_id" : ObjectId("5c3559b2648171cce9135dd8"), "name" : "zhangdapeng2" }
{ "_id" : ObjectId("5c3559b4648171cce9135dd9"), "name" : "zhangdapeng3" }
{ "_id" : ObjectId("5c355a61648171cce9135dda"), "name" : "zhangdapeng3", "age" : 18 }
{ "_id" : ObjectId("5c355a65648171cce9135ddb"), "name" : "zhangdapeng3", "age" : 19 }
{ "_id" : ObjectId("5c355a69648171cce9135ddc"), "name" : "zhangdapeng3", "age" : 17 }
>
限制内容-输出控制
find().limit(数字)
find().skip(数字)
同时使用可以实现翻页
find().skip(5).limit(20)
自定义查询
db.stu.find({
$where:function(){
return this.age>30;
}
})
"""
|
normal
|
{
"blob_id": "d8e0198244c3df77fa0258cc97a55042e36d056f",
"index": 7756,
"step-1": "<mask token>\n",
"step-2": "\"\"\"\n默认查询所有\n > db.test1000.find()\n { \"_id\" : ObjectId(\"5c3559ab648171cce9135dd6\"), \"name\" : \"zhangdapeng\" }\n { \"_id\" : ObjectId(\"5c3559af648171cce9135dd7\"), \"name\" : \"zhangdapeng1\" }\n { \"_id\" : ObjectId(\"5c3559b2648171cce9135dd8\"), \"name\" : \"zhangdapeng2\" }\n { \"_id\" : ObjectId(\"5c3559b4648171cce9135dd9\"), \"name\" : \"zhangdapeng3\" }\n\n查询匹配参数\n > db.test1000.find({'name':'zhangdapeng'})\n { \"_id\" : ObjectId(\"5c3559ab648171cce9135dd6\"), \"name\" : \"zhangdapeng\" }\n >\n\n\"\"\"\n\n\"\"\"\n小于$lt\n小于等于$lte\n大于$gt\n大于等于$gte\n不等于$ne\n\n查询年龄小于等于18岁的\n > db.test1000.find({age:{$lte:18}})\n { \"_id\" : ObjectId(\"5c355a61648171cce9135dda\"), \"name\" : \"zhangdapeng3\", \"age\" : 18 }\n { \"_id\" : ObjectId(\"5c355a69648171cce9135ddc\"), \"name\" : \"zhangdapeng3\", \"age\" : 17 }\n > \n查询年龄大于等于18岁的\n > db.test1000.find({age:{$gte:18}})\n { \"_id\" : ObjectId(\"5c355a61648171cce9135dda\"), \"name\" : \"zhangdapeng3\", \"age\" : 18 }\n { \"_id\" : ObjectId(\"5c355a65648171cce9135ddb\"), \"name\" : \"zhangdapeng3\", \"age\" : 19 }\n\n范围 $in $nin不在某个范围类\n > db.test1000.find({age:{$in:[17,18,19]}})\n { \"_id\" : ObjectId(\"5c355a61648171cce9135dda\"), \"name\" : \"zhangdapeng3\", \"age\" : 18 }\n { \"_id\" : ObjectId(\"5c355a65648171cce9135ddb\"), \"name\" : \"zhangdapeng3\", \"age\" : 19 }\n { \"_id\" : ObjectId(\"5c355a69648171cce9135ddc\"), \"name\" : \"zhangdapeng3\", \"age\" : 17 }\n\n\n逻辑查询\n 并且关系直接用,逗号\n 或关系$or\n > db.test1000.find({$or:[{'age':18},{'age':19}]})\n { \"_id\" : ObjectId(\"5c355a61648171cce9135dda\"), \"name\" : \"zhangdapeng3\", \"age\" : 18 }\n { \"_id\" : ObjectId(\"5c355a65648171cce9135ddb\"), \"name\" : \"zhangdapeng3\", \"age\" : 19 }\n > \n正则表达式\n 直接用两个/正则表达式就行/ \n > db.test1000.find({'name':/zhangdapeng*/})\n { \"_id\" : ObjectId(\"5c3559ab648171cce9135dd6\"), \"name\" : \"zhangdapeng\" }\n { \"_id\" : ObjectId(\"5c3559af648171cce9135dd7\"), \"name\" : \"zhangdapeng1\" }\n { \"_id\" : ObjectId(\"5c3559b2648171cce9135dd8\"), \"name\" : \"zhangdapeng2\" }\n { \"_id\" : ObjectId(\"5c3559b4648171cce9135dd9\"), \"name\" : \"zhangdapeng3\" }\n { \"_id\" : ObjectId(\"5c355a61648171cce9135dda\"), \"name\" : \"zhangdapeng3\", \"age\" : 18 }\n { \"_id\" : ObjectId(\"5c355a65648171cce9135ddb\"), \"name\" : \"zhangdapeng3\", \"age\" : 19 }\n { \"_id\" : ObjectId(\"5c355a69648171cce9135ddc\"), \"name\" : \"zhangdapeng3\", \"age\" : 17 }\n > \n限制内容-输出控制\n find().limit(数字)\n find().skip(数字)\n 同时使用可以实现翻页\n find().skip(5).limit(20)\n \n自定义查询\n db.stu.find({\n $where:function(){\n return this.age>30;\n }\n })\n\"\"\"\n",
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0,
1
]
}
|
[
0,
1
] |
from pathlib import Path
file = Path(__file__).parent / 'input.txt'
Y = 2000000
MAX_X = 4000000
MIN_X = 0
MAX_Y = 4000000
MIN_Y = 0
# file = Path(__file__).parent / 'test_input.txt'
# Y = 10
# MAX_X = 20
# MIN_X = 0
# MAX_Y = 20
# MIN_Y = 0
text = file.read_text().splitlines()
class Beacon():
def __init__(self, pos, sensor) -> None:
self.pos = pos
self.sensor = sensor
def __str__(self) -> str:
return f"B{self.pos}"
def __repr__(self) -> str:
return f"{self}"
def __hash__(self) -> int:
return hash(self.pos)
@property
def x(self):
return self.pos[0]
@property
def y(self):
return self.pos[1]
    def distance_to(self, pos):
        # Manhattan distance; not a property, since it takes an argument
        x, y = pos
        return abs(self.x - x) + abs(self.y - y)
class Sensor():
def __init__(self, pos, beacon) -> None:
self.pos = pos
self.beacon = Beacon(beacon, self)
self.range = self.distance_to(self.beacon.pos)
self.min_x = self.x - self.range
self.min_y = self.y - self.range
self.max_x = self.x + self.range
self.max_y = self.y + self.range
def __str__(self) -> str:
return f"S{self.pos}"
def __repr__(self) -> str:
return f"{self}"
def __hash__(self) -> int:
return hash(self.pos)
@staticmethod
def from_text(text):
text = text.split()
sx, sy = int(text[2][2:-1]), int(text[3][2:-1])
bx, by = int(text[-2][2:-1]), int(text[-1][2:])
return Sensor((sx,sy), (bx,by))
@property
def x(self):
return self.pos[0]
@property
def y(self):
return self.pos[1]
def distance_to(self, pos):
x,y = pos
return abs(self.x - x) + abs(self.y - y)
def covers(self, pos):
return self.distance_to(pos) <= self.range
class Grid():
def __init__(self, sensors, beacons) -> None:
self.sensors = sensors
self.beacons = beacons
@property
def min_x(self):
bx = min([b.x for b in self.beacons.values()])
sx = min([s.min_x for s in self.sensors.values()])
return min(bx, sx)
@property
def max_x(self):
bx = max([b.x for b in self.beacons.values()])
sx = max([s.max_x for s in self.sensors.values()])
return max(bx, sx)
@property
def min_y(self):
by = min([b.y for b in self.beacons.values()])
sy = min([s.min_y for s in self.sensors.values()])
return min(by, sy)
@property
def max_y(self):
by = max([b.y for b in self.beacons.values()])
sy = max([s.max_y for s in self.sensors.values()])
return max(by, sy)
def __getitem__(self, idx):
row = []
for x in range(self.min_x, self.max_x):
pos = (x, idx)
if pos in self.beacons:
row.append((x, self.beacons[pos]))
elif pos in self.sensors:
row.append((x, self.sensors[pos]))
else:
row.append((x, None))
return row
    def __iter__(self):
        self.__row = self.min_y
        return self

    def __next__(self):
        row = None
        if self.__row <= self.max_y:
            row = self[self.__row]
            self.__row += 1
        else:
            raise StopIteration
        return row
def is_covered(self, pos):
for s in self.sensors.values():
if s.covers(pos):
return True
return False
beacons = {}
sensors = {}
for line in text:
s = Sensor.from_text(line)
beacons[s.beacon.pos] = s.beacon
sensors[s.pos] = s
grid = Grid(sensors, beacons)
def print_row(grid, row_idx):
r = ""
for x,v in grid[row_idx]:
if isinstance(v, Beacon):
r += 'B'
elif isinstance(v, Sensor):
r += 'S'
elif grid.is_covered((x,row_idx)):
r += '#'
else:
r += '.'
return r
def count_covered(prow):
count = 0
for c in prow:
if c == '#':
count += 1
return count
print("Part 1:", count_covered(print_row(grid, Y)))
def walk_perimeters(grid):
for sensor in grid.sensors.values():
# walk the perimeter and check if each adjacent position is
# covered. If not, we have a winner
for dx in range(sensor.range + 2):
dy = (sensor.range + 1) - dx
for signx, signy in [(-1,-1),(-1,1),(1,-1),(1,1)]:
x = sensor.x + (dx * signx)
y = sensor.y + (dy * signy)
if not(0 <= x <= MAX_X and 0 <= y <= MAX_Y):
continue
if not grid.is_covered((x,y)):
return x * 4000000 + y
print("Part 2:", walk_perimeters(grid))
|
normal
|
{
"blob_id": "f3a1a926feabcabc870f0a41ae239939c331d09d",
"index": 4106,
"step-1": "<mask token>\n\n\nclass Beacon:\n\n def __init__(self, pos, sensor) ->None:\n self.pos = pos\n self.sensor = sensor\n <mask token>\n\n def __repr__(self) ->str:\n return f'{self}'\n <mask token>\n <mask token>\n\n @property\n def y(self):\n return self.pos[1]\n <mask token>\n\n\nclass Sensor:\n\n def __init__(self, pos, beacon) ->None:\n self.pos = pos\n self.beacon = Beacon(beacon, self)\n self.range = self.distance_to(self.beacon.pos)\n self.min_x = self.x - self.range\n self.min_y = self.y - self.range\n self.max_x = self.x + self.range\n self.max_y = self.y + self.range\n\n def __str__(self) ->str:\n return f'S{self.pos}'\n\n def __repr__(self) ->str:\n return f'{self}'\n\n def __hash__(self) ->int:\n return hash(self.pos)\n\n @staticmethod\n def from_text(text):\n text = text.split()\n sx, sy = int(text[2][2:-1]), int(text[3][2:-1])\n bx, by = int(text[-2][2:-1]), int(text[-1][2:])\n return Sensor((sx, sy), (bx, by))\n\n @property\n def x(self):\n return self.pos[0]\n\n @property\n def y(self):\n return self.pos[1]\n\n def distance_to(self, pos):\n x, y = pos\n return abs(self.x - x) + abs(self.y - y)\n\n def covers(self, pos):\n return self.distance_to(pos) <= self.range\n\n\nclass Grid:\n\n def __init__(self, sensors, beacons) ->None:\n self.sensors = sensors\n self.beacons = beacons\n\n @property\n def min_x(self):\n bx = min([b.x for b in self.beacons.values()])\n sx = min([s.min_x for s in self.sensors.values()])\n return min(bx, sx)\n\n @property\n def max_x(self):\n bx = max([b.x for b in self.beacons.values()])\n sx = max([s.max_x for s in self.sensors.values()])\n return max(bx, sx)\n\n @property\n def min_y(self):\n by = min([b.y for b in self.beacons.values()])\n sy = min([s.min_y for s in self.sensors.values()])\n return min(by, sy)\n\n @property\n def max_y(self):\n by = max([b.y for b in self.beacons.values()])\n sy = max([s.max_y for s in self.sensors.values()])\n return max(by, sy)\n\n def __getitem__(self, idx):\n row = []\n for x in range(self.min_x, self.max_x):\n pos = x, idx\n if pos in self.beacons:\n row.append((x, self.beacons[pos]))\n elif pos in self.sensors:\n row.append((x, self.sensors[pos]))\n else:\n row.append((x, None))\n return row\n\n def __iter__(self):\n self.__row = min(self.ys)\n return self\n\n def __next__(self):\n row = None\n if self.__row <= max(self.ys):\n row = self[self.__row]\n self.__row += 1\n else:\n raise StopIteration\n return row\n\n def is_covered(self, pos):\n for s in self.sensors.values():\n if s.covers(pos):\n return True\n return False\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass Beacon:\n\n def __init__(self, pos, sensor) ->None:\n self.pos = pos\n self.sensor = sensor\n\n def __str__(self) ->str:\n return f'B{self.pos}'\n\n def __repr__(self) ->str:\n return f'{self}'\n\n def __hash__(self) ->int:\n return hash(self.pos)\n\n @property\n def x(self):\n return self.pos[0]\n\n @property\n def y(self):\n return self.pos[1]\n\n @property\n def distance_to(self, pos):\n x, y = pos\n return abs(self.x - x) + (self.y - y)\n\n\nclass Sensor:\n\n def __init__(self, pos, beacon) ->None:\n self.pos = pos\n self.beacon = Beacon(beacon, self)\n self.range = self.distance_to(self.beacon.pos)\n self.min_x = self.x - self.range\n self.min_y = self.y - self.range\n self.max_x = self.x + self.range\n self.max_y = self.y + self.range\n\n def __str__(self) ->str:\n return f'S{self.pos}'\n\n def __repr__(self) ->str:\n return f'{self}'\n\n def __hash__(self) ->int:\n return hash(self.pos)\n\n @staticmethod\n def from_text(text):\n text = text.split()\n sx, sy = int(text[2][2:-1]), int(text[3][2:-1])\n bx, by = int(text[-2][2:-1]), int(text[-1][2:])\n return Sensor((sx, sy), (bx, by))\n\n @property\n def x(self):\n return self.pos[0]\n\n @property\n def y(self):\n return self.pos[1]\n\n def distance_to(self, pos):\n x, y = pos\n return abs(self.x - x) + abs(self.y - y)\n\n def covers(self, pos):\n return self.distance_to(pos) <= self.range\n\n\nclass Grid:\n\n def __init__(self, sensors, beacons) ->None:\n self.sensors = sensors\n self.beacons = beacons\n\n @property\n def min_x(self):\n bx = min([b.x for b in self.beacons.values()])\n sx = min([s.min_x for s in self.sensors.values()])\n return min(bx, sx)\n\n @property\n def max_x(self):\n bx = max([b.x for b in self.beacons.values()])\n sx = max([s.max_x for s in self.sensors.values()])\n return max(bx, sx)\n\n @property\n def min_y(self):\n by = min([b.y for b in self.beacons.values()])\n sy = min([s.min_y for s in self.sensors.values()])\n return min(by, sy)\n\n @property\n def max_y(self):\n by = max([b.y for b in self.beacons.values()])\n sy = max([s.max_y for s in self.sensors.values()])\n return max(by, sy)\n\n def __getitem__(self, idx):\n row = []\n for x in range(self.min_x, self.max_x):\n pos = x, idx\n if pos in self.beacons:\n row.append((x, self.beacons[pos]))\n elif pos in self.sensors:\n row.append((x, self.sensors[pos]))\n else:\n row.append((x, None))\n return row\n\n def __iter__(self):\n self.__row = min(self.ys)\n return self\n\n def __next__(self):\n row = None\n if self.__row <= max(self.ys):\n row = self[self.__row]\n self.__row += 1\n else:\n raise StopIteration\n return row\n\n def is_covered(self, pos):\n for s in self.sensors.values():\n if s.covers(pos):\n return True\n return False\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass Beacon:\n\n def __init__(self, pos, sensor) ->None:\n self.pos = pos\n self.sensor = sensor\n\n def __str__(self) ->str:\n return f'B{self.pos}'\n\n def __repr__(self) ->str:\n return f'{self}'\n\n def __hash__(self) ->int:\n return hash(self.pos)\n\n @property\n def x(self):\n return self.pos[0]\n\n @property\n def y(self):\n return self.pos[1]\n\n @property\n def distance_to(self, pos):\n x, y = pos\n return abs(self.x - x) + (self.y - y)\n\n\nclass Sensor:\n\n def __init__(self, pos, beacon) ->None:\n self.pos = pos\n self.beacon = Beacon(beacon, self)\n self.range = self.distance_to(self.beacon.pos)\n self.min_x = self.x - self.range\n self.min_y = self.y - self.range\n self.max_x = self.x + self.range\n self.max_y = self.y + self.range\n\n def __str__(self) ->str:\n return f'S{self.pos}'\n\n def __repr__(self) ->str:\n return f'{self}'\n\n def __hash__(self) ->int:\n return hash(self.pos)\n\n @staticmethod\n def from_text(text):\n text = text.split()\n sx, sy = int(text[2][2:-1]), int(text[3][2:-1])\n bx, by = int(text[-2][2:-1]), int(text[-1][2:])\n return Sensor((sx, sy), (bx, by))\n\n @property\n def x(self):\n return self.pos[0]\n\n @property\n def y(self):\n return self.pos[1]\n\n def distance_to(self, pos):\n x, y = pos\n return abs(self.x - x) + abs(self.y - y)\n\n def covers(self, pos):\n return self.distance_to(pos) <= self.range\n\n\nclass Grid:\n\n def __init__(self, sensors, beacons) ->None:\n self.sensors = sensors\n self.beacons = beacons\n\n @property\n def min_x(self):\n bx = min([b.x for b in self.beacons.values()])\n sx = min([s.min_x for s in self.sensors.values()])\n return min(bx, sx)\n\n @property\n def max_x(self):\n bx = max([b.x for b in self.beacons.values()])\n sx = max([s.max_x for s in self.sensors.values()])\n return max(bx, sx)\n\n @property\n def min_y(self):\n by = min([b.y for b in self.beacons.values()])\n sy = min([s.min_y for s in self.sensors.values()])\n return min(by, sy)\n\n @property\n def max_y(self):\n by = max([b.y for b in self.beacons.values()])\n sy = max([s.max_y for s in self.sensors.values()])\n return max(by, sy)\n\n def __getitem__(self, idx):\n row = []\n for x in range(self.min_x, self.max_x):\n pos = x, idx\n if pos in self.beacons:\n row.append((x, self.beacons[pos]))\n elif pos in self.sensors:\n row.append((x, self.sensors[pos]))\n else:\n row.append((x, None))\n return row\n\n def __iter__(self):\n self.__row = min(self.ys)\n return self\n\n def __next__(self):\n row = None\n if self.__row <= max(self.ys):\n row = self[self.__row]\n self.__row += 1\n else:\n raise StopIteration\n return row\n\n def is_covered(self, pos):\n for s in self.sensors.values():\n if s.covers(pos):\n return True\n return False\n\n\n<mask token>\n\n\ndef print_row(grid, row_idx):\n r = ''\n for x, v in grid[row_idx]:\n if isinstance(v, Beacon):\n r += 'B'\n elif isinstance(v, Sensor):\n r += 'S'\n elif grid.is_covered((x, row_idx)):\n r += '#'\n else:\n r += '.'\n return r\n\n\n<mask token>\n\n\ndef walk_perimeters(grid):\n for sensor in grid.sensors.values():\n for dx in range(sensor.range + 2):\n dy = sensor.range + 1 - dx\n for signx, signy in [(-1, -1), (-1, 1), (1, -1), (1, 1)]:\n x = sensor.x + dx * signx\n y = sensor.y + dy * signy\n if not (0 <= x <= MAX_X and 0 <= y <= MAX_Y):\n continue\n if not grid.is_covered((x, y)):\n return x * 4000000 + y\n\n\n<mask token>\n",
"step-4": "from pathlib import Path\nfile = Path(__file__).parent / 'input.txt'\nY = 2000000\nMAX_X = 4000000\nMIN_X = 0\nMAX_Y = 4000000\nMIN_Y = 0\ntext = file.read_text().splitlines()\n\n\nclass Beacon:\n\n def __init__(self, pos, sensor) ->None:\n self.pos = pos\n self.sensor = sensor\n\n def __str__(self) ->str:\n return f'B{self.pos}'\n\n def __repr__(self) ->str:\n return f'{self}'\n\n def __hash__(self) ->int:\n return hash(self.pos)\n\n @property\n def x(self):\n return self.pos[0]\n\n @property\n def y(self):\n return self.pos[1]\n\n @property\n def distance_to(self, pos):\n x, y = pos\n return abs(self.x - x) + (self.y - y)\n\n\nclass Sensor:\n\n def __init__(self, pos, beacon) ->None:\n self.pos = pos\n self.beacon = Beacon(beacon, self)\n self.range = self.distance_to(self.beacon.pos)\n self.min_x = self.x - self.range\n self.min_y = self.y - self.range\n self.max_x = self.x + self.range\n self.max_y = self.y + self.range\n\n def __str__(self) ->str:\n return f'S{self.pos}'\n\n def __repr__(self) ->str:\n return f'{self}'\n\n def __hash__(self) ->int:\n return hash(self.pos)\n\n @staticmethod\n def from_text(text):\n text = text.split()\n sx, sy = int(text[2][2:-1]), int(text[3][2:-1])\n bx, by = int(text[-2][2:-1]), int(text[-1][2:])\n return Sensor((sx, sy), (bx, by))\n\n @property\n def x(self):\n return self.pos[0]\n\n @property\n def y(self):\n return self.pos[1]\n\n def distance_to(self, pos):\n x, y = pos\n return abs(self.x - x) + abs(self.y - y)\n\n def covers(self, pos):\n return self.distance_to(pos) <= self.range\n\n\nclass Grid:\n\n def __init__(self, sensors, beacons) ->None:\n self.sensors = sensors\n self.beacons = beacons\n\n @property\n def min_x(self):\n bx = min([b.x for b in self.beacons.values()])\n sx = min([s.min_x for s in self.sensors.values()])\n return min(bx, sx)\n\n @property\n def max_x(self):\n bx = max([b.x for b in self.beacons.values()])\n sx = max([s.max_x for s in self.sensors.values()])\n return max(bx, sx)\n\n @property\n def min_y(self):\n by = min([b.y for b in self.beacons.values()])\n sy = min([s.min_y for s in self.sensors.values()])\n return min(by, sy)\n\n @property\n def max_y(self):\n by = max([b.y for b in self.beacons.values()])\n sy = max([s.max_y for s in self.sensors.values()])\n return max(by, sy)\n\n def __getitem__(self, idx):\n row = []\n for x in range(self.min_x, self.max_x):\n pos = x, idx\n if pos in self.beacons:\n row.append((x, self.beacons[pos]))\n elif pos in self.sensors:\n row.append((x, self.sensors[pos]))\n else:\n row.append((x, None))\n return row\n\n def __iter__(self):\n self.__row = min(self.ys)\n return self\n\n def __next__(self):\n row = None\n if self.__row <= max(self.ys):\n row = self[self.__row]\n self.__row += 1\n else:\n raise StopIteration\n return row\n\n def is_covered(self, pos):\n for s in self.sensors.values():\n if s.covers(pos):\n return True\n return False\n\n\nbeacons = {}\nsensors = {}\nfor line in text:\n s = Sensor.from_text(line)\n beacons[s.beacon.pos] = s.beacon\n sensors[s.pos] = s\ngrid = Grid(sensors, beacons)\n\n\ndef print_row(grid, row_idx):\n r = ''\n for x, v in grid[row_idx]:\n if isinstance(v, Beacon):\n r += 'B'\n elif isinstance(v, Sensor):\n r += 'S'\n elif grid.is_covered((x, row_idx)):\n r += '#'\n else:\n r += '.'\n return r\n\n\ndef count_covered(prow):\n count = 0\n for c in prow:\n if c == '#':\n count += 1\n return count\n\n\nprint('Part 1:', count_covered(print_row(grid, Y)))\n\n\ndef walk_perimeters(grid):\n for sensor in grid.sensors.values():\n for dx in 
range(sensor.range + 2):\n dy = sensor.range + 1 - dx\n for signx, signy in [(-1, -1), (-1, 1), (1, -1), (1, 1)]:\n x = sensor.x + dx * signx\n y = sensor.y + dy * signy\n if not (0 <= x <= MAX_X and 0 <= y <= MAX_Y):\n continue\n if not grid.is_covered((x, y)):\n return x * 4000000 + y\n\n\nprint('Part 2:', walk_perimeters(grid))\n",
"step-5": "from pathlib import Path\n\nfile = Path(__file__).parent / 'input.txt'\nY = 2000000\nMAX_X = 4000000\nMIN_X = 0\nMAX_Y = 4000000\nMIN_Y = 0\n\n# file = Path(__file__).parent / 'test_input.txt'\n# Y = 10\n# MAX_X = 20\n# MIN_X = 0\n# MAX_Y = 20\n# MIN_Y = 0\n\ntext = file.read_text().splitlines()\n\n\nclass Beacon():\n def __init__(self, pos, sensor) -> None:\n self.pos = pos\n self.sensor = sensor\n\n def __str__(self) -> str:\n return f\"B{self.pos}\"\n\n def __repr__(self) -> str:\n return f\"{self}\"\n\n def __hash__(self) -> int:\n return hash(self.pos)\n\n @property\n def x(self):\n return self.pos[0]\n \n @property\n def y(self):\n return self.pos[1]\n \n @property\n def distance_to(self, pos):\n x,y = pos\n return abs(self.x - x) + (self.y - y)\n\nclass Sensor():\n def __init__(self, pos, beacon) -> None:\n self.pos = pos\n self.beacon = Beacon(beacon, self)\n self.range = self.distance_to(self.beacon.pos)\n self.min_x = self.x - self.range\n self.min_y = self.y - self.range\n self.max_x = self.x + self.range\n self.max_y = self.y + self.range\n\n def __str__(self) -> str:\n return f\"S{self.pos}\"\n\n def __repr__(self) -> str:\n return f\"{self}\"\n\n def __hash__(self) -> int:\n return hash(self.pos)\n\n @staticmethod\n def from_text(text):\n text = text.split()\n sx, sy = int(text[2][2:-1]), int(text[3][2:-1])\n bx, by = int(text[-2][2:-1]), int(text[-1][2:])\n return Sensor((sx,sy), (bx,by))\n\n @property\n def x(self):\n return self.pos[0]\n \n @property\n def y(self):\n return self.pos[1]\n\n def distance_to(self, pos):\n x,y = pos\n return abs(self.x - x) + abs(self.y - y)\n\n def covers(self, pos):\n return self.distance_to(pos) <= self.range\n \n\nclass Grid():\n def __init__(self, sensors, beacons) -> None:\n self.sensors = sensors\n self.beacons = beacons\n\n @property\n def min_x(self):\n bx = min([b.x for b in self.beacons.values()])\n sx = min([s.min_x for s in self.sensors.values()])\n return min(bx, sx)\n @property\n def max_x(self):\n bx = max([b.x for b in self.beacons.values()])\n sx = max([s.max_x for s in self.sensors.values()])\n return max(bx, sx)\n @property\n def min_y(self):\n by = min([b.y for b in self.beacons.values()])\n sy = min([s.min_y for s in self.sensors.values()])\n return min(by, sy)\n @property\n def max_y(self):\n by = max([b.y for b in self.beacons.values()])\n sy = max([s.max_y for s in self.sensors.values()])\n return max(by, sy)\n\n def __getitem__(self, idx):\n row = []\n for x in range(self.min_x, self.max_x):\n pos = (x, idx)\n if pos in self.beacons:\n row.append((x, self.beacons[pos]))\n elif pos in self.sensors:\n row.append((x, self.sensors[pos]))\n else:\n row.append((x, None))\n\n return row\n\n def __iter__(self):\n self.__row = min(self.ys)\n return self\n\n def __next__(self):\n row = None\n if self.__row <= max(self.ys):\n row = self[self.__row]\n self.__row += 1\n else:\n raise StopIteration\n\n return row\n\n def is_covered(self, pos):\n for s in self.sensors.values():\n if s.covers(pos):\n return True\n\n return False\n\nbeacons = {}\nsensors = {}\n\nfor line in text:\n s = Sensor.from_text(line)\n beacons[s.beacon.pos] = s.beacon\n sensors[s.pos] = s\n\ngrid = Grid(sensors, beacons)\n\ndef print_row(grid, row_idx):\n r = \"\"\n for x,v in grid[row_idx]:\n if isinstance(v, Beacon):\n r += 'B'\n elif isinstance(v, Sensor):\n r += 'S'\n elif grid.is_covered((x,row_idx)):\n r += '#'\n else:\n r += '.'\n\n return r\n\ndef count_covered(prow):\n count = 0\n for c in prow:\n if c == '#':\n count += 1\n return 
count\n\nprint(\"Part 1:\", count_covered(print_row(grid, Y)))\n\ndef walk_perimeters(grid):\n for sensor in grid.sensors.values():\n # walk the perimeter and check if each adjacent position is \n # covered. If not, we have a winner\n for dx in range(sensor.range + 2):\n dy = (sensor.range + 1) - dx\n for signx, signy in [(-1,-1),(-1,1),(1,-1),(1,1)]:\n x = sensor.x + (dx * signx)\n y = sensor.y + (dy * signy)\n \n if not(0 <= x <= MAX_X and 0 <= y <= MAX_Y):\n continue\n \n if not grid.is_covered((x,y)):\n return x * 4000000 + y\n\n\nprint(\"Part 2:\", walk_perimeters(grid))",
"step-ids": [
24,
28,
30,
34,
35
]
}
|
[
24,
28,
30,
34,
35
] |
import requests
from lxml import etree
from pymongo import MongoClient
from lib.rabbitmq import Rabbit
from lib.log import LogHandler
from lib.proxy_iterator import Proxies
import yaml
import json
import datetime
import re
import time
setting = yaml.safe_load(open('config_local.yaml'))
log = LogHandler('article_consumer')
m = MongoClient(setting['mongo_config']['config_host'], setting['mongo_config']['port'])
m.admin.authenticate(setting['mongo_config']['user_name'],setting['mongo_config']['password'] )
collection = m[setting['mongo_config']['config_db']][setting['mongo_config']['coll_detail']]
clean_coll = m[setting['mongo_config']['config_db']][setting['mongo_config']['clean']]
rabbit = Rabbit(setting['rabbitmq_host'],setting['rabbitmq_port'])
connection = rabbit.connection
class CrawlerDetail:
def __init__(self):
self.proxy = Proxies()
def start_consume(self):
channel = connection.channel()
channel.queue_declare(queue='usual_article')
channel.basic_qos(prefetch_count=1)
channel.basic_consume(self.consume_article_detail_url,
queue='usual_article',
no_ack=False)
channel.start_consuming()
    def clean(self, message):
        """
        Clean the author, post_time and source_detail fields
        :param message:
        :return:
        """
clean = clean_coll.find_one({'source': message['source']})
if clean['post_time'] is not None:
try:
post_time = re.search(clean['post_time'],message['post_time'],re.S|re.M).group(1)
message['post_time'] = post_time
except:
log.info("post_time清洗失败{}".format(message['post_time']))
message['post_time'] = None
if clean['author'] is not None:
try:
author = re.search(clean['author'],message['author']).group(1)
message['author'] = author
except:
log.info("author清洗失败{}".format(message['author']))
message['author'] = None
if clean['source_detail'] is not None:
try:
source_detail = re.search(clean['source_detail'],message['source_detail'],re.S|re.M).group(1)
message['source_detail'] = source_detail
except:
log.info("source_detail清洗失败{}".format(message['source_detail']))
message['source_detail'] = None
return message
    def consume_article_detail_url(self, ch, method, properties, body):
        """
        Parse an article detail page
        :param ch:
        :param method:
        :param properties:
        :param body: JSON-formatted string
        :return:
        """
message = json.loads(body.decode())
        for i in range(10):
            try:
                html = requests.get(message['detail_url'], timeout=10, proxies=next(self.proxy))
                connection.process_data_events()
                if html.status_code == 200:
                    break
            except Exception as e:
                connection.process_data_events()
        else:
            # range(10) never yields i == 10, so the original `if i == 10` check could
            # never fire; the for/else branch runs exactly when no attempt succeeded
            log.error("Failed to fetch article detail page {}".format(message['detail_url']))
            ch.basic_ack(delivery_tag=method.delivery_tag)
            return
try:
con = html.content.decode()
except:
try:
con = html.content.decode('gbk')
except:
                log.error('{} could not be decoded as utf-8 or gbk'.format(message['detail_url']))
ch.basic_ack(delivery_tag=method.delivery_tag)
return
page = etree.HTML(con)
        # Look up the parsing rules (xpath config) for this source's detail pages
detail_config_dict = collection.find_one({'source': message['source']})
if detail_config_dict['body'] is not None:
try:
for pattern in detail_config_dict['body']:
if page.xpath(pattern):
article_body = page.xpath(pattern)[0]
message['body'] = etree.tounicode(article_body)
break
except:
                log.error('xpath expressions failed to extract the article body')
ch.basic_ack(delivery_tag=method.delivery_tag)
return
if detail_config_dict['comment_count'] is not None:
message['comment_count'] = page.xpath(detail_config_dict['comment_count'])[0]
if detail_config_dict['like_count'] is not None:
message['like_count'] = page.xpath(detail_config_dict['like_count'])[0]
if detail_config_dict['read_num'] is not None:
message['read_num'] = page.xpath(detail_config_dict['read_num'])[0]
if detail_config_dict['author'] is not None:
try:
message['author'] = page.xpath(detail_config_dict['author'])[0]
except:
log.info("没有提取到{}作者字段".format(message['detail_url']))
if detail_config_dict['post_time'] is not None:
try:
message['post_time'] = page.xpath(detail_config_dict['post_time'])[0]
except:
log.info("没有提取到{}文章发表时间".format(message['detail_url']))
if detail_config_dict['tag'] is not None:
message['tag'] = page.xpath(detail_config_dict['tag'])[0]
if detail_config_dict['source_detail'] is not None:
try:
message['source_detail'] = page.xpath(detail_config_dict['source_detail'])[0]
except:
log.info("没有提取到{}文章详细来源".format(message['detail_url']))
self.clean(message)
        # Publish to the message queue for body replacement / cleaning
produce_channel = connection.channel()
produce_channel.queue_declare('article_body')
article_text = json.dumps(message)
produce_channel.basic_publish(exchange='',
routing_key='article_body',
body=article_text)
        log.info('{} has been queued for cleaning'.format(message['title']))
ch.basic_ack(delivery_tag=method.delivery_tag)
produce_channel.close()
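# Hedged illustration (not from the original project) of the per-source document
# that clean() expects to find in clean_coll; the regex patterns below are invented
# placeholders, but the keys mirror exactly what clean() reads:
if __name__ == '__main__':
    example_clean_doc = {
        'source': 'example_site',
        'post_time': r'Published:\s*(\d{4}-\d{2}-\d{2})',
        'author': r'By\s+(\S+)',
        'source_detail': r'Source:\s*(\S+)',
    }
    # clean() applies each pattern with re.search and keeps group(1), e.g.:
    assert re.search(example_clean_doc['post_time'], 'Published: 2019-01-09').group(1) == '2019-01-09'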
|
normal
|
{
"blob_id": "cd1d8a73b6958775a212d80b50de74f4b4de18bf",
"index": 6319,
"step-1": "<mask token>\n\n\nclass CrawlerDetail:\n\n def __init__(self):\n self.proxy = Proxies()\n\n def start_consume(self):\n channel = connection.channel()\n channel.queue_declare(queue='usual_article')\n channel.basic_qos(prefetch_count=1)\n channel.basic_consume(self.consume_article_detail_url, queue=\n 'usual_article', no_ack=False)\n channel.start_consuming()\n <mask token>\n\n def consume_article_detail_url(self, ch, method, properties, body):\n \"\"\"\n 文章详情页解析\n :param ch:\n :param method:\n :param properties:\n :param body: json格式字符串\n :return:\n \"\"\"\n message = json.loads(body.decode())\n for i in range(10):\n try:\n html = requests.get(message['detail_url'], timeout=10,\n proxies=next(self.proxy))\n connection.process_data_events()\n if html.status_code == 200:\n break\n except Exception as e:\n connection.process_data_events()\n if i == 10:\n log.error('请求文章详情页{}失败'.format(message['detail_url']))\n ch.basic_ack(delivery_tag=method.delivery_tag)\n try:\n con = html.content.decode()\n except:\n try:\n con = html.content.decode('gbk')\n except:\n log.error('{}utf-8,gbk编码解析失败'.format(message['detail_url']))\n ch.basic_ack(delivery_tag=method.delivery_tag)\n return\n page = etree.HTML(con)\n detail_config_dict = collection.find_one({'source': message['source']})\n if detail_config_dict['body'] is not None:\n try:\n for pattern in detail_config_dict['body']:\n if page.xpath(pattern):\n article_body = page.xpath(pattern)[0]\n message['body'] = etree.tounicode(article_body)\n break\n except:\n log.error('xpath语句未能解析body')\n ch.basic_ack(delivery_tag=method.delivery_tag)\n return\n if detail_config_dict['comment_count'] is not None:\n message['comment_count'] = page.xpath(detail_config_dict[\n 'comment_count'])[0]\n if detail_config_dict['like_count'] is not None:\n message['like_count'] = page.xpath(detail_config_dict['like_count']\n )[0]\n if detail_config_dict['read_num'] is not None:\n message['read_num'] = page.xpath(detail_config_dict['read_num'])[0]\n if detail_config_dict['author'] is not None:\n try:\n message['author'] = page.xpath(detail_config_dict['author'])[0]\n except:\n log.info('没有提取到{}作者字段'.format(message['detail_url']))\n if detail_config_dict['post_time'] is not None:\n try:\n message['post_time'] = page.xpath(detail_config_dict[\n 'post_time'])[0]\n except:\n log.info('没有提取到{}文章发表时间'.format(message['detail_url']))\n if detail_config_dict['tag'] is not None:\n message['tag'] = page.xpath(detail_config_dict['tag'])[0]\n if detail_config_dict['source_detail'] is not None:\n try:\n message['source_detail'] = page.xpath(detail_config_dict[\n 'source_detail'])[0]\n except:\n log.info('没有提取到{}文章详细来源'.format(message['detail_url']))\n self.clean(message)\n produce_channel = connection.channel()\n produce_channel.queue_declare('article_body')\n article_text = json.dumps(message)\n produce_channel.basic_publish(exchange='', routing_key=\n 'article_body', body=article_text)\n log.info('{}已经放入清洗队列'.format(message['title']))\n ch.basic_ack(delivery_tag=method.delivery_tag)\n produce_channel.close()\n",
"step-2": "<mask token>\n\n\nclass CrawlerDetail:\n\n def __init__(self):\n self.proxy = Proxies()\n\n def start_consume(self):\n channel = connection.channel()\n channel.queue_declare(queue='usual_article')\n channel.basic_qos(prefetch_count=1)\n channel.basic_consume(self.consume_article_detail_url, queue=\n 'usual_article', no_ack=False)\n channel.start_consuming()\n\n def clean(self, message):\n \"\"\"\n 作者,发布时间,详细来源字段清洗\n :param message:\n :return:\n \"\"\"\n clean = clean_coll.find_one({'source': message['source']})\n if clean['post_time'] is not None:\n try:\n post_time = re.search(clean['post_time'], message[\n 'post_time'], re.S | re.M).group(1)\n message['post_time'] = post_time\n except:\n log.info('post_time清洗失败{}'.format(message['post_time']))\n message['post_time'] = None\n if clean['author'] is not None:\n try:\n author = re.search(clean['author'], message['author']).group(1)\n message['author'] = author\n except:\n log.info('author清洗失败{}'.format(message['author']))\n message['author'] = None\n if clean['source_detail'] is not None:\n try:\n source_detail = re.search(clean['source_detail'], message[\n 'source_detail'], re.S | re.M).group(1)\n message['source_detail'] = source_detail\n except:\n log.info('source_detail清洗失败{}'.format(message['source_detail'])\n )\n message['source_detail'] = None\n return message\n\n def consume_article_detail_url(self, ch, method, properties, body):\n \"\"\"\n 文章详情页解析\n :param ch:\n :param method:\n :param properties:\n :param body: json格式字符串\n :return:\n \"\"\"\n message = json.loads(body.decode())\n for i in range(10):\n try:\n html = requests.get(message['detail_url'], timeout=10,\n proxies=next(self.proxy))\n connection.process_data_events()\n if html.status_code == 200:\n break\n except Exception as e:\n connection.process_data_events()\n if i == 10:\n log.error('请求文章详情页{}失败'.format(message['detail_url']))\n ch.basic_ack(delivery_tag=method.delivery_tag)\n try:\n con = html.content.decode()\n except:\n try:\n con = html.content.decode('gbk')\n except:\n log.error('{}utf-8,gbk编码解析失败'.format(message['detail_url']))\n ch.basic_ack(delivery_tag=method.delivery_tag)\n return\n page = etree.HTML(con)\n detail_config_dict = collection.find_one({'source': message['source']})\n if detail_config_dict['body'] is not None:\n try:\n for pattern in detail_config_dict['body']:\n if page.xpath(pattern):\n article_body = page.xpath(pattern)[0]\n message['body'] = etree.tounicode(article_body)\n break\n except:\n log.error('xpath语句未能解析body')\n ch.basic_ack(delivery_tag=method.delivery_tag)\n return\n if detail_config_dict['comment_count'] is not None:\n message['comment_count'] = page.xpath(detail_config_dict[\n 'comment_count'])[0]\n if detail_config_dict['like_count'] is not None:\n message['like_count'] = page.xpath(detail_config_dict['like_count']\n )[0]\n if detail_config_dict['read_num'] is not None:\n message['read_num'] = page.xpath(detail_config_dict['read_num'])[0]\n if detail_config_dict['author'] is not None:\n try:\n message['author'] = page.xpath(detail_config_dict['author'])[0]\n except:\n log.info('没有提取到{}作者字段'.format(message['detail_url']))\n if detail_config_dict['post_time'] is not None:\n try:\n message['post_time'] = page.xpath(detail_config_dict[\n 'post_time'])[0]\n except:\n log.info('没有提取到{}文章发表时间'.format(message['detail_url']))\n if detail_config_dict['tag'] is not None:\n message['tag'] = page.xpath(detail_config_dict['tag'])[0]\n if detail_config_dict['source_detail'] is not None:\n try:\n message['source_detail'] = 
page.xpath(detail_config_dict[\n 'source_detail'])[0]\n except:\n log.info('没有提取到{}文章详细来源'.format(message['detail_url']))\n self.clean(message)\n produce_channel = connection.channel()\n produce_channel.queue_declare('article_body')\n article_text = json.dumps(message)\n produce_channel.basic_publish(exchange='', routing_key=\n 'article_body', body=article_text)\n log.info('{}已经放入清洗队列'.format(message['title']))\n ch.basic_ack(delivery_tag=method.delivery_tag)\n produce_channel.close()\n",
"step-3": "<mask token>\nm.admin.authenticate(setting['mongo_config']['user_name'], setting[\n 'mongo_config']['password'])\n<mask token>\n\n\nclass CrawlerDetail:\n\n def __init__(self):\n self.proxy = Proxies()\n\n def start_consume(self):\n channel = connection.channel()\n channel.queue_declare(queue='usual_article')\n channel.basic_qos(prefetch_count=1)\n channel.basic_consume(self.consume_article_detail_url, queue=\n 'usual_article', no_ack=False)\n channel.start_consuming()\n\n def clean(self, message):\n \"\"\"\n 作者,发布时间,详细来源字段清洗\n :param message:\n :return:\n \"\"\"\n clean = clean_coll.find_one({'source': message['source']})\n if clean['post_time'] is not None:\n try:\n post_time = re.search(clean['post_time'], message[\n 'post_time'], re.S | re.M).group(1)\n message['post_time'] = post_time\n except:\n log.info('post_time清洗失败{}'.format(message['post_time']))\n message['post_time'] = None\n if clean['author'] is not None:\n try:\n author = re.search(clean['author'], message['author']).group(1)\n message['author'] = author\n except:\n log.info('author清洗失败{}'.format(message['author']))\n message['author'] = None\n if clean['source_detail'] is not None:\n try:\n source_detail = re.search(clean['source_detail'], message[\n 'source_detail'], re.S | re.M).group(1)\n message['source_detail'] = source_detail\n except:\n log.info('source_detail清洗失败{}'.format(message['source_detail'])\n )\n message['source_detail'] = None\n return message\n\n def consume_article_detail_url(self, ch, method, properties, body):\n \"\"\"\n 文章详情页解析\n :param ch:\n :param method:\n :param properties:\n :param body: json格式字符串\n :return:\n \"\"\"\n message = json.loads(body.decode())\n for i in range(10):\n try:\n html = requests.get(message['detail_url'], timeout=10,\n proxies=next(self.proxy))\n connection.process_data_events()\n if html.status_code == 200:\n break\n except Exception as e:\n connection.process_data_events()\n if i == 10:\n log.error('请求文章详情页{}失败'.format(message['detail_url']))\n ch.basic_ack(delivery_tag=method.delivery_tag)\n try:\n con = html.content.decode()\n except:\n try:\n con = html.content.decode('gbk')\n except:\n log.error('{}utf-8,gbk编码解析失败'.format(message['detail_url']))\n ch.basic_ack(delivery_tag=method.delivery_tag)\n return\n page = etree.HTML(con)\n detail_config_dict = collection.find_one({'source': message['source']})\n if detail_config_dict['body'] is not None:\n try:\n for pattern in detail_config_dict['body']:\n if page.xpath(pattern):\n article_body = page.xpath(pattern)[0]\n message['body'] = etree.tounicode(article_body)\n break\n except:\n log.error('xpath语句未能解析body')\n ch.basic_ack(delivery_tag=method.delivery_tag)\n return\n if detail_config_dict['comment_count'] is not None:\n message['comment_count'] = page.xpath(detail_config_dict[\n 'comment_count'])[0]\n if detail_config_dict['like_count'] is not None:\n message['like_count'] = page.xpath(detail_config_dict['like_count']\n )[0]\n if detail_config_dict['read_num'] is not None:\n message['read_num'] = page.xpath(detail_config_dict['read_num'])[0]\n if detail_config_dict['author'] is not None:\n try:\n message['author'] = page.xpath(detail_config_dict['author'])[0]\n except:\n log.info('没有提取到{}作者字段'.format(message['detail_url']))\n if detail_config_dict['post_time'] is not None:\n try:\n message['post_time'] = page.xpath(detail_config_dict[\n 'post_time'])[0]\n except:\n log.info('没有提取到{}文章发表时间'.format(message['detail_url']))\n if detail_config_dict['tag'] is not None:\n message['tag'] = 
page.xpath(detail_config_dict['tag'])[0]\n if detail_config_dict['source_detail'] is not None:\n try:\n message['source_detail'] = page.xpath(detail_config_dict[\n 'source_detail'])[0]\n except:\n log.info('没有提取到{}文章详细来源'.format(message['detail_url']))\n self.clean(message)\n produce_channel = connection.channel()\n produce_channel.queue_declare('article_body')\n article_text = json.dumps(message)\n produce_channel.basic_publish(exchange='', routing_key=\n 'article_body', body=article_text)\n log.info('{}已经放入清洗队列'.format(message['title']))\n ch.basic_ack(delivery_tag=method.delivery_tag)\n produce_channel.close()\n",
"step-4": "import requests\nfrom lxml import etree\nfrom pymongo import MongoClient\nfrom lib.rabbitmq import Rabbit\nfrom lib.log import LogHandler\nfrom lib.proxy_iterator import Proxies\nimport yaml\nimport json\nimport datetime\nimport re\nimport time\nsetting = yaml.load(open('config_local.yaml'))\nlog = LogHandler('article_consumer')\nm = MongoClient(setting['mongo_config']['config_host'], setting[\n 'mongo_config']['port'])\nm.admin.authenticate(setting['mongo_config']['user_name'], setting[\n 'mongo_config']['password'])\ncollection = m[setting['mongo_config']['config_db']][setting['mongo_config'\n ]['coll_detail']]\nclean_coll = m[setting['mongo_config']['config_db']][setting['mongo_config'\n ]['clean']]\nrabbit = Rabbit(setting['rabbitmq_host'], setting['rabbitmq_port'])\nconnection = rabbit.connection\n\n\nclass CrawlerDetail:\n\n def __init__(self):\n self.proxy = Proxies()\n\n def start_consume(self):\n channel = connection.channel()\n channel.queue_declare(queue='usual_article')\n channel.basic_qos(prefetch_count=1)\n channel.basic_consume(self.consume_article_detail_url, queue=\n 'usual_article', no_ack=False)\n channel.start_consuming()\n\n def clean(self, message):\n \"\"\"\n 作者,发布时间,详细来源字段清洗\n :param message:\n :return:\n \"\"\"\n clean = clean_coll.find_one({'source': message['source']})\n if clean['post_time'] is not None:\n try:\n post_time = re.search(clean['post_time'], message[\n 'post_time'], re.S | re.M).group(1)\n message['post_time'] = post_time\n except:\n log.info('post_time清洗失败{}'.format(message['post_time']))\n message['post_time'] = None\n if clean['author'] is not None:\n try:\n author = re.search(clean['author'], message['author']).group(1)\n message['author'] = author\n except:\n log.info('author清洗失败{}'.format(message['author']))\n message['author'] = None\n if clean['source_detail'] is not None:\n try:\n source_detail = re.search(clean['source_detail'], message[\n 'source_detail'], re.S | re.M).group(1)\n message['source_detail'] = source_detail\n except:\n log.info('source_detail清洗失败{}'.format(message['source_detail'])\n )\n message['source_detail'] = None\n return message\n\n def consume_article_detail_url(self, ch, method, properties, body):\n \"\"\"\n 文章详情页解析\n :param ch:\n :param method:\n :param properties:\n :param body: json格式字符串\n :return:\n \"\"\"\n message = json.loads(body.decode())\n for i in range(10):\n try:\n html = requests.get(message['detail_url'], timeout=10,\n proxies=next(self.proxy))\n connection.process_data_events()\n if html.status_code == 200:\n break\n except Exception as e:\n connection.process_data_events()\n if i == 10:\n log.error('请求文章详情页{}失败'.format(message['detail_url']))\n ch.basic_ack(delivery_tag=method.delivery_tag)\n try:\n con = html.content.decode()\n except:\n try:\n con = html.content.decode('gbk')\n except:\n log.error('{}utf-8,gbk编码解析失败'.format(message['detail_url']))\n ch.basic_ack(delivery_tag=method.delivery_tag)\n return\n page = etree.HTML(con)\n detail_config_dict = collection.find_one({'source': message['source']})\n if detail_config_dict['body'] is not None:\n try:\n for pattern in detail_config_dict['body']:\n if page.xpath(pattern):\n article_body = page.xpath(pattern)[0]\n message['body'] = etree.tounicode(article_body)\n break\n except:\n log.error('xpath语句未能解析body')\n ch.basic_ack(delivery_tag=method.delivery_tag)\n return\n if detail_config_dict['comment_count'] is not None:\n message['comment_count'] = page.xpath(detail_config_dict[\n 'comment_count'])[0]\n if detail_config_dict['like_count'] 
is not None:\n message['like_count'] = page.xpath(detail_config_dict['like_count']\n )[0]\n if detail_config_dict['read_num'] is not None:\n message['read_num'] = page.xpath(detail_config_dict['read_num'])[0]\n if detail_config_dict['author'] is not None:\n try:\n message['author'] = page.xpath(detail_config_dict['author'])[0]\n except:\n log.info('没有提取到{}作者字段'.format(message['detail_url']))\n if detail_config_dict['post_time'] is not None:\n try:\n message['post_time'] = page.xpath(detail_config_dict[\n 'post_time'])[0]\n except:\n log.info('没有提取到{}文章发表时间'.format(message['detail_url']))\n if detail_config_dict['tag'] is not None:\n message['tag'] = page.xpath(detail_config_dict['tag'])[0]\n if detail_config_dict['source_detail'] is not None:\n try:\n message['source_detail'] = page.xpath(detail_config_dict[\n 'source_detail'])[0]\n except:\n log.info('没有提取到{}文章详细来源'.format(message['detail_url']))\n self.clean(message)\n produce_channel = connection.channel()\n produce_channel.queue_declare('article_body')\n article_text = json.dumps(message)\n produce_channel.basic_publish(exchange='', routing_key=\n 'article_body', body=article_text)\n log.info('{}已经放入清洗队列'.format(message['title']))\n ch.basic_ack(delivery_tag=method.delivery_tag)\n produce_channel.close()\n",
"step-5": "import requests\nfrom lxml import etree\nfrom pymongo import MongoClient\nfrom lib.rabbitmq import Rabbit\nfrom lib.log import LogHandler\nfrom lib.proxy_iterator import Proxies\nimport yaml\nimport json\nimport datetime\nimport re\nimport time\n\n\nsetting = yaml.load(open('config_local.yaml'))\nlog = LogHandler('article_consumer')\nm = MongoClient(setting['mongo_config']['config_host'], setting['mongo_config']['port'])\nm.admin.authenticate(setting['mongo_config']['user_name'],setting['mongo_config']['password'] )\ncollection = m[setting['mongo_config']['config_db']][setting['mongo_config']['coll_detail']]\nclean_coll = m[setting['mongo_config']['config_db']][setting['mongo_config']['clean']]\nrabbit = Rabbit(setting['rabbitmq_host'],setting['rabbitmq_port'])\nconnection = rabbit.connection\n\n\nclass CrawlerDetail:\n\n def __init__(self):\n self.proxy = Proxies()\n\n def start_consume(self):\n channel = connection.channel()\n channel.queue_declare(queue='usual_article')\n channel.basic_qos(prefetch_count=1)\n channel.basic_consume(self.consume_article_detail_url,\n queue='usual_article',\n no_ack=False)\n channel.start_consuming()\n\n def clean(self,message):\n \"\"\"\n 作者,发布时间,详细来源字段清洗\n :param message:\n :return:\n \"\"\"\n clean = clean_coll.find_one({'source': message['source']})\n if clean['post_time'] is not None:\n try:\n post_time = re.search(clean['post_time'],message['post_time'],re.S|re.M).group(1)\n message['post_time'] = post_time\n except:\n log.info(\"post_time清洗失败{}\".format(message['post_time']))\n message['post_time'] = None\n if clean['author'] is not None:\n try:\n author = re.search(clean['author'],message['author']).group(1)\n message['author'] = author\n except:\n log.info(\"author清洗失败{}\".format(message['author']))\n message['author'] = None\n\n if clean['source_detail'] is not None:\n try:\n source_detail = re.search(clean['source_detail'],message['source_detail'],re.S|re.M).group(1)\n message['source_detail'] = source_detail\n except:\n log.info(\"source_detail清洗失败{}\".format(message['source_detail']))\n message['source_detail'] = None\n\n return message\n\n\n def consume_article_detail_url(self,ch, method, properties, body):\n \"\"\"\n 文章详情页解析\n :param ch:\n :param method:\n :param properties:\n :param body: json格式字符串\n :return:\n \"\"\"\n message = json.loads(body.decode())\n for i in range(10):\n try:\n html = requests.get(message['detail_url'],timeout=10,proxies=next(self.proxy))\n connection.process_data_events()\n if html.status_code == 200:\n break\n except Exception as e:\n connection.process_data_events()\n if i == 10:\n log.error(\"请求文章详情页{}失败\".format(message['detail_url']))\n ch.basic_ack(delivery_tag=method.delivery_tag)\n try:\n con = html.content.decode()\n except:\n try:\n con = html.content.decode('gbk')\n except:\n log.error('{}utf-8,gbk编码解析失败'.format(message['detail_url']))\n ch.basic_ack(delivery_tag=method.delivery_tag)\n return\n page = etree.HTML(con)\n\n # 获取详情页的解析方式\n detail_config_dict = collection.find_one({'source': message['source']})\n\n if detail_config_dict['body'] is not None:\n try:\n for pattern in detail_config_dict['body']:\n if page.xpath(pattern):\n article_body = page.xpath(pattern)[0]\n message['body'] = etree.tounicode(article_body)\n break\n except:\n log.error('xpath语句未能解析body')\n ch.basic_ack(delivery_tag=method.delivery_tag)\n return\n if detail_config_dict['comment_count'] is not None:\n message['comment_count'] = page.xpath(detail_config_dict['comment_count'])[0]\n if detail_config_dict['like_count'] is 
not None:\n message['like_count'] = page.xpath(detail_config_dict['like_count'])[0]\n if detail_config_dict['read_num'] is not None:\n message['read_num'] = page.xpath(detail_config_dict['read_num'])[0]\n if detail_config_dict['author'] is not None:\n try:\n message['author'] = page.xpath(detail_config_dict['author'])[0]\n except:\n log.info(\"没有提取到{}作者字段\".format(message['detail_url']))\n if detail_config_dict['post_time'] is not None:\n try:\n message['post_time'] = page.xpath(detail_config_dict['post_time'])[0]\n except:\n log.info(\"没有提取到{}文章发表时间\".format(message['detail_url']))\n if detail_config_dict['tag'] is not None:\n message['tag'] = page.xpath(detail_config_dict['tag'])[0]\n if detail_config_dict['source_detail'] is not None:\n try:\n message['source_detail'] = page.xpath(detail_config_dict['source_detail'])[0]\n except:\n log.info(\"没有提取到{}文章详细来源\".format(message['detail_url']))\n\n self.clean(message)\n\n # 放入消息队列做正文替换清洗\n produce_channel = connection.channel()\n produce_channel.queue_declare('article_body')\n article_text = json.dumps(message)\n produce_channel.basic_publish(exchange='',\n routing_key='article_body',\n body=article_text)\n log.info('{}已经放入清洗队列'.format(message['title']))\n ch.basic_ack(delivery_tag=method.delivery_tag)\n produce_channel.close()",
"step-ids": [
4,
5,
6,
8,
9
]
}
|
[
4,
5,
6,
8,
9
] |
import scraperwiki, lxml.html, urllib2, re
from datetime import datetime
#html = scraperwiki.scrape("http://www.public.health.wa.gov.au/2/1035/2/publication_of_names_of_offenders_list.pm")
doc = lxml.html.parse(urllib2.urlopen("http://www.public.health.wa.gov.au/2/1035/2/publication_of_names_of_offenders_list.pm"))
root = doc.getroot()
#select the table that contains the offenders, ignoring the first one that contains the header row
for tr in root.xpath("//div[@id='verdiSection10']/div/div/table/tbody/tr")[1:]:
data = {
'conviction_date': datetime.strptime(
re.match("(\d+/\d+/\d+)", tr[0].text_content().strip()).group(1),
"%d/%m/%Y"), #sometimes they include two dates in the entry, so we'll have to grab the first (damnit)
'business_name': tr[1].text_content().strip(),
'business_address': tr[2].text_content().strip(),
'convicted_name': tr[3].text_content().strip(),
'agency': tr[4].text_content().strip(),
'pdf': tr[5].xpath(".//a")[0].get("href")
}
scraperwiki.sqlite.save(unique_keys=['pdf'], data=data)
|
normal
|
{
"blob_id": "e870900249b121f2416d7be543752ebf6392b6be",
"index": 6868,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nfor tr in root.xpath(\"//div[@id='verdiSection10']/div/div/table/tbody/tr\")[1:]:\n data = {'conviction_date': datetime.strptime(re.match(\n '(\\\\d+/\\\\d+/\\\\d+)', tr[0].text_content().strip()).group(1),\n '%d/%m/%Y'), 'business_name': tr[1].text_content().strip(),\n 'business_address': tr[2].text_content().strip(), 'convicted_name':\n tr[3].text_content().strip(), 'agency': tr[4].text_content().strip(\n ), 'pdf': tr[5].xpath('.//a')[0].get('href')}\n scraperwiki.sqlite.save(unique_keys=['pdf'], data=data)\n<mask token>\nfor tr in root.xpath(\"//div[@id='verdiSection10']/div/div/table/tbody/tr\")[1:]:\n data = {'conviction_date': datetime.strptime(re.match(\n '(\\\\d+/\\\\d+/\\\\d+)', tr[0].text_content().strip()).group(1),\n '%d/%m/%Y'), 'business_name': tr[1].text_content().strip(),\n 'business_address': tr[2].text_content().strip(), 'convicted_name':\n tr[3].text_content().strip(), 'agency': tr[4].text_content().strip(\n ), 'pdf': tr[5].xpath('.//a')[0].get('href')}\n scraperwiki.sqlite.save(unique_keys=['pdf'], data=data)\n",
"step-3": "<mask token>\ndoc = lxml.html.parse(urllib2.urlopen(\n 'http://www.public.health.wa.gov.au/2/1035/2/publication_of_names_of_offenders_list.pm'\n ))\nroot = doc.getroot()\nfor tr in root.xpath(\"//div[@id='verdiSection10']/div/div/table/tbody/tr\")[1:]:\n data = {'conviction_date': datetime.strptime(re.match(\n '(\\\\d+/\\\\d+/\\\\d+)', tr[0].text_content().strip()).group(1),\n '%d/%m/%Y'), 'business_name': tr[1].text_content().strip(),\n 'business_address': tr[2].text_content().strip(), 'convicted_name':\n tr[3].text_content().strip(), 'agency': tr[4].text_content().strip(\n ), 'pdf': tr[5].xpath('.//a')[0].get('href')}\n scraperwiki.sqlite.save(unique_keys=['pdf'], data=data)\n<mask token>\ndoc = lxml.html.parse(urllib2.urlopen(\n 'http://www.public.health.wa.gov.au/2/1035/2/publication_of_names_of_offenders_list.pm'\n ))\nroot = doc.getroot()\nfor tr in root.xpath(\"//div[@id='verdiSection10']/div/div/table/tbody/tr\")[1:]:\n data = {'conviction_date': datetime.strptime(re.match(\n '(\\\\d+/\\\\d+/\\\\d+)', tr[0].text_content().strip()).group(1),\n '%d/%m/%Y'), 'business_name': tr[1].text_content().strip(),\n 'business_address': tr[2].text_content().strip(), 'convicted_name':\n tr[3].text_content().strip(), 'agency': tr[4].text_content().strip(\n ), 'pdf': tr[5].xpath('.//a')[0].get('href')}\n scraperwiki.sqlite.save(unique_keys=['pdf'], data=data)\n",
"step-4": "import scraperwiki, lxml.html, urllib2, re\nfrom datetime import datetime\ndoc = lxml.html.parse(urllib2.urlopen(\n 'http://www.public.health.wa.gov.au/2/1035/2/publication_of_names_of_offenders_list.pm'\n ))\nroot = doc.getroot()\nfor tr in root.xpath(\"//div[@id='verdiSection10']/div/div/table/tbody/tr\")[1:]:\n data = {'conviction_date': datetime.strptime(re.match(\n '(\\\\d+/\\\\d+/\\\\d+)', tr[0].text_content().strip()).group(1),\n '%d/%m/%Y'), 'business_name': tr[1].text_content().strip(),\n 'business_address': tr[2].text_content().strip(), 'convicted_name':\n tr[3].text_content().strip(), 'agency': tr[4].text_content().strip(\n ), 'pdf': tr[5].xpath('.//a')[0].get('href')}\n scraperwiki.sqlite.save(unique_keys=['pdf'], data=data)\nimport scraperwiki, lxml.html, urllib2, re\nfrom datetime import datetime\ndoc = lxml.html.parse(urllib2.urlopen(\n 'http://www.public.health.wa.gov.au/2/1035/2/publication_of_names_of_offenders_list.pm'\n ))\nroot = doc.getroot()\nfor tr in root.xpath(\"//div[@id='verdiSection10']/div/div/table/tbody/tr\")[1:]:\n data = {'conviction_date': datetime.strptime(re.match(\n '(\\\\d+/\\\\d+/\\\\d+)', tr[0].text_content().strip()).group(1),\n '%d/%m/%Y'), 'business_name': tr[1].text_content().strip(),\n 'business_address': tr[2].text_content().strip(), 'convicted_name':\n tr[3].text_content().strip(), 'agency': tr[4].text_content().strip(\n ), 'pdf': tr[5].xpath('.//a')[0].get('href')}\n scraperwiki.sqlite.save(unique_keys=['pdf'], data=data)\n",
"step-5": "import scraperwiki, lxml.html, urllib2, re\nfrom datetime import datetime\n\n#html = scraperwiki.scrape(\"http://www.public.health.wa.gov.au/2/1035/2/publication_of_names_of_offenders_list.pm\")\ndoc = lxml.html.parse(urllib2.urlopen(\"http://www.public.health.wa.gov.au/2/1035/2/publication_of_names_of_offenders_list.pm\"))\nroot = doc.getroot()\n\n#select the table that contains the offenders, ignoring the first one that contains the header row\nfor tr in root.xpath(\"//div[@id='verdiSection10']/div/div/table/tbody/tr\")[1:]:\n data = {\n 'conviction_date': datetime.strptime(\n re.match(\"(\\d+/\\d+/\\d+)\", tr[0].text_content().strip()).group(1),\n \"%d/%m/%Y\"), #sometimes they include two dates in the entry, so we'll have to grab the first (damnit)\n 'business_name': tr[1].text_content().strip(),\n 'business_address': tr[2].text_content().strip(),\n 'convicted_name': tr[3].text_content().strip(),\n 'agency': tr[4].text_content().strip(),\n 'pdf': tr[5].xpath(\".//a\")[0].get(\"href\")\n }\n \n scraperwiki.sqlite.save(unique_keys=['pdf'], data=data)\nimport scraperwiki, lxml.html, urllib2, re\nfrom datetime import datetime\n\n#html = scraperwiki.scrape(\"http://www.public.health.wa.gov.au/2/1035/2/publication_of_names_of_offenders_list.pm\")\ndoc = lxml.html.parse(urllib2.urlopen(\"http://www.public.health.wa.gov.au/2/1035/2/publication_of_names_of_offenders_list.pm\"))\nroot = doc.getroot()\n\n#select the table that contains the offenders, ignoring the first one that contains the header row\nfor tr in root.xpath(\"//div[@id='verdiSection10']/div/div/table/tbody/tr\")[1:]:\n data = {\n 'conviction_date': datetime.strptime(\n re.match(\"(\\d+/\\d+/\\d+)\", tr[0].text_content().strip()).group(1),\n \"%d/%m/%Y\"), #sometimes they include two dates in the entry, so we'll have to grab the first (damnit)\n 'business_name': tr[1].text_content().strip(),\n 'business_address': tr[2].text_content().strip(),\n 'convicted_name': tr[3].text_content().strip(),\n 'agency': tr[4].text_content().strip(),\n 'pdf': tr[5].xpath(\".//a\")[0].get(\"href\")\n }\n \n scraperwiki.sqlite.save(unique_keys=['pdf'], data=data)\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
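Editor's sketch for the scraper above: re.match() returns None when a row's first cell has no leading date, which would raise AttributeError on .group(1). A minimal defensive parse, assuming the same column layout (the helper name parse_conviction_date is hypothetical), could look like this:

import re
from datetime import datetime

def parse_conviction_date(cell_text):
    # Return a datetime for the first d/m/Y date in the cell, or None if absent.
    match = re.match(r"(\d+/\d+/\d+)", cell_text.strip())
    if match is None:
        return None
    return datetime.strptime(match.group(1), "%d/%m/%Y")

print(parse_conviction_date("15/03/2011 and 16/03/2011"))  # 2011-03-15 00:00:00
print(parse_conviction_date("no date listed"))             # None
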
# -*- coding: UTF-8 -*-
import lava
from lava.api.constants.vk import QueueType
from lava.api.device import Device
from lava.api.util import Destroyable
__all__ = ["Session"]
sessions = set()
class Session(Destroyable):
def __init__(self, physical_device, queue_index=None):
super(Session, self).__init__()
        self.instance = lava.instance()  # the validation level might have been changed
if physical_device not in lava.devices():
raise RuntimeError("Provided invalid / outdated device object")
        self.queue_index = queue_index if queue_index is not None else physical_device.get_queue_indices(QueueType.COMPUTE)[0]
self.device = Device(physical_device, [(QueueType.COMPUTE, self.queue_index)],
validation_lvl=lava.VALIDATION_LEVEL)
self.buffers = set()
self.shaders = set()
self.stages = set()
sessions.add(self)
def _destroy(self):
for stage in self.stages:
stage.destroy()
for shader in self.shaders:
shader.destroy()
for buffer in self.buffers:
buffer.destroy()
self.device.destroy()
def register_buffer(self, buffer):
self.buffers.add(buffer)
def register_shader(self, shader):
self.shaders.add(shader)
def register_stage(self, stage):
self.stages.add(stage)
|
normal
|
{
"blob_id": "193dcf7bd658f88afe0a1f2fa28605f262e45bc2",
"index": 1554,
"step-1": "<mask token>\n\n\nclass Session(Destroyable):\n\n def __init__(self, physical_device, queue_index=None):\n super(Session, self).__init__()\n self.instance = lava.instance()\n if physical_device not in lava.devices():\n raise RuntimeError('Provided invalid / outdated device object')\n self.queue_index = queue_index or physical_device.get_queue_indices(\n QueueType.COMPUTE)[0]\n self.device = Device(physical_device, [(QueueType.COMPUTE, self.\n queue_index)], validation_lvl=lava.VALIDATION_LEVEL)\n self.buffers = set()\n self.shaders = set()\n self.stages = set()\n sessions.add(self)\n\n def _destroy(self):\n for stage in self.stages:\n stage.destroy()\n for shader in self.shaders:\n shader.destroy()\n for buffer in self.buffers:\n buffer.destroy()\n self.device.destroy()\n\n def register_buffer(self, buffer):\n self.buffers.add(buffer)\n <mask token>\n\n def register_stage(self, stage):\n self.stages.add(stage)\n",
"step-2": "<mask token>\n\n\nclass Session(Destroyable):\n\n def __init__(self, physical_device, queue_index=None):\n super(Session, self).__init__()\n self.instance = lava.instance()\n if physical_device not in lava.devices():\n raise RuntimeError('Provided invalid / outdated device object')\n self.queue_index = queue_index or physical_device.get_queue_indices(\n QueueType.COMPUTE)[0]\n self.device = Device(physical_device, [(QueueType.COMPUTE, self.\n queue_index)], validation_lvl=lava.VALIDATION_LEVEL)\n self.buffers = set()\n self.shaders = set()\n self.stages = set()\n sessions.add(self)\n\n def _destroy(self):\n for stage in self.stages:\n stage.destroy()\n for shader in self.shaders:\n shader.destroy()\n for buffer in self.buffers:\n buffer.destroy()\n self.device.destroy()\n\n def register_buffer(self, buffer):\n self.buffers.add(buffer)\n\n def register_shader(self, shader):\n self.shaders.add(shader)\n\n def register_stage(self, stage):\n self.stages.add(stage)\n",
"step-3": "<mask token>\n__all__ = ['Session']\nsessions = set()\n\n\nclass Session(Destroyable):\n\n def __init__(self, physical_device, queue_index=None):\n super(Session, self).__init__()\n self.instance = lava.instance()\n if physical_device not in lava.devices():\n raise RuntimeError('Provided invalid / outdated device object')\n self.queue_index = queue_index or physical_device.get_queue_indices(\n QueueType.COMPUTE)[0]\n self.device = Device(physical_device, [(QueueType.COMPUTE, self.\n queue_index)], validation_lvl=lava.VALIDATION_LEVEL)\n self.buffers = set()\n self.shaders = set()\n self.stages = set()\n sessions.add(self)\n\n def _destroy(self):\n for stage in self.stages:\n stage.destroy()\n for shader in self.shaders:\n shader.destroy()\n for buffer in self.buffers:\n buffer.destroy()\n self.device.destroy()\n\n def register_buffer(self, buffer):\n self.buffers.add(buffer)\n\n def register_shader(self, shader):\n self.shaders.add(shader)\n\n def register_stage(self, stage):\n self.stages.add(stage)\n",
"step-4": "import lava\nfrom lava.api.constants.vk import QueueType\nfrom lava.api.device import Device\nfrom lava.api.util import Destroyable\n__all__ = ['Session']\nsessions = set()\n\n\nclass Session(Destroyable):\n\n def __init__(self, physical_device, queue_index=None):\n super(Session, self).__init__()\n self.instance = lava.instance()\n if physical_device not in lava.devices():\n raise RuntimeError('Provided invalid / outdated device object')\n self.queue_index = queue_index or physical_device.get_queue_indices(\n QueueType.COMPUTE)[0]\n self.device = Device(physical_device, [(QueueType.COMPUTE, self.\n queue_index)], validation_lvl=lava.VALIDATION_LEVEL)\n self.buffers = set()\n self.shaders = set()\n self.stages = set()\n sessions.add(self)\n\n def _destroy(self):\n for stage in self.stages:\n stage.destroy()\n for shader in self.shaders:\n shader.destroy()\n for buffer in self.buffers:\n buffer.destroy()\n self.device.destroy()\n\n def register_buffer(self, buffer):\n self.buffers.add(buffer)\n\n def register_shader(self, shader):\n self.shaders.add(shader)\n\n def register_stage(self, stage):\n self.stages.add(stage)\n",
"step-5": "# -*- coding: UTF-8 -*-\n\nimport lava\nfrom lava.api.constants.vk import QueueType\nfrom lava.api.device import Device\nfrom lava.api.util import Destroyable\n\n__all__ = [\"Session\"]\n\nsessions = set()\n\n\nclass Session(Destroyable):\n\n def __init__(self, physical_device, queue_index=None):\n super(Session, self).__init__()\n\n self.instance = lava.instance() # validation level might has been changed\n if physical_device not in lava.devices():\n raise RuntimeError(\"Provided invalid / outdated device object\")\n\n self.queue_index = queue_index or physical_device.get_queue_indices(QueueType.COMPUTE)[0]\n self.device = Device(physical_device, [(QueueType.COMPUTE, self.queue_index)],\n validation_lvl=lava.VALIDATION_LEVEL)\n\n self.buffers = set()\n self.shaders = set()\n self.stages = set()\n\n sessions.add(self)\n\n def _destroy(self):\n for stage in self.stages:\n stage.destroy()\n for shader in self.shaders:\n shader.destroy()\n for buffer in self.buffers:\n buffer.destroy()\n self.device.destroy()\n\n def register_buffer(self, buffer):\n self.buffers.add(buffer)\n\n def register_shader(self, shader):\n self.shaders.add(shader)\n\n def register_stage(self, stage):\n self.stages.add(stage)\n",
"step-ids": [
5,
6,
7,
8,
9
]
}
|
[
5,
6,
7,
8,
9
] |
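A hedged usage sketch for the Session class above, assuming lava.devices() returns the available physical devices (as the constructor implies) and that Destroyable exposes a destroy() method; the import path for Session is hypothetical:

import lava
from lava_session_module import Session  # hypothetical module name for the file above

physical_device = lava.devices()[0]   # pick the first available device
session = Session(physical_device)    # defaults to the device's first compute queue
try:
    pass  # register buffers / shaders / stages here via the register_* methods
finally:
    session.destroy()                 # assumed to call the _destroy() defined above
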
#Problem: Have the function PrimeTime(num)
# take the num parameter being passed and return
# the string true if the parameter is a prime number,
# otherwise return the string false.
# The range will be between 1 and 2^16.
def PrimeTime(num):
    # Every prime > 3 has the form 6k±1, so this test is only a *necessary* condition:
    # composites such as 25 or 35 also pass it (it is a heuristic, not a proof of primality).
    prime1 = (num-1)%6
prime2 = (num+1)%6
if prime1 * prime2 == 0:
return 'True'
else:
return 'False'
print(PrimeTime(12))
|
normal
|
{
"blob_id": "5068a78a1aa31a277b3b5854ddd1d8990d07b104",
"index": 3627,
"step-1": "<mask token>\n",
"step-2": "def PrimeTime(num):\n prime1 = (num - 1) % 6\n prime2 = (num + 1) % 6\n if prime1 * prime2 == 0:\n return 'True'\n else:\n return 'False'\n\n\n<mask token>\n",
"step-3": "def PrimeTime(num):\n prime1 = (num - 1) % 6\n prime2 = (num + 1) % 6\n if prime1 * prime2 == 0:\n return 'True'\n else:\n return 'False'\n\n\nprint(PrimeTime(12))\n",
"step-4": "#roblem: Have the function PrimeTime(num) \n# take the num parameter being passed and return \n# the string true if the parameter is a prime number, \\\n# otherwise return the string false.\n# The range will be between 1 and 2^16.\n\ndef PrimeTime(num):\n\n prime1 = (num-1)%6\n prime2 = (num+1)%6\n\n if prime1 * prime2 == 0:\n return 'True'\n else:\n return 'False'\n\nprint(PrimeTime(12))",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
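Because the 6k±1 check in PrimeTime above is only a necessary condition, here is a hedged sketch of an exact trial-division test over the stated 1..2^16 range, returning the lowercase strings the problem statement asks for; the function name PrimeTimeExact is the editor's:

def PrimeTimeExact(num):
    # Trial division up to sqrt(num); fast enough for 1..2**16.
    if num < 2:
        return 'false'
    if num < 4:
        return 'true'        # 2 and 3 are prime
    if num % 2 == 0:
        return 'false'
    divisor = 3
    while divisor * divisor <= num:
        if num % divisor == 0:
            return 'false'
        divisor += 2
    return 'true'

print(PrimeTimeExact(12))  # false
print(PrimeTimeExact(13))  # true
print(PrimeTimeExact(25))  # false, even though 25 passes the 6k±1 check
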
import os
import json
import librosa
# Constants
# Dataset used for training
DATASET_PATH = "dataset"
# Where the data is stored
JSON_PATH = "data.json"
# Number of samples considered to preprocess data
SAMPLES_TO_CONSIDER = 22050 # 1 sec worth of sound
# Main function to preprocess the data
def prepare_dataset(dataset_path, json_path, n_mfcc=13, hop_length=512, n_fft=2048):
# create data dictionary
data = {
"mappings": [], # keywords
"labels": [], # a value for each audio file in the dataset
"MFCCs": [], # MFCC for each audio file
"files": [] # filenames with path for each audio file
}
# loop through all the sub-dirs
# walk through a folder structure recursively top-down
for i, (dir_path, dir_names, filenames) in enumerate(os.walk(dataset_path)):
# we need to ensure that we are not at root level
        if dir_path != dataset_path:  # compare paths by value; 'is not' relies on object identity
# update mappings
category = dir_path.split("\\")[-1] # category name ex: dataset\\wahad -> [dataset, wahad]
data["mappings"].append(category)
print(f"Processing {category}")
# loop through filenames and extract MFCCs
for f in filenames:
# get file path
file_path = os.path.join(dir_path, f) # gives us the whole file path
# load audio file
signal, sr = librosa.load(file_path)
# ensure the audio file is at least 1 second
if len(signal) >= SAMPLES_TO_CONSIDER:
# enforce on 1 sec. long signal
signal = signal[:SAMPLES_TO_CONSIDER]
# extract the MFCCs
MFCCs = librosa.feature.mfcc(signal, n_mfcc=n_mfcc,
hop_length=hop_length, n_fft=n_fft)
# store data
data["labels"].append(i - 1)
data["MFCCs"].append(MFCCs.T.tolist())
data["files"].append(file_path)
print(f"{file_path}: {i - 1}")
# store in json file
with open(json_path, "w") as fp:
json.dump(data, fp, indent=4)
if __name__ == "__main__":
prepare_dataset(DATASET_PATH, JSON_PATH)
|
normal
|
{
"blob_id": "ba808d23f6a8226f40e1c214012a1535ee1e9e98",
"index": 2947,
"step-1": "<mask token>\n\n\ndef prepare_dataset(dataset_path, json_path, n_mfcc=13, hop_length=512,\n n_fft=2048):\n data = {'mappings': [], 'labels': [], 'MFCCs': [], 'files': []}\n for i, (dir_path, dir_names, filenames) in enumerate(os.walk(dataset_path)\n ):\n if dir_path is not dataset_path:\n category = dir_path.split('\\\\')[-1]\n data['mappings'].append(category)\n print(f'Processing {category}')\n for f in filenames:\n file_path = os.path.join(dir_path, f)\n signal, sr = librosa.load(file_path)\n if len(signal) >= SAMPLES_TO_CONSIDER:\n signal = signal[:SAMPLES_TO_CONSIDER]\n MFCCs = librosa.feature.mfcc(signal, n_mfcc=n_mfcc,\n hop_length=hop_length, n_fft=n_fft)\n data['labels'].append(i - 1)\n data['MFCCs'].append(MFCCs.T.tolist())\n data['files'].append(file_path)\n print(f'{file_path}: {i - 1}')\n with open(json_path, 'w') as fp:\n json.dump(data, fp, indent=4)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef prepare_dataset(dataset_path, json_path, n_mfcc=13, hop_length=512,\n n_fft=2048):\n data = {'mappings': [], 'labels': [], 'MFCCs': [], 'files': []}\n for i, (dir_path, dir_names, filenames) in enumerate(os.walk(dataset_path)\n ):\n if dir_path is not dataset_path:\n category = dir_path.split('\\\\')[-1]\n data['mappings'].append(category)\n print(f'Processing {category}')\n for f in filenames:\n file_path = os.path.join(dir_path, f)\n signal, sr = librosa.load(file_path)\n if len(signal) >= SAMPLES_TO_CONSIDER:\n signal = signal[:SAMPLES_TO_CONSIDER]\n MFCCs = librosa.feature.mfcc(signal, n_mfcc=n_mfcc,\n hop_length=hop_length, n_fft=n_fft)\n data['labels'].append(i - 1)\n data['MFCCs'].append(MFCCs.T.tolist())\n data['files'].append(file_path)\n print(f'{file_path}: {i - 1}')\n with open(json_path, 'w') as fp:\n json.dump(data, fp, indent=4)\n\n\nif __name__ == '__main__':\n prepare_dataset(DATASET_PATH, JSON_PATH)\n",
"step-3": "<mask token>\nDATASET_PATH = 'dataset'\nJSON_PATH = 'data.json'\nSAMPLES_TO_CONSIDER = 22050\n\n\ndef prepare_dataset(dataset_path, json_path, n_mfcc=13, hop_length=512,\n n_fft=2048):\n data = {'mappings': [], 'labels': [], 'MFCCs': [], 'files': []}\n for i, (dir_path, dir_names, filenames) in enumerate(os.walk(dataset_path)\n ):\n if dir_path is not dataset_path:\n category = dir_path.split('\\\\')[-1]\n data['mappings'].append(category)\n print(f'Processing {category}')\n for f in filenames:\n file_path = os.path.join(dir_path, f)\n signal, sr = librosa.load(file_path)\n if len(signal) >= SAMPLES_TO_CONSIDER:\n signal = signal[:SAMPLES_TO_CONSIDER]\n MFCCs = librosa.feature.mfcc(signal, n_mfcc=n_mfcc,\n hop_length=hop_length, n_fft=n_fft)\n data['labels'].append(i - 1)\n data['MFCCs'].append(MFCCs.T.tolist())\n data['files'].append(file_path)\n print(f'{file_path}: {i - 1}')\n with open(json_path, 'w') as fp:\n json.dump(data, fp, indent=4)\n\n\nif __name__ == '__main__':\n prepare_dataset(DATASET_PATH, JSON_PATH)\n",
"step-4": "import os\nimport json\nimport librosa\nDATASET_PATH = 'dataset'\nJSON_PATH = 'data.json'\nSAMPLES_TO_CONSIDER = 22050\n\n\ndef prepare_dataset(dataset_path, json_path, n_mfcc=13, hop_length=512,\n n_fft=2048):\n data = {'mappings': [], 'labels': [], 'MFCCs': [], 'files': []}\n for i, (dir_path, dir_names, filenames) in enumerate(os.walk(dataset_path)\n ):\n if dir_path is not dataset_path:\n category = dir_path.split('\\\\')[-1]\n data['mappings'].append(category)\n print(f'Processing {category}')\n for f in filenames:\n file_path = os.path.join(dir_path, f)\n signal, sr = librosa.load(file_path)\n if len(signal) >= SAMPLES_TO_CONSIDER:\n signal = signal[:SAMPLES_TO_CONSIDER]\n MFCCs = librosa.feature.mfcc(signal, n_mfcc=n_mfcc,\n hop_length=hop_length, n_fft=n_fft)\n data['labels'].append(i - 1)\n data['MFCCs'].append(MFCCs.T.tolist())\n data['files'].append(file_path)\n print(f'{file_path}: {i - 1}')\n with open(json_path, 'w') as fp:\n json.dump(data, fp, indent=4)\n\n\nif __name__ == '__main__':\n prepare_dataset(DATASET_PATH, JSON_PATH)\n",
"step-5": "import os\nimport json\nimport librosa\n\n# Constants\n# Dataset used for training\nDATASET_PATH = \"dataset\"\n# Where the data is stored\nJSON_PATH = \"data.json\"\n# Number of samples considered to preprocess data\nSAMPLES_TO_CONSIDER = 22050 # 1 sec worth of sound\n\n\n# Main function to preprocess the data\ndef prepare_dataset(dataset_path, json_path, n_mfcc=13, hop_length=512, n_fft=2048):\n # create data dictionary\n data = {\n \"mappings\": [], # keywords\n \"labels\": [], # a value for each audio file in the dataset\n \"MFCCs\": [], # MFCC for each audio file\n \"files\": [] # filenames with path for each audio file\n }\n # loop through all the sub-dirs\n # walk through a folder structure recursively top-down\n for i, (dir_path, dir_names, filenames) in enumerate(os.walk(dataset_path)):\n # we need to ensure that we are not at root level\n if dir_path is not dataset_path:\n # update mappings\n category = dir_path.split(\"\\\\\")[-1] # category name ex: dataset\\\\wahad -> [dataset, wahad]\n data[\"mappings\"].append(category)\n print(f\"Processing {category}\")\n\n # loop through filenames and extract MFCCs\n for f in filenames:\n # get file path\n file_path = os.path.join(dir_path, f) # gives us the whole file path\n\n # load audio file\n signal, sr = librosa.load(file_path)\n\n # ensure the audio file is at least 1 second\n if len(signal) >= SAMPLES_TO_CONSIDER:\n # enforce on 1 sec. long signal\n signal = signal[:SAMPLES_TO_CONSIDER]\n\n # extract the MFCCs\n MFCCs = librosa.feature.mfcc(signal, n_mfcc=n_mfcc,\n hop_length=hop_length, n_fft=n_fft)\n\n # store data\n data[\"labels\"].append(i - 1)\n data[\"MFCCs\"].append(MFCCs.T.tolist())\n data[\"files\"].append(file_path)\n print(f\"{file_path}: {i - 1}\")\n\n # store in json file\n with open(json_path, \"w\") as fp:\n json.dump(data, fp, indent=4)\n\n\nif __name__ == \"__main__\":\n prepare_dataset(DATASET_PATH, JSON_PATH)\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
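A hedged sketch of reading data.json back for training, assuming only the layout that prepare_dataset() above writes (keys MFCCs, labels, mappings):

import json
import numpy as np

def load_dataset(json_path="data.json"):
    # Reload the MFCCs and labels saved by prepare_dataset() as numpy arrays.
    with open(json_path, "r") as fp:
        data = json.load(fp)
    X = np.array(data["MFCCs"])    # (num_clips, num_frames, n_mfcc); clips are all 1 sec long
    y = np.array(data["labels"])   # one integer label per clip
    return X, y, data["mappings"]

# X, y, mappings = load_dataset()
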
# Generated by Django 2.0.4 on 2018-04-30 14:01
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('base', '0007_topfilter'),
]
operations = [
migrations.CreateModel(
name='Post',
fields=[
('base_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='base.Base')),
('title', models.CharField(max_length=50)),
('text', models.TextField()),
('post_related_user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='post_related_user_name', to=settings.AUTH_USER_MODEL)),
],
bases=('base.base',),
),
]
|
normal
|
{
"blob_id": "d13589979ba7b6facd8339111323270c9920a9bf",
"index": 8127,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass Migration(migrations.Migration):\n <mask token>\n <mask token>\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass Migration(migrations.Migration):\n initial = True\n dependencies = [migrations.swappable_dependency(settings.\n AUTH_USER_MODEL), ('base', '0007_topfilter')]\n operations = [migrations.CreateModel(name='Post', fields=[('base_ptr',\n models.OneToOneField(auto_created=True, on_delete=django.db.models.\n deletion.CASCADE, parent_link=True, primary_key=True, serialize=\n False, to='base.Base')), ('title', models.CharField(max_length=50)),\n ('text', models.TextField()), ('post_related_user', models.\n ForeignKey(on_delete=django.db.models.deletion.CASCADE,\n related_name='post_related_user_name', to=settings.AUTH_USER_MODEL)\n )], bases=('base.base',))]\n",
"step-4": "from django.conf import settings\nfrom django.db import migrations, models\nimport django.db.models.deletion\n\n\nclass Migration(migrations.Migration):\n initial = True\n dependencies = [migrations.swappable_dependency(settings.\n AUTH_USER_MODEL), ('base', '0007_topfilter')]\n operations = [migrations.CreateModel(name='Post', fields=[('base_ptr',\n models.OneToOneField(auto_created=True, on_delete=django.db.models.\n deletion.CASCADE, parent_link=True, primary_key=True, serialize=\n False, to='base.Base')), ('title', models.CharField(max_length=50)),\n ('text', models.TextField()), ('post_related_user', models.\n ForeignKey(on_delete=django.db.models.deletion.CASCADE,\n related_name='post_related_user_name', to=settings.AUTH_USER_MODEL)\n )], bases=('base.base',))]\n",
"step-5": "# Generated by Django 2.0.4 on 2018-04-30 14:01\n\nfrom django.conf import settings\nfrom django.db import migrations, models\nimport django.db.models.deletion\n\n\nclass Migration(migrations.Migration):\n\n initial = True\n\n dependencies = [\n migrations.swappable_dependency(settings.AUTH_USER_MODEL),\n ('base', '0007_topfilter'),\n ]\n\n operations = [\n migrations.CreateModel(\n name='Post',\n fields=[\n ('base_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='base.Base')),\n ('title', models.CharField(max_length=50)),\n ('text', models.TextField()),\n ('post_related_user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='post_related_user_name', to=settings.AUTH_USER_MODEL)),\n ],\n bases=('base.base',),\n ),\n ]\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
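For reference, a hedged reconstruction of the model this initial migration implies: a Post using multi-table inheritance from the existing base.Base (the base_ptr parent link is what the migration creates); the import path for Base is an assumption:

from django.conf import settings
from django.db import models
from base.models import Base  # assumed location of the model referenced as 'base.Base'

class Post(Base):
    # base_ptr is created automatically by multi-table inheritance.
    title = models.CharField(max_length=50)
    text = models.TextField()
    post_related_user = models.ForeignKey(
        settings.AUTH_USER_MODEL,
        on_delete=models.CASCADE,
        related_name='post_related_user_name',
    )
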
# -*- coding: utf-8 -*-
class Solution:
"""
@param head: The first node of the linked list.
@return: The node where the cycle begins.
if there is no cycle, return null
"""
def detectCycle(self, head):
# write your code here
        # First check whether a cycle exists, then measure its length, then walk from the head to locate the cycle's start.
cycle_len = -1
one_node, two_node = head, head
while two_node:
for i in xrange(2):
if two_node:
two_node = two_node.next
if two_node == one_node:
cycle_len = 1
two_node = one_node.next
                        while two_node != one_node: # measure the cycle length
cycle_len += 1
two_node = two_node.next
break
else:
break
one_node = one_node.next
if (not two_node) or (cycle_len != -1):
break
if cycle_len == -1:
return None
        one_node, two_node = head, head # first advance two_node ahead by exactly the cycle length
i = 0
while i < cycle_len:
two_node = two_node.next
i += 1
while one_node != two_node:
one_node = one_node.next
two_node = two_node.next
return one_node
|
normal
|
{
"blob_id": "3319614d154b16190f3cd8f4f65c3b0e0da277e9",
"index": 9751,
"step-1": "<mask token>\n",
"step-2": "class Solution:\n <mask token>\n <mask token>\n",
"step-3": "class Solution:\n <mask token>\n\n def detectCycle(self, head):\n cycle_len = -1\n one_node, two_node = head, head\n while two_node:\n for i in xrange(2):\n if two_node:\n two_node = two_node.next\n if two_node == one_node:\n cycle_len = 1\n two_node = one_node.next\n while two_node != one_node:\n cycle_len += 1\n two_node = two_node.next\n break\n else:\n break\n one_node = one_node.next\n if not two_node or cycle_len != -1:\n break\n if cycle_len == -1:\n return None\n one_node, two_node = head, head\n i = 0\n while i < cycle_len:\n two_node = two_node.next\n i += 1\n while one_node != two_node:\n one_node = one_node.next\n two_node = two_node.next\n return one_node\n",
"step-4": "class Solution:\n \"\"\"\n @param head: The first node of the linked list.\n @return: The node where the cycle begins. \n if there is no cycle, return null\n \"\"\"\n\n def detectCycle(self, head):\n cycle_len = -1\n one_node, two_node = head, head\n while two_node:\n for i in xrange(2):\n if two_node:\n two_node = two_node.next\n if two_node == one_node:\n cycle_len = 1\n two_node = one_node.next\n while two_node != one_node:\n cycle_len += 1\n two_node = two_node.next\n break\n else:\n break\n one_node = one_node.next\n if not two_node or cycle_len != -1:\n break\n if cycle_len == -1:\n return None\n one_node, two_node = head, head\n i = 0\n while i < cycle_len:\n two_node = two_node.next\n i += 1\n while one_node != two_node:\n one_node = one_node.next\n two_node = two_node.next\n return one_node\n",
"step-5": "# -*- coding: utf-8 -*-\n\nclass Solution:\n \"\"\"\n @param head: The first node of the linked list.\n @return: The node where the cycle begins. \n if there is no cycle, return null\n \"\"\"\n def detectCycle(self, head):\n # write your code here\n # 先确定是否有环,然后确定环的大小,再遍历确定位置。\n cycle_len = -1\n one_node, two_node = head, head\n while two_node:\n for i in xrange(2):\n if two_node:\n two_node = two_node.next\n if two_node == one_node:\n cycle_len = 1\n two_node = one_node.next\n while two_node != one_node: # 算出环的长度\n cycle_len += 1\n two_node = two_node.next\n break\n else:\n break\n one_node = one_node.next\n if (not two_node) or (cycle_len != -1):\n break\n if cycle_len == -1:\n return None\n one_node, two_node = head, head # two_node先前进的距离等于环的长度\n i = 0\n while i < cycle_len:\n two_node = two_node.next\n i += 1\n while one_node != two_node:\n one_node = one_node.next\n two_node = two_node.next\n return one_node",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
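An equivalent, shorter formulation of the solution above is Floyd's cycle detection: advance slow and fast pointers until they meet, then restart one pointer from the head; the two meet again exactly at the cycle's entry. A hedged sketch, assuming the usual singly linked node with a .next attribute:

class ListNode(object):
    def __init__(self, val):
        self.val = val
        self.next = None

def detect_cycle(head):
    # Phase 1: look for a meeting point inside the cycle.
    slow = fast = head
    while fast and fast.next:
        slow = slow.next
        fast = fast.next.next
        if slow is fast:
            # Phase 2: head-to-entry distance equals meeting-point-to-entry distance.
            slow = head
            while slow is not fast:
                slow = slow.next
                fast = fast.next
            return slow
    return None
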
import requests
from bs4 import BeautifulSoup
'''
Script that fetches the list of schools (学院) from OCW (there are only about six,
so this may not even be necessary).
Each element of gakuinList is a dictionary of the form:
{
	'name' : school name,
	'url' : URL of that school's course list,
}
'''
def getGakuinList():
url = "http://www.ocw.titech.ac.jp/"
response = requests.get(url)
soup = BeautifulSoup(response.content,"lxml")
topMainNav = soup.find("ul",id="top-mein-navi")
gakubus = topMainNav.find_all(class_="gakubuBox")
gakuinList = []
for gakubu_div in gakubus:
gakuin = gakubu_div.find(class_="gakubuHead").span.string
		if gakuin[-2:] != "学院":
continue
gakuin_url = url + gakubu_div.parent['href']
gakuinList.append({'name':gakuin,'url':gakuin_url})
return gakuinList
'''
Given a school name and its URL, fetch that school's list of lectures.
'''
def getLectures(name,url):
urlprefix = "http://www.ocw.titech.ac.jp"
response = requests.get(url)
soup = BeautifulSoup(response.content,'lxml')
table = soup.find('table',class_='ranking-list').tbody
for item in table.find_all('tr'):
code = item.find('td',class_='code').string
		name = item.find('td',class_='course_title').a.string # lecture title
lecture_url = urlprefix + item.find('td',class_='course_title').a['href']
teachers = [te.string for te in item.find('td',class_='lecturer').find_all('a')]
		quaterColumn = item.find('td',class_='opening_department') # TODO: this is not being extracted correctly yet
quater = quaterColumn.a.string if quaterColumn != None else ''
		if not name or not code:	# skip this row if the strings are empty
continue
if code:
code = code.strip()
if name:
name = name.strip()
if quater:
quater = quater.strip()
print(name)
print(teachers)
print(lecture_url)
print(quater)
if __name__=='__main__':
#print(getGakuinList())
getLectures('情報理工学院','http://www.ocw.titech.ac.jp/index.php?module=General&action=T0100&GakubuCD=4&lang=JA')
|
normal
|
{
"blob_id": "24274dddbeb1be743cfcac331ee688d48c9a46dd",
"index": 8647,
"step-1": "<mask token>\n\n\ndef getLectures(name, url):\n urlprefix = 'http://www.ocw.titech.ac.jp'\n response = requests.get(url)\n soup = BeautifulSoup(response.content, 'lxml')\n table = soup.find('table', class_='ranking-list').tbody\n for item in table.find_all('tr'):\n code = item.find('td', class_='code').string\n name = item.find('td', class_='course_title').a.string\n lecture_url = urlprefix + item.find('td', class_='course_title').a[\n 'href']\n teachers = [te.string for te in item.find('td', class_='lecturer').\n find_all('a')]\n quaterColumn = item.find('td', class_='opening_department')\n quater = quaterColumn.a.string if quaterColumn != None else ''\n if not name or not code:\n continue\n if code:\n code = code.strip()\n if name:\n name = name.strip()\n if quater:\n quater = quater.strip()\n print(name)\n print(teachers)\n print(lecture_url)\n print(quater)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef getGakuinList():\n url = 'http://www.ocw.titech.ac.jp/'\n response = requests.get(url)\n soup = BeautifulSoup(response.content, 'lxml')\n topMainNav = soup.find('ul', id='top-mein-navi')\n gakubus = topMainNav.find_all(class_='gakubuBox')\n gakuinList = []\n for gakubu_div in gakubus:\n gakuin = gakubu_div.find(class_='gakubuHead').span.string\n if gakuin[-2:] != '学院':\n continue\n gakuin_url = url + gakubu_div.parent['href']\n gakuinList.append({'name': gakuin, 'url': gakuin_url})\n return gakuinList\n\n\n<mask token>\n\n\ndef getLectures(name, url):\n urlprefix = 'http://www.ocw.titech.ac.jp'\n response = requests.get(url)\n soup = BeautifulSoup(response.content, 'lxml')\n table = soup.find('table', class_='ranking-list').tbody\n for item in table.find_all('tr'):\n code = item.find('td', class_='code').string\n name = item.find('td', class_='course_title').a.string\n lecture_url = urlprefix + item.find('td', class_='course_title').a[\n 'href']\n teachers = [te.string for te in item.find('td', class_='lecturer').\n find_all('a')]\n quaterColumn = item.find('td', class_='opening_department')\n quater = quaterColumn.a.string if quaterColumn != None else ''\n if not name or not code:\n continue\n if code:\n code = code.strip()\n if name:\n name = name.strip()\n if quater:\n quater = quater.strip()\n print(name)\n print(teachers)\n print(lecture_url)\n print(quater)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef getGakuinList():\n url = 'http://www.ocw.titech.ac.jp/'\n response = requests.get(url)\n soup = BeautifulSoup(response.content, 'lxml')\n topMainNav = soup.find('ul', id='top-mein-navi')\n gakubus = topMainNav.find_all(class_='gakubuBox')\n gakuinList = []\n for gakubu_div in gakubus:\n gakuin = gakubu_div.find(class_='gakubuHead').span.string\n if gakuin[-2:] != '学院':\n continue\n gakuin_url = url + gakubu_div.parent['href']\n gakuinList.append({'name': gakuin, 'url': gakuin_url})\n return gakuinList\n\n\n<mask token>\n\n\ndef getLectures(name, url):\n urlprefix = 'http://www.ocw.titech.ac.jp'\n response = requests.get(url)\n soup = BeautifulSoup(response.content, 'lxml')\n table = soup.find('table', class_='ranking-list').tbody\n for item in table.find_all('tr'):\n code = item.find('td', class_='code').string\n name = item.find('td', class_='course_title').a.string\n lecture_url = urlprefix + item.find('td', class_='course_title').a[\n 'href']\n teachers = [te.string for te in item.find('td', class_='lecturer').\n find_all('a')]\n quaterColumn = item.find('td', class_='opening_department')\n quater = quaterColumn.a.string if quaterColumn != None else ''\n if not name or not code:\n continue\n if code:\n code = code.strip()\n if name:\n name = name.strip()\n if quater:\n quater = quater.strip()\n print(name)\n print(teachers)\n print(lecture_url)\n print(quater)\n\n\nif __name__ == '__main__':\n getLectures('情報理工学院',\n 'http://www.ocw.titech.ac.jp/index.php?module=General&action=T0100&GakubuCD=4&lang=JA'\n )\n",
"step-4": "import requests\nfrom bs4 import BeautifulSoup\n<mask token>\n\n\ndef getGakuinList():\n url = 'http://www.ocw.titech.ac.jp/'\n response = requests.get(url)\n soup = BeautifulSoup(response.content, 'lxml')\n topMainNav = soup.find('ul', id='top-mein-navi')\n gakubus = topMainNav.find_all(class_='gakubuBox')\n gakuinList = []\n for gakubu_div in gakubus:\n gakuin = gakubu_div.find(class_='gakubuHead').span.string\n if gakuin[-2:] != '学院':\n continue\n gakuin_url = url + gakubu_div.parent['href']\n gakuinList.append({'name': gakuin, 'url': gakuin_url})\n return gakuinList\n\n\n<mask token>\n\n\ndef getLectures(name, url):\n urlprefix = 'http://www.ocw.titech.ac.jp'\n response = requests.get(url)\n soup = BeautifulSoup(response.content, 'lxml')\n table = soup.find('table', class_='ranking-list').tbody\n for item in table.find_all('tr'):\n code = item.find('td', class_='code').string\n name = item.find('td', class_='course_title').a.string\n lecture_url = urlprefix + item.find('td', class_='course_title').a[\n 'href']\n teachers = [te.string for te in item.find('td', class_='lecturer').\n find_all('a')]\n quaterColumn = item.find('td', class_='opening_department')\n quater = quaterColumn.a.string if quaterColumn != None else ''\n if not name or not code:\n continue\n if code:\n code = code.strip()\n if name:\n name = name.strip()\n if quater:\n quater = quater.strip()\n print(name)\n print(teachers)\n print(lecture_url)\n print(quater)\n\n\nif __name__ == '__main__':\n getLectures('情報理工学院',\n 'http://www.ocw.titech.ac.jp/index.php?module=General&action=T0100&GakubuCD=4&lang=JA'\n )\n",
"step-5": "import requests\nfrom bs4 import BeautifulSoup\n\n'''\nOCWから学院一覧を取得するスクリプト(6個くらいだから必要ない気もする)\ngakuinListの各要素は次のような辞書に鳴っている\n{\n\t'name' : 学院名,\n\t'url' : その学院の授業の一覧のurl,\n}\n'''\ndef getGakuinList():\n\turl = \"http://www.ocw.titech.ac.jp/\"\n\tresponse = requests.get(url)\n\tsoup = BeautifulSoup(response.content,\"lxml\")\n\n\ttopMainNav = soup.find(\"ul\",id=\"top-mein-navi\")\n\n\tgakubus = topMainNav.find_all(class_=\"gakubuBox\")\n\n\tgakuinList = []\n\tfor gakubu_div in gakubus:\n\t\tgakuin = gakubu_div.find(class_=\"gakubuHead\").span.string\n\t\tif gakuin[-2::] != \"学院\":\n\t\t\tcontinue\n\t\tgakuin_url = url + gakubu_div.parent['href']\n\t\tgakuinList.append({'name':gakuin,'url':gakuin_url})\n\n\treturn gakuinList\n\n'''\n学院名とurlを渡されたらその学院の授業一覧を持ってくる\n'''\ndef getLectures(name,url):\n\turlprefix = \"http://www.ocw.titech.ac.jp\"\n\tresponse = requests.get(url)\n\tsoup = BeautifulSoup(response.content,'lxml')\n\ttable = soup.find('table',class_='ranking-list').tbody\n\n\tfor item in table.find_all('tr'):\n\t\tcode = item.find('td',class_='code').string\n\t\tname = item.find('td',class_='course_title').a.string #講義名\n\t\tlecture_url = urlprefix + item.find('td',class_='course_title').a['href']\n\t\tteachers = [te.string for te in item.find('td',class_='lecturer').find_all('a')]\n\t\tquaterColumn = item.find('td',class_='opening_department') #TODO ちゃんととれてない\n\t\tquater = quaterColumn.a.string if quaterColumn != None else ''\n\t\tif not name or not code:\t# 文字列が空の場合はスキップ\n\t\t\tcontinue\n\t\tif code:\n\t\t\tcode = code.strip()\n\t\tif name:\n\t\t\tname = name.strip()\n\t\tif quater:\n\t\t\tquater = quater.strip()\n\t\tprint(name)\n\t\tprint(teachers)\n\t\tprint(lecture_url)\n\t\tprint(quater)\n\n\nif __name__=='__main__':\n\t#print(getGakuinList())\n\tgetLectures('情報理工学院','http://www.ocw.titech.ac.jp/index.php?module=General&action=T0100&GakubuCD=4&lang=JA')\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
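A hedged sketch of driving both helpers above together; if appended to the script, it lists every school from getGakuinList() and scrapes each course list (network access required):

def scrape_all_schools():
    # Walk every school and print its lecture list using the helpers defined above.
    for gakuin in getGakuinList():
        print('===', gakuin['name'], '===')
        getLectures(gakuin['name'], gakuin['url'])
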
# -*- coding: utf-8 -*-
"""
Created on Thu Nov 8 17:14:14 2018
@author: Winry
"""
import pandas as pd
# show all columns when printing DataFrames
pd.set_option('display.max_columns', None)
# read the input data
file_name = "data_11_8.csv"
file_open = open(file_name)
df = pd.read_csv(file_open)
file_open.close()
Newtaxiout_time = df['Newtaxiout_time']
time = df['time']
file_name2 = "df_append.csv"
file_open2 = open(file_name2)
df2 = pd.read_csv(file_open2)
file_open2.close()
# append1
append1_res = []
for i in range(len(df)):
count = []
count = df2["Newappend1"][(df2["Newappend1"] > Newtaxiout_time[i]) & (df2["Newappend1"] < time[i]*60+Newtaxiout_time[i])]
append1_res.append(len(count))
# append2
append2_res = []
for i in range(len(df)):
count = []
count = df2["Newappend2"][(df2["Newappend2"] > Newtaxiout_time[i]) & (df2["Newappend2"] < time[i]*60+Newtaxiout_time[i])]
append2_res.append(len(count))
df['append1_res'] = append1_res
df['append2_res'] = append2_res
df.to_csv('df_11_9.csv',index=False)
|
normal
|
{
"blob_id": "f5a474cdc8aa22322b252b980c0334a9db21bd5c",
"index": 9300,
"step-1": "<mask token>\n",
"step-2": "<mask token>\npd.set_option('display.max_columns', None)\n<mask token>\nfile_open.close()\n<mask token>\nfor i in range(len(df)):\n count = []\n count = df2['Newappend1'][(df2['Newappend1'] > Newtaxiout_time[i]) & (\n df2['Newappend1'] < time[i] * 60 + Newtaxiout_time[i])]\n append1_res.append(len(count))\n<mask token>\nfor i in range(len(df)):\n count = []\n count = df2['Newappend2'][(df2['Newappend2'] > Newtaxiout_time[i]) & (\n df2['Newappend2'] < time[i] * 60 + Newtaxiout_time[i])]\n append2_res.append(len(count))\n<mask token>\ndf.to_csv('df_11_9.csv', index=False)\n",
"step-3": "<mask token>\npd.set_option('display.max_columns', None)\nfile_name = 'data_11_8.csv'\nfile_open = open(file_name)\ndf = pd.read_csv(file_open)\nfile_open.close()\nNewtaxiout_time = df['Newtaxiout_time']\ntime = df['time']\nfile_name2 = 'df_append.csv'\nfile_open2 = open(file_name2)\ndf2 = pd.read_csv(file_open2)\nappend1_res = []\nfor i in range(len(df)):\n count = []\n count = df2['Newappend1'][(df2['Newappend1'] > Newtaxiout_time[i]) & (\n df2['Newappend1'] < time[i] * 60 + Newtaxiout_time[i])]\n append1_res.append(len(count))\nappend2_res = []\nfor i in range(len(df)):\n count = []\n count = df2['Newappend2'][(df2['Newappend2'] > Newtaxiout_time[i]) & (\n df2['Newappend2'] < time[i] * 60 + Newtaxiout_time[i])]\n append2_res.append(len(count))\ndf['append1_res'] = append1_res\ndf['append2_res'] = append2_res\ndf.to_csv('df_11_9.csv', index=False)\n",
"step-4": "<mask token>\nimport pandas as pd\npd.set_option('display.max_columns', None)\nfile_name = 'data_11_8.csv'\nfile_open = open(file_name)\ndf = pd.read_csv(file_open)\nfile_open.close()\nNewtaxiout_time = df['Newtaxiout_time']\ntime = df['time']\nfile_name2 = 'df_append.csv'\nfile_open2 = open(file_name2)\ndf2 = pd.read_csv(file_open2)\nappend1_res = []\nfor i in range(len(df)):\n count = []\n count = df2['Newappend1'][(df2['Newappend1'] > Newtaxiout_time[i]) & (\n df2['Newappend1'] < time[i] * 60 + Newtaxiout_time[i])]\n append1_res.append(len(count))\nappend2_res = []\nfor i in range(len(df)):\n count = []\n count = df2['Newappend2'][(df2['Newappend2'] > Newtaxiout_time[i]) & (\n df2['Newappend2'] < time[i] * 60 + Newtaxiout_time[i])]\n append2_res.append(len(count))\ndf['append1_res'] = append1_res\ndf['append2_res'] = append2_res\ndf.to_csv('df_11_9.csv', index=False)\n",
"step-5": "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Nov 8 17:14:14 2018\n\n@author: Winry\n\"\"\"\n\nimport pandas as pd\n# 显示所有的列\npd.set_option('display.max_columns', None)\n\n# 读取数据\nfile_name = \"data_11_8.csv\"\nfile_open = open(file_name)\ndf = pd.read_csv(file_open)\nfile_open.close()\n\nNewtaxiout_time = df['Newtaxiout_time']\ntime = df['time']\n\nfile_name2 = \"df_append.csv\"\nfile_open2 = open(file_name2)\ndf2 = pd.read_csv(file_open2)\n\n# append1\n\nappend1_res = []\nfor i in range(len(df)):\n count = []\n count = df2[\"Newappend1\"][(df2[\"Newappend1\"] > Newtaxiout_time[i]) & (df2[\"Newappend1\"] < time[i]*60+Newtaxiout_time[i])]\n append1_res.append(len(count))\n\n\n# append2\nappend2_res = []\nfor i in range(len(df)):\n count = []\n count = df2[\"Newappend2\"][(df2[\"Newappend2\"] > Newtaxiout_time[i]) & (df2[\"Newappend2\"] < time[i]*60+Newtaxiout_time[i])]\n append2_res.append(len(count))\n \ndf['append1_res'] = append1_res\ndf['append2_res'] = append2_res\ndf.to_csv('df_11_9.csv',index=False)",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
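Editor's note on the script above: the two counting loops are O(rows × events) in pure Python; a hedged vectorized alternative using NumPy broadcasting, assuming the same column names, would be:

import numpy as np

def count_in_window(df, df2, column):
    # For each row of df, count events in df2[column] strictly inside
    # (Newtaxiout_time, Newtaxiout_time + time * 60).
    starts = df['Newtaxiout_time'].to_numpy()
    ends = starts + df['time'].to_numpy() * 60
    events = df2[column].to_numpy()
    inside = (events[None, :] > starts[:, None]) & (events[None, :] < ends[:, None])
    return inside.sum(axis=1)

# df['append1_res'] = count_in_window(df, df2, 'Newappend1')
# df['append2_res'] = count_in_window(df, df2, 'Newappend2')
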
def login():
usernameInput = input("Username : ")
passwordInput = input("Password : ")
if usernameInput == "admin" and passwordInput == "1234":
return (showMenu())
else:
print("User or Password Wrong.")
return login()
def showMenu():
print("---Please Choose Menu---")
print("1. Vat7")
print("2. Calculation")
print("3. Vat Calulation")
return menuSelect()
def menuSelect():
usernameSelect1 = int(input("เลือกเมนู "))
if usernameSelect1 == 1:
price = int(input("Price : "))
vat = 7
result = price + (price * vat / 100)
print("ราคารวม Vat7 %",result)
return menuSelect()
elif usernameSelect1 == 2:
price1 = int(input("ราคาชิ้นที่ 1 : "))
price2 = int(input("ราคาชิ้นที่ 2 : "))
        total = price1 + price2
        print("ราคารวม :", total)
return menuSelect()
elif usernameSelect1 == 3:
return (priceResult())
def vat7(totalPrice):
vat = 7
result = totalPrice + (totalPrice * vat / 100)
return result
def priceResult():
price1 = int(input("ราคาชิ้นที่ 1 : "))
price2 = int(input("ราคาชิ้นที่ 2 : "))
return vat7(price1+price2)
print(login())
|
normal
|
{
"blob_id": "34dd6966a971e3d32e82a17cd08c3b66bb88163b",
"index": 1277,
"step-1": "<mask token>\n\n\ndef showMenu():\n print('---Please Choose Menu---')\n print('1. Vat7')\n print('2. Calculation')\n print('3. Vat Calulation')\n return menuSelect()\n\n\n<mask token>\n\n\ndef priceResult():\n price1 = int(input('ราคาชิ้นที่ 1 : '))\n price2 = int(input('ราคาชิ้นที่ 2 : '))\n return vat7(price1 + price2)\n\n\n<mask token>\n",
"step-2": "def login():\n usernameInput = input('Username : ')\n passwordInput = input('Password : ')\n if usernameInput == 'admin' and passwordInput == '1234':\n return showMenu()\n else:\n print('User or Password Wrong.')\n return login()\n\n\ndef showMenu():\n print('---Please Choose Menu---')\n print('1. Vat7')\n print('2. Calculation')\n print('3. Vat Calulation')\n return menuSelect()\n\n\ndef menuSelect():\n usernameSelect1 = int(input('เลือกเมนู '))\n if usernameSelect1 == 1:\n price = int(input('Price : '))\n vat = 7\n result = price + price * vat / 100\n print('ราคารวม Vat7 %', result)\n return menuSelect()\n elif usernameSelect1 == 2:\n price1 = int(input('ราคาชิ้นที่ 1 : '))\n price2 = int(input('ราคาชิ้นที่ 2 : '))\n sum = price1 + price2\n print('ราคารวม :', sum)\n return menuSelect()\n elif usernameSelect1 == 3:\n return priceResult()\n\n\n<mask token>\n\n\ndef priceResult():\n price1 = int(input('ราคาชิ้นที่ 1 : '))\n price2 = int(input('ราคาชิ้นที่ 2 : '))\n return vat7(price1 + price2)\n\n\n<mask token>\n",
"step-3": "def login():\n usernameInput = input('Username : ')\n passwordInput = input('Password : ')\n if usernameInput == 'admin' and passwordInput == '1234':\n return showMenu()\n else:\n print('User or Password Wrong.')\n return login()\n\n\ndef showMenu():\n print('---Please Choose Menu---')\n print('1. Vat7')\n print('2. Calculation')\n print('3. Vat Calulation')\n return menuSelect()\n\n\ndef menuSelect():\n usernameSelect1 = int(input('เลือกเมนู '))\n if usernameSelect1 == 1:\n price = int(input('Price : '))\n vat = 7\n result = price + price * vat / 100\n print('ราคารวม Vat7 %', result)\n return menuSelect()\n elif usernameSelect1 == 2:\n price1 = int(input('ราคาชิ้นที่ 1 : '))\n price2 = int(input('ราคาชิ้นที่ 2 : '))\n sum = price1 + price2\n print('ราคารวม :', sum)\n return menuSelect()\n elif usernameSelect1 == 3:\n return priceResult()\n\n\ndef vat7(totalPrice):\n vat = 7\n result = totalPrice + totalPrice * vat / 100\n return result\n\n\ndef priceResult():\n price1 = int(input('ราคาชิ้นที่ 1 : '))\n price2 = int(input('ราคาชิ้นที่ 2 : '))\n return vat7(price1 + price2)\n\n\n<mask token>\n",
"step-4": "def login():\n usernameInput = input('Username : ')\n passwordInput = input('Password : ')\n if usernameInput == 'admin' and passwordInput == '1234':\n return showMenu()\n else:\n print('User or Password Wrong.')\n return login()\n\n\ndef showMenu():\n print('---Please Choose Menu---')\n print('1. Vat7')\n print('2. Calculation')\n print('3. Vat Calulation')\n return menuSelect()\n\n\ndef menuSelect():\n usernameSelect1 = int(input('เลือกเมนู '))\n if usernameSelect1 == 1:\n price = int(input('Price : '))\n vat = 7\n result = price + price * vat / 100\n print('ราคารวม Vat7 %', result)\n return menuSelect()\n elif usernameSelect1 == 2:\n price1 = int(input('ราคาชิ้นที่ 1 : '))\n price2 = int(input('ราคาชิ้นที่ 2 : '))\n sum = price1 + price2\n print('ราคารวม :', sum)\n return menuSelect()\n elif usernameSelect1 == 3:\n return priceResult()\n\n\ndef vat7(totalPrice):\n vat = 7\n result = totalPrice + totalPrice * vat / 100\n return result\n\n\ndef priceResult():\n price1 = int(input('ราคาชิ้นที่ 1 : '))\n price2 = int(input('ราคาชิ้นที่ 2 : '))\n return vat7(price1 + price2)\n\n\nprint(login())\n",
"step-5": "def login():\n usernameInput = input(\"Username : \")\n passwordInput = input(\"Password : \")\n if usernameInput == \"admin\" and passwordInput == \"1234\":\n return (showMenu())\n else:\n print(\"User or Password Wrong.\")\n return login()\ndef showMenu():\n print(\"---Please Choose Menu---\")\n print(\"1. Vat7\")\n print(\"2. Calculation\")\n print(\"3. Vat Calulation\")\n return menuSelect()\ndef menuSelect():\n usernameSelect1 = int(input(\"เลือกเมนู \"))\n if usernameSelect1 == 1:\n price = int(input(\"Price : \"))\n vat = 7\n result = price + (price * vat / 100)\n print(\"ราคารวม Vat7 %\",result)\n return menuSelect()\n elif usernameSelect1 == 2:\n price1 = int(input(\"ราคาชิ้นที่ 1 : \"))\n price2 = int(input(\"ราคาชิ้นที่ 2 : \"))\n sum = price1 + price2\n print(\"ราคารวม :\",sum)\n return menuSelect()\n elif usernameSelect1 == 3:\n return (priceResult())\ndef vat7(totalPrice):\n vat = 7\n result = totalPrice + (totalPrice * vat / 100)\n return result\ndef priceResult():\n price1 = int(input(\"ราคาชิ้นที่ 1 : \"))\n price2 = int(input(\"ราคาชิ้นที่ 2 : \"))\n return vat7(price1+price2)\nprint(login())\n",
"step-ids": [
2,
4,
5,
6,
7
]
}
|
[
2,
4,
5,
6,
7
] |
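A hedged sketch of an iterative menu loop that could replace the recursive menuSelect() above (each choice currently re-enters the function, so a long session keeps growing the call stack); if appended to the script, it reuses the vat7() and priceResult() helpers:

def menu_loop():
    # Iterative version of menuSelect(): repeat until the user quits.
    while True:
        choice = input("Menu: 1) VAT 7%  2) Sum two prices  3) Sum + VAT  q) Quit : ")
        if choice == "1":
            price = int(input("Price : "))
            print("Price incl. 7% VAT:", vat7(price))
        elif choice == "2":
            price1 = int(input("Price 1 : "))
            price2 = int(input("Price 2 : "))
            print("Total:", price1 + price2)
        elif choice == "3":
            print("Total incl. 7% VAT:", priceResult())
        elif choice == "q":
            break
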
# operatorTest02.py
x = 5
x += 3 # compound (augmented) assignment operator
print("x : ", x)
print("-"*30)
total = 0
total += 1
total  # NOTE: a bare expression like this has no visible effect when run as a script; use print(total) to see the value
|
normal
|
{
"blob_id": "4f8bc19bb113c9eac7c2ac774ac7b16f569d9704",
"index": 3083,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nx += 3\nprint('x : ', x)\nprint('-' * 30)\n<mask token>\ntotal += 1\ntotal\n",
"step-3": "x = 5\nx += 3\nprint('x : ', x)\nprint('-' * 30)\ntotal = 0\ntotal += 1\ntotal\n",
"step-4": "# operatorTest02.py\n\nx = 5\nx += 3 #복함 대입 연산자\nprint(\"x : \", x)\nprint(\"-\"*30)\n\ntotal = 0\ntotal += 1\ntotal ",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
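For context, a short sketch of the other augmented assignment operators the snippet above illustrates, with the running value traced in the comments:

x = 5
x += 3   # x = x + 3   -> 8
x -= 2   # x = x - 2   -> 6
x *= 4   # x = x * 4   -> 24
x //= 5  # x = x // 5  -> 4
x %= 3   # x = x % 3   -> 1
x **= 3  # x = x ** 3  -> 1
print("x :", x)  # 1
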
from tkinter import ttk
import tkinter as tk
import pyodbc
#ConnectingDatabase#
from tkinter import messagebox
conn = pyodbc.connect('Driver={SQL Server};'
'Server=MUTHUCOMPUTER;'
'Database=Class4c v1;'
'Trusted_Connection=yes;')
cursor = conn.cursor()
#Adding new record#
def save():
Names= Name.get()
Ages= Age.get()
Genders= Gender.get()
Heights= height.get()
weights= weight.get()
rollnos= StudentId.get()
Sports=Sport.get()
cursor.execute("""
INSERT INTO Students(Name, Age, Gender, Height,_weight,StudentId)
VALUES (?,?,?,?,?,?)""",(Names,Ages,Genders,Heights,weights,rollnos))
conn.commit()
cursor.execute("""
INSERT INTO Activity(Name,StudentId,Activity)
VALUES (?,?,?)
""",(Names,rollnos,Sports))
conn.commit()
clearfields()
messagebox.showinfo("Tkinter", "Saved successfully!")
#delete the selected record; currently works only by roll number (StudentId)
def delete():
x=StudentId.get()
cursor.execute("""
DELETE FROM Students
WHERE StudentId = (?)""",(x))
conn.commit()
cursor.execute("""
DELETE FROM Activity
    WHERE StudentId = (?)""",(x))
    conn.commit()
    clearfields()
messagebox.showinfo("Tkinter", "Deleted successfully!")
#Searching records
def Search():
Names= Name.get()
Ages= Age.get()
Genders= Gender.get()
Heights= height.get()
Weights= weight.get()
Rollnos= StudentId.get()
Sports=Sport.get()
# clearing the tree
t=tree.get_children()
for f in t:
tree.delete(f)
    #Search starts
    # NOTE: the Name branch below joins the Activity table (matching save()/delete() above),
    # while the remaining branches join a "Sports" table; these table names probably need to agree.
if len(Names)!=0:
cursor.execute("""select A.Name,A.Age,A.Gender,A.Height,A._Weight,A.StudentId,B.Activity
from Students A inner join Activity B on A.StudentId=B.StudentId where A.Name like(?)""",(Names))
records=cursor.fetchall()
for row in records:
tree.insert("", 3, text=row[0], values=(row[1],row[2],row[3],row[4],row[5],row[6]))
tree.pack(side=tk.TOP,fill=tk.X)
elif len(Ages)!=0:
cursor.execute("""select A.Name,A.Age,A.Gender,A.Height,A._Weight,A.StudentId,B.Activity
from Students A inner join Sports B on A.StudentId=B.StudentId where A.Age like(?)""",(Ages))
records=cursor.fetchall()
for row in records:
tree.insert("", 3, text=row[0], values=(row[1],row[2],row[3],row[4],row[5],row[6]))
tree.pack(side=tk.TOP,fill=tk.X)
elif len(Genders)!=0:
cursor.execute("""select A.Name,A.Age,A.Gender,A.Height,A._Weight,A.StudentId,B.Activity
from Students A inner join Sports B on A.StudentId=B.StudentId where A.Gender like(?)""",(Genders))
records=cursor.fetchall()
for row in records:
tree.insert("", 3, text=row[0], values=(row[1],row[2],row[3],row[4],row[5],row[6]))
tree.pack(side=tk.TOP,fill=tk.X)
elif len(Heights)!=0:
cursor.execute("""select A.Name,A.Age,A.Gender,A.Height,A._Weight,A.StudentId,B.Activity
from Students A inner join Sports B on A.StudentId=B.StudentId where A.Height like(?)""",(Heights))
records=cursor.fetchall()
for row in records:
tree.insert("", 3, text=row[0], values=(row[1],row[2],row[3],row[4],row[5],row[6]))
tree.pack(side=tk.TOP,fill=tk.X)
elif len(Weights)!=0:
cursor.execute("""select A.Name,A.Age,A.Gender,A.Height,A._Weight,A.StudentId,B.Activity
from Students A inner join Sports B on A.StudentId=B.StudentId where A._Weight like(?)""",(Weights))
records=cursor.fetchall()
for row in records:
tree.insert("", 3, text=row[0], values=(row[1],row[2],row[3],row[4],row[5],row[6]))
tree.pack(side=tk.TOP,fill=tk.X)
elif len(Rollnos)!=0:
cursor.execute("""select A.Name,A.Age,A.Gender,A.Height,A._Weight,A.StudentId,B.Activity
from Students A inner join Sports B on A.StudentId=B.StudentId where A.StudentId like(?)""",(Rollnos))
records=cursor.fetchall()
for row in records:
tree.insert("", 3, text=row[0], values=(row[1],row[2],row[3],row[4],row[5],row[6]))
tree.pack(side=tk.TOP,fill=tk.X)
elif len(Sports)!=0:
cursor.execute("""select A.Name,A.Age,A.Gender,A.Height,A._Weight,A.StudentId,B.Activity
from Students A inner join Sports B on A.StudentId=B.StudentId where B.Activity like(?)""",(Sports))
records=cursor.fetchall()
for row in records:
tree.insert("", 3, text=row[0], values=(row[1],row[2],row[3],row[4],row[5],row[6]))
tree.pack(side=tk.TOP,fill=tk.X)
else:
messagebox.showinfo("Tkinter", "Atleast one search criteria must be given!")
#Search ends
# function to clear all entry fields
def clearfields():
Name.delete(0 ,tk.END)
Age.delete(0 ,tk.END)
Gender.delete(0 ,tk.END)
height.delete(0 ,tk.END)
weight.delete(0 ,tk.END)
StudentId.delete(0 ,tk.END)
Sport.delete(0 ,tk.END)
# defining the canvas
root= tk.Tk()
canvas1 = tk.Canvas(root, width = 900, height = 300)
canvas1.pack()
# Defining the fields and labels and validating
Name = tk.Entry (root)
canvas1.create_window(300, 10, window=Name)
label1 = tk.Label(root, text='Name:')
label1.config(font=('helvetica', 10))
canvas1.create_window(200, 10, window=label1)
Age = tk.Entry (root)
canvas1.create_window(300, 40, window=Age)
label2 = tk.Label(root, text='Age:')
label2.config(font=('helvetica', 10))
canvas1.create_window(200, 40, window=label2)
Gender = tk.Entry (root)
canvas1.create_window(300, 70, window=Gender)
label3 = tk.Label(root, text='Gender:')
label3.config(font=('helvetica', 10))
canvas1.create_window(200, 70, window=label3)
height = tk.Entry (root)
canvas1.create_window(300, 100, window=height)
label4 = tk.Label(root, text='height in cm:')
label4.config(font=('helvetica', 10))
canvas1.create_window(200, 100, window=label4)
weight = tk.Entry (root)
canvas1.create_window(300, 130, window=weight)
label5 = tk.Label(root, text='weight in kg:')
label5.config(font=('helvetica', 10))
canvas1.create_window(200, 130, window=label5)
StudentId = tk.Entry (root)
canvas1.create_window(300, 160, window=StudentId)
label6 = tk.Label(root, text='StudentId:')
label6.config(font=('helvetica', 10))
canvas1.create_window(200, 160, window=label6)
Sport = tk.Entry (root)
canvas1.create_window(300, 190, window=Sport)
label7 = tk.Label(root, text='Sport:')
label7.config(font=('helvetica', 10))
canvas1.create_window(200, 190, window=label7)
# Defining the buttons
button1 = tk.Button(text='Save',command = save)
canvas1.create_window(500, 250, window=button1)
button5 = tk.Button(text='Search',command=Search)
canvas1.create_window(400, 250, window=button5)
button3 = tk.Button(text='delete',command=delete)
canvas1.create_window(450, 250, window=button3)
# Defining the tree
tree=ttk.Treeview(root)
tree["columns"]=("one","two","three","four","five","six")
tree.column("#0", width=130, minwidth=270, stretch=tk.NO)
tree.column("one", width=100, minwidth=150, stretch=tk.NO)
tree.column("two", width=100, minwidth=100)
tree.column("three", width=100, minwidth=50, stretch=tk.NO)
tree.column("three", width=100, minwidth=50, stretch=tk.NO)
tree.column("three", width=100, minwidth=50, stretch=tk.NO)
tree.heading("#0",text="Name",anchor=tk.W)
tree.heading("one", text="Age",anchor=tk.W)
tree.heading("two", text="Gender",anchor=tk.W)
tree.heading("three", text="Height",anchor=tk.W)
tree.heading("four", text="Weight",anchor=tk.W)
tree.heading("five", text="StudentId",anchor=tk.W)
tree.heading("six", text="Sports",anchor=tk.W)
tree.pack()
root.mainloop()
|
normal
|
{
"blob_id": "8058ff209af03b7365ffad2a9ce2e2805b548f53",
"index": 9927,
"step-1": "<mask token>\n\n\ndef save():\n Names = Name.get()\n Ages = Age.get()\n Genders = Gender.get()\n Heights = height.get()\n weights = weight.get()\n rollnos = StudentId.get()\n Sports = Sport.get()\n cursor.execute(\n \"\"\"\n INSERT INTO Students(Name, Age, Gender, Height,_weight,StudentId)\n VALUES (?,?,?,?,?,?)\"\"\"\n , (Names, Ages, Genders, Heights, weights, rollnos))\n conn.commit()\n cursor.execute(\n \"\"\"\n INSERT INTO Activity(Name,StudentId,Activity)\n VALUES (?,?,?)\n \"\"\"\n , (Names, rollnos, Sports))\n conn.commit()\n clearfields()\n messagebox.showinfo('Tkinter', 'Saved successfully!')\n\n\ndef delete():\n x = StudentId.get()\n cursor.execute(\n \"\"\"\n DELETE FROM Students\n WHERE StudentId = (?)\"\"\", x)\n conn.commit()\n cursor.execute(\n \"\"\"\n DELETE FROM Activity\n WHERE StudentId = (?)\"\"\", x)\n clearfields()\n messagebox.showinfo('Tkinter', 'Deleted successfully!')\n\n\ndef Search():\n Names = Name.get()\n Ages = Age.get()\n Genders = Gender.get()\n Heights = height.get()\n Weights = weight.get()\n Rollnos = StudentId.get()\n Sports = Sport.get()\n t = tree.get_children()\n for f in t:\n tree.delete(f)\n if len(Names) != 0:\n cursor.execute(\n \"\"\"select A.Name,A.Age,A.Gender,A.Height,A._Weight,A.StudentId,B.Activity\n from Students A inner join Activity B on A.StudentId=B.StudentId where A.Name like(?)\"\"\"\n , Names)\n records = cursor.fetchall()\n for row in records:\n tree.insert('', 3, text=row[0], values=(row[1], row[2], row[3],\n row[4], row[5], row[6]))\n tree.pack(side=tk.TOP, fill=tk.X)\n elif len(Ages) != 0:\n cursor.execute(\n \"\"\"select A.Name,A.Age,A.Gender,A.Height,A._Weight,A.StudentId,B.Activity\n from Students A inner join Sports B on A.StudentId=B.StudentId where A.Age like(?)\"\"\"\n , Ages)\n records = cursor.fetchall()\n for row in records:\n tree.insert('', 3, text=row[0], values=(row[1], row[2], row[3],\n row[4], row[5], row[6]))\n tree.pack(side=tk.TOP, fill=tk.X)\n elif len(Genders) != 0:\n cursor.execute(\n \"\"\"select A.Name,A.Age,A.Gender,A.Height,A._Weight,A.StudentId,B.Activity\n from Students A inner join Sports B on A.StudentId=B.StudentId where A.Gender like(?)\"\"\"\n , Genders)\n records = cursor.fetchall()\n for row in records:\n tree.insert('', 3, text=row[0], values=(row[1], row[2], row[3],\n row[4], row[5], row[6]))\n tree.pack(side=tk.TOP, fill=tk.X)\n elif len(Heights) != 0:\n cursor.execute(\n \"\"\"select A.Name,A.Age,A.Gender,A.Height,A._Weight,A.StudentId,B.Activity\n from Students A inner join Sports B on A.StudentId=B.StudentId where A.Height like(?)\"\"\"\n , Heights)\n records = cursor.fetchall()\n for row in records:\n tree.insert('', 3, text=row[0], values=(row[1], row[2], row[3],\n row[4], row[5], row[6]))\n tree.pack(side=tk.TOP, fill=tk.X)\n elif len(Weights) != 0:\n cursor.execute(\n \"\"\"select A.Name,A.Age,A.Gender,A.Height,A._Weight,A.StudentId,B.Activity\n from Students A inner join Sports B on A.StudentId=B.StudentId where A._Weight like(?)\"\"\"\n , Weights)\n records = cursor.fetchall()\n for row in records:\n tree.insert('', 3, text=row[0], values=(row[1], row[2], row[3],\n row[4], row[5], row[6]))\n tree.pack(side=tk.TOP, fill=tk.X)\n elif len(Rollnos) != 0:\n cursor.execute(\n \"\"\"select A.Name,A.Age,A.Gender,A.Height,A._Weight,A.StudentId,B.Activity\n from Students A inner join Sports B on A.StudentId=B.StudentId where A.StudentId like(?)\"\"\"\n , Rollnos)\n records = cursor.fetchall()\n for row in records:\n tree.insert('', 3, text=row[0], values=(row[1], row[2], 
row[3],\n row[4], row[5], row[6]))\n tree.pack(side=tk.TOP, fill=tk.X)\n elif len(Sports) != 0:\n cursor.execute(\n \"\"\"select A.Name,A.Age,A.Gender,A.Height,A._Weight,A.StudentId,B.Activity\n from Students A inner join Sports B on A.StudentId=B.StudentId where B.Activity like(?)\"\"\"\n , Sports)\n records = cursor.fetchall()\n for row in records:\n tree.insert('', 3, text=row[0], values=(row[1], row[2], row[3],\n row[4], row[5], row[6]))\n tree.pack(side=tk.TOP, fill=tk.X)\n else:\n messagebox.showinfo('Tkinter',\n 'Atleast one search criteria must be given!')\n\n\ndef clearfields():\n Name.delete(0, tk.END)\n Age.delete(0, tk.END)\n Gender.delete(0, tk.END)\n height.delete(0, tk.END)\n weight.delete(0, tk.END)\n StudentId.delete(0, tk.END)\n Sport.delete(0, tk.END)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef save():\n Names = Name.get()\n Ages = Age.get()\n Genders = Gender.get()\n Heights = height.get()\n weights = weight.get()\n rollnos = StudentId.get()\n Sports = Sport.get()\n cursor.execute(\n \"\"\"\n INSERT INTO Students(Name, Age, Gender, Height,_weight,StudentId)\n VALUES (?,?,?,?,?,?)\"\"\"\n , (Names, Ages, Genders, Heights, weights, rollnos))\n conn.commit()\n cursor.execute(\n \"\"\"\n INSERT INTO Activity(Name,StudentId,Activity)\n VALUES (?,?,?)\n \"\"\"\n , (Names, rollnos, Sports))\n conn.commit()\n clearfields()\n messagebox.showinfo('Tkinter', 'Saved successfully!')\n\n\ndef delete():\n x = StudentId.get()\n cursor.execute(\n \"\"\"\n DELETE FROM Students\n WHERE StudentId = (?)\"\"\", x)\n conn.commit()\n cursor.execute(\n \"\"\"\n DELETE FROM Activity\n WHERE StudentId = (?)\"\"\", x)\n clearfields()\n messagebox.showinfo('Tkinter', 'Deleted successfully!')\n\n\ndef Search():\n Names = Name.get()\n Ages = Age.get()\n Genders = Gender.get()\n Heights = height.get()\n Weights = weight.get()\n Rollnos = StudentId.get()\n Sports = Sport.get()\n t = tree.get_children()\n for f in t:\n tree.delete(f)\n if len(Names) != 0:\n cursor.execute(\n \"\"\"select A.Name,A.Age,A.Gender,A.Height,A._Weight,A.StudentId,B.Activity\n from Students A inner join Activity B on A.StudentId=B.StudentId where A.Name like(?)\"\"\"\n , Names)\n records = cursor.fetchall()\n for row in records:\n tree.insert('', 3, text=row[0], values=(row[1], row[2], row[3],\n row[4], row[5], row[6]))\n tree.pack(side=tk.TOP, fill=tk.X)\n elif len(Ages) != 0:\n cursor.execute(\n \"\"\"select A.Name,A.Age,A.Gender,A.Height,A._Weight,A.StudentId,B.Activity\n from Students A inner join Sports B on A.StudentId=B.StudentId where A.Age like(?)\"\"\"\n , Ages)\n records = cursor.fetchall()\n for row in records:\n tree.insert('', 3, text=row[0], values=(row[1], row[2], row[3],\n row[4], row[5], row[6]))\n tree.pack(side=tk.TOP, fill=tk.X)\n elif len(Genders) != 0:\n cursor.execute(\n \"\"\"select A.Name,A.Age,A.Gender,A.Height,A._Weight,A.StudentId,B.Activity\n from Students A inner join Sports B on A.StudentId=B.StudentId where A.Gender like(?)\"\"\"\n , Genders)\n records = cursor.fetchall()\n for row in records:\n tree.insert('', 3, text=row[0], values=(row[1], row[2], row[3],\n row[4], row[5], row[6]))\n tree.pack(side=tk.TOP, fill=tk.X)\n elif len(Heights) != 0:\n cursor.execute(\n \"\"\"select A.Name,A.Age,A.Gender,A.Height,A._Weight,A.StudentId,B.Activity\n from Students A inner join Sports B on A.StudentId=B.StudentId where A.Height like(?)\"\"\"\n , Heights)\n records = cursor.fetchall()\n for row in records:\n tree.insert('', 3, text=row[0], values=(row[1], row[2], row[3],\n row[4], row[5], row[6]))\n tree.pack(side=tk.TOP, fill=tk.X)\n elif len(Weights) != 0:\n cursor.execute(\n \"\"\"select A.Name,A.Age,A.Gender,A.Height,A._Weight,A.StudentId,B.Activity\n from Students A inner join Sports B on A.StudentId=B.StudentId where A._Weight like(?)\"\"\"\n , Weights)\n records = cursor.fetchall()\n for row in records:\n tree.insert('', 3, text=row[0], values=(row[1], row[2], row[3],\n row[4], row[5], row[6]))\n tree.pack(side=tk.TOP, fill=tk.X)\n elif len(Rollnos) != 0:\n cursor.execute(\n \"\"\"select A.Name,A.Age,A.Gender,A.Height,A._Weight,A.StudentId,B.Activity\n from Students A inner join Sports B on A.StudentId=B.StudentId where A.StudentId like(?)\"\"\"\n , Rollnos)\n records = cursor.fetchall()\n for row in records:\n tree.insert('', 3, text=row[0], values=(row[1], row[2], 
row[3],\n row[4], row[5], row[6]))\n tree.pack(side=tk.TOP, fill=tk.X)\n elif len(Sports) != 0:\n cursor.execute(\n \"\"\"select A.Name,A.Age,A.Gender,A.Height,A._Weight,A.StudentId,B.Activity\n from Students A inner join Sports B on A.StudentId=B.StudentId where B.Activity like(?)\"\"\"\n , Sports)\n records = cursor.fetchall()\n for row in records:\n tree.insert('', 3, text=row[0], values=(row[1], row[2], row[3],\n row[4], row[5], row[6]))\n tree.pack(side=tk.TOP, fill=tk.X)\n else:\n messagebox.showinfo('Tkinter',\n 'Atleast one search criteria must be given!')\n\n\ndef clearfields():\n Name.delete(0, tk.END)\n Age.delete(0, tk.END)\n Gender.delete(0, tk.END)\n height.delete(0, tk.END)\n weight.delete(0, tk.END)\n StudentId.delete(0, tk.END)\n Sport.delete(0, tk.END)\n\n\n<mask token>\ncanvas1.pack()\n<mask token>\ncanvas1.create_window(300, 10, window=Name)\n<mask token>\nlabel1.config(font=('helvetica', 10))\ncanvas1.create_window(200, 10, window=label1)\n<mask token>\ncanvas1.create_window(300, 40, window=Age)\n<mask token>\nlabel2.config(font=('helvetica', 10))\ncanvas1.create_window(200, 40, window=label2)\n<mask token>\ncanvas1.create_window(300, 70, window=Gender)\n<mask token>\nlabel3.config(font=('helvetica', 10))\ncanvas1.create_window(200, 70, window=label3)\n<mask token>\ncanvas1.create_window(300, 100, window=height)\n<mask token>\nlabel4.config(font=('helvetica', 10))\ncanvas1.create_window(200, 100, window=label4)\n<mask token>\ncanvas1.create_window(300, 130, window=weight)\n<mask token>\nlabel5.config(font=('helvetica', 10))\ncanvas1.create_window(200, 130, window=label5)\n<mask token>\ncanvas1.create_window(300, 160, window=StudentId)\n<mask token>\nlabel6.config(font=('helvetica', 10))\ncanvas1.create_window(200, 160, window=label6)\n<mask token>\ncanvas1.create_window(300, 190, window=Sport)\n<mask token>\nlabel7.config(font=('helvetica', 10))\ncanvas1.create_window(200, 190, window=label7)\n<mask token>\ncanvas1.create_window(500, 250, window=button1)\n<mask token>\ncanvas1.create_window(400, 250, window=button5)\n<mask token>\ncanvas1.create_window(450, 250, window=button3)\n<mask token>\ntree.column('#0', width=130, minwidth=270, stretch=tk.NO)\ntree.column('one', width=100, minwidth=150, stretch=tk.NO)\ntree.column('two', width=100, minwidth=100)\ntree.column('three', width=100, minwidth=50, stretch=tk.NO)\ntree.column('three', width=100, minwidth=50, stretch=tk.NO)\ntree.column('three', width=100, minwidth=50, stretch=tk.NO)\ntree.heading('#0', text='Name', anchor=tk.W)\ntree.heading('one', text='Age', anchor=tk.W)\ntree.heading('two', text='Gender', anchor=tk.W)\ntree.heading('three', text='Height', anchor=tk.W)\ntree.heading('four', text='Weight', anchor=tk.W)\ntree.heading('five', text='StudentId', anchor=tk.W)\ntree.heading('six', text='Sports', anchor=tk.W)\ntree.pack()\nroot.mainloop()\n",
"step-3": "<mask token>\nconn = pyodbc.connect(\n 'Driver={SQL Server};Server=MUTHUCOMPUTER;Database=Class4c v1;Trusted_Connection=yes;'\n )\ncursor = conn.cursor()\n\n\ndef save():\n Names = Name.get()\n Ages = Age.get()\n Genders = Gender.get()\n Heights = height.get()\n weights = weight.get()\n rollnos = StudentId.get()\n Sports = Sport.get()\n cursor.execute(\n \"\"\"\n INSERT INTO Students(Name, Age, Gender, Height,_weight,StudentId)\n VALUES (?,?,?,?,?,?)\"\"\"\n , (Names, Ages, Genders, Heights, weights, rollnos))\n conn.commit()\n cursor.execute(\n \"\"\"\n INSERT INTO Activity(Name,StudentId,Activity)\n VALUES (?,?,?)\n \"\"\"\n , (Names, rollnos, Sports))\n conn.commit()\n clearfields()\n messagebox.showinfo('Tkinter', 'Saved successfully!')\n\n\ndef delete():\n x = StudentId.get()\n cursor.execute(\n \"\"\"\n DELETE FROM Students\n WHERE StudentId = (?)\"\"\", x)\n conn.commit()\n cursor.execute(\n \"\"\"\n DELETE FROM Activity\n WHERE StudentId = (?)\"\"\", x)\n clearfields()\n messagebox.showinfo('Tkinter', 'Deleted successfully!')\n\n\ndef Search():\n Names = Name.get()\n Ages = Age.get()\n Genders = Gender.get()\n Heights = height.get()\n Weights = weight.get()\n Rollnos = StudentId.get()\n Sports = Sport.get()\n t = tree.get_children()\n for f in t:\n tree.delete(f)\n if len(Names) != 0:\n cursor.execute(\n \"\"\"select A.Name,A.Age,A.Gender,A.Height,A._Weight,A.StudentId,B.Activity\n from Students A inner join Activity B on A.StudentId=B.StudentId where A.Name like(?)\"\"\"\n , Names)\n records = cursor.fetchall()\n for row in records:\n tree.insert('', 3, text=row[0], values=(row[1], row[2], row[3],\n row[4], row[5], row[6]))\n tree.pack(side=tk.TOP, fill=tk.X)\n elif len(Ages) != 0:\n cursor.execute(\n \"\"\"select A.Name,A.Age,A.Gender,A.Height,A._Weight,A.StudentId,B.Activity\n from Students A inner join Sports B on A.StudentId=B.StudentId where A.Age like(?)\"\"\"\n , Ages)\n records = cursor.fetchall()\n for row in records:\n tree.insert('', 3, text=row[0], values=(row[1], row[2], row[3],\n row[4], row[5], row[6]))\n tree.pack(side=tk.TOP, fill=tk.X)\n elif len(Genders) != 0:\n cursor.execute(\n \"\"\"select A.Name,A.Age,A.Gender,A.Height,A._Weight,A.StudentId,B.Activity\n from Students A inner join Sports B on A.StudentId=B.StudentId where A.Gender like(?)\"\"\"\n , Genders)\n records = cursor.fetchall()\n for row in records:\n tree.insert('', 3, text=row[0], values=(row[1], row[2], row[3],\n row[4], row[5], row[6]))\n tree.pack(side=tk.TOP, fill=tk.X)\n elif len(Heights) != 0:\n cursor.execute(\n \"\"\"select A.Name,A.Age,A.Gender,A.Height,A._Weight,A.StudentId,B.Activity\n from Students A inner join Sports B on A.StudentId=B.StudentId where A.Height like(?)\"\"\"\n , Heights)\n records = cursor.fetchall()\n for row in records:\n tree.insert('', 3, text=row[0], values=(row[1], row[2], row[3],\n row[4], row[5], row[6]))\n tree.pack(side=tk.TOP, fill=tk.X)\n elif len(Weights) != 0:\n cursor.execute(\n \"\"\"select A.Name,A.Age,A.Gender,A.Height,A._Weight,A.StudentId,B.Activity\n from Students A inner join Sports B on A.StudentId=B.StudentId where A._Weight like(?)\"\"\"\n , Weights)\n records = cursor.fetchall()\n for row in records:\n tree.insert('', 3, text=row[0], values=(row[1], row[2], row[3],\n row[4], row[5], row[6]))\n tree.pack(side=tk.TOP, fill=tk.X)\n elif len(Rollnos) != 0:\n cursor.execute(\n \"\"\"select A.Name,A.Age,A.Gender,A.Height,A._Weight,A.StudentId,B.Activity\n from Students A inner join Sports B on A.StudentId=B.StudentId where A.StudentId 
like(?)\"\"\"\n , Rollnos)\n records = cursor.fetchall()\n for row in records:\n tree.insert('', 3, text=row[0], values=(row[1], row[2], row[3],\n row[4], row[5], row[6]))\n tree.pack(side=tk.TOP, fill=tk.X)\n elif len(Sports) != 0:\n cursor.execute(\n \"\"\"select A.Name,A.Age,A.Gender,A.Height,A._Weight,A.StudentId,B.Activity\n from Students A inner join Sports B on A.StudentId=B.StudentId where B.Activity like(?)\"\"\"\n , Sports)\n records = cursor.fetchall()\n for row in records:\n tree.insert('', 3, text=row[0], values=(row[1], row[2], row[3],\n row[4], row[5], row[6]))\n tree.pack(side=tk.TOP, fill=tk.X)\n else:\n messagebox.showinfo('Tkinter',\n 'Atleast one search criteria must be given!')\n\n\ndef clearfields():\n Name.delete(0, tk.END)\n Age.delete(0, tk.END)\n Gender.delete(0, tk.END)\n height.delete(0, tk.END)\n weight.delete(0, tk.END)\n StudentId.delete(0, tk.END)\n Sport.delete(0, tk.END)\n\n\nroot = tk.Tk()\ncanvas1 = tk.Canvas(root, width=900, height=300)\ncanvas1.pack()\nName = tk.Entry(root)\ncanvas1.create_window(300, 10, window=Name)\nlabel1 = tk.Label(root, text='Name:')\nlabel1.config(font=('helvetica', 10))\ncanvas1.create_window(200, 10, window=label1)\nAge = tk.Entry(root)\ncanvas1.create_window(300, 40, window=Age)\nlabel2 = tk.Label(root, text='Age:')\nlabel2.config(font=('helvetica', 10))\ncanvas1.create_window(200, 40, window=label2)\nGender = tk.Entry(root)\ncanvas1.create_window(300, 70, window=Gender)\nlabel3 = tk.Label(root, text='Gender:')\nlabel3.config(font=('helvetica', 10))\ncanvas1.create_window(200, 70, window=label3)\nheight = tk.Entry(root)\ncanvas1.create_window(300, 100, window=height)\nlabel4 = tk.Label(root, text='height in cm:')\nlabel4.config(font=('helvetica', 10))\ncanvas1.create_window(200, 100, window=label4)\nweight = tk.Entry(root)\ncanvas1.create_window(300, 130, window=weight)\nlabel5 = tk.Label(root, text='weight in kg:')\nlabel5.config(font=('helvetica', 10))\ncanvas1.create_window(200, 130, window=label5)\nStudentId = tk.Entry(root)\ncanvas1.create_window(300, 160, window=StudentId)\nlabel6 = tk.Label(root, text='StudentId:')\nlabel6.config(font=('helvetica', 10))\ncanvas1.create_window(200, 160, window=label6)\nSport = tk.Entry(root)\ncanvas1.create_window(300, 190, window=Sport)\nlabel7 = tk.Label(root, text='Sport:')\nlabel7.config(font=('helvetica', 10))\ncanvas1.create_window(200, 190, window=label7)\nbutton1 = tk.Button(text='Save', command=save)\ncanvas1.create_window(500, 250, window=button1)\nbutton5 = tk.Button(text='Search', command=Search)\ncanvas1.create_window(400, 250, window=button5)\nbutton3 = tk.Button(text='delete', command=delete)\ncanvas1.create_window(450, 250, window=button3)\ntree = ttk.Treeview(root)\ntree['columns'] = 'one', 'two', 'three', 'four', 'five', 'six'\ntree.column('#0', width=130, minwidth=270, stretch=tk.NO)\ntree.column('one', width=100, minwidth=150, stretch=tk.NO)\ntree.column('two', width=100, minwidth=100)\ntree.column('three', width=100, minwidth=50, stretch=tk.NO)\ntree.column('three', width=100, minwidth=50, stretch=tk.NO)\ntree.column('three', width=100, minwidth=50, stretch=tk.NO)\ntree.heading('#0', text='Name', anchor=tk.W)\ntree.heading('one', text='Age', anchor=tk.W)\ntree.heading('two', text='Gender', anchor=tk.W)\ntree.heading('three', text='Height', anchor=tk.W)\ntree.heading('four', text='Weight', anchor=tk.W)\ntree.heading('five', text='StudentId', anchor=tk.W)\ntree.heading('six', text='Sports', anchor=tk.W)\ntree.pack()\nroot.mainloop()\n",
"step-4": "from tkinter import ttk\nimport tkinter as tk\nimport pyodbc\nfrom tkinter import messagebox\nconn = pyodbc.connect(\n 'Driver={SQL Server};Server=MUTHUCOMPUTER;Database=Class4c v1;Trusted_Connection=yes;'\n )\ncursor = conn.cursor()\n\n\ndef save():\n Names = Name.get()\n Ages = Age.get()\n Genders = Gender.get()\n Heights = height.get()\n weights = weight.get()\n rollnos = StudentId.get()\n Sports = Sport.get()\n cursor.execute(\n \"\"\"\n INSERT INTO Students(Name, Age, Gender, Height,_weight,StudentId)\n VALUES (?,?,?,?,?,?)\"\"\"\n , (Names, Ages, Genders, Heights, weights, rollnos))\n conn.commit()\n cursor.execute(\n \"\"\"\n INSERT INTO Activity(Name,StudentId,Activity)\n VALUES (?,?,?)\n \"\"\"\n , (Names, rollnos, Sports))\n conn.commit()\n clearfields()\n messagebox.showinfo('Tkinter', 'Saved successfully!')\n\n\ndef delete():\n x = StudentId.get()\n cursor.execute(\n \"\"\"\n DELETE FROM Students\n WHERE StudentId = (?)\"\"\", x)\n conn.commit()\n cursor.execute(\n \"\"\"\n DELETE FROM Activity\n WHERE StudentId = (?)\"\"\", x)\n clearfields()\n messagebox.showinfo('Tkinter', 'Deleted successfully!')\n\n\ndef Search():\n Names = Name.get()\n Ages = Age.get()\n Genders = Gender.get()\n Heights = height.get()\n Weights = weight.get()\n Rollnos = StudentId.get()\n Sports = Sport.get()\n t = tree.get_children()\n for f in t:\n tree.delete(f)\n if len(Names) != 0:\n cursor.execute(\n \"\"\"select A.Name,A.Age,A.Gender,A.Height,A._Weight,A.StudentId,B.Activity\n from Students A inner join Activity B on A.StudentId=B.StudentId where A.Name like(?)\"\"\"\n , Names)\n records = cursor.fetchall()\n for row in records:\n tree.insert('', 3, text=row[0], values=(row[1], row[2], row[3],\n row[4], row[5], row[6]))\n tree.pack(side=tk.TOP, fill=tk.X)\n elif len(Ages) != 0:\n cursor.execute(\n \"\"\"select A.Name,A.Age,A.Gender,A.Height,A._Weight,A.StudentId,B.Activity\n from Students A inner join Sports B on A.StudentId=B.StudentId where A.Age like(?)\"\"\"\n , Ages)\n records = cursor.fetchall()\n for row in records:\n tree.insert('', 3, text=row[0], values=(row[1], row[2], row[3],\n row[4], row[5], row[6]))\n tree.pack(side=tk.TOP, fill=tk.X)\n elif len(Genders) != 0:\n cursor.execute(\n \"\"\"select A.Name,A.Age,A.Gender,A.Height,A._Weight,A.StudentId,B.Activity\n from Students A inner join Sports B on A.StudentId=B.StudentId where A.Gender like(?)\"\"\"\n , Genders)\n records = cursor.fetchall()\n for row in records:\n tree.insert('', 3, text=row[0], values=(row[1], row[2], row[3],\n row[4], row[5], row[6]))\n tree.pack(side=tk.TOP, fill=tk.X)\n elif len(Heights) != 0:\n cursor.execute(\n \"\"\"select A.Name,A.Age,A.Gender,A.Height,A._Weight,A.StudentId,B.Activity\n from Students A inner join Sports B on A.StudentId=B.StudentId where A.Height like(?)\"\"\"\n , Heights)\n records = cursor.fetchall()\n for row in records:\n tree.insert('', 3, text=row[0], values=(row[1], row[2], row[3],\n row[4], row[5], row[6]))\n tree.pack(side=tk.TOP, fill=tk.X)\n elif len(Weights) != 0:\n cursor.execute(\n \"\"\"select A.Name,A.Age,A.Gender,A.Height,A._Weight,A.StudentId,B.Activity\n from Students A inner join Sports B on A.StudentId=B.StudentId where A._Weight like(?)\"\"\"\n , Weights)\n records = cursor.fetchall()\n for row in records:\n tree.insert('', 3, text=row[0], values=(row[1], row[2], row[3],\n row[4], row[5], row[6]))\n tree.pack(side=tk.TOP, fill=tk.X)\n elif len(Rollnos) != 0:\n cursor.execute(\n \"\"\"select A.Name,A.Age,A.Gender,A.Height,A._Weight,A.StudentId,B.Activity\n from 
Students A inner join Sports B on A.StudentId=B.StudentId where A.StudentId like(?)\"\"\"\n , Rollnos)\n records = cursor.fetchall()\n for row in records:\n tree.insert('', 3, text=row[0], values=(row[1], row[2], row[3],\n row[4], row[5], row[6]))\n tree.pack(side=tk.TOP, fill=tk.X)\n elif len(Sports) != 0:\n cursor.execute(\n \"\"\"select A.Name,A.Age,A.Gender,A.Height,A._Weight,A.StudentId,B.Activity\n from Students A inner join Sports B on A.StudentId=B.StudentId where B.Activity like(?)\"\"\"\n , Sports)\n records = cursor.fetchall()\n for row in records:\n tree.insert('', 3, text=row[0], values=(row[1], row[2], row[3],\n row[4], row[5], row[6]))\n tree.pack(side=tk.TOP, fill=tk.X)\n else:\n messagebox.showinfo('Tkinter',\n 'Atleast one search criteria must be given!')\n\n\ndef clearfields():\n Name.delete(0, tk.END)\n Age.delete(0, tk.END)\n Gender.delete(0, tk.END)\n height.delete(0, tk.END)\n weight.delete(0, tk.END)\n StudentId.delete(0, tk.END)\n Sport.delete(0, tk.END)\n\n\nroot = tk.Tk()\ncanvas1 = tk.Canvas(root, width=900, height=300)\ncanvas1.pack()\nName = tk.Entry(root)\ncanvas1.create_window(300, 10, window=Name)\nlabel1 = tk.Label(root, text='Name:')\nlabel1.config(font=('helvetica', 10))\ncanvas1.create_window(200, 10, window=label1)\nAge = tk.Entry(root)\ncanvas1.create_window(300, 40, window=Age)\nlabel2 = tk.Label(root, text='Age:')\nlabel2.config(font=('helvetica', 10))\ncanvas1.create_window(200, 40, window=label2)\nGender = tk.Entry(root)\ncanvas1.create_window(300, 70, window=Gender)\nlabel3 = tk.Label(root, text='Gender:')\nlabel3.config(font=('helvetica', 10))\ncanvas1.create_window(200, 70, window=label3)\nheight = tk.Entry(root)\ncanvas1.create_window(300, 100, window=height)\nlabel4 = tk.Label(root, text='height in cm:')\nlabel4.config(font=('helvetica', 10))\ncanvas1.create_window(200, 100, window=label4)\nweight = tk.Entry(root)\ncanvas1.create_window(300, 130, window=weight)\nlabel5 = tk.Label(root, text='weight in kg:')\nlabel5.config(font=('helvetica', 10))\ncanvas1.create_window(200, 130, window=label5)\nStudentId = tk.Entry(root)\ncanvas1.create_window(300, 160, window=StudentId)\nlabel6 = tk.Label(root, text='StudentId:')\nlabel6.config(font=('helvetica', 10))\ncanvas1.create_window(200, 160, window=label6)\nSport = tk.Entry(root)\ncanvas1.create_window(300, 190, window=Sport)\nlabel7 = tk.Label(root, text='Sport:')\nlabel7.config(font=('helvetica', 10))\ncanvas1.create_window(200, 190, window=label7)\nbutton1 = tk.Button(text='Save', command=save)\ncanvas1.create_window(500, 250, window=button1)\nbutton5 = tk.Button(text='Search', command=Search)\ncanvas1.create_window(400, 250, window=button5)\nbutton3 = tk.Button(text='delete', command=delete)\ncanvas1.create_window(450, 250, window=button3)\ntree = ttk.Treeview(root)\ntree['columns'] = 'one', 'two', 'three', 'four', 'five', 'six'\ntree.column('#0', width=130, minwidth=270, stretch=tk.NO)\ntree.column('one', width=100, minwidth=150, stretch=tk.NO)\ntree.column('two', width=100, minwidth=100)\ntree.column('three', width=100, minwidth=50, stretch=tk.NO)\ntree.column('three', width=100, minwidth=50, stretch=tk.NO)\ntree.column('three', width=100, minwidth=50, stretch=tk.NO)\ntree.heading('#0', text='Name', anchor=tk.W)\ntree.heading('one', text='Age', anchor=tk.W)\ntree.heading('two', text='Gender', anchor=tk.W)\ntree.heading('three', text='Height', anchor=tk.W)\ntree.heading('four', text='Weight', anchor=tk.W)\ntree.heading('five', text='StudentId', anchor=tk.W)\ntree.heading('six', text='Sports', 
anchor=tk.W)\ntree.pack()\nroot.mainloop()\n",
"step-5": "from tkinter import ttk\r\nimport tkinter as tk\r\nimport pyodbc\r\n\r\n\r\n#ConnectingDatabase#\r\n\r\nfrom tkinter import messagebox\r\nconn = pyodbc.connect('Driver={SQL Server};'\r\n 'Server=MUTHUCOMPUTER;'\r\n 'Database=Class4c v1;'\r\n 'Trusted_Connection=yes;')\r\ncursor = conn.cursor()\r\n\r\n\r\n#Adding new record#\r\n\r\ndef save():\r\n Names= Name.get()\r\n Ages= Age.get()\r\n Genders= Gender.get()\r\n Heights= height.get()\r\n weights= weight.get()\r\n rollnos= StudentId.get()\r\n Sports=Sport.get()\r\n\r\n cursor.execute(\"\"\"\r\n INSERT INTO Students(Name, Age, Gender, Height,_weight,StudentId)\r\n VALUES (?,?,?,?,?,?)\"\"\",(Names,Ages,Genders,Heights,weights,rollnos))\r\n conn.commit()\r\n cursor.execute(\"\"\"\r\n INSERT INTO Activity(Name,StudentId,Activity)\r\n VALUES (?,?,?)\r\n \"\"\",(Names,rollnos,Sports))\r\n conn.commit()\r\n clearfields()\r\n messagebox.showinfo(\"Tkinter\", \"Saved successfully!\")\r\n\r\n\r\n \r\n \r\n \r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n \r\n#deleting selected record and currently works only with rollnumber\r\n \r\n \r\ndef delete():\r\n x=StudentId.get()\r\n cursor.execute(\"\"\"\r\n DELETE FROM Students\r\n WHERE StudentId = (?)\"\"\",(x))\r\n conn.commit()\r\n cursor.execute(\"\"\"\r\n DELETE FROM Activity\r\n WHERE StudentId = (?)\"\"\",(x))\r\n clearfields()\r\n messagebox.showinfo(\"Tkinter\", \"Deleted successfully!\")\r\n \r\n\r\n#Searching records \r\n\r\ndef Search():\r\n \r\n Names= Name.get()\r\n Ages= Age.get()\r\n Genders= Gender.get()\r\n Heights= height.get()\r\n Weights= weight.get()\r\n Rollnos= StudentId.get()\r\n Sports=Sport.get()\r\n\r\n# clearing the tree\r\n \r\n t=tree.get_children()\r\n for f in t:\r\n tree.delete(f)\r\n \r\n\r\n#Search starts\r\n \r\n\r\n if len(Names)!=0:\r\n cursor.execute(\"\"\"select A.Name,A.Age,A.Gender,A.Height,A._Weight,A.StudentId,B.Activity\r\n from Students A inner join Activity B on A.StudentId=B.StudentId where A.Name like(?)\"\"\",(Names))\r\n records=cursor.fetchall()\r\n for row in records:\r\n tree.insert(\"\", 3, text=row[0], values=(row[1],row[2],row[3],row[4],row[5],row[6]))\r\n tree.pack(side=tk.TOP,fill=tk.X)\r\n \r\n\t\t\r\n elif len(Ages)!=0:\r\n cursor.execute(\"\"\"select A.Name,A.Age,A.Gender,A.Height,A._Weight,A.StudentId,B.Activity\r\n from Students A inner join Sports B on A.StudentId=B.StudentId where A.Age like(?)\"\"\",(Ages))\r\n records=cursor.fetchall()\r\n for row in records:\r\n tree.insert(\"\", 3, text=row[0], values=(row[1],row[2],row[3],row[4],row[5],row[6]))\r\n tree.pack(side=tk.TOP,fill=tk.X)\r\n\r\n\r\n elif len(Genders)!=0:\r\n cursor.execute(\"\"\"select A.Name,A.Age,A.Gender,A.Height,A._Weight,A.StudentId,B.Activity\r\n from Students A inner join Sports B on A.StudentId=B.StudentId where A.Gender like(?)\"\"\",(Genders))\r\n records=cursor.fetchall()\r\n for row in records:\r\n tree.insert(\"\", 3, text=row[0], values=(row[1],row[2],row[3],row[4],row[5],row[6]))\r\n tree.pack(side=tk.TOP,fill=tk.X)\r\n\r\n\r\n elif len(Heights)!=0:\r\n cursor.execute(\"\"\"select A.Name,A.Age,A.Gender,A.Height,A._Weight,A.StudentId,B.Activity\r\n from Students A inner join Sports B on A.StudentId=B.StudentId where A.Height like(?)\"\"\",(Heights))\r\n records=cursor.fetchall()\r\n for row in records:\r\n tree.insert(\"\", 3, text=row[0], values=(row[1],row[2],row[3],row[4],row[5],row[6]))\r\n tree.pack(side=tk.TOP,fill=tk.X) \r\n\r\n\r\n elif len(Weights)!=0:\r\n cursor.execute(\"\"\"select 
A.Name,A.Age,A.Gender,A.Height,A._Weight,A.StudentId,B.Activity\r\n from Students A inner join Sports B on A.StudentId=B.StudentId where A._Weight like(?)\"\"\",(Weights))\r\n records=cursor.fetchall()\r\n for row in records:\r\n tree.insert(\"\", 3, text=row[0], values=(row[1],row[2],row[3],row[4],row[5],row[6]))\r\n tree.pack(side=tk.TOP,fill=tk.X)\r\n\r\n\r\n elif len(Rollnos)!=0:\r\n cursor.execute(\"\"\"select A.Name,A.Age,A.Gender,A.Height,A._Weight,A.StudentId,B.Activity\r\n from Students A inner join Sports B on A.StudentId=B.StudentId where A.StudentId like(?)\"\"\",(Rollnos))\r\n records=cursor.fetchall()\r\n for row in records:\r\n tree.insert(\"\", 3, text=row[0], values=(row[1],row[2],row[3],row[4],row[5],row[6]))\r\n tree.pack(side=tk.TOP,fill=tk.X)\r\n\r\n\r\n elif len(Sports)!=0:\r\n cursor.execute(\"\"\"select A.Name,A.Age,A.Gender,A.Height,A._Weight,A.StudentId,B.Activity\r\n from Students A inner join Sports B on A.StudentId=B.StudentId where B.Activity like(?)\"\"\",(Sports))\r\n records=cursor.fetchall()\r\n for row in records:\r\n tree.insert(\"\", 3, text=row[0], values=(row[1],row[2],row[3],row[4],row[5],row[6]))\r\n tree.pack(side=tk.TOP,fill=tk.X)\r\n\r\n else:\r\n \r\n messagebox.showinfo(\"Tkinter\", \"Atleast one search criteria must be given!\") \r\n\r\n#Search ends\r\n\r\n# function to clear all entry fields\r\n\r\ndef clearfields():\r\n Name.delete(0 ,tk.END)\r\n Age.delete(0 ,tk.END)\r\n Gender.delete(0 ,tk.END)\r\n height.delete(0 ,tk.END)\r\n weight.delete(0 ,tk.END)\r\n StudentId.delete(0 ,tk.END)\r\n Sport.delete(0 ,tk.END)\r\n \r\n\r\n\r\n \r\n# defining the canvas\r\n\r\nroot= tk.Tk()\r\ncanvas1 = tk.Canvas(root, width = 900, height = 300)\r\ncanvas1.pack()\r\n\r\n# Defining the fields and labels and validating\r\n\r\nName = tk.Entry (root)\r\ncanvas1.create_window(300, 10, window=Name)\r\nlabel1 = tk.Label(root, text='Name:')\r\nlabel1.config(font=('helvetica', 10))\r\ncanvas1.create_window(200, 10, window=label1)\r\n\r\n\r\nAge = tk.Entry (root)\r\ncanvas1.create_window(300, 40, window=Age)\r\nlabel2 = tk.Label(root, text='Age:')\r\nlabel2.config(font=('helvetica', 10))\r\ncanvas1.create_window(200, 40, window=label2)\r\n\r\nGender = tk.Entry (root)\r\ncanvas1.create_window(300, 70, window=Gender)\r\nlabel3 = tk.Label(root, text='Gender:')\r\nlabel3.config(font=('helvetica', 10))\r\ncanvas1.create_window(200, 70, window=label3)\r\n\r\nheight = tk.Entry (root)\r\ncanvas1.create_window(300, 100, window=height)\r\nlabel4 = tk.Label(root, text='height in cm:')\r\nlabel4.config(font=('helvetica', 10))\r\ncanvas1.create_window(200, 100, window=label4)\r\n\r\nweight = tk.Entry (root)\r\ncanvas1.create_window(300, 130, window=weight)\r\nlabel5 = tk.Label(root, text='weight in kg:')\r\nlabel5.config(font=('helvetica', 10))\r\ncanvas1.create_window(200, 130, window=label5)\r\n\r\nStudentId = tk.Entry (root)\r\ncanvas1.create_window(300, 160, window=StudentId)\r\nlabel6 = tk.Label(root, text='StudentId:')\r\nlabel6.config(font=('helvetica', 10))\r\ncanvas1.create_window(200, 160, window=label6)\r\n\r\nSport = tk.Entry (root)\r\ncanvas1.create_window(300, 190, window=Sport)\r\nlabel7 = tk.Label(root, text='Sport:')\r\nlabel7.config(font=('helvetica', 10))\r\ncanvas1.create_window(200, 190, window=label7)\r\n\r\n\r\n# Defining the buttons\r\n\r\nbutton1 = tk.Button(text='Save',command = save)\r\ncanvas1.create_window(500, 250, window=button1)\r\n\r\nbutton5 = tk.Button(text='Search',command=Search)\r\ncanvas1.create_window(400, 250, 
window=button5)\r\n\r\nbutton3 = tk.Button(text='delete',command=delete)\r\ncanvas1.create_window(450, 250, window=button3)\r\n\r\n# Defining the tree\r\n\r\ntree=ttk.Treeview(root)\r\ntree[\"columns\"]=(\"one\",\"two\",\"three\",\"four\",\"five\",\"six\")\r\ntree.column(\"#0\", width=130, minwidth=270, stretch=tk.NO)\r\ntree.column(\"one\", width=100, minwidth=150, stretch=tk.NO)\r\ntree.column(\"two\", width=100, minwidth=100)\r\ntree.column(\"three\", width=100, minwidth=50, stretch=tk.NO)\r\ntree.column(\"three\", width=100, minwidth=50, stretch=tk.NO)\r\ntree.column(\"three\", width=100, minwidth=50, stretch=tk.NO)\r\ntree.heading(\"#0\",text=\"Name\",anchor=tk.W)\r\ntree.heading(\"one\", text=\"Age\",anchor=tk.W)\r\ntree.heading(\"two\", text=\"Gender\",anchor=tk.W)\r\ntree.heading(\"three\", text=\"Height\",anchor=tk.W)\r\ntree.heading(\"four\", text=\"Weight\",anchor=tk.W)\r\ntree.heading(\"five\", text=\"StudentId\",anchor=tk.W)\r\ntree.heading(\"six\", text=\"Sports\",anchor=tk.W)\r\ntree.pack()\r\nroot.mainloop()\r\n",
"step-ids": [
4,
5,
6,
7,
8
]
}
|
[
4,
5,
6,
7,
8
] |
#!/usr/local/bin/python
# -*- coding: utf-8 -*-
# importing regular stuff
import os
import sys
import thread
import threading
import time
import datetime
from datetime import datetime
import random
import filecmp
import ConfigParser
import socket
#my stuff will go here
import include.action as action
import include.logreader as logreader
import include.command as command
import include.logger as log
import include.database as database
import include.timetrack as timetrack
#### code start ####
legit = True
serverstop = False
#### version ####
v = "3.6-revival"
print "Starting up MineMon "+v
time.sleep(0.2)
print "Author: Oscar Carlberg"
#### Load settings ####
setting_file = sys.argv[1]
config = ConfigParser.RawConfigParser()
config.read(setting_file)
#### Connect to MC rcon ####
mchost = config.get('config', 'rhost')
mcport = config.get('config', 'rport')
mcpwd = config.get('config', 'rpass')
#### Connect to MySQL ####
myhost = config.get('config', 'mysqlhost')
myuser = config.get('config', 'mysqluser')
mypass = config.get('config', 'mysqlpass')
mydb = config.get('config', 'mysqldb')
database.settings(myhost, myuser, mypass, mydb)
#### some settings-var ####
mcpath = config.get('config', 'mcpath')
mapurl = config.get('config', 'mapurl')
helpurl = config.get('config', 'helpurl')
screen = config.get('config', 'screen')
mc_mem = config.get('config', 'mc_mem')
gmail = config.get('config', 'gmail')
gpw = config.get('config', 'gmailpw')
mailrcvr = config.get('config', 'sendto')
#### announce that i'm running ####
try:
action.connect(mchost, mcport, mcpwd)
except:
print "Coult not connect to Minecraft Rcon!"
sys.exit()
action.load(gmail, gpw, mailrcvr, screen, mc_mem)
action.say("§aMinecraft Monitor Version "+v+" now running!", 1)
action.say("§aType !help for available commands", 0)
ops = action.load_op(mcpath)
timetrk=timetrack.playtime()
#### check if enabled & op func ####
def enabled(onoroff):
#Check if regular command or feature
if "!" in onoroff:
setting = database.check_enabled_command(onoroff)
#If not enabled say so.
if not setting:
action.say("This command has been disabled for this world!", 0)
return setting
else:
try:
setting = config.get('config', onoroff)
except:
setting = "disabled"
print "NO setting entry for "+onoroff+", disabled."
if "enabled" in setting:
return True
else:
action.say("This command has been disabled for this world!", 0)
return False
def silent_enabled(onoroff):
try:
setting = config.get('config', onoroff)
except:
setting = "disabled"
print "NO setting entry for "+onoroff+", disabled."
if "enabled" in setting:
return True
else:
return False
def check_op(name, command):
op = database.check_command_op(command)
#If commmand does not need op, return OK
if not op:
return True
else:
#else, check if user is op, and return true
if name.lower() in ops:
return True
#if not, deny.
else:
action.say("This command is not allowed for non-op's.", 0)
def nick_washer(nick):
    # Strip Minecraft colour/formatting codes ("§" followed by a single code
    # character: 0-9, a-f, k-o or r) from the name in one pass.
    for code in "0123456789abcdefklmnor":
        nick = nick.replace("§" + code, "")
    #print "Washed: "+nick
    return nick
#### Trigger on chattlog stuff ####
def trigger(name):
global serverstop
if "!help" in chatlog:
if enabled("!help"):
if check_op(name, "!help"):
helpcmnd = command.help(helpurl, chatlog)
log.save2(timestamp, "SYSTEM", "!help", name, "] [", helpcmnd)
elif "!sheen" in chatlog:
if enabled("!sheen"):
if check_op(name, "!sheen"):
command.sheen()
log.save(timestamp, "TEXT", "!sheen", name)
elif "joined the game" in chatlog and not "[Rcon]" in chatlog:
if enabled("login_manner"):
player = command.login(chatlog, v, helpurl)
log.save(timestamp, "GREEN", "Login:", player)
elif "left the game" in chatlog and not "[Rcon]" in chatlog:
if enabled("logout_manner"):
player = command.logout(chatlog)
log.save(timestamp, "RED", "Logout:", player)
elif "!hax" in chatlog and not "[Rcon]" in chatlog:
if enabled("!hax"):
if check_op(name, "!hax"):
command.hax(name)
log.save(timestamp, "SYSTEM", "!hax", name)
elif "!unhax" in chatlog and not "[Rcon]" in chatlog:
if enabled("!unhax"):
if check_op(name, "!unhax"):
command.unhax(name)
log.save(timestamp, "SYSTEM", "!unhax", name)
elif "!adv" in chatlog and not "[Rcon]" in chatlog:
if enabled("!adv"):
if check_op(name, "!adv"):
command.adv(name)
log.save(timestamp, "SYSTEM", "!adv", name)
elif "!day" in chatlog:
if enabled("!day"):
if check_op(name, "!day"):
command.day()
log.save(timestamp, "SYSTEM", "!day", name)
elif "!night" in chatlog:
if enabled("!night"):
if check_op(name, "!night"):
command.night()
log.save(timestamp, "SYSTEM", "!night", name)
elif "!tp" in chatlog and not "[Rcon]" in chatlog:
if enabled("!tp"):
if check_op(name, "!tp"):
who = command.tp(name, chatlog)
log.save2(timestamp, "TEXT", "!tp", name, "] -> [", who)
elif "!pull" in chatlog and not "[Rcon]" in chatlog:
if enabled("!pull"):
if check_op(name, "!pull"):
who = command.pull(name, chatlog)
log.save2(timestamp, "TEXT", "!pull", name, "] <- [", who)
elif "!map" in chatlog:
if enabled("!map"):
if check_op(name, "!map"):
command.map(mapurl)
log.save(timestamp, "SYSTEM", "!map", name)
elif "!version" in chatlog and not "[Rcon]" in chatlog:
if enabled("!version"):
if check_op(name, "!version"):
command.version(v)
log.save(timestamp, "SYSTEM", "!version", name)
elif "!list" in chatlog:
action.say("Deprecated. Press Tab on your keyboard", 0)
elif "!roll" in chatlog and not "[Rcon]" in chatlog:
if enabled("!roll"):
if check_op(name, "!roll"):
roll = command.roll(name)
log.save2(timestamp, "TEXT", "!roll", name, "] [", roll)
elif "!rain" in chatlog and not "[Rcon]" in chatlog:
if enabled("!rain"):
if check_op(name, "!rain"):
command.rain()
log.save(timestamp, "SYSTEM", "!rain", name)
elif "!xp" in chatlog and not "[Rcon]" in chatlog:
if enabled("!xp"):
if check_op(name, "!xp"):
command.xp(name)
log.save(timestamp, "TEXT", "!xp", name)
elif "!kit" in chatlog and not "[Rcon]" in chatlog:
if enabled("!kit"):
if check_op(name, "!kit"):
command.kit(name)
log.save(timestamp, "TEXT", "!kit", name)
elif "!leatherset" in chatlog and not "[Rcon]" in chatlog:
if enabled("!leatherset"):
if check_op(name, "!leatherset"):
command.leatherset(name)
log.save(timestamp, "TEXT", "!leatherset", name)
elif "!diamondset" in chatlog and not "[Rcon]" in chatlog:
if enabled("!diamondset"):
if check_op(name, "!diamondset"):
command.diamondset(name)
log.save(timestamp, "TEXT", "!diamondset", name)
elif "!bow" in chatlog and not "[Rcon]" in chatlog:
if enabled("!bow"):
if check_op(name, "!bow"):
command.bow(name)
log.save(timestamp, "TEXT", "!bow", name)
elif "!train" in chatlog and not "[Rcon]" in chatlog:
if enabled("!train"):
if check_op(name, "!train"):
command.train(name)
log.save(timestamp, "TEXT", "!train", name)
elif "!sleep" in chatlog and not "[Rcon]" in chatlog:
if enabled("!sleep"):
if check_op(name, "!sleep"):
command.sleep(name)
log.save(timestamp, "TEXT", "!sleep", name)
elif "!rail" in chatlog and not "[Rcon]" in chatlog:
if enabled("!rail"):
if check_op(name, "!rail"):
command.rail(name)
log.save(timestamp, "TEXT", "!rail", name)
elif "!food" in chatlog and not "[Rcon]" in chatlog:
if enabled("!food"):
if check_op(name, "!food"):
command.food(name)
log.save(timestamp, "TEXT", "!food", name)
elif "!item" in chatlog and not "[Rcon]" in chatlog:
if enabled("!item"):
if check_op(name, "!item"):
item = command.item(name, chatlog)
log.save2(timestamp, "TEXT", "!item", name, "] [", item)
elif "!restart" in chatlog and not "[Rcon]" in chatlog:
if enabled("!restart"):
if check_op(name, "!restart"):
command.restart()
log.save(timestamp, "SYSTEM", "!restart", name)
elif "!monsters" in chatlog:
if enabled("!monsters"):
if check_op(name, "!monsters"):
onoff = command.monsters(mcpath)
log.save2(timestamp, "SYSTEM", "!monsters", name, "] [", onoff)
elif "!update" in chatlog:
if enabled("!update"):
if check_op(name, "!update") or "Banned Player" in chatlog:
status = command.update(mcpath, mcport)
log.save2(timestamp, "SYSTEM", "!update", name, "] [", status)
elif "!temphax" in chatlog and not "[Rcon]" in chatlog:
if enabled("!temphax"):
if check_op(name, "!temphax"):
who = command.temphax(chatlog)
log.save2(timestamp, "TEXT", "!temphax", name, "] -> [", who)
elif "!report" in chatlog and not "[Rcon]" in chatlog:
if enabled("!report"):
if check_op(name, "!report"):
command.mail(name, chatlog, False)
log.save(timestamp, "SYSTEM", "!report", name)
elif "!played" in chatlog and not "[Rcon]" in chatlog:
if enabled("!played"):
if check_op(name, "!played"):
print "Checking played with name:"+ str(name)
command.played(name)
log.save(timestamp, "TEXT", "!played", name)
elif "!world" in chatlog and not "[Rcon]" in chatlog:
if enabled("!world"):
if check_op(name, "!world"):
success = command.world(name, chatlog, mcpath)
if success:
log.save2(timestamp, "SYSTEM", "!world", name, "] [", success)
elif "!clear" in chatlog and not "[Rcon]" in chatlog:
if enabled("!clear"):
if check_op(name, "!clear"):
command.clear(name)
log.save(timestamp, "TEXT", "!clear", name)
elif "!spawn" in chatlog and not "[Rcon]" in chatlog:
if enabled("!spawn"):
if check_op(name, "!spawn"):
command.spawn(name)
log.save(timestamp, "TEXT", "!spawn", name)
elif "!gateway" in chatlog and not "[Rcon]" in chatlog:
if enabled("!gateway"):
if check_op(name, "!gateway"):
gw = command.gateway(name, chatlog)
log.save2(timestamp, "TEXT", "!gateway", name, gw[0], gw[1])
elif "!dial" in chatlog and not "[Rcon]" in chatlog:
if enabled("!dial"):
if check_op(name, "!dial"):
dest = command.dial(name, chatlog)
log.save2(timestamp, "TEXT", "!dial", name, "] -> [", dest)
elif "!warp" in chatlog and not "[Rcon]" in chatlog:
if enabled("!warp"):
if check_op(name, "!warp"):
dest = command.warp(name, chatlog)
log.save2(timestamp, "TEXT", "!warp", name, "] -> [", dest)
elif "Opped" in chatlog or "De-opped" in chatlog:
global ops
ops = action.load_op(mcpath)
action.say("Detecting change in OP's, refreshing list!", 0)
log.save(timestamp, "SYSTEM", "OP-refresh", "SYSTEM")
elif "[INFO] Done (" in chatlog or "[INFO] RCON running on" in chatlog:
print "< STARTING SERVER > - Reconnecting to rcon"
action.connect(mchost, mcport, mcpwd)
log.raw_log("< STARTING SERVER >")
serverstop = False
global timetrk
if silent_enabled("timetrack"):
timetrk=timetrack.playtime()
timetrk.start()
print "< Playtime-tracking started >"
elif "[INFO] Saving chunks" in chatlog and serverstop == False:
print "< STOPPING SERVER >"
log.raw_log("< STOPPING SERVER >")
serverstop = True
if silent_enabled("timetrack"):
try:
timetrk.stop()
while timetrk.isAlive():
time.sleep(1)
del timetrk
print "< Playtime-tracking stopped >"
except:
print "Could not stop timetrack!"
log.raw_log("Could not stop timetrack!")
#old non-supported commands
elif "!tnt" in chatlog or "!stone" in chatlog or "!wood" in chatlog or "!dirt" in chatlog:
action.say("Deprecated command. use !hax or !item", 0)
# Commented out since mc console now logs severe @ disconnect
# elif "[SEVERE]" in chatlog or "(SourceFile:" in chatlog and not "<" in chatlog:
# command.mail("SYSTEM", "MINECRAFT SEVERE EXCEPTION - TRYING TO RESTART", True)
# action.say("§c[FATAL]: Minecraft Server encountered a serious error.", 4)
# action.say("§c[WARNING] MineMon will try to restart the server as a precaution", 3)
# time.sleep(2)
# command.restart()
elif "qwophidden" in chatlog:
command.late()
else:
if '<' in chatlog:
log.save_chat(name, chatlog)
#### Name extractor
def extract_name(player):
    # extract the name
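    # NOTE: the 34-character slice presumably skips the fixed-width vanilla log
    # prefix "[HH:MM:SS] [Server thread/INFO]: <" so that what remains starts
    # with the player name; adjust the offset if the log layout differs.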
player = player[34:]
bort = '>'
player = player.split(bort, 1)[0]
return player
#### Mainloop ####
def func_checkLastLine(lastLine):
global chatlog
global timestamp
chatlog = lastLine.replace("\n", "")
timestamp = datetime.now()
name = extract_name(lastLine)
name = nick_washer(name)
#print "running trigger on name: "+str(name)
trigger(name)
#### start of S3rR1 hax, I don't even know what this is ####
class newLoopingThread (threading.Thread):
def __init__(self, threadID):
self.threadID = threadID
threading.Thread.__init__(self)
def run(self):
func_loop()
def func_loop():
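    # Poll latest.log every 0.5 s: re-read the whole file and, when it has
    # changed, hand only the newest non-empty line to func_checkLastLine().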
tempList = fileList
while running:
time.sleep(0.5)
fileHandle = open(logfile, 'r')
newLines = fileHandle.readlines()
if newLines != tempList and tempList != None:
tempList = newLines
newList = [item for item in tempList if item != '\n']
if len(newList) > 0: func_checkLastLine(newList[len(newList) - 1])
def func_getLastLine():
fileHandle = open(logfile, 'r')
allLines = fileHandle.readlines()
allLines = [item for item in allLines if item != '\n']
return allLines[len(allLines) - 1]
#### Start application
running = True
logfile = mcpath + "logs/latest.log"
fileHandle = open(logfile, 'r')
fileList = fileHandle.readlines()
loopThread = newLoopingThread(1)
loopThread.start()
if silent_enabled("timetrack"):
print "Timetracking enabled, starting timer"
timetrk.start()
#log the start
log.raw_log("Minecraft Monitor Version "+v+" started!")
#### exit ####
print "press any key to exit"
raw_input()
running = False
print "Waiting for looping thread to stop..."
while loopThread.isAlive(): time.sleep(0.5)
if enabled("timetrack"):
try:
timetrk.stop()
time.sleep(1)
except:
print "Could not stop timetracking, although its enabled - perhaps MC is not running?"
action.say("§cMinecraft Monitor Version "+v+" stopped!", 0)
#log the shutdown
log.raw_log("Minecraft Monitor Version "+v+" stopped!")
|
normal
|
{
"blob_id": "a3301180e53da4a6970c082e72d8721b29dcae2e",
"index": 1403,
"step-1": "#!/usr/local/bin/python\n# -*- coding: utf-8 -*-\n\n# importing regular stuff\nimport os\nimport sys\nimport thread\nimport threading\nimport time\nimport datetime\nfrom datetime import datetime\nimport random\nimport filecmp\nimport ConfigParser\nimport socket\n\n#my stuff will go here\nimport include.action as action\nimport include.logreader as logreader\nimport include.command as command\nimport include.logger as log\nimport include.database as database\nimport include.timetrack as timetrack\n\n#### code start ####\nlegit = True\nserverstop = False\n\n#### version ####\nv = \"3.6-revival\"\nprint \"Starting up MineMon \"+v\ntime.sleep(0.2)\nprint \"Author: Oscar Carlberg\"\n\n#### Load settings ####\nsetting_file = sys.argv[1]\nconfig = ConfigParser.RawConfigParser()\nconfig.read(setting_file)\n\n#### Connect to MC rcon ####\nmchost = config.get('config', 'rhost')\nmcport = config.get('config', 'rport')\nmcpwd = config.get('config', 'rpass')\n\n#### Connect to MySQL ####\nmyhost = config.get('config', 'mysqlhost')\nmyuser = config.get('config', 'mysqluser')\nmypass = config.get('config', 'mysqlpass')\nmydb = config.get('config', 'mysqldb')\ndatabase.settings(myhost, myuser, mypass, mydb)\n\n#### some settings-var ####\nmcpath = config.get('config', 'mcpath')\nmapurl = config.get('config', 'mapurl')\nhelpurl = config.get('config', 'helpurl')\nscreen = config.get('config', 'screen')\nmc_mem = config.get('config', 'mc_mem')\n\ngmail = config.get('config', 'gmail')\ngpw = config.get('config', 'gmailpw')\nmailrcvr = config.get('config', 'sendto')\n\n#### announce that i'm running ####\ntry:\n action.connect(mchost, mcport, mcpwd)\nexcept:\n print \"Coult not connect to Minecraft Rcon!\"\n sys.exit()\n\naction.load(gmail, gpw, mailrcvr, screen, mc_mem)\naction.say(\"§aMinecraft Monitor Version \"+v+\" now running!\", 1)\naction.say(\"§aType !help for available commands\", 0)\n\nops = action.load_op(mcpath)\ntimetrk=timetrack.playtime()\n\n#### check if enabled & op func ####\n\ndef enabled(onoroff):\n #Check if regular command or feature\n if \"!\" in onoroff:\n setting = database.check_enabled_command(onoroff)\n\n #If not enabled say so.\n if not setting:\n action.say(\"This command has been disabled for this world!\", 0)\n return setting\n\n else:\n try:\n setting = config.get('config', onoroff)\n except:\n setting = \"disabled\"\n print \"NO setting entry for \"+onoroff+\", disabled.\"\n if \"enabled\" in setting:\n return True\n else:\n action.say(\"This command has been disabled for this world!\", 0)\n return False\n\ndef silent_enabled(onoroff):\n try:\n setting = config.get('config', onoroff)\n except:\n setting = \"disabled\"\n print \"NO setting entry for \"+onoroff+\", disabled.\"\n if \"enabled\" in setting:\n return True\n else:\n return False\n\ndef check_op(name, command):\n op = database.check_command_op(command)\n\n #If commmand does not need op, return OK\n if not op:\n return True\n\n else:\n #else, check if user is op, and return true\n if name.lower() in ops:\n return True\n\n #if not, deny.\n else:\n action.say(\"This command is not allowed for non-op's.\", 0)\n\ndef nick_washer(nick):\n while \"§\" in nick:\n nick = nick.replace(\"§1\", \"\")\n nick = nick.replace(\"§2\", \"\")\n nick = nick.replace(\"§3\", \"\")\n nick = nick.replace(\"§4\", \"\")\n nick = nick.replace(\"§5\", \"\")\n nick = nick.replace(\"§6\", \"\")\n nick = nick.replace(\"§7\", \"\")\n nick = nick.replace(\"§8\", \"\")\n nick = nick.replace(\"§9\", \"\")\n nick = nick.replace(\"§a\", 
\"\")\n nick = nick.replace(\"§b\", \"\")\n nick = nick.replace(\"§c\", \"\")\n nick = nick.replace(\"§d\", \"\")\n nick = nick.replace(\"§e\", \"\")\n nick = nick.replace(\"§f\", \"\")\n\tnick = nick.replace(\"§r\", \"\")\n\n #print \"Washed: \"+nick\n return nick\n\n\n#### Trigger on chattlog stuff ####\ndef trigger(name):\n global serverstop\n\n if \"!help\" in chatlog:\n if enabled(\"!help\"):\n if check_op(name, \"!help\"):\n helpcmnd = command.help(helpurl, chatlog)\n log.save2(timestamp, \"SYSTEM\", \"!help\", name, \"] [\", helpcmnd)\n\n elif \"!sheen\" in chatlog:\n if enabled(\"!sheen\"):\n if check_op(name, \"!sheen\"):\n command.sheen()\n log.save(timestamp, \"TEXT\", \"!sheen\", name)\n\n elif \"joined the game\" in chatlog and not \"[Rcon]\" in chatlog:\n if enabled(\"login_manner\"):\n player = command.login(chatlog, v, helpurl)\n log.save(timestamp, \"GREEN\", \"Login:\", player)\n\n elif \"left the game\" in chatlog and not \"[Rcon]\" in chatlog:\n if enabled(\"logout_manner\"):\n player = command.logout(chatlog)\n log.save(timestamp, \"RED\", \"Logout:\", player)\n\n elif \"!hax\" in chatlog and not \"[Rcon]\" in chatlog:\n if enabled(\"!hax\"):\n if check_op(name, \"!hax\"):\n command.hax(name)\n log.save(timestamp, \"SYSTEM\", \"!hax\", name)\n\n elif \"!unhax\" in chatlog and not \"[Rcon]\" in chatlog:\n if enabled(\"!unhax\"):\n if check_op(name, \"!unhax\"):\n command.unhax(name)\n log.save(timestamp, \"SYSTEM\", \"!unhax\", name)\n\n elif \"!adv\" in chatlog and not \"[Rcon]\" in chatlog:\n if enabled(\"!adv\"):\n if check_op(name, \"!adv\"):\n command.adv(name)\n log.save(timestamp, \"SYSTEM\", \"!adv\", name)\n\n elif \"!day\" in chatlog:\n if enabled(\"!day\"):\n if check_op(name, \"!day\"):\n command.day()\n log.save(timestamp, \"SYSTEM\", \"!day\", name)\n\n elif \"!night\" in chatlog:\n if enabled(\"!night\"):\n if check_op(name, \"!night\"):\n command.night()\n log.save(timestamp, \"SYSTEM\", \"!night\", name)\n\n elif \"!tp\" in chatlog and not \"[Rcon]\" in chatlog:\n if enabled(\"!tp\"):\n if check_op(name, \"!tp\"):\n who = command.tp(name, chatlog)\n log.save2(timestamp, \"TEXT\", \"!tp\", name, \"] -> [\", who)\n\n elif \"!pull\" in chatlog and not \"[Rcon]\" in chatlog:\n if enabled(\"!pull\"):\n if check_op(name, \"!pull\"):\n who = command.pull(name, chatlog)\n log.save2(timestamp, \"TEXT\", \"!pull\", name, \"] <- [\", who)\n\n elif \"!map\" in chatlog:\n if enabled(\"!map\"):\n if check_op(name, \"!map\"):\n command.map(mapurl)\n log.save(timestamp, \"SYSTEM\", \"!map\", name)\n\n elif \"!version\" in chatlog and not \"[Rcon]\" in chatlog:\n if enabled(\"!version\"):\n if check_op(name, \"!version\"):\n command.version(v)\n log.save(timestamp, \"SYSTEM\", \"!version\", name)\n\n elif \"!list\" in chatlog:\n action.say(\"Deprecated. 
Press Tab on your keyboard\", 0)\n\n elif \"!roll\" in chatlog and not \"[Rcon]\" in chatlog:\n if enabled(\"!roll\"):\n if check_op(name, \"!roll\"):\n roll = command.roll(name)\n log.save2(timestamp, \"TEXT\", \"!roll\", name, \"] [\", roll)\n\n elif \"!rain\" in chatlog and not \"[Rcon]\" in chatlog:\n if enabled(\"!rain\"):\n if check_op(name, \"!rain\"):\n command.rain()\n log.save(timestamp, \"SYSTEM\", \"!rain\", name)\n\n elif \"!xp\" in chatlog and not \"[Rcon]\" in chatlog:\n if enabled(\"!xp\"):\n if check_op(name, \"!xp\"):\n command.xp(name)\n log.save(timestamp, \"TEXT\", \"!xp\", name)\n\n elif \"!kit\" in chatlog and not \"[Rcon]\" in chatlog:\n if enabled(\"!kit\"):\n if check_op(name, \"!kit\"):\n command.kit(name)\n log.save(timestamp, \"TEXT\", \"!kit\", name)\n\n elif \"!leatherset\" in chatlog and not \"[Rcon]\" in chatlog:\n if enabled(\"!leatherset\"):\n if check_op(name, \"!leatherset\"):\n command.leatherset(name)\n log.save(timestamp, \"TEXT\", \"!leatherset\", name)\n\n elif \"!diamondset\" in chatlog and not \"[Rcon]\" in chatlog:\n if enabled(\"!diamondset\"):\n if check_op(name, \"!diamondset\"):\n command.diamondset(name)\n log.save(timestamp, \"TEXT\", \"!diamondset\", name)\n\n elif \"!bow\" in chatlog and not \"[Rcon]\" in chatlog:\n if enabled(\"!bow\"):\n if check_op(name, \"!bow\"):\n command.bow(name)\n log.save(timestamp, \"TEXT\", \"!bow\", name)\n\n elif \"!train\" in chatlog and not \"[Rcon]\" in chatlog:\n if enabled(\"!train\"):\n if check_op(name, \"!train\"):\n command.train(name)\n log.save(timestamp, \"TEXT\", \"!train\", name)\n\n elif \"!sleep\" in chatlog and not \"[Rcon]\" in chatlog:\n if enabled(\"!sleep\"):\n if check_op(name, \"!sleep\"):\n command.sleep(name)\n log.save(timestamp, \"TEXT\", \"!sleep\", name)\n\n elif \"!rail\" in chatlog and not \"[Rcon]\" in chatlog:\n if enabled(\"!rail\"):\n if check_op(name, \"!rail\"):\n command.rail(name)\n log.save(timestamp, \"TEXT\", \"!rail\", name)\n\n elif \"!food\" in chatlog and not \"[Rcon]\" in chatlog:\n if enabled(\"!food\"):\n if check_op(name, \"!food\"):\n command.food(name)\n log.save(timestamp, \"TEXT\", \"!food\", name)\n\n elif \"!item\" in chatlog and not \"[Rcon]\" in chatlog:\n if enabled(\"!item\"):\n if check_op(name, \"!item\"):\n item = command.item(name, chatlog)\n log.save2(timestamp, \"TEXT\", \"!item\", name, \"] [\", item)\n\n elif \"!restart\" in chatlog and not \"[Rcon]\" in chatlog:\n if enabled(\"!restart\"):\n if check_op(name, \"!restart\"):\n command.restart()\n log.save(timestamp, \"SYSTEM\", \"!restart\", name)\n\n elif \"!monsters\" in chatlog:\n if enabled(\"!monsters\"):\n if check_op(name, \"!monsters\"):\n onoff = command.monsters(mcpath)\n log.save2(timestamp, \"SYSTEM\", \"!monsters\", name, \"] [\", onoff)\n\n elif \"!update\" in chatlog:\n if enabled(\"!update\"):\n if check_op(name, \"!update\") or \"Banned Player\" in chatlog:\n status = command.update(mcpath, mcport)\n log.save2(timestamp, \"SYSTEM\", \"!update\", name, \"] [\", status)\n\n elif \"!temphax\" in chatlog and not \"[Rcon]\" in chatlog:\n if enabled(\"!temphax\"):\n if check_op(name, \"!temphax\"):\n who = command.temphax(chatlog)\n log.save2(timestamp, \"TEXT\", \"!temphax\", name, \"] -> [\", who)\n\n elif \"!report\" in chatlog and not \"[Rcon]\" in chatlog:\n if enabled(\"!report\"):\n if check_op(name, \"!report\"):\n command.mail(name, chatlog, False)\n log.save(timestamp, \"SYSTEM\", \"!report\", name)\n\n elif \"!played\" in chatlog and not \"[Rcon]\" in chatlog:\n if 
enabled(\"!played\"):\n if check_op(name, \"!played\"):\n\t\tprint \"Checking played with name:\"+ str(name)\n command.played(name)\n log.save(timestamp, \"TEXT\", \"!played\", name)\n\n elif \"!world\" in chatlog and not \"[Rcon]\" in chatlog:\n if enabled(\"!world\"):\n if check_op(name, \"!world\"):\n success = command.world(name, chatlog, mcpath)\n if success:\n log.save2(timestamp, \"SYSTEM\", \"!world\", name, \"] [\", success)\n\n elif \"!clear\" in chatlog and not \"[Rcon]\" in chatlog:\n if enabled(\"!clear\"):\n if check_op(name, \"!clear\"):\n command.clear(name)\n log.save(timestamp, \"TEXT\", \"!clear\", name)\n\n elif \"!spawn\" in chatlog and not \"[Rcon]\" in chatlog:\n if enabled(\"!spawn\"):\n if check_op(name, \"!spawn\"):\n command.spawn(name)\n log.save(timestamp, \"TEXT\", \"!spawn\", name)\n\n elif \"!gateway\" in chatlog and not \"[Rcon]\" in chatlog:\n if enabled(\"!gateway\"):\n if check_op(name, \"!gateway\"):\n gw = command.gateway(name, chatlog)\n log.save2(timestamp, \"TEXT\", \"!gateway\", name, gw[0], gw[1])\n\n elif \"!dial\" in chatlog and not \"[Rcon]\" in chatlog:\n if enabled(\"!dial\"):\n if check_op(name, \"!dial\"):\n dest = command.dial(name, chatlog)\n log.save2(timestamp, \"TEXT\", \"!dial\", name, \"] -> [\", dest)\n\n elif \"!warp\" in chatlog and not \"[Rcon]\" in chatlog:\n if enabled(\"!warp\"):\n if check_op(name, \"!warp\"):\n dest = command.warp(name, chatlog)\n log.save2(timestamp, \"TEXT\", \"!warp\", name, \"] -> [\", dest)\n\n\n elif \"Opped\" in chatlog or \"De-opped\" in chatlog:\n global ops\n ops = action.load_op(mcpath)\n action.say(\"Detecting change in OP's, refreshing list!\", 0)\n log.save(timestamp, \"SYSTEM\", \"OP-refresh\", \"SYSTEM\")\n\n elif \"[INFO] Done (\" in chatlog or \"[INFO] RCON running on\" in chatlog:\n print \"< STARTING SERVER > - Reconnecting to rcon\"\n action.connect(mchost, mcport, mcpwd)\n log.raw_log(\"< STARTING SERVER >\")\n serverstop = False\n global timetrk\n if silent_enabled(\"timetrack\"):\n timetrk=timetrack.playtime()\n timetrk.start()\n print \"< Playtime-tracking started >\"\n\n\n elif \"[INFO] Saving chunks\" in chatlog and serverstop == False:\n print \"< STOPPING SERVER >\"\n log.raw_log(\"< STOPPING SERVER >\")\n serverstop = True\n if silent_enabled(\"timetrack\"):\n try:\n timetrk.stop()\n while timetrk.isAlive():\n time.sleep(1)\n del timetrk\n print \"< Playtime-tracking stopped >\"\n except:\n print \"Could not stop timetrack!\"\n log.raw_log(\"Could not stop timetrack!\")\n\n #old non-supported commands\n elif \"!tnt\" in chatlog or \"!stone\" in chatlog or \"!wood\" in chatlog or \"!dirt\" in chatlog:\n action.say(\"Deprecated command. 
use !hax or !item\", 0)\n\n# Un-commented since mc console now logs severe @ disconnect\n# elif \"[SEVERE]\" in chatlog or \"(SourceFile:\" in chatlog and not \"<\" in chatlog:\n# command.mail(\"SYSTEM\", \"MINECRAFT SEVERE EXCEPTION - TRYING TO RESTART\", True)\n# action.say(\"§c[FATAL]: Minecraft Server encountered a serious error.\", 4)\n# action.say(\"§c[WARNING] MineMon will try to restart the server as a precaution\", 3)\n# time.sleep(2)\n# command.restart()\n\n elif \"qwophidden\" in chatlog:\n command.late()\n\n else:\n if '<' in chatlog:\n log.save_chat(name, chatlog)\n\n\n\n#### Name extractor\ndef extract_name(player):\n # extrahera namn\n player = player[34:]\n bort = '>'\n player = player.split(bort, 1)[0]\n return player\n\n#### Mainloop ####\ndef func_checkLastLine(lastLine):\n global chatlog\n global timestamp\n chatlog = lastLine.replace(\"\\n\", \"\")\n timestamp = datetime.now()\n name = extract_name(lastLine)\n name = nick_washer(name)\n #print \"running trigger on name: \"+str(name)\n trigger(name)\n\n#### start of S3rR1 hax, i dont even what is this ####\nclass newLoopingThread (threading.Thread):\n def __init__(self, threadID):\n self.threadID = threadID\n threading.Thread.__init__(self)\n def run(self):\n func_loop()\n\ndef func_loop():\n tempList = fileList\n while running:\n time.sleep(0.5)\n fileHandle = open(logfile, 'r')\n newLines = fileHandle.readlines()\n if newLines != tempList and tempList != None:\n tempList = newLines\n newList = [item for item in tempList if item != '\\n']\n if len(newList) > 0: func_checkLastLine(newList[len(newList) - 1])\n\ndef func_getLastLine():\n fileHandle = open(logfile, 'r')\n allLines = fileHandle.readlines()\n allLines = [item for item in allLines if item != '\\n']\n return allLines[len(allLines) - 1]\n\n#### Start application\nrunning = True\nlogfile = mcpath + \"logs/latest.log\"\n\nfileHandle = open(logfile, 'r')\nfileList = fileHandle.readlines()\n\nloopThread = newLoopingThread(1)\nloopThread.start()\n\nif silent_enabled(\"timetrack\"):\n print \"Timetracking enabled, starting timer\"\n timetrk.start()\n\n#log the start\nlog.raw_log(\"Minecraft Monitor Version \"+v+\" started!\")\n\n#### exit ####\nprint \"press any key to exit\"\nraw_input()\nrunning = False\nprint \"Waiting for looping thread to stop...\"\nwhile loopThread.isAlive(): time.sleep(0.5)\n\nif enabled(\"timetrack\"):\n try:\n timetrk.stop()\n time.sleep(1)\n except:\n print \"Could not stop timetracking, although its enabled - perhaps MC is not running?\"\n\naction.say(\"§cMinecraft Monitor Version \"+v+\" stopped!\", 0)\n\n#log the shutdown\nlog.raw_log(\"Minecraft Monitor Version \"+v+\" stopped!\")\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
# -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-01-30 14:50
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('books', '0007_auto_20170127_2254'),
]
operations = [
migrations.AlterField(
model_name='book',
name='subtitle',
field=models.CharField(blank=True, help_text='e.g. There and Back Again', max_length=200),
),
migrations.AlterField(
model_name='book',
name='title',
field=models.CharField(db_index=True, help_text='e.g. The Hobbit', max_length=200, unique=True),
),
]
|
normal
|
{
"blob_id": "65ea27851d9db0f0a06d42bd37eff633d22a1548",
"index": 9528,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass Migration(migrations.Migration):\n <mask token>\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass Migration(migrations.Migration):\n dependencies = [('books', '0007_auto_20170127_2254')]\n operations = [migrations.AlterField(model_name='book', name='subtitle',\n field=models.CharField(blank=True, help_text=\n 'e.g. There and Back Again', max_length=200)), migrations.\n AlterField(model_name='book', name='title', field=models.CharField(\n db_index=True, help_text='e.g. The Hobbit', max_length=200, unique=\n True))]\n",
"step-4": "from __future__ import unicode_literals\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n dependencies = [('books', '0007_auto_20170127_2254')]\n operations = [migrations.AlterField(model_name='book', name='subtitle',\n field=models.CharField(blank=True, help_text=\n 'e.g. There and Back Again', max_length=200)), migrations.\n AlterField(model_name='book', name='title', field=models.CharField(\n db_index=True, help_text='e.g. The Hobbit', max_length=200, unique=\n True))]\n",
"step-5": "# -*- coding: utf-8 -*-\n# Generated by Django 1.10.5 on 2017-01-30 14:50\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('books', '0007_auto_20170127_2254'),\n ]\n\n operations = [\n migrations.AlterField(\n model_name='book',\n name='subtitle',\n field=models.CharField(blank=True, help_text='e.g. There and Back Again', max_length=200),\n ),\n migrations.AlterField(\n model_name='book',\n name='title',\n field=models.CharField(db_index=True, help_text='e.g. The Hobbit', max_length=200, unique=True),\n ),\n ]\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
''' Word Math (단어 수학)
Time: 68ms (limit ~2 s), Memory: 29200KB (limit ~256MB)
Category: greedy
'''
import sys
input = sys.stdin.readline
# input
N = int(input())  # number of words
arr = [list(input().strip()) for _ in range(N)]
# solution
alphabet = []
for word in arr:
for a in word:
if a not in alphabet:
alphabet.append(a)
value_list = []
for a in alphabet:
value = 0
for word in arr:
        if a not in word:  # skip words that do not contain this letter
continue
s = ""
for w in word:
s += "1" if w == a else "0"
value += int(s)
value_list.append(value)
value_list.sort(reverse=True)  # sort in descending order
answer = 0
value = 9
for s in value_list:
answer += value * s
value -= 1
# output
print(answer)
|
normal
|
{
"blob_id": "6efc7ff304a05dfc5a7bed7d646e5d6ac034ce85",
"index": 4706,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nfor word in arr:\n for a in word:\n if a not in alphabet:\n alphabet.append(a)\n<mask token>\nfor a in alphabet:\n value = 0\n for word in arr:\n if a not in word:\n continue\n s = ''\n for w in word:\n s += '1' if w == a else '0'\n value += int(s)\n value_list.append(value)\nvalue_list.sort(reverse=True)\n<mask token>\nfor s in value_list:\n answer += value * s\n value -= 1\nprint(answer)\n",
"step-3": "<mask token>\ninput = sys.stdin.readline\nN = int(input())\narr = [list(input().strip()) for _ in range(N)]\nalphabet = []\nfor word in arr:\n for a in word:\n if a not in alphabet:\n alphabet.append(a)\nvalue_list = []\nfor a in alphabet:\n value = 0\n for word in arr:\n if a not in word:\n continue\n s = ''\n for w in word:\n s += '1' if w == a else '0'\n value += int(s)\n value_list.append(value)\nvalue_list.sort(reverse=True)\nanswer = 0\nvalue = 9\nfor s in value_list:\n answer += value * s\n value -= 1\nprint(answer)\n",
"step-4": "<mask token>\nimport sys\ninput = sys.stdin.readline\nN = int(input())\narr = [list(input().strip()) for _ in range(N)]\nalphabet = []\nfor word in arr:\n for a in word:\n if a not in alphabet:\n alphabet.append(a)\nvalue_list = []\nfor a in alphabet:\n value = 0\n for word in arr:\n if a not in word:\n continue\n s = ''\n for w in word:\n s += '1' if w == a else '0'\n value += int(s)\n value_list.append(value)\nvalue_list.sort(reverse=True)\nanswer = 0\nvalue = 9\nfor s in value_list:\n answer += value * s\n value -= 1\nprint(answer)\n",
"step-5": "''' 단어 수학\n시간 : 68ms (~2초), 메모리 : 29200KB (~256MB)\n분류 : greedy\n'''\n\nimport sys\ninput = sys.stdin.readline\n\n# 입력\nN = int(input()) # 단어의 개수\narr = [list(input().strip()) for _ in range(N)]\n\n# 풀이\nalphabet = []\nfor word in arr:\n for a in word:\n if a not in alphabet:\n alphabet.append(a)\n\nvalue_list = []\nfor a in alphabet:\n value = 0\n for word in arr:\n if a not in word: # 알파벳 없으면 넘어감\n continue\n\n s = \"\"\n for w in word:\n s += \"1\" if w == a else \"0\"\n value += int(s)\n\n value_list.append(value)\n\nvalue_list.sort(reverse=True) # 내림차순 정렬\n\nanswer = 0\nvalue = 9\nfor s in value_list:\n answer += value * s\n value -= 1\n\n# 출력\nprint(answer)\n\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
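The greedy in the row above can be restated compactly: each letter's weight is the sum of its positional place values across all words, and the heaviest letters receive the digits 9, 8, 7, ... in order. A minimal self-contained sketch of the same idea (the helper name word_math and the sample words are illustrative additions, not taken from the row):

from collections import defaultdict

def word_math(words):
    # weight of a letter = sum of its place values over all words
    weight = defaultdict(int)
    for word in words:
        for pos, ch in enumerate(word):
            weight[ch] += 10 ** (len(word) - 1 - pos)
    # hand out digits 9, 8, 7, ... to the heaviest letters
    total = 0
    for digit, w in zip(range(9, -1, -1), sorted(weight.values(), reverse=True)):
        total += digit * w
    return total

print(word_math(["GCF", "ACDEB"]))  # 99437 under this greedy assignment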
api_id = "2168275"
api_hash = "e011a9cb95b7e7e153aa5840985fc883"
|
normal
|
{
"blob_id": "c6d6fcc242e1b63104a3f3eb788880635257ff4c",
"index": 7503,
"step-1": "<mask token>\n",
"step-2": "api_id = '2168275'\napi_hash = 'e011a9cb95b7e7e153aa5840985fc883'\n",
"step-3": "api_id = \"2168275\"\napi_hash = \"e011a9cb95b7e7e153aa5840985fc883\"\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
# -*- coding: utf-8 -*-
"""pytest People functions, fixtures and tests."""
import pytest
import ciscosparkapi
from tests.utils import create_string
# Helper Functions
# pytest Fixtures
@pytest.fixture(scope="session")
def me(api):
return api.people.me()
|
normal
|
{
"blob_id": "9b7ffa2bb62a8decbec51c6bdea38b4338726816",
"index": 1891,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\[email protected](scope='session')\ndef me(api):\n return api.people.me()\n",
"step-3": "<mask token>\nimport pytest\nimport ciscosparkapi\nfrom tests.utils import create_string\n\n\[email protected](scope='session')\ndef me(api):\n return api.people.me()\n",
"step-4": "# -*- coding: utf-8 -*-\n\n\"\"\"pytest People functions, fixtures and tests.\"\"\"\n\n\nimport pytest\n\nimport ciscosparkapi\nfrom tests.utils import create_string\n\n\n# Helper Functions\n\n\n\n\n# pytest Fixtures\n\[email protected](scope=\"session\")\ndef me(api):\n return api.people.me()\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
from kivy.uix.boxlayout import BoxLayout
from kivy.graphics import *
from kivy.clock import Clock
from kivy.properties import StringProperty, BooleanProperty
from kivy.uix.popup import Popup
import time
from math import sin, pi
from kivy.lang import Builder
from ui.custom_widgets import I18NPopup, I18NLabel
Builder.load_file('ui/peachy_widgets.kv')
class TouchyLabel(I18NLabel):
is_on = BooleanProperty(False)
def on_touch_down(self, touch):
if touch.is_triple_tap:
self.is_on = not self.is_on
class I18NHelpPopup(I18NPopup):
text_source = StringProperty()
class Dripper(BoxLayout):
def __init__(self, **kwargs):
super(Dripper, self).__init__(**kwargs)
self.index = 0.0
self.sections = 20
self.section_height = 1
self.lasttime = time.time()
Clock.schedule_once(self.redraw)
self.drip_history = []
self.count = 0
def update(self, data):
self.drip_history = data['drip_history']
self.count = data['drips']
def update_parts(self, drips, history):
self.drip_history = history
self.count = drips
def redraw(self, key):
self.index += (time.time() - self.lasttime) * self.sections
self.lasttime = time.time()
if self.index > self.section_height * 2:
self.index = 0
self.draw()
Clock.schedule_once(self.redraw, 1.0 / 30.0)
def on_height(self, instance, value):
self.section_height = self.height / self.sections
def draw(self):
self.canvas.clear()
top = time.time()
bottom = top - self.sections
self.canvas.add(Color(0.99, 0.99, 0.6, 1.0))
self.canvas.add(Rectangle(pos=self.pos, size=self.size))
for (index, drip) in zip(range(len(self.drip_history), 0, -1), self.drip_history):
if drip > bottom:
self.canvas.add(Color(0.35, 0.4, 1.0, 1.0))
y = ((drip - bottom) / self.sections) * self.height
s = sin((self.count - index) / (2 * pi))
self.canvas.add(Ellipse(pos=(self.x + abs(self.width / 2.0 * s), y), size=(self.width / 5.0, 5)))
class LaserWarningPopup(I18NPopup):
text_source = StringProperty()
accepted = StringProperty(None)
def __init__(self, **kwargs):
super(LaserWarningPopup, self).__init__(**kwargs)
def is_safe(self):
        if self.accepted == "True":
return True
return False
|
normal
|
{
"blob_id": "96086885e5353f3b4b3277c1daf4ee74831c3b73",
"index": 8841,
"step-1": "<mask token>\n\n\nclass Dripper(BoxLayout):\n\n def __init__(self, **kwargs):\n super(Dripper, self).__init__(**kwargs)\n self.index = 0.0\n self.sections = 20\n self.section_height = 1\n self.lasttime = time.time()\n Clock.schedule_once(self.redraw)\n self.drip_history = []\n self.count = 0\n\n def update(self, data):\n self.drip_history = data['drip_history']\n self.count = data['drips']\n\n def update_parts(self, drips, history):\n self.drip_history = history\n self.count = drips\n\n def redraw(self, key):\n self.index += (time.time() - self.lasttime) * self.sections\n self.lasttime = time.time()\n if self.index > self.section_height * 2:\n self.index = 0\n self.draw()\n Clock.schedule_once(self.redraw, 1.0 / 30.0)\n <mask token>\n\n def draw(self):\n self.canvas.clear()\n top = time.time()\n bottom = top - self.sections\n self.canvas.add(Color(0.99, 0.99, 0.6, 1.0))\n self.canvas.add(Rectangle(pos=self.pos, size=self.size))\n for index, drip in zip(range(len(self.drip_history), 0, -1), self.\n drip_history):\n if drip > bottom:\n self.canvas.add(Color(0.35, 0.4, 1.0, 1.0))\n y = (drip - bottom) / self.sections * self.height\n s = sin((self.count - index) / (2 * pi))\n self.canvas.add(Ellipse(pos=(self.x + abs(self.width / 2.0 *\n s), y), size=(self.width / 5.0, 5)))\n\n\nclass LaserWarningPopup(I18NPopup):\n text_source = StringProperty()\n accepted = StringProperty(None)\n\n def __init__(self, **kwargs):\n super(LaserWarningPopup, self).__init__(**kwargs)\n\n def is_safe(self):\n if self.accepted is 'True':\n return True\n return False\n",
"step-2": "<mask token>\n\n\nclass I18NHelpPopup(I18NPopup):\n <mask token>\n\n\nclass Dripper(BoxLayout):\n\n def __init__(self, **kwargs):\n super(Dripper, self).__init__(**kwargs)\n self.index = 0.0\n self.sections = 20\n self.section_height = 1\n self.lasttime = time.time()\n Clock.schedule_once(self.redraw)\n self.drip_history = []\n self.count = 0\n\n def update(self, data):\n self.drip_history = data['drip_history']\n self.count = data['drips']\n\n def update_parts(self, drips, history):\n self.drip_history = history\n self.count = drips\n\n def redraw(self, key):\n self.index += (time.time() - self.lasttime) * self.sections\n self.lasttime = time.time()\n if self.index > self.section_height * 2:\n self.index = 0\n self.draw()\n Clock.schedule_once(self.redraw, 1.0 / 30.0)\n\n def on_height(self, instance, value):\n self.section_height = self.height / self.sections\n\n def draw(self):\n self.canvas.clear()\n top = time.time()\n bottom = top - self.sections\n self.canvas.add(Color(0.99, 0.99, 0.6, 1.0))\n self.canvas.add(Rectangle(pos=self.pos, size=self.size))\n for index, drip in zip(range(len(self.drip_history), 0, -1), self.\n drip_history):\n if drip > bottom:\n self.canvas.add(Color(0.35, 0.4, 1.0, 1.0))\n y = (drip - bottom) / self.sections * self.height\n s = sin((self.count - index) / (2 * pi))\n self.canvas.add(Ellipse(pos=(self.x + abs(self.width / 2.0 *\n s), y), size=(self.width / 5.0, 5)))\n\n\nclass LaserWarningPopup(I18NPopup):\n text_source = StringProperty()\n accepted = StringProperty(None)\n\n def __init__(self, **kwargs):\n super(LaserWarningPopup, self).__init__(**kwargs)\n\n def is_safe(self):\n if self.accepted is 'True':\n return True\n return False\n",
"step-3": "<mask token>\n\n\nclass TouchyLabel(I18NLabel):\n <mask token>\n <mask token>\n\n\nclass I18NHelpPopup(I18NPopup):\n text_source = StringProperty()\n\n\nclass Dripper(BoxLayout):\n\n def __init__(self, **kwargs):\n super(Dripper, self).__init__(**kwargs)\n self.index = 0.0\n self.sections = 20\n self.section_height = 1\n self.lasttime = time.time()\n Clock.schedule_once(self.redraw)\n self.drip_history = []\n self.count = 0\n\n def update(self, data):\n self.drip_history = data['drip_history']\n self.count = data['drips']\n\n def update_parts(self, drips, history):\n self.drip_history = history\n self.count = drips\n\n def redraw(self, key):\n self.index += (time.time() - self.lasttime) * self.sections\n self.lasttime = time.time()\n if self.index > self.section_height * 2:\n self.index = 0\n self.draw()\n Clock.schedule_once(self.redraw, 1.0 / 30.0)\n\n def on_height(self, instance, value):\n self.section_height = self.height / self.sections\n\n def draw(self):\n self.canvas.clear()\n top = time.time()\n bottom = top - self.sections\n self.canvas.add(Color(0.99, 0.99, 0.6, 1.0))\n self.canvas.add(Rectangle(pos=self.pos, size=self.size))\n for index, drip in zip(range(len(self.drip_history), 0, -1), self.\n drip_history):\n if drip > bottom:\n self.canvas.add(Color(0.35, 0.4, 1.0, 1.0))\n y = (drip - bottom) / self.sections * self.height\n s = sin((self.count - index) / (2 * pi))\n self.canvas.add(Ellipse(pos=(self.x + abs(self.width / 2.0 *\n s), y), size=(self.width / 5.0, 5)))\n\n\nclass LaserWarningPopup(I18NPopup):\n text_source = StringProperty()\n accepted = StringProperty(None)\n\n def __init__(self, **kwargs):\n super(LaserWarningPopup, self).__init__(**kwargs)\n\n def is_safe(self):\n if self.accepted is 'True':\n return True\n return False\n",
"step-4": "<mask token>\n\n\nclass TouchyLabel(I18NLabel):\n <mask token>\n\n def on_touch_down(self, touch):\n if touch.is_triple_tap:\n self.is_on = not self.is_on\n\n\nclass I18NHelpPopup(I18NPopup):\n text_source = StringProperty()\n\n\nclass Dripper(BoxLayout):\n\n def __init__(self, **kwargs):\n super(Dripper, self).__init__(**kwargs)\n self.index = 0.0\n self.sections = 20\n self.section_height = 1\n self.lasttime = time.time()\n Clock.schedule_once(self.redraw)\n self.drip_history = []\n self.count = 0\n\n def update(self, data):\n self.drip_history = data['drip_history']\n self.count = data['drips']\n\n def update_parts(self, drips, history):\n self.drip_history = history\n self.count = drips\n\n def redraw(self, key):\n self.index += (time.time() - self.lasttime) * self.sections\n self.lasttime = time.time()\n if self.index > self.section_height * 2:\n self.index = 0\n self.draw()\n Clock.schedule_once(self.redraw, 1.0 / 30.0)\n\n def on_height(self, instance, value):\n self.section_height = self.height / self.sections\n\n def draw(self):\n self.canvas.clear()\n top = time.time()\n bottom = top - self.sections\n self.canvas.add(Color(0.99, 0.99, 0.6, 1.0))\n self.canvas.add(Rectangle(pos=self.pos, size=self.size))\n for index, drip in zip(range(len(self.drip_history), 0, -1), self.\n drip_history):\n if drip > bottom:\n self.canvas.add(Color(0.35, 0.4, 1.0, 1.0))\n y = (drip - bottom) / self.sections * self.height\n s = sin((self.count - index) / (2 * pi))\n self.canvas.add(Ellipse(pos=(self.x + abs(self.width / 2.0 *\n s), y), size=(self.width / 5.0, 5)))\n\n\nclass LaserWarningPopup(I18NPopup):\n text_source = StringProperty()\n accepted = StringProperty(None)\n\n def __init__(self, **kwargs):\n super(LaserWarningPopup, self).__init__(**kwargs)\n\n def is_safe(self):\n if self.accepted is 'True':\n return True\n return False\n",
"step-5": "from kivy.uix.boxlayout import BoxLayout\nfrom kivy.graphics import *\nfrom kivy.clock import Clock\nfrom kivy.properties import StringProperty, BooleanProperty\nfrom kivy.uix.popup import Popup\nimport time\nfrom math import sin, pi\n\nfrom kivy.lang import Builder\nfrom ui.custom_widgets import I18NPopup, I18NLabel\n\n\nBuilder.load_file('ui/peachy_widgets.kv')\n\n\nclass TouchyLabel(I18NLabel):\n\n is_on = BooleanProperty(False)\n\n def on_touch_down(self, touch):\n if touch.is_triple_tap:\n self.is_on = not self.is_on\n\n\nclass I18NHelpPopup(I18NPopup):\n text_source = StringProperty()\n\n\nclass Dripper(BoxLayout):\n def __init__(self, **kwargs):\n super(Dripper, self).__init__(**kwargs)\n self.index = 0.0\n self.sections = 20\n self.section_height = 1\n self.lasttime = time.time()\n Clock.schedule_once(self.redraw)\n self.drip_history = []\n self.count = 0\n\n def update(self, data):\n self.drip_history = data['drip_history']\n self.count = data['drips']\n\n def update_parts(self, drips, history):\n self.drip_history = history\n self.count = drips\n\n def redraw(self, key):\n self.index += (time.time() - self.lasttime) * self.sections\n self.lasttime = time.time()\n if self.index > self.section_height * 2:\n self.index = 0\n self.draw()\n Clock.schedule_once(self.redraw, 1.0 / 30.0)\n\n def on_height(self, instance, value):\n self.section_height = self.height / self.sections\n\n def draw(self):\n self.canvas.clear()\n top = time.time()\n bottom = top - self.sections\n self.canvas.add(Color(0.99, 0.99, 0.6, 1.0))\n self.canvas.add(Rectangle(pos=self.pos, size=self.size))\n for (index, drip) in zip(range(len(self.drip_history), 0, -1), self.drip_history):\n if drip > bottom:\n self.canvas.add(Color(0.35, 0.4, 1.0, 1.0))\n y = ((drip - bottom) / self.sections) * self.height\n s = sin((self.count - index) / (2 * pi))\n self.canvas.add(Ellipse(pos=(self.x + abs(self.width / 2.0 * s), y), size=(self.width / 5.0, 5)))\n\n\nclass LaserWarningPopup(I18NPopup):\n text_source = StringProperty()\n accepted = StringProperty(None)\n\n def __init__(self, **kwargs):\n super(LaserWarningPopup, self).__init__(**kwargs)\n\n def is_safe(self):\n if self.accepted is \"True\":\n return True\n return False\n",
"step-ids": [
10,
12,
14,
15,
19
]
}
|
[
10,
12,
14,
15,
19
] |
# Efficient Hacking (효율적인 해킹)
# https://www.acmicpc.net/problem/1325
from collections import deque
import sys
input = sys.stdin.readline
n, m = map(int, input().split())
graph = [[] for _ in range(n + 1)]
for _ in range(m):
a, b = map(int, input().split())
    graph[b].append(a)  # hacking B also lets you hack A (A trusts B)
def bfs(start):
visited = [False] * (n + 1)
visited[start] = True
q = deque()
q.append(start)
    cnt = 1  # number of computers that can be hacked when `start` is hacked
while q:
now = q.popleft()
for i in graph[now]:
if not visited[i]:
visited[i] = True
q.append(i)
cnt += 1
return cnt
answer = []
max_cnt = 0
for i in range(1, n + 1):
result = bfs(i)
if result > max_cnt:
answer = [i]
max_cnt = result
elif result == max_cnt:
answer.append(i)
print(*answer)
|
normal
|
{
"blob_id": "8a631adc8d919fb1dded27177818c4cb30148e94",
"index": 610,
"step-1": "<mask token>\n\n\ndef bfs(start):\n visited = [False] * (n + 1)\n visited[start] = True\n q = deque()\n q.append(start)\n cnt = 1\n while q:\n now = q.popleft()\n for i in graph[now]:\n if not visited[i]:\n visited[i] = True\n q.append(i)\n cnt += 1\n return cnt\n\n\n<mask token>\n",
"step-2": "<mask token>\nfor _ in range(m):\n a, b = map(int, input().split())\n graph[b].append(a)\n\n\ndef bfs(start):\n visited = [False] * (n + 1)\n visited[start] = True\n q = deque()\n q.append(start)\n cnt = 1\n while q:\n now = q.popleft()\n for i in graph[now]:\n if not visited[i]:\n visited[i] = True\n q.append(i)\n cnt += 1\n return cnt\n\n\n<mask token>\nfor i in range(1, n + 1):\n result = bfs(i)\n if result > max_cnt:\n answer = [i]\n max_cnt = result\n elif result == max_cnt:\n answer.append(i)\nprint(*answer)\n",
"step-3": "<mask token>\ninput = sys.stdin.readline\nn, m = map(int, input().split())\ngraph = [[] for _ in range(n + 1)]\nfor _ in range(m):\n a, b = map(int, input().split())\n graph[b].append(a)\n\n\ndef bfs(start):\n visited = [False] * (n + 1)\n visited[start] = True\n q = deque()\n q.append(start)\n cnt = 1\n while q:\n now = q.popleft()\n for i in graph[now]:\n if not visited[i]:\n visited[i] = True\n q.append(i)\n cnt += 1\n return cnt\n\n\nanswer = []\nmax_cnt = 0\nfor i in range(1, n + 1):\n result = bfs(i)\n if result > max_cnt:\n answer = [i]\n max_cnt = result\n elif result == max_cnt:\n answer.append(i)\nprint(*answer)\n",
"step-4": "from collections import deque\nimport sys\ninput = sys.stdin.readline\nn, m = map(int, input().split())\ngraph = [[] for _ in range(n + 1)]\nfor _ in range(m):\n a, b = map(int, input().split())\n graph[b].append(a)\n\n\ndef bfs(start):\n visited = [False] * (n + 1)\n visited[start] = True\n q = deque()\n q.append(start)\n cnt = 1\n while q:\n now = q.popleft()\n for i in graph[now]:\n if not visited[i]:\n visited[i] = True\n q.append(i)\n cnt += 1\n return cnt\n\n\nanswer = []\nmax_cnt = 0\nfor i in range(1, n + 1):\n result = bfs(i)\n if result > max_cnt:\n answer = [i]\n max_cnt = result\n elif result == max_cnt:\n answer.append(i)\nprint(*answer)\n",
"step-5": "# 효율적인 해킹\n# https://www.acmicpc.net/problem/1325\n\nfrom collections import deque\nimport sys\ninput = sys.stdin.readline\n\nn, m = map(int, input().split())\n\ngraph = [[] for _ in range(n + 1)]\nfor _ in range(m):\n a, b = map(int, input().split())\n graph[b].append(a) # B를 해킹하면 A도 해킹할 수 있다\n\ndef bfs(start):\n visited = [False] * (n + 1)\n visited[start] = True\n q = deque()\n q.append(start)\n\n cnt = 1 # start를 해킹했을 때 해킹할 수 있는 컴퓨터의 개수\n while q:\n now = q.popleft()\n for i in graph[now]:\n if not visited[i]:\n visited[i] = True\n q.append(i)\n cnt += 1\n\n return cnt\n\nanswer = []\nmax_cnt = 0\nfor i in range(1, n + 1):\n result = bfs(i)\n if result > max_cnt:\n answer = [i]\n max_cnt = result\n elif result == max_cnt:\n answer.append(i)\n\nprint(*answer)",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
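The key step in the row above is storing each trust relation in reverse (graph[b].append(a)), so a BFS started at computer i counts everything that falls once i is hacked. A tiny illustrative run; the toy adjacency list and the helper reach_count are assumptions for demonstration, not part of the row:

from collections import deque

def reach_count(adj, start):
    seen = [False] * len(adj)
    seen[start] = True
    q, cnt = deque([start]), 1
    while q:
        cur = q.popleft()
        for nxt in adj[cur]:
            if not seen[nxt]:
                seen[nxt] = True
                q.append(nxt)
                cnt += 1
    return cnt

# "1 trusts 2" and "2 trusts 3" stored as reversed edges 2->1 and 3->2
adj = [[], [], [1], [2], [], []]   # index 0 unused; computers 1..5
print(reach_count(adj, 3))         # 3: hacking computer 3 also takes down 2 and 1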
"""Implementation of the Brainpool standard, see
https://tools.ietf.org/pdf/rfc5639.pdf#15
"""
from sage.all import ZZ, GF, EllipticCurve
from utils import increment_seed, embedding_degree, find_integer, SimulatedCurves, VerifiableCurve, \
class_number_check
CHECK_CLASS_NUMBER = False
def gen_brainpool_prime(seed: str, nbits: int) -> ZZ:
"""Generates a prime of length nbits out of 160bit seed s"""
while True:
p = find_integer(seed, nbits, brainpool_prime=True)
while not (p % 4 == 3 and p.is_prime()):
p += 1
if p.nbits() == nbits:
return p
seed = increment_seed(seed)
class Brainpool(VerifiableCurve):
def __init__(self, seed, p):
super().__init__(seed, p, cofactor_bound=1, cofactor_div=1)
self._standard = "brainpool"
self._category = "brainpool"
self._cofactor = 1
self._original_seed = seed
def security(self):
self._secure = False
try:
curve = EllipticCurve(GF(self._p), [self._a, self._b])
except ArithmeticError:
return
order = curve.__pari__().ellsea(1)
if order == 0:
return
order = ZZ(order)
if order >= self._p:
return
if not order.is_prime():
return
self._embedding_degree = embedding_degree(prime=self._p, order=order)
if not (order - 1) / self._embedding_degree < 100:
return
if CHECK_CLASS_NUMBER and not class_number_check(curve, order, 10 ** 7):
return
self._cardinality = order
self._order = order
self._secure = True
def set_ab(self):
pass
def set_a(self):
self._a = find_integer(self._seed, self._bits)
def check_a(self):
if self._a is None:
return False
try:
c = -3 * self._field(self._a) ** (-1)
c.nth_root(4)
return True
except ValueError:
return False
def set_b(self, b_seed=None):
if b_seed is None:
b_seed = self._seed
self._b = find_integer(b_seed, self._bits)
def check_b(self):
return self._b is not None and not self._field(self._b).is_square()
def seed_update(self, offset=1):
self._seed = increment_seed(self._seed)
def set_seed(self, seed):
self._seed = seed
def generate_generator(self, seed=None):
"""Finds generator of curve as scalar*P where P has smallest x-coordinate"""
if seed is None:
seed = self._seed
scalar = find_integer(increment_seed(seed), self._bits)
x = None
for x in self._field:
if (x ** 3 + self._a * x + self._b).is_square():
break
y = (x ** 3 + self._a * x + self._b).sqrt()
y = ZZ(min(y, self._p - y))
point = scalar * self.curve()(x, y)
self._generator = point[0], point[1]
def find_curve(self):
"""Generates one Brainpool curve over F_p (number of bits of p is nbits) out of 160bit seed"""
self.set_a()
while True:
while not self.check_a():
self.seed_update()
self.set_a()
self.seed_update()
self.set_b()
while not self.check_b():
self.seed_update()
self.set_b()
if not self.secure():
self.seed_update()
continue
self.generate_generator()
break
def generate_brainpool_curves(count: int, p: ZZ, initial_seed: str) -> SimulatedCurves:
"""This is an implementation of the Brainpool standard suitable for large-scale simulations
For more readable implementation, see 'brainpool_curve' above
"""
simulated_curves = SimulatedCurves("brainpool", p.nbits(), initial_seed, count)
curve = Brainpool(initial_seed, p)
b_seed = None
for _ in range(count):
if curve.not_defined():
curve.set_a()
if not curve.check_a():
curve.seed_update()
curve.clear()
continue
b_seed = increment_seed(curve.seed())
curve.set_b(b_seed)
if not curve.check_b():
b_seed = increment_seed(b_seed)
continue
if not curve.secure():
curve.set_seed(increment_seed(b_seed))
curve.clear()
continue
curve.generate_generator(b_seed)
curve.compute_properties()
simulated_curves.add_curve(curve)
curve = Brainpool(curve.seed(), p)
curve.seed_update()
return simulated_curves
|
normal
|
{
"blob_id": "b717abaeecea2e97c6ec78d3e0e4c97a8de5eec3",
"index": 9169,
"step-1": "<mask token>\n\n\nclass Brainpool(VerifiableCurve):\n <mask token>\n\n def security(self):\n self._secure = False\n try:\n curve = EllipticCurve(GF(self._p), [self._a, self._b])\n except ArithmeticError:\n return\n order = curve.__pari__().ellsea(1)\n if order == 0:\n return\n order = ZZ(order)\n if order >= self._p:\n return\n if not order.is_prime():\n return\n self._embedding_degree = embedding_degree(prime=self._p, order=order)\n if not (order - 1) / self._embedding_degree < 100:\n return\n if CHECK_CLASS_NUMBER and not class_number_check(curve, order, 10 ** 7\n ):\n return\n self._cardinality = order\n self._order = order\n self._secure = True\n <mask token>\n\n def set_a(self):\n self._a = find_integer(self._seed, self._bits)\n\n def check_a(self):\n if self._a is None:\n return False\n try:\n c = -3 * self._field(self._a) ** -1\n c.nth_root(4)\n return True\n except ValueError:\n return False\n\n def set_b(self, b_seed=None):\n if b_seed is None:\n b_seed = self._seed\n self._b = find_integer(b_seed, self._bits)\n <mask token>\n <mask token>\n\n def set_seed(self, seed):\n self._seed = seed\n\n def generate_generator(self, seed=None):\n \"\"\"Finds generator of curve as scalar*P where P has smallest x-coordinate\"\"\"\n if seed is None:\n seed = self._seed\n scalar = find_integer(increment_seed(seed), self._bits)\n x = None\n for x in self._field:\n if (x ** 3 + self._a * x + self._b).is_square():\n break\n y = (x ** 3 + self._a * x + self._b).sqrt()\n y = ZZ(min(y, self._p - y))\n point = scalar * self.curve()(x, y)\n self._generator = point[0], point[1]\n\n def find_curve(self):\n \"\"\"Generates one Brainpool curve over F_p (number of bits of p is nbits) out of 160bit seed\"\"\"\n self.set_a()\n while True:\n while not self.check_a():\n self.seed_update()\n self.set_a()\n self.seed_update()\n self.set_b()\n while not self.check_b():\n self.seed_update()\n self.set_b()\n if not self.secure():\n self.seed_update()\n continue\n self.generate_generator()\n break\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass Brainpool(VerifiableCurve):\n\n def __init__(self, seed, p):\n super().__init__(seed, p, cofactor_bound=1, cofactor_div=1)\n self._standard = 'brainpool'\n self._category = 'brainpool'\n self._cofactor = 1\n self._original_seed = seed\n\n def security(self):\n self._secure = False\n try:\n curve = EllipticCurve(GF(self._p), [self._a, self._b])\n except ArithmeticError:\n return\n order = curve.__pari__().ellsea(1)\n if order == 0:\n return\n order = ZZ(order)\n if order >= self._p:\n return\n if not order.is_prime():\n return\n self._embedding_degree = embedding_degree(prime=self._p, order=order)\n if not (order - 1) / self._embedding_degree < 100:\n return\n if CHECK_CLASS_NUMBER and not class_number_check(curve, order, 10 ** 7\n ):\n return\n self._cardinality = order\n self._order = order\n self._secure = True\n <mask token>\n\n def set_a(self):\n self._a = find_integer(self._seed, self._bits)\n\n def check_a(self):\n if self._a is None:\n return False\n try:\n c = -3 * self._field(self._a) ** -1\n c.nth_root(4)\n return True\n except ValueError:\n return False\n\n def set_b(self, b_seed=None):\n if b_seed is None:\n b_seed = self._seed\n self._b = find_integer(b_seed, self._bits)\n <mask token>\n <mask token>\n\n def set_seed(self, seed):\n self._seed = seed\n\n def generate_generator(self, seed=None):\n \"\"\"Finds generator of curve as scalar*P where P has smallest x-coordinate\"\"\"\n if seed is None:\n seed = self._seed\n scalar = find_integer(increment_seed(seed), self._bits)\n x = None\n for x in self._field:\n if (x ** 3 + self._a * x + self._b).is_square():\n break\n y = (x ** 3 + self._a * x + self._b).sqrt()\n y = ZZ(min(y, self._p - y))\n point = scalar * self.curve()(x, y)\n self._generator = point[0], point[1]\n\n def find_curve(self):\n \"\"\"Generates one Brainpool curve over F_p (number of bits of p is nbits) out of 160bit seed\"\"\"\n self.set_a()\n while True:\n while not self.check_a():\n self.seed_update()\n self.set_a()\n self.seed_update()\n self.set_b()\n while not self.check_b():\n self.seed_update()\n self.set_b()\n if not self.secure():\n self.seed_update()\n continue\n self.generate_generator()\n break\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass Brainpool(VerifiableCurve):\n\n def __init__(self, seed, p):\n super().__init__(seed, p, cofactor_bound=1, cofactor_div=1)\n self._standard = 'brainpool'\n self._category = 'brainpool'\n self._cofactor = 1\n self._original_seed = seed\n\n def security(self):\n self._secure = False\n try:\n curve = EllipticCurve(GF(self._p), [self._a, self._b])\n except ArithmeticError:\n return\n order = curve.__pari__().ellsea(1)\n if order == 0:\n return\n order = ZZ(order)\n if order >= self._p:\n return\n if not order.is_prime():\n return\n self._embedding_degree = embedding_degree(prime=self._p, order=order)\n if not (order - 1) / self._embedding_degree < 100:\n return\n if CHECK_CLASS_NUMBER and not class_number_check(curve, order, 10 ** 7\n ):\n return\n self._cardinality = order\n self._order = order\n self._secure = True\n <mask token>\n\n def set_a(self):\n self._a = find_integer(self._seed, self._bits)\n\n def check_a(self):\n if self._a is None:\n return False\n try:\n c = -3 * self._field(self._a) ** -1\n c.nth_root(4)\n return True\n except ValueError:\n return False\n\n def set_b(self, b_seed=None):\n if b_seed is None:\n b_seed = self._seed\n self._b = find_integer(b_seed, self._bits)\n\n def check_b(self):\n return self._b is not None and not self._field(self._b).is_square()\n <mask token>\n\n def set_seed(self, seed):\n self._seed = seed\n\n def generate_generator(self, seed=None):\n \"\"\"Finds generator of curve as scalar*P where P has smallest x-coordinate\"\"\"\n if seed is None:\n seed = self._seed\n scalar = find_integer(increment_seed(seed), self._bits)\n x = None\n for x in self._field:\n if (x ** 3 + self._a * x + self._b).is_square():\n break\n y = (x ** 3 + self._a * x + self._b).sqrt()\n y = ZZ(min(y, self._p - y))\n point = scalar * self.curve()(x, y)\n self._generator = point[0], point[1]\n\n def find_curve(self):\n \"\"\"Generates one Brainpool curve over F_p (number of bits of p is nbits) out of 160bit seed\"\"\"\n self.set_a()\n while True:\n while not self.check_a():\n self.seed_update()\n self.set_a()\n self.seed_update()\n self.set_b()\n while not self.check_b():\n self.seed_update()\n self.set_b()\n if not self.secure():\n self.seed_update()\n continue\n self.generate_generator()\n break\n\n\n<mask token>\n",
"step-4": "<mask token>\n\n\nclass Brainpool(VerifiableCurve):\n\n def __init__(self, seed, p):\n super().__init__(seed, p, cofactor_bound=1, cofactor_div=1)\n self._standard = 'brainpool'\n self._category = 'brainpool'\n self._cofactor = 1\n self._original_seed = seed\n\n def security(self):\n self._secure = False\n try:\n curve = EllipticCurve(GF(self._p), [self._a, self._b])\n except ArithmeticError:\n return\n order = curve.__pari__().ellsea(1)\n if order == 0:\n return\n order = ZZ(order)\n if order >= self._p:\n return\n if not order.is_prime():\n return\n self._embedding_degree = embedding_degree(prime=self._p, order=order)\n if not (order - 1) / self._embedding_degree < 100:\n return\n if CHECK_CLASS_NUMBER and not class_number_check(curve, order, 10 ** 7\n ):\n return\n self._cardinality = order\n self._order = order\n self._secure = True\n\n def set_ab(self):\n pass\n\n def set_a(self):\n self._a = find_integer(self._seed, self._bits)\n\n def check_a(self):\n if self._a is None:\n return False\n try:\n c = -3 * self._field(self._a) ** -1\n c.nth_root(4)\n return True\n except ValueError:\n return False\n\n def set_b(self, b_seed=None):\n if b_seed is None:\n b_seed = self._seed\n self._b = find_integer(b_seed, self._bits)\n\n def check_b(self):\n return self._b is not None and not self._field(self._b).is_square()\n\n def seed_update(self, offset=1):\n self._seed = increment_seed(self._seed)\n\n def set_seed(self, seed):\n self._seed = seed\n\n def generate_generator(self, seed=None):\n \"\"\"Finds generator of curve as scalar*P where P has smallest x-coordinate\"\"\"\n if seed is None:\n seed = self._seed\n scalar = find_integer(increment_seed(seed), self._bits)\n x = None\n for x in self._field:\n if (x ** 3 + self._a * x + self._b).is_square():\n break\n y = (x ** 3 + self._a * x + self._b).sqrt()\n y = ZZ(min(y, self._p - y))\n point = scalar * self.curve()(x, y)\n self._generator = point[0], point[1]\n\n def find_curve(self):\n \"\"\"Generates one Brainpool curve over F_p (number of bits of p is nbits) out of 160bit seed\"\"\"\n self.set_a()\n while True:\n while not self.check_a():\n self.seed_update()\n self.set_a()\n self.seed_update()\n self.set_b()\n while not self.check_b():\n self.seed_update()\n self.set_b()\n if not self.secure():\n self.seed_update()\n continue\n self.generate_generator()\n break\n\n\n<mask token>\n",
"step-5": "\"\"\"Implementation of the Brainpool standard, see\n https://tools.ietf.org/pdf/rfc5639.pdf#15\n\"\"\"\nfrom sage.all import ZZ, GF, EllipticCurve\nfrom utils import increment_seed, embedding_degree, find_integer, SimulatedCurves, VerifiableCurve, \\\n class_number_check\n\nCHECK_CLASS_NUMBER = False\n\n\ndef gen_brainpool_prime(seed: str, nbits: int) -> ZZ:\n \"\"\"Generates a prime of length nbits out of 160bit seed s\"\"\"\n while True:\n p = find_integer(seed, nbits, brainpool_prime=True)\n while not (p % 4 == 3 and p.is_prime()):\n p += 1\n if p.nbits() == nbits:\n return p\n seed = increment_seed(seed)\n\n\nclass Brainpool(VerifiableCurve):\n def __init__(self, seed, p):\n super().__init__(seed, p, cofactor_bound=1, cofactor_div=1)\n self._standard = \"brainpool\"\n self._category = \"brainpool\"\n self._cofactor = 1\n self._original_seed = seed\n\n def security(self):\n self._secure = False\n try:\n curve = EllipticCurve(GF(self._p), [self._a, self._b])\n except ArithmeticError:\n return\n order = curve.__pari__().ellsea(1)\n if order == 0:\n return\n order = ZZ(order)\n if order >= self._p:\n return\n if not order.is_prime():\n return\n self._embedding_degree = embedding_degree(prime=self._p, order=order)\n if not (order - 1) / self._embedding_degree < 100:\n return\n if CHECK_CLASS_NUMBER and not class_number_check(curve, order, 10 ** 7):\n return\n self._cardinality = order\n self._order = order\n self._secure = True\n\n def set_ab(self):\n pass\n\n def set_a(self):\n self._a = find_integer(self._seed, self._bits)\n\n def check_a(self):\n if self._a is None:\n return False\n try:\n c = -3 * self._field(self._a) ** (-1)\n c.nth_root(4)\n return True\n except ValueError:\n return False\n\n def set_b(self, b_seed=None):\n if b_seed is None:\n b_seed = self._seed\n self._b = find_integer(b_seed, self._bits)\n\n def check_b(self):\n return self._b is not None and not self._field(self._b).is_square()\n\n def seed_update(self, offset=1):\n self._seed = increment_seed(self._seed)\n\n def set_seed(self, seed):\n self._seed = seed\n\n def generate_generator(self, seed=None):\n \"\"\"Finds generator of curve as scalar*P where P has smallest x-coordinate\"\"\"\n if seed is None:\n seed = self._seed\n scalar = find_integer(increment_seed(seed), self._bits)\n x = None\n for x in self._field:\n if (x ** 3 + self._a * x + self._b).is_square():\n break\n y = (x ** 3 + self._a * x + self._b).sqrt()\n y = ZZ(min(y, self._p - y))\n point = scalar * self.curve()(x, y)\n self._generator = point[0], point[1]\n\n def find_curve(self):\n \"\"\"Generates one Brainpool curve over F_p (number of bits of p is nbits) out of 160bit seed\"\"\"\n self.set_a()\n while True:\n while not self.check_a():\n self.seed_update()\n self.set_a()\n self.seed_update()\n self.set_b()\n while not self.check_b():\n self.seed_update()\n self.set_b()\n if not self.secure():\n self.seed_update()\n continue\n self.generate_generator()\n break\n\n\ndef generate_brainpool_curves(count: int, p: ZZ, initial_seed: str) -> SimulatedCurves:\n \"\"\"This is an implementation of the Brainpool standard suitable for large-scale simulations\n For more readable implementation, see 'brainpool_curve' above\n \"\"\"\n simulated_curves = SimulatedCurves(\"brainpool\", p.nbits(), initial_seed, count)\n curve = Brainpool(initial_seed, p)\n b_seed = None\n for _ in range(count):\n if curve.not_defined():\n curve.set_a()\n if not curve.check_a():\n curve.seed_update()\n curve.clear()\n continue\n b_seed = increment_seed(curve.seed())\n 
curve.set_b(b_seed)\n if not curve.check_b():\n b_seed = increment_seed(b_seed)\n continue\n if not curve.secure():\n curve.set_seed(increment_seed(b_seed))\n curve.clear()\n continue\n curve.generate_generator(b_seed)\n curve.compute_properties()\n simulated_curves.add_curve(curve)\n curve = Brainpool(curve.seed(), p)\n curve.seed_update()\n\n return simulated_curves\n",
"step-ids": [
8,
9,
10,
12,
17
]
}
|
[
8,
9,
10,
12,
17
] |
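The prime search in gen_brainpool_prime above has a characteristic accept/retry shape: derive a candidate from the seed, bump it until it is prime and congruent to 3 mod 4, and accept it only if it still has the requested bit length, otherwise move on to the next seed. A rough pure-Python sketch of that control flow only; derive_candidate and the naive primality test are stand-ins, not the RFC 5639 find_integer derivation:

import hashlib

def derive_candidate(seed: str, nbits: int) -> int:
    # stand-in: hash the seed and force the top bit so the candidate has nbits bits
    h = int(hashlib.sha1(seed.encode()).hexdigest(), 16)
    return (h % (1 << nbits)) | (1 << (nbits - 1))

def is_prime(n: int) -> bool:
    # naive trial division, adequate only for the tiny nbits used here
    if n < 2:
        return False
    d = 2
    while d * d <= n:
        if n % d == 0:
            return False
        d += 1
    return True

def toy_brainpool_prime(seed: str, nbits: int) -> int:
    while True:
        p = derive_candidate(seed, nbits)
        while not (p % 4 == 3 and is_prime(p)):
            p += 1
        if p.bit_length() == nbits:   # still the right size -> accept
            return p
        seed += "1"                   # overflowed nbits: stand-in for increment_seed()

print(toy_brainpool_prime("abc", 20))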
##############################################
# Binary Tree #
# by Vishal Nirmal #
# #
# A Binary Tree ADT implementation. #
##############################################
class BinaryTree:
def __init__(self, data=None):
self.data = data
self.left = None
self.right = None
def insert(self, data):
if self.data != None:
arr = [self]
while len(arr) > 0:
node = arr[0]
if node.left:
arr.append(node.left)
else:
node.left = BinaryTree(data)
break
if node.right:
arr.append(node.right)
else:
node.right = BinaryTree(data)
break
arr = arr[1:]
else:
self.data = data
def insertNodes(self, arr):
for i in arr:
self.insert(i)
def preorder(self):
print(self.data, end=' ')
if self.left:
self.left.preorder()
if self.right:
self.right.preorder()
def inorder(self):
if self.left:
self.left.inorder()
print(self.data, end=' ')
if self.right:
self.right.inorder()
def postorder(self):
if self.left:
self.left.postorder()
if self.right:
self.right.postorder()
print(self.data, end=' ')
def levelorder(self):
arr = [self]
while len(arr):
node = arr[0]
print(node.data, end=' ')
if node.left:
arr.append(node.left)
if node.right:
arr.append(node.right)
arr = arr[1:]
    def height(self):
        if self.left == None and self.right == None:
            return 0
        lh = self.left.height() if self.left else 0
        rh = self.right.height() if self.right else 0
        return max(lh, rh)+1
    def level(self):
        if self.left == None and self.right == None:
            return 0
        lh = self.left.level() if self.left else 0
        rh = self.right.level() if self.right else 0
        return max(lh, rh)+1
def search(self, data):
if self == None:
return False
if self.data == data:
return True
if self.left and self.left.search(data) == True:
return True
if self.right:
return self.right.search(data)
def size(self):
if self == None:
return 0
ls = rs = 0
if self.left:
ls = self.left.size()
if self.right:
rs = self.right.size()
return ls + rs + 1
    def max(self):
        if self == None:
            return 0
        lmx = rmx = self.data
        if self.left:
            lmx = self.left.max()
        if self.right:
            rmx = self.right.max()
        return max(lmx, rmx, self.data)
    def min(self):
        if self == None:
            return 0
        lmn = rmn = self.data
        if self.left:
            lmn = self.left.min()
        if self.right:
            rmn = self.right.min()
        return min(lmn, rmn, self.data)
def deepest(self):
if self==None:
return None
arr = [self]
while len(arr):
node = arr[0]
if node.left:
arr.append(node.left)
if node.right:
arr.append(node.right)
temp = arr[-1]
arr = arr[1:]
return temp.data
def leafNodes(self):
if self.left == None and self.right == None:
return 1
lln = rln = 0
if self.left:
lln = self.left.leafNodes()
if self.right:
rln = self.right.leafNodes()
return lln + rln
def fullNodes(self):
if self==None:
return 0
arr = [self]
count = 0
while len(arr):
node = arr[0]
if node.left:
arr.append(node.left)
if node.right:
arr.append(node.right)
if node.left and node.right:
count+=1
arr = arr[1:]
return count
def halfNodes(self):
if self==None:
return 0
arr = [self]
count = 0
while len(arr):
node = arr[0]
if node.left:
arr.append(node.left)
if node.right:
arr.append(node.right)
if (node.left==None and node.right) or (node.left and node.right==None):
count+=1
arr = arr[1:]
return count
def allPaths(self, path=[0]*1000, pathlen=0):
if self == None:
return
path[pathlen] = self.data
pathlen+=1
if self.left == None and self.right == None:
for i in range(pathlen-1):
print(path[i], end='->')
            print(path[pathlen-1])
return
if self.left:
self.left.allPaths(path, pathlen)
if self.right:
self.right.allPaths(path, pathlen)
def sum(self):
if self == None:
return 0
ls = rs = 0
if self.left:
ls = self.left.sum()
if self.right:
rs = self.right.sum()
return self.data+ls+rs
def delete(self):
arr = [self]
while len(arr):
node = arr[0]
if node.left:
arr.append(node.left)
if node.right:
arr.append(node.right)
temp = arr[-1]
arr = arr[1:]
temp = None
|
normal
|
{
"blob_id": "3eaced9609c7adfa5457d7dcad8b2dfaeb697b16",
"index": 3220,
"step-1": "class BinaryTree:\n\n def __init__(self, data=None):\n self.data = data\n self.left = None\n self.right = None\n\n def insert(self, data):\n if self.data != None:\n arr = [self]\n while len(arr) > 0:\n node = arr[0]\n if node.left:\n arr.append(node.left)\n else:\n node.left = BinaryTree(data)\n break\n if node.right:\n arr.append(node.right)\n else:\n node.right = BinaryTree(data)\n break\n arr = arr[1:]\n else:\n self.data = data\n <mask token>\n\n def preorder(self):\n print(self.data, end=' ')\n if self.left:\n self.left.preorder()\n if self.right:\n self.right.preorder()\n\n def inorder(self):\n if self.left:\n self.left.inorder()\n print(self.data, end=' ')\n if self.right:\n self.right.inorder()\n\n def postorder(self):\n if self.left:\n self.left.postorder()\n if self.right:\n self.right.postorder()\n print(self.data, end=' ')\n <mask token>\n\n def height(self):\n if self.left == None or self.right == None:\n return 0\n lh = self.left.height()\n rh = self.right.height()\n return max(lh, rh) + 1\n\n def level(self):\n if self.left == None or self.right == None:\n return 0\n lh = self.left.level()\n rh = self.right.level()\n return max(lh, rh) + 1\n <mask token>\n\n def size(self):\n if self == None:\n return 0\n ls = rs = 0\n if self.left:\n ls = self.left.size()\n if self.right:\n rs = self.right.size()\n return ls + rs + 1\n <mask token>\n <mask token>\n\n def deepest(self):\n if self == None:\n return None\n arr = [self]\n while len(arr):\n node = arr[0]\n if node.left:\n arr.append(node.left)\n if node.right:\n arr.append(node.right)\n temp = arr[-1]\n arr = arr[1:]\n return temp.data\n <mask token>\n <mask token>\n\n def halfNodes(self):\n if self == None:\n return 0\n arr = [self]\n count = 0\n while len(arr):\n node = arr[0]\n if node.left:\n arr.append(node.left)\n if node.right:\n arr.append(node.right)\n if (node.left == None and node.right or node.left and node.\n right == None):\n count += 1\n arr = arr[1:]\n return count\n <mask token>\n <mask token>\n\n def delete(self):\n arr = [self]\n while len(arr):\n node = arr[0]\n if node.left:\n arr.append(node.left)\n if node.right:\n arr.append(node.right)\n temp = arr[-1]\n arr = arr[1:]\n temp = None\n",
"step-2": "class BinaryTree:\n\n def __init__(self, data=None):\n self.data = data\n self.left = None\n self.right = None\n\n def insert(self, data):\n if self.data != None:\n arr = [self]\n while len(arr) > 0:\n node = arr[0]\n if node.left:\n arr.append(node.left)\n else:\n node.left = BinaryTree(data)\n break\n if node.right:\n arr.append(node.right)\n else:\n node.right = BinaryTree(data)\n break\n arr = arr[1:]\n else:\n self.data = data\n\n def insertNodes(self, arr):\n for i in arr:\n self.insert(i)\n\n def preorder(self):\n print(self.data, end=' ')\n if self.left:\n self.left.preorder()\n if self.right:\n self.right.preorder()\n\n def inorder(self):\n if self.left:\n self.left.inorder()\n print(self.data, end=' ')\n if self.right:\n self.right.inorder()\n\n def postorder(self):\n if self.left:\n self.left.postorder()\n if self.right:\n self.right.postorder()\n print(self.data, end=' ')\n <mask token>\n\n def height(self):\n if self.left == None or self.right == None:\n return 0\n lh = self.left.height()\n rh = self.right.height()\n return max(lh, rh) + 1\n\n def level(self):\n if self.left == None or self.right == None:\n return 0\n lh = self.left.level()\n rh = self.right.level()\n return max(lh, rh) + 1\n <mask token>\n\n def size(self):\n if self == None:\n return 0\n ls = rs = 0\n if self.left:\n ls = self.left.size()\n if self.right:\n rs = self.right.size()\n return ls + rs + 1\n <mask token>\n <mask token>\n\n def deepest(self):\n if self == None:\n return None\n arr = [self]\n while len(arr):\n node = arr[0]\n if node.left:\n arr.append(node.left)\n if node.right:\n arr.append(node.right)\n temp = arr[-1]\n arr = arr[1:]\n return temp.data\n <mask token>\n\n def fullNodes(self):\n if self == None:\n return 0\n arr = [self]\n count = 0\n while len(arr):\n node = arr[0]\n if node.left:\n arr.append(node.left)\n if node.right:\n arr.append(node.right)\n if node.left and node.right:\n count += 1\n arr = arr[1:]\n return count\n\n def halfNodes(self):\n if self == None:\n return 0\n arr = [self]\n count = 0\n while len(arr):\n node = arr[0]\n if node.left:\n arr.append(node.left)\n if node.right:\n arr.append(node.right)\n if (node.left == None and node.right or node.left and node.\n right == None):\n count += 1\n arr = arr[1:]\n return count\n\n def allPaths(self, path=[0] * 1000, pathlen=0):\n if self == None:\n return\n path[pathlen] = self.data\n pathlen += 1\n if self.left == None and self.right == None:\n for i in range(pathlen - 1):\n print(path[i], end='->')\n print(path[pathlen])\n return\n if self.left:\n self.left.allPaths(path, pathlen)\n if self.right:\n self.right.allPaths(path, pathlen)\n <mask token>\n\n def delete(self):\n arr = [self]\n while len(arr):\n node = arr[0]\n if node.left:\n arr.append(node.left)\n if node.right:\n arr.append(node.right)\n temp = arr[-1]\n arr = arr[1:]\n temp = None\n",
"step-3": "class BinaryTree:\n\n def __init__(self, data=None):\n self.data = data\n self.left = None\n self.right = None\n\n def insert(self, data):\n if self.data != None:\n arr = [self]\n while len(arr) > 0:\n node = arr[0]\n if node.left:\n arr.append(node.left)\n else:\n node.left = BinaryTree(data)\n break\n if node.right:\n arr.append(node.right)\n else:\n node.right = BinaryTree(data)\n break\n arr = arr[1:]\n else:\n self.data = data\n\n def insertNodes(self, arr):\n for i in arr:\n self.insert(i)\n\n def preorder(self):\n print(self.data, end=' ')\n if self.left:\n self.left.preorder()\n if self.right:\n self.right.preorder()\n\n def inorder(self):\n if self.left:\n self.left.inorder()\n print(self.data, end=' ')\n if self.right:\n self.right.inorder()\n\n def postorder(self):\n if self.left:\n self.left.postorder()\n if self.right:\n self.right.postorder()\n print(self.data, end=' ')\n <mask token>\n\n def height(self):\n if self.left == None or self.right == None:\n return 0\n lh = self.left.height()\n rh = self.right.height()\n return max(lh, rh) + 1\n\n def level(self):\n if self.left == None or self.right == None:\n return 0\n lh = self.left.level()\n rh = self.right.level()\n return max(lh, rh) + 1\n <mask token>\n\n def size(self):\n if self == None:\n return 0\n ls = rs = 0\n if self.left:\n ls = self.left.size()\n if self.right:\n rs = self.right.size()\n return ls + rs + 1\n\n def max(self):\n if self == None:\n return 0\n lmx = rmx = 0\n if self.left:\n lmx = self.left.max()\n if self.right:\n rmx = self.right.max()\n return max(lmx, rmx, self.data)\n\n def min(self):\n if self == None:\n return 0\n lmn = rmn = 0\n if self.left:\n lmn = self.left.min()\n if self.right:\n rmn = self.right.min()\n return min(lmn, rmn, self.data)\n\n def deepest(self):\n if self == None:\n return None\n arr = [self]\n while len(arr):\n node = arr[0]\n if node.left:\n arr.append(node.left)\n if node.right:\n arr.append(node.right)\n temp = arr[-1]\n arr = arr[1:]\n return temp.data\n <mask token>\n\n def fullNodes(self):\n if self == None:\n return 0\n arr = [self]\n count = 0\n while len(arr):\n node = arr[0]\n if node.left:\n arr.append(node.left)\n if node.right:\n arr.append(node.right)\n if node.left and node.right:\n count += 1\n arr = arr[1:]\n return count\n\n def halfNodes(self):\n if self == None:\n return 0\n arr = [self]\n count = 0\n while len(arr):\n node = arr[0]\n if node.left:\n arr.append(node.left)\n if node.right:\n arr.append(node.right)\n if (node.left == None and node.right or node.left and node.\n right == None):\n count += 1\n arr = arr[1:]\n return count\n\n def allPaths(self, path=[0] * 1000, pathlen=0):\n if self == None:\n return\n path[pathlen] = self.data\n pathlen += 1\n if self.left == None and self.right == None:\n for i in range(pathlen - 1):\n print(path[i], end='->')\n print(path[pathlen])\n return\n if self.left:\n self.left.allPaths(path, pathlen)\n if self.right:\n self.right.allPaths(path, pathlen)\n\n def sum(self):\n if self == None:\n return 0\n ls = rs = 0\n if self.left:\n ls = self.left.sum()\n if self.right:\n rs = self.right.sum()\n return self.data + ls + rs\n\n def delete(self):\n arr = [self]\n while len(arr):\n node = arr[0]\n if node.left:\n arr.append(node.left)\n if node.right:\n arr.append(node.right)\n temp = arr[-1]\n arr = arr[1:]\n temp = None\n",
"step-4": "class BinaryTree:\n\n def __init__(self, data=None):\n self.data = data\n self.left = None\n self.right = None\n\n def insert(self, data):\n if self.data != None:\n arr = [self]\n while len(arr) > 0:\n node = arr[0]\n if node.left:\n arr.append(node.left)\n else:\n node.left = BinaryTree(data)\n break\n if node.right:\n arr.append(node.right)\n else:\n node.right = BinaryTree(data)\n break\n arr = arr[1:]\n else:\n self.data = data\n\n def insertNodes(self, arr):\n for i in arr:\n self.insert(i)\n\n def preorder(self):\n print(self.data, end=' ')\n if self.left:\n self.left.preorder()\n if self.right:\n self.right.preorder()\n\n def inorder(self):\n if self.left:\n self.left.inorder()\n print(self.data, end=' ')\n if self.right:\n self.right.inorder()\n\n def postorder(self):\n if self.left:\n self.left.postorder()\n if self.right:\n self.right.postorder()\n print(self.data, end=' ')\n <mask token>\n\n def height(self):\n if self.left == None or self.right == None:\n return 0\n lh = self.left.height()\n rh = self.right.height()\n return max(lh, rh) + 1\n\n def level(self):\n if self.left == None or self.right == None:\n return 0\n lh = self.left.level()\n rh = self.right.level()\n return max(lh, rh) + 1\n <mask token>\n\n def size(self):\n if self == None:\n return 0\n ls = rs = 0\n if self.left:\n ls = self.left.size()\n if self.right:\n rs = self.right.size()\n return ls + rs + 1\n\n def max(self):\n if self == None:\n return 0\n lmx = rmx = 0\n if self.left:\n lmx = self.left.max()\n if self.right:\n rmx = self.right.max()\n return max(lmx, rmx, self.data)\n\n def min(self):\n if self == None:\n return 0\n lmn = rmn = 0\n if self.left:\n lmn = self.left.min()\n if self.right:\n rmn = self.right.min()\n return min(lmn, rmn, self.data)\n\n def deepest(self):\n if self == None:\n return None\n arr = [self]\n while len(arr):\n node = arr[0]\n if node.left:\n arr.append(node.left)\n if node.right:\n arr.append(node.right)\n temp = arr[-1]\n arr = arr[1:]\n return temp.data\n\n def leafNodes(self):\n if self.left == None and self.right == None:\n return 1\n lln = rln = 0\n if self.left:\n lln = self.left.leafNodes()\n if self.right:\n rln = self.right.leafNodes()\n return lln + rln\n\n def fullNodes(self):\n if self == None:\n return 0\n arr = [self]\n count = 0\n while len(arr):\n node = arr[0]\n if node.left:\n arr.append(node.left)\n if node.right:\n arr.append(node.right)\n if node.left and node.right:\n count += 1\n arr = arr[1:]\n return count\n\n def halfNodes(self):\n if self == None:\n return 0\n arr = [self]\n count = 0\n while len(arr):\n node = arr[0]\n if node.left:\n arr.append(node.left)\n if node.right:\n arr.append(node.right)\n if (node.left == None and node.right or node.left and node.\n right == None):\n count += 1\n arr = arr[1:]\n return count\n\n def allPaths(self, path=[0] * 1000, pathlen=0):\n if self == None:\n return\n path[pathlen] = self.data\n pathlen += 1\n if self.left == None and self.right == None:\n for i in range(pathlen - 1):\n print(path[i], end='->')\n print(path[pathlen])\n return\n if self.left:\n self.left.allPaths(path, pathlen)\n if self.right:\n self.right.allPaths(path, pathlen)\n\n def sum(self):\n if self == None:\n return 0\n ls = rs = 0\n if self.left:\n ls = self.left.sum()\n if self.right:\n rs = self.right.sum()\n return self.data + ls + rs\n\n def delete(self):\n arr = [self]\n while len(arr):\n node = arr[0]\n if node.left:\n arr.append(node.left)\n if node.right:\n arr.append(node.right)\n temp = arr[-1]\n arr = arr[1:]\n temp 
= None\n",
"step-5": "##############################################\n# Binary Tree #\n# by Vishal Nirmal #\n# #\n# A Binary Tree ADT implementation. #\n##############################################\n\n\n\n\nclass BinaryTree:\n\n def __init__(self, data=None):\n\n self.data = data\n\n self.left = None\n\n self.right = None\n\n def insert(self, data):\n\n if self.data != None:\n\n arr = [self]\n\n while len(arr) > 0:\n\n node = arr[0]\n\n if node.left:\n\n arr.append(node.left)\n\n else:\n\n node.left = BinaryTree(data)\n\n break\n\n if node.right:\n\n arr.append(node.right)\n\n else:\n\n node.right = BinaryTree(data)\n\n break\n\n arr = arr[1:]\n\n else:\n\n self.data = data\n\n def insertNodes(self, arr):\n\n for i in arr:\n\n self.insert(i)\n\n def preorder(self):\n\n print(self.data, end=' ')\n\n if self.left:\n\n self.left.preorder()\n\n if self.right:\n\n self.right.preorder()\n\n def inorder(self):\n\n if self.left:\n\n self.left.inorder()\n\n print(self.data, end=' ')\n\n if self.right:\n\n self.right.inorder()\n\n def postorder(self):\n\n if self.left:\n\n self.left.postorder()\n\n if self.right:\n\n self.right.postorder()\n\n print(self.data, end=' ')\n\n def levelorder(self):\n\n arr = [self]\n\n while len(arr):\n\n node = arr[0]\n\n print(node.data, end=' ')\n\n if node.left:\n\n arr.append(node.left)\n\n if node.right:\n\n arr.append(node.right)\n\n arr = arr[1:]\n\n def height(self):\n\n if self.left == None or self.right==None:\n\n return 0\n\n lh = self.left.height()\n\n rh = self.right.height()\n\n return max(lh, rh)+1\n\n def level(self):\n\n if self.left == None or self.right==None:\n\n return 0\n\n lh = self.left.level()\n\n rh = self.right.level()\n\n return max(lh, rh)+1\n\n def search(self, data):\n\n if self == None:\n\n return False\n\n if self.data == data:\n\n return True\n\n if self.left and self.left.search(data) == True:\n\n return True\n\n if self.right:\n\n return self.right.search(data)\n\n def size(self):\n\n if self == None:\n\n return 0\n\n ls = rs = 0\n\n if self.left:\n\n ls = self.left.size()\n\n if self.right:\n\n rs = self.right.size()\n\n return ls + rs + 1\n\n def max(self):\n\n if self == None:\n\n return 0\n\n lmx = rmx = 0\n\n if self.left:\n\n lmx = self.left.max()\n\n if self.right:\n\n rmx = self.right.max()\n\n return max(lmx, rmx, self.data)\n\n def min(self):\n\n if self == None:\n\n return 0\n\n lmn = rmn = 0\n\n if self.left:\n\n lmn = self.left.min()\n\n if self.right:\n\n rmn = self.right.min()\n\n return min(lmn, rmn, self.data)\n\n def deepest(self):\n\n if self==None:\n\n return None\n\n arr = [self]\n\n while len(arr):\n\n node = arr[0]\n\n if node.left:\n\n arr.append(node.left)\n\n if node.right:\n\n arr.append(node.right)\n\n temp = arr[-1]\n\n arr = arr[1:]\n\n return temp.data\n\n def leafNodes(self):\n\n if self.left == None and self.right == None:\n\n return 1\n\n lln = rln = 0\n\n if self.left:\n\n lln = self.left.leafNodes()\n\n if self.right:\n\n rln = self.right.leafNodes()\n\n return lln + rln\n\n def fullNodes(self):\n\n if self==None:\n\n return 0\n\n arr = [self]\n\n count = 0\n\n while len(arr):\n\n node = arr[0]\n\n if node.left:\n\n arr.append(node.left)\n\n if node.right:\n\n arr.append(node.right)\n\n if node.left and node.right:\n\n count+=1\n\n arr = arr[1:]\n\n return count\n\n def halfNodes(self):\n\n if self==None:\n\n return 0\n\n arr = [self]\n\n count = 0\n\n while len(arr):\n\n node = arr[0]\n\n if node.left:\n\n arr.append(node.left)\n\n if node.right:\n\n arr.append(node.right)\n\n if (node.left==None and 
node.right) or (node.left and node.right==None):\n\n count+=1\n\n arr = arr[1:]\n\n return count\n\n def allPaths(self, path=[0]*1000, pathlen=0):\n\n if self == None:\n\n return\n\n path[pathlen] = self.data\n\n pathlen+=1\n\n if self.left == None and self.right == None:\n\n for i in range(pathlen-1):\n\n print(path[i], end='->')\n\n print(path[pathlen])\n\n return\n\n if self.left:\n\n self.left.allPaths(path, pathlen)\n\n if self.right:\n\n self.right.allPaths(path, pathlen)\n \n def sum(self):\n\n if self == None:\n\n return 0\n\n ls = rs = 0\n\n if self.left:\n\n ls = self.left.sum()\n\n if self.right:\n\n rs = self.right.sum()\n\n return self.data+ls+rs\n\n def delete(self):\n\n arr = [self]\n\n while len(arr):\n\n node = arr[0]\n\n if node.left:\n\n arr.append(node.left)\n\n if node.right:\n\n arr.append(node.right)\n\n temp = arr[-1]\n\n arr = arr[1:]\n\n temp = None",
"step-ids": [
12,
15,
18,
19,
22
]
}
|
[
12,
15,
18,
19,
22
] |
import sys
def ler (t):
i =0
for s in sys.stdin:
l=s.split(" ")
t.append(l)
def melhor (t):
i=1
x=int(t[0][0].strip("\n"))
n=len(t)
while(i<n):
u=int((t[i][2]).strip())
		if(u<x):
			x=u  # assumed intent: keep the smaller value as the running best
		i+=1
def vendedor():
t=[]
ler(t)
melhor(t)
vendedor()
|
normal
|
{
"blob_id": "76664114382bdeb0bffb996e4dd4448b6c87520d",
"index": 9719,
"step-1": "import sys \n\ndef ler (t):\n\ti =0\n\tfor s in sys.stdin:\n\t\tl=s.split(\" \")\n\t\tt.append(l)\n\ndef melhor (t):\n\ti=1\n\tx=int(t[0][0].strip(\"\\n\"))\n\tn=len(t)\n\twhile(i<n):\n\t\tu=int((t[i][2]).strip())\n\t\tif(u<x)\n\t\ti+=1\n\n\n\n\ndef vendedor():\n\tt=[]\n\tler(t)\n\tmelhor(t)\nvendedor()",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
"""This is the body of the low-level worker tool.
A worker is intended to run as a process that imports a module, mutates it in
one location with one operator, runs the tests, reports the results, and dies.
"""
import difflib
import importlib
import inspect
import json
import logging
import subprocess
import sys
import traceback
import astunparse
try:
import typing # the typing module does some fancy stuff at import time
# which we shall not do twice... by loading it here,
# preserve_modules does not delete it and therefore
# fancy stuff happens only once
except ImportError:
pass
from .config import serialize_config
from .importing import preserve_modules, using_ast
from .mutating import MutatingCore
from .parsing import get_ast
from .testing.test_runner import TestOutcome
from .work_item import WorkItem
log = logging.getLogger()
class WorkerOutcome:
"""Possible outcomes for a worker.
"""
NORMAL = 'normal'
EXCEPTION = 'exception'
NO_TEST = 'no-test'
TIMEOUT = 'timeout'
SKIPPED = 'skipped'
def worker(module_name,
operator_class,
occurrence,
test_runner):
"""Mutate the OCCURRENCE-th site for OPERATOR_CLASS in MODULE_NAME, run the
tests, and report the results.
This is fundamentally the single-mutation-and-test-run process
implementation.
There are three high-level ways that a worker can finish. First, it could
fail exceptionally, meaning that some uncaught exception made its way from
some part of the operation to terminate the function. This function will
    intercept all exceptions and return them in a non-exceptional structure.
Second, the mutation testing machinery may determine that there is no
    OCCURRENCE-th instance for OPERATOR_CLASS in the module under test. In this
case there is no way to report a test result (i.e. killed, survived, or
incompetent) so a special value is returned indicating that no mutation is
possible.
Finally, and hopefully normally, the worker will find that it can run a
test. It will do so and report back the result - killed, survived, or
incompetent - in a structured way.
Returns: a WorkItem
Raises: This will generally not raise any exceptions. Rather, exceptions
will be reported using the 'exception' result-type in the return value.
"""
try:
with preserve_modules():
module = importlib.import_module(module_name)
module_source_file = inspect.getsourcefile(module)
module_ast = get_ast(module)
module_source = astunparse.unparse(module_ast)
core = MutatingCore(occurrence)
operator = operator_class(core)
# note: after this step module_ast and modified_ast
# appear to be the same
modified_ast = operator.visit(module_ast)
modified_source = astunparse.unparse(modified_ast)
if not core.activation_record:
return WorkItem(
worker_outcome=WorkerOutcome.NO_TEST)
# generate a source diff to visualize how the mutation
# operator has changed the code
module_diff = ["--- mutation diff ---"]
for line in difflib.unified_diff(module_source.split('\n'),
modified_source.split('\n'),
fromfile="a" + module_source_file,
tofile="b" + module_source_file,
lineterm=""):
module_diff.append(line)
with using_ast(module_name, module_ast):
rec = test_runner()
rec.update({
'diff': module_diff,
'worker_outcome': WorkerOutcome.NORMAL
})
rec.update(core.activation_record)
return rec
except Exception: # noqa # pylint: disable=broad-except
return WorkItem(
data=traceback.format_exception(*sys.exc_info()),
test_outcome=TestOutcome.INCOMPETENT,
worker_outcome=WorkerOutcome.EXCEPTION)
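# A minimal, self-contained sketch of the diff step performed above: given the
# original and the mutated source as plain strings, build the same
# "--- mutation diff ---" listing that `worker` attaches to its result. The two
# source snippets and file names below are made-up examples, not the output of
# any real mutation operator.
def _example_mutation_diff():
    original_source = "def f(x):\n    return x + 1\n"
    mutated_source = "def f(x):\n    return x - 1\n"
    diff = ["--- mutation diff ---"]
    for line in difflib.unified_diff(original_source.split('\n'),
                                     mutated_source.split('\n'),
                                     fromfile="a/example.py",
                                     tofile="b/example.py",
                                     lineterm=""):
        diff.append(line)
    return diff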
def worker_process(work_item,
timeout,
config):
"""Run `cosmic-ray worker` in a subprocess and return the results,
passing `config` to it via stdin.
Returns: An updated WorkItem
"""
# The work_item param may come as just a dict (e.g. if it arrives over
# celery), so we reconstruct a WorkItem to make it easier to work with.
work_item = WorkItem(work_item)
command = 'cosmic-ray worker {module} {operator} {occurrence}'.format(
**work_item)
log.info('executing: %s', command)
proc = subprocess.Popen(command.split(),
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
universal_newlines=True)
config_string = serialize_config(config)
try:
outs, _ = proc.communicate(input=config_string, timeout=timeout)
result = json.loads(outs)
work_item.update({
k: v
for k, v
in result.items()
if v is not None
})
except subprocess.TimeoutExpired as exc:
work_item.worker_outcome = WorkerOutcome.TIMEOUT
work_item.data = exc.timeout
proc.kill()
except json.JSONDecodeError as exc:
work_item.worker_outcome = WorkerOutcome.EXCEPTION
work_item.data = exc
work_item.command_line = command
return work_item
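# Self-contained sketch of the result-merging step in `worker_process`: the
# subprocess prints one JSON record on stdout, and only its non-None fields are
# copied into the WorkItem. The payload below is illustrative, not captured
# from a real cosmic-ray run.
def _example_merge_worker_output():
    outs = '{"worker_outcome": "normal", "test_outcome": null, "diff": []}'
    result = json.loads(outs)
    # mirrors the dict comprehension used above: drop fields that are None
    merged = {k: v for k, v in result.items() if v is not None}
    return merged  # {'worker_outcome': 'normal', 'diff': []}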
|
normal
|
{
"blob_id": "73a778c6e4216c23ac8d82eef96ce7b73b18f661",
"index": 9100,
"step-1": "<mask token>\n\n\nclass WorkerOutcome:\n \"\"\"Possible outcomes for a worker.\n \"\"\"\n NORMAL = 'normal'\n EXCEPTION = 'exception'\n NO_TEST = 'no-test'\n TIMEOUT = 'timeout'\n SKIPPED = 'skipped'\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass WorkerOutcome:\n \"\"\"Possible outcomes for a worker.\n \"\"\"\n NORMAL = 'normal'\n EXCEPTION = 'exception'\n NO_TEST = 'no-test'\n TIMEOUT = 'timeout'\n SKIPPED = 'skipped'\n\n\ndef worker(module_name, operator_class, occurrence, test_runner):\n \"\"\"Mutate the OCCURRENCE-th site for OPERATOR_CLASS in MODULE_NAME, run the\n tests, and report the results.\n\n This is fundamentally the single-mutation-and-test-run process\n implementation.\n\n There are three high-level ways that a worker can finish. First, it could\n fail exceptionally, meaning that some uncaught exception made its way from\n some part of the operation to terminate the function. This function will\n intercept all exceptions and return it in a non-exceptional structure.\n\n Second, the mutation testing machinery may determine that there is no\n OCCURENCE-th instance for OPERATOR_NAME in the module under test. In this\n case there is no way to report a test result (i.e. killed, survived, or\n incompetent) so a special value is returned indicating that no mutation is\n possible.\n\n Finally, and hopefully normally, the worker will find that it can run a\n test. It will do so and report back the result - killed, survived, or\n incompetent - in a structured way.\n\n Returns: a WorkItem\n\n Raises: This will generally not raise any exceptions. Rather, exceptions\n will be reported using the 'exception' result-type in the return value.\n\n \"\"\"\n try:\n with preserve_modules():\n module = importlib.import_module(module_name)\n module_source_file = inspect.getsourcefile(module)\n module_ast = get_ast(module)\n module_source = astunparse.unparse(module_ast)\n core = MutatingCore(occurrence)\n operator = operator_class(core)\n modified_ast = operator.visit(module_ast)\n modified_source = astunparse.unparse(modified_ast)\n if not core.activation_record:\n return WorkItem(worker_outcome=WorkerOutcome.NO_TEST)\n module_diff = ['--- mutation diff ---']\n for line in difflib.unified_diff(module_source.split('\\n'),\n modified_source.split('\\n'), fromfile='a' +\n module_source_file, tofile='b' + module_source_file,\n lineterm=''):\n module_diff.append(line)\n with using_ast(module_name, module_ast):\n rec = test_runner()\n rec.update({'diff': module_diff, 'worker_outcome': WorkerOutcome.\n NORMAL})\n rec.update(core.activation_record)\n return rec\n except Exception:\n return WorkItem(data=traceback.format_exception(*sys.exc_info()),\n test_outcome=TestOutcome.INCOMPETENT, worker_outcome=\n WorkerOutcome.EXCEPTION)\n\n\ndef worker_process(work_item, timeout, config):\n \"\"\"Run `cosmic-ray worker` in a subprocess and return the results,\n passing `config` to it via stdin.\n\n Returns: An updated WorkItem\n\n \"\"\"\n work_item = WorkItem(work_item)\n command = 'cosmic-ray worker {module} {operator} {occurrence}'.format(**\n work_item)\n log.info('executing: %s', command)\n proc = subprocess.Popen(command.split(), stdin=subprocess.PIPE, stdout=\n subprocess.PIPE, universal_newlines=True)\n config_string = serialize_config(config)\n try:\n outs, _ = proc.communicate(input=config_string, timeout=timeout)\n result = json.loads(outs)\n work_item.update({k: v for k, v in result.items() if v is not None})\n except subprocess.TimeoutExpired as exc:\n work_item.worker_outcome = WorkerOutcome.TIMEOUT\n work_item.data = exc.timeout\n proc.kill()\n except json.JSONDecodeError as exc:\n work_item.worker_outcome = WorkerOutcome.EXCEPTION\n work_item.data = exc\n work_item.command_line = command\n return 
work_item\n",
"step-3": "<mask token>\ntry:\n import typing\nexcept ImportError:\n pass\n<mask token>\n\n\nclass WorkerOutcome:\n \"\"\"Possible outcomes for a worker.\n \"\"\"\n NORMAL = 'normal'\n EXCEPTION = 'exception'\n NO_TEST = 'no-test'\n TIMEOUT = 'timeout'\n SKIPPED = 'skipped'\n\n\ndef worker(module_name, operator_class, occurrence, test_runner):\n \"\"\"Mutate the OCCURRENCE-th site for OPERATOR_CLASS in MODULE_NAME, run the\n tests, and report the results.\n\n This is fundamentally the single-mutation-and-test-run process\n implementation.\n\n There are three high-level ways that a worker can finish. First, it could\n fail exceptionally, meaning that some uncaught exception made its way from\n some part of the operation to terminate the function. This function will\n intercept all exceptions and return it in a non-exceptional structure.\n\n Second, the mutation testing machinery may determine that there is no\n OCCURENCE-th instance for OPERATOR_NAME in the module under test. In this\n case there is no way to report a test result (i.e. killed, survived, or\n incompetent) so a special value is returned indicating that no mutation is\n possible.\n\n Finally, and hopefully normally, the worker will find that it can run a\n test. It will do so and report back the result - killed, survived, or\n incompetent - in a structured way.\n\n Returns: a WorkItem\n\n Raises: This will generally not raise any exceptions. Rather, exceptions\n will be reported using the 'exception' result-type in the return value.\n\n \"\"\"\n try:\n with preserve_modules():\n module = importlib.import_module(module_name)\n module_source_file = inspect.getsourcefile(module)\n module_ast = get_ast(module)\n module_source = astunparse.unparse(module_ast)\n core = MutatingCore(occurrence)\n operator = operator_class(core)\n modified_ast = operator.visit(module_ast)\n modified_source = astunparse.unparse(modified_ast)\n if not core.activation_record:\n return WorkItem(worker_outcome=WorkerOutcome.NO_TEST)\n module_diff = ['--- mutation diff ---']\n for line in difflib.unified_diff(module_source.split('\\n'),\n modified_source.split('\\n'), fromfile='a' +\n module_source_file, tofile='b' + module_source_file,\n lineterm=''):\n module_diff.append(line)\n with using_ast(module_name, module_ast):\n rec = test_runner()\n rec.update({'diff': module_diff, 'worker_outcome': WorkerOutcome.\n NORMAL})\n rec.update(core.activation_record)\n return rec\n except Exception:\n return WorkItem(data=traceback.format_exception(*sys.exc_info()),\n test_outcome=TestOutcome.INCOMPETENT, worker_outcome=\n WorkerOutcome.EXCEPTION)\n\n\ndef worker_process(work_item, timeout, config):\n \"\"\"Run `cosmic-ray worker` in a subprocess and return the results,\n passing `config` to it via stdin.\n\n Returns: An updated WorkItem\n\n \"\"\"\n work_item = WorkItem(work_item)\n command = 'cosmic-ray worker {module} {operator} {occurrence}'.format(**\n work_item)\n log.info('executing: %s', command)\n proc = subprocess.Popen(command.split(), stdin=subprocess.PIPE, stdout=\n subprocess.PIPE, universal_newlines=True)\n config_string = serialize_config(config)\n try:\n outs, _ = proc.communicate(input=config_string, timeout=timeout)\n result = json.loads(outs)\n work_item.update({k: v for k, v in result.items() if v is not None})\n except subprocess.TimeoutExpired as exc:\n work_item.worker_outcome = WorkerOutcome.TIMEOUT\n work_item.data = exc.timeout\n proc.kill()\n except json.JSONDecodeError as exc:\n work_item.worker_outcome = WorkerOutcome.EXCEPTION\n 
work_item.data = exc\n work_item.command_line = command\n return work_item\n",
"step-4": "<mask token>\nimport difflib\nimport importlib\nimport inspect\nimport json\nimport logging\nimport subprocess\nimport sys\nimport traceback\nimport astunparse\ntry:\n import typing\nexcept ImportError:\n pass\nfrom .config import serialize_config\nfrom .importing import preserve_modules, using_ast\nfrom .mutating import MutatingCore\nfrom .parsing import get_ast\nfrom .testing.test_runner import TestOutcome\nfrom .work_item import WorkItem\nlog = logging.getLogger()\n\n\nclass WorkerOutcome:\n \"\"\"Possible outcomes for a worker.\n \"\"\"\n NORMAL = 'normal'\n EXCEPTION = 'exception'\n NO_TEST = 'no-test'\n TIMEOUT = 'timeout'\n SKIPPED = 'skipped'\n\n\ndef worker(module_name, operator_class, occurrence, test_runner):\n \"\"\"Mutate the OCCURRENCE-th site for OPERATOR_CLASS in MODULE_NAME, run the\n tests, and report the results.\n\n This is fundamentally the single-mutation-and-test-run process\n implementation.\n\n There are three high-level ways that a worker can finish. First, it could\n fail exceptionally, meaning that some uncaught exception made its way from\n some part of the operation to terminate the function. This function will\n intercept all exceptions and return it in a non-exceptional structure.\n\n Second, the mutation testing machinery may determine that there is no\n OCCURENCE-th instance for OPERATOR_NAME in the module under test. In this\n case there is no way to report a test result (i.e. killed, survived, or\n incompetent) so a special value is returned indicating that no mutation is\n possible.\n\n Finally, and hopefully normally, the worker will find that it can run a\n test. It will do so and report back the result - killed, survived, or\n incompetent - in a structured way.\n\n Returns: a WorkItem\n\n Raises: This will generally not raise any exceptions. 
Rather, exceptions\n will be reported using the 'exception' result-type in the return value.\n\n \"\"\"\n try:\n with preserve_modules():\n module = importlib.import_module(module_name)\n module_source_file = inspect.getsourcefile(module)\n module_ast = get_ast(module)\n module_source = astunparse.unparse(module_ast)\n core = MutatingCore(occurrence)\n operator = operator_class(core)\n modified_ast = operator.visit(module_ast)\n modified_source = astunparse.unparse(modified_ast)\n if not core.activation_record:\n return WorkItem(worker_outcome=WorkerOutcome.NO_TEST)\n module_diff = ['--- mutation diff ---']\n for line in difflib.unified_diff(module_source.split('\\n'),\n modified_source.split('\\n'), fromfile='a' +\n module_source_file, tofile='b' + module_source_file,\n lineterm=''):\n module_diff.append(line)\n with using_ast(module_name, module_ast):\n rec = test_runner()\n rec.update({'diff': module_diff, 'worker_outcome': WorkerOutcome.\n NORMAL})\n rec.update(core.activation_record)\n return rec\n except Exception:\n return WorkItem(data=traceback.format_exception(*sys.exc_info()),\n test_outcome=TestOutcome.INCOMPETENT, worker_outcome=\n WorkerOutcome.EXCEPTION)\n\n\ndef worker_process(work_item, timeout, config):\n \"\"\"Run `cosmic-ray worker` in a subprocess and return the results,\n passing `config` to it via stdin.\n\n Returns: An updated WorkItem\n\n \"\"\"\n work_item = WorkItem(work_item)\n command = 'cosmic-ray worker {module} {operator} {occurrence}'.format(**\n work_item)\n log.info('executing: %s', command)\n proc = subprocess.Popen(command.split(), stdin=subprocess.PIPE, stdout=\n subprocess.PIPE, universal_newlines=True)\n config_string = serialize_config(config)\n try:\n outs, _ = proc.communicate(input=config_string, timeout=timeout)\n result = json.loads(outs)\n work_item.update({k: v for k, v in result.items() if v is not None})\n except subprocess.TimeoutExpired as exc:\n work_item.worker_outcome = WorkerOutcome.TIMEOUT\n work_item.data = exc.timeout\n proc.kill()\n except json.JSONDecodeError as exc:\n work_item.worker_outcome = WorkerOutcome.EXCEPTION\n work_item.data = exc\n work_item.command_line = command\n return work_item\n",
"step-5": "\"\"\"This is the body of the low-level worker tool.\n\nA worker is intended to run as a process that imports a module, mutates it in\none location with one operator, runs the tests, reports the results, and dies.\n\"\"\"\n\nimport difflib\nimport importlib\nimport inspect\nimport json\nimport logging\nimport subprocess\nimport sys\nimport traceback\n\nimport astunparse\ntry:\n import typing # the typing module does some fancy stuff at import time\n # which we shall not do twice... by loading it here,\n # preserve_modules does not delete it and therefore\n # fancy stuff happens only once\nexcept ImportError:\n pass\n\nfrom .config import serialize_config\nfrom .importing import preserve_modules, using_ast\nfrom .mutating import MutatingCore\nfrom .parsing import get_ast\nfrom .testing.test_runner import TestOutcome\nfrom .work_item import WorkItem\n\nlog = logging.getLogger()\n\n\nclass WorkerOutcome:\n \"\"\"Possible outcomes for a worker.\n \"\"\"\n NORMAL = 'normal'\n EXCEPTION = 'exception'\n NO_TEST = 'no-test'\n TIMEOUT = 'timeout'\n SKIPPED = 'skipped'\n\n\ndef worker(module_name,\n operator_class,\n occurrence,\n test_runner):\n \"\"\"Mutate the OCCURRENCE-th site for OPERATOR_CLASS in MODULE_NAME, run the\n tests, and report the results.\n\n This is fundamentally the single-mutation-and-test-run process\n implementation.\n\n There are three high-level ways that a worker can finish. First, it could\n fail exceptionally, meaning that some uncaught exception made its way from\n some part of the operation to terminate the function. This function will\n intercept all exceptions and return it in a non-exceptional structure.\n\n Second, the mutation testing machinery may determine that there is no\n OCCURENCE-th instance for OPERATOR_NAME in the module under test. In this\n case there is no way to report a test result (i.e. killed, survived, or\n incompetent) so a special value is returned indicating that no mutation is\n possible.\n\n Finally, and hopefully normally, the worker will find that it can run a\n test. It will do so and report back the result - killed, survived, or\n incompetent - in a structured way.\n\n Returns: a WorkItem\n\n Raises: This will generally not raise any exceptions. 
Rather, exceptions\n will be reported using the 'exception' result-type in the return value.\n\n \"\"\"\n try:\n with preserve_modules():\n module = importlib.import_module(module_name)\n module_source_file = inspect.getsourcefile(module)\n module_ast = get_ast(module)\n module_source = astunparse.unparse(module_ast)\n\n core = MutatingCore(occurrence)\n operator = operator_class(core)\n # note: after this step module_ast and modified_ast\n # appear to be the same\n modified_ast = operator.visit(module_ast)\n modified_source = astunparse.unparse(modified_ast)\n\n if not core.activation_record:\n return WorkItem(\n worker_outcome=WorkerOutcome.NO_TEST)\n\n # generate a source diff to visualize how the mutation\n # operator has changed the code\n module_diff = [\"--- mutation diff ---\"]\n for line in difflib.unified_diff(module_source.split('\\n'),\n modified_source.split('\\n'),\n fromfile=\"a\" + module_source_file,\n tofile=\"b\" + module_source_file,\n lineterm=\"\"):\n module_diff.append(line)\n\n with using_ast(module_name, module_ast):\n rec = test_runner()\n\n rec.update({\n 'diff': module_diff,\n 'worker_outcome': WorkerOutcome.NORMAL\n })\n rec.update(core.activation_record)\n return rec\n\n except Exception: # noqa # pylint: disable=broad-except\n return WorkItem(\n data=traceback.format_exception(*sys.exc_info()),\n test_outcome=TestOutcome.INCOMPETENT,\n worker_outcome=WorkerOutcome.EXCEPTION)\n\n\ndef worker_process(work_item,\n timeout,\n config):\n \"\"\"Run `cosmic-ray worker` in a subprocess and return the results,\n passing `config` to it via stdin.\n\n Returns: An updated WorkItem\n\n \"\"\"\n # The work_item param may come as just a dict (e.g. if it arrives over\n # celery), so we reconstruct a WorkItem to make it easier to work with.\n work_item = WorkItem(work_item)\n\n command = 'cosmic-ray worker {module} {operator} {occurrence}'.format(\n **work_item)\n\n log.info('executing: %s', command)\n\n proc = subprocess.Popen(command.split(),\n stdin=subprocess.PIPE,\n stdout=subprocess.PIPE,\n universal_newlines=True)\n config_string = serialize_config(config)\n try:\n outs, _ = proc.communicate(input=config_string, timeout=timeout)\n result = json.loads(outs)\n work_item.update({\n k: v\n for k, v\n in result.items()\n if v is not None\n })\n except subprocess.TimeoutExpired as exc:\n work_item.worker_outcome = WorkerOutcome.TIMEOUT\n work_item.data = exc.timeout\n proc.kill()\n except json.JSONDecodeError as exc:\n work_item.worker_outcome = WorkerOutcome.EXCEPTION\n work_item.data = exc\n\n work_item.command_line = command\n return work_item\n",
"step-ids": [
3,
5,
6,
8,
9
]
}
|
[
3,
5,
6,
8,
9
] |
"""
Module for generic standard analysis plots.
"""
import numpy as np
import matplotlib.pyplot as plt
import cartopy as cart
import xarray as xr
import ecco_v4_py as ecco
def global_and_stereo_map(lat, lon, fld,
plot_type='pcolormesh',
cmap='YlOrRd',
title=None,
cmin=None,
cmax=None,
dpi=100,
show_colorbar=True):
"""Generate the Robinson and Arctic/Antarctic plot.
Parameters
----------
lat : xarray.DataArray
lon : xarray.DataArray
fld : xarray.DataArray
plot_type : string, optional
plot type to use, 'pcolormesh', or 'contourf'
cmap : string or colormap object (TBD)
cmin : double, optional
minimum value for colorbar
cmax : double, optional
maximum value for colorbar
    dpi : int, optional
plot resolution in dots (pixels) per inch
    title : string, optional
        plot title to place above all panels
    show_colorbar : bool, optional
        whether to draw a single shared colorbar at the right of the figure
    Output
    ------
    fig : matplotlib.figure.Figure
    (ax1, ax2, ax3) : tuple of matplotlib axes
        the global Robinson panel, the Arctic panel, and the ACC/Antarctic panel
    """
# to do
# -figsize option?
# -cmin/cmax defaults handling with plot_proj ...
# -colorbar defaults with diverging/sequential
# -number of colors in plot
# -suppress dask warnings
# -get the subplot size "just right" no matter the figsize
# -arrows for when colorbar is exceeded
# handle colorbar limits
cmin, cmax, extend_cbar = set_colorbar_limits(fld,cmin,cmax)
# default figsize which seems to work for a laptop screen
plt.figure(figsize=(12,6),dpi=dpi)
# the big top global plot
fig, ax1, p1, cb1 = ecco.plot_proj_to_latlon_grid(
lat,lon,fld,
cmap=cmap,
plot_type=plot_type,
subplot_grid=[2,1,1],
projection_type='robin',
show_colorbar=False,
cmin=cmin,
cmax=cmax,
user_lon_0=0
)
# Arctic: bottom left
fig, ax2, p2, cb2 = ecco.plot_proj_to_latlon_grid(
lat,lon,fld,
cmap=cmap,
plot_type=plot_type,
subplot_grid=[2,2,3],
projection_type='stereo',
show_colorbar=False,
cmin=cmin,
cmax=cmax,
lat_lim=50,
user_lon_0=0
)
# ACC: bottom right
fig, ax3, p3, cb3 = ecco.plot_proj_to_latlon_grid(
lat,lon,fld,
cmap=cmap,
plot_type=plot_type,
subplot_grid=[2,2,4],
projection_type='stereo',
show_colorbar=False,
cmin=cmin,
cmax=cmax,
lat_lim=-40,
user_lon_0=180
)
# Set land color to gray
ax1.add_feature(cart.feature.LAND,facecolor='0.7',zorder=2)
ax2.add_feature(cart.feature.LAND,facecolor='0.7',zorder=2)
ax3.add_feature(cart.feature.LAND,facecolor='0.7',zorder=2)
# Make a single title
if title is not None:
fig.suptitle(title,verticalalignment='top',fontsize=24)
    # Make an overlying colorbar
if show_colorbar:
fig.subplots_adjust(right=0.9)
cbar_ax = fig.add_axes([0.87, 0.1, 0.025, 0.8])
fig.colorbar(p3, cax=cbar_ax, extend=extend_cbar)
return fig, (ax1,ax2,ax3)
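# Usage sketch for the three-panel figure above (hypothetical variable names;
# assumes an ECCO-style dataset `ds` with latitude/longitude coordinates and a
# 2D field, e.g. surface potential temperature):
#
#     fig, (ax_global, ax_arctic, ax_acc) = global_and_stereo_map(
#         ds.YC, ds.XC, ds.THETA.isel(time=0, k=0),
#         cmap='RdBu_r', title='Surface potential temperature', dpi=100)
#     fig.savefig('theta_three_panel.png')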
def plot_depth_slice(x, depth, fld,
stretch_depth=-500,
plot_type='pcolormesh',
cmap='YlOrRd',
title=None,
cmin=None,
cmax=None,
dpi=100,
show_colorbar=True):
"""2D plot of depth vs some other variable, stretching first 500m of depth.
Parameters
----------
depth : xarray DataArray or numpy array
depth variable
x : xarray DataArray or numpy array
variable for x-axis. Likely to be time, latitude, or longitude
fld : xarray DataArray or numpy array
2D field with depth + 1 dim
stretch_depth : scalar (int or float), optional
stretch top depth to this limit
"""
# Ensure negative values
#if (depth>0).any():
# depth = -depth
#if stretch_depth > 0:
# stretch_depth = -stretch_depth
# Handle shape
if len(x) == fld.shape[0]:
fld = fld.transpose()
# handle colorbar limits
cmin, cmax, extend_cbar = set_colorbar_limits(fld,cmin,cmax)
# default figsize which seems to work for a laptop screen
fig = plt.figure(figsize=(12,6),dpi=dpi)
# Could also use plt.subplots here ...
# First top 500m
ax1 = plt.subplot(2,1,1)
if plot_type == 'pcolormesh':
p1 = ax1.pcolormesh(x,depth,fld,vmin=cmin,vmax=cmax,cmap=cmap)
elif plot_type == 'contourf':
p1 = ax1.contourf(x,depth,fld,vmin=cmin,vmax=cmax,cmap=cmap)
# Handle y-axis
plt.ylim([stretch_depth, 0])
ax1.yaxis.axes.set_yticks(np.arange(stretch_depth,1,100))
plt.ylabel('Depth [%s]' % depth.attrs['units'])
# Remove top plot xtick label
ax1.xaxis.axes.set_xticklabels([])
# Now the rest ...
ax2 = plt.subplot(2,1,2)
if plot_type == 'pcolormesh':
p2 = ax2.pcolormesh(x,depth,fld,vmin=cmin,vmax=cmax,cmap=cmap)
elif plot_type == 'contourf':
p2 = ax2.contourf(x,depth,fld,vmin=cmin,vmax=cmax,cmap=cmap)
# Handle y-axis
plt.ylim([depth.min(), stretch_depth])
yticks = np.flip(np.arange(2*stretch_depth,depth.min(),-1000))
ax2.yaxis.axes.set_yticks(yticks)
plt.ylabel('Depth [%s]' % depth.attrs['units'])
# Reduce space between subplots
fig.subplots_adjust(hspace=0.05)
# Make a single title
if title is not None:
fig.suptitle(title,verticalalignment='top',fontsize=24)
    # Make an overlying colorbar
if show_colorbar:
fig.subplots_adjust(right=0.83)
cbar_ax = fig.add_axes([0.87, 0.1, 0.025, 0.8])
fig.colorbar(p2, cax=cbar_ax, extend=extend_cbar)
plt.show()
return fig,ax1,ax2
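# Usage sketch (hypothetical names; assumes `theta_section` is a 2D
# depth-by-latitude DataArray and `ds.Z` carries a 'units' attribute, as the
# y-axis labels above require):
#
#     fig, ax_upper, ax_lower = plot_depth_slice(
#         theta_section.latitude, ds.Z, theta_section,
#         stretch_depth=-500, title='Zonal-mean temperature')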
def set_colorbar_limits(fld,cmin,cmax):
"""If unset, compute colorbar limits based on field max/min values, sequential/divergent
Determine if colorbar needs to be extended
Parameters
----------
fld : xarray.DataArray
2D field to be plotted
Output
------
cmin : double
colorbar min value
cmax : double
colorbar max value
extend_cbar : string
flag to colorbar extension
"""
# handle input
if (cmin is None) and (cmax is not None):
raise RuntimeError('Only cmax given, must provide both cmin and cmax')
elif (cmin is not None) and (cmax is None):
raise RuntimeError('Only cmin given, must provide both cmin and cmax')
else:
# handle colorbar limits accidentally passed as with xarray functions
if type(cmin) is xr.DataArray:
            cmin = cmin.values  # .values is a property on DataArray, not a method
elif cmin is not None:
raise TypeError('Unsure of cmin type: ',type(cmin))
if type(cmax) is xr.DataArray:
            cmax = cmax.values  # .values is a property on DataArray, not a method
elif cmax is not None:
raise TypeError('Unsure of cmax type: ',type(cmax))
# compute fld limits
fld_min = fld.min(skipna=True).values
fld_max = fld.max(skipna=True).values
# if cmin/cmax not set, compute
if (cmin is None) and (cmax is None):
cmin = fld_min
cmax = fld_max
# determine if divergent colorbar
# Note: Not making divergent colorbar for temperature
# in degC because still sequential even though +/-
        if (fld_max*fld_min < 0) and (fld.name != 'THETA'):
cmax = np.nanmax(np.abs(fld.values))
cmin = -cmax
# determine if colorbar needs to be extended
if (cmin > fld_min) and (cmax < fld_max):
extend_cbar = "both"
elif cmin > fld_min:
extend_cbar = "min"
elif cmax < fld_max:
extend_cbar = "max"
else:
extend_cbar = "neither"
return cmin, cmax, extend_cbar
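# Self-contained sketch of the limit logic: a signed field not named 'THETA'
# gets symmetric limits, and limits tighter than the data range would flag the
# colorbar for extension. The values are made up for illustration only.
def _example_colorbar_limits():
    fld = xr.DataArray(np.array([[-2.0, 1.0], [3.0, -4.0]]), name='UVEL')
    cmin, cmax, extend_cbar = set_colorbar_limits(fld, None, None)
    # expected: cmin == -4.0, cmax == 4.0, extend_cbar == 'neither'
    return cmin, cmax, extend_cbar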
|
normal
|
{
"blob_id": "b039ed74e62f3a74e8506d4e14a3422499046c06",
"index": 860,
"step-1": "<mask token>\n\n\ndef plot_depth_slice(x, depth, fld, stretch_depth=-500, plot_type=\n 'pcolormesh', cmap='YlOrRd', title=None, cmin=None, cmax=None, dpi=100,\n show_colorbar=True):\n \"\"\"2D plot of depth vs some other variable, stretching first 500m of depth.\n\n Parameters\n ----------\n depth : xarray DataArray or numpy array\n depth variable\n x : xarray DataArray or numpy array\n variable for x-axis. Likely to be time, latitude, or longitude\n fld : xarray DataArray or numpy array\n 2D field with depth + 1 dim\n stretch_depth : scalar (int or float), optional\n stretch top depth to this limit\n \"\"\"\n if len(x) == fld.shape[0]:\n fld = fld.transpose()\n cmin, cmax, extend_cbar = set_colorbar_limits(fld, cmin, cmax)\n fig = plt.figure(figsize=(12, 6), dpi=dpi)\n ax1 = plt.subplot(2, 1, 1)\n if plot_type == 'pcolormesh':\n p1 = ax1.pcolormesh(x, depth, fld, vmin=cmin, vmax=cmax, cmap=cmap)\n elif plot_type == 'contourf':\n p1 = ax1.contourf(x, depth, fld, vmin=cmin, vmax=cmax, cmap=cmap)\n plt.ylim([stretch_depth, 0])\n ax1.yaxis.axes.set_yticks(np.arange(stretch_depth, 1, 100))\n plt.ylabel('Depth [%s]' % depth.attrs['units'])\n ax1.xaxis.axes.set_xticklabels([])\n ax2 = plt.subplot(2, 1, 2)\n if plot_type == 'pcolormesh':\n p2 = ax2.pcolormesh(x, depth, fld, vmin=cmin, vmax=cmax, cmap=cmap)\n elif plot_type == 'contourf':\n p2 = ax2.contourf(x, depth, fld, vmin=cmin, vmax=cmax, cmap=cmap)\n plt.ylim([depth.min(), stretch_depth])\n yticks = np.flip(np.arange(2 * stretch_depth, depth.min(), -1000))\n ax2.yaxis.axes.set_yticks(yticks)\n plt.ylabel('Depth [%s]' % depth.attrs['units'])\n fig.subplots_adjust(hspace=0.05)\n if title is not None:\n fig.suptitle(title, verticalalignment='top', fontsize=24)\n if show_colorbar:\n fig.subplots_adjust(right=0.83)\n cbar_ax = fig.add_axes([0.87, 0.1, 0.025, 0.8])\n fig.colorbar(p2, cax=cbar_ax, extend=extend_cbar)\n plt.show()\n return fig, ax1, ax2\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef plot_depth_slice(x, depth, fld, stretch_depth=-500, plot_type=\n 'pcolormesh', cmap='YlOrRd', title=None, cmin=None, cmax=None, dpi=100,\n show_colorbar=True):\n \"\"\"2D plot of depth vs some other variable, stretching first 500m of depth.\n\n Parameters\n ----------\n depth : xarray DataArray or numpy array\n depth variable\n x : xarray DataArray or numpy array\n variable for x-axis. Likely to be time, latitude, or longitude\n fld : xarray DataArray or numpy array\n 2D field with depth + 1 dim\n stretch_depth : scalar (int or float), optional\n stretch top depth to this limit\n \"\"\"\n if len(x) == fld.shape[0]:\n fld = fld.transpose()\n cmin, cmax, extend_cbar = set_colorbar_limits(fld, cmin, cmax)\n fig = plt.figure(figsize=(12, 6), dpi=dpi)\n ax1 = plt.subplot(2, 1, 1)\n if plot_type == 'pcolormesh':\n p1 = ax1.pcolormesh(x, depth, fld, vmin=cmin, vmax=cmax, cmap=cmap)\n elif plot_type == 'contourf':\n p1 = ax1.contourf(x, depth, fld, vmin=cmin, vmax=cmax, cmap=cmap)\n plt.ylim([stretch_depth, 0])\n ax1.yaxis.axes.set_yticks(np.arange(stretch_depth, 1, 100))\n plt.ylabel('Depth [%s]' % depth.attrs['units'])\n ax1.xaxis.axes.set_xticklabels([])\n ax2 = plt.subplot(2, 1, 2)\n if plot_type == 'pcolormesh':\n p2 = ax2.pcolormesh(x, depth, fld, vmin=cmin, vmax=cmax, cmap=cmap)\n elif plot_type == 'contourf':\n p2 = ax2.contourf(x, depth, fld, vmin=cmin, vmax=cmax, cmap=cmap)\n plt.ylim([depth.min(), stretch_depth])\n yticks = np.flip(np.arange(2 * stretch_depth, depth.min(), -1000))\n ax2.yaxis.axes.set_yticks(yticks)\n plt.ylabel('Depth [%s]' % depth.attrs['units'])\n fig.subplots_adjust(hspace=0.05)\n if title is not None:\n fig.suptitle(title, verticalalignment='top', fontsize=24)\n if show_colorbar:\n fig.subplots_adjust(right=0.83)\n cbar_ax = fig.add_axes([0.87, 0.1, 0.025, 0.8])\n fig.colorbar(p2, cax=cbar_ax, extend=extend_cbar)\n plt.show()\n return fig, ax1, ax2\n\n\ndef set_colorbar_limits(fld, cmin, cmax):\n \"\"\"If unset, compute colorbar limits based on field max/min values, sequential/divergent\n Determine if colorbar needs to be extended\n\n Parameters\n ----------\n fld : xarray.DataArray\n 2D field to be plotted\n\n Output\n ------\n cmin : double \n colorbar min value\n cmax : double \n colorbar max value\n extend_cbar : string \n flag to colorbar extension\n\n \"\"\"\n if cmin is None and cmax is not None:\n raise RuntimeError('Only cmax given, must provide both cmin and cmax')\n elif cmin is not None and cmax is None:\n raise RuntimeError('Only cmin given, must provide both cmin and cmax')\n else:\n if type(cmin) is xr.DataArray:\n cmin = cmin.values()\n elif cmin is not None:\n raise TypeError('Unsure of cmin type: ', type(cmin))\n if type(cmax) is xr.DataArray:\n cmax = cmax.values()\n elif cmax is not None:\n raise TypeError('Unsure of cmax type: ', type(cmax))\n fld_min = fld.min(skipna=True).values\n fld_max = fld.max(skipna=True).values\n if cmin is None and cmax is None:\n cmin = fld_min\n cmax = fld_max\n if fld_max * fld_min < 0 and fld.name is not 'THETA':\n cmax = np.nanmax(np.abs(fld.values))\n cmin = -cmax\n if cmin > fld_min and cmax < fld_max:\n extend_cbar = 'both'\n elif cmin > fld_min:\n extend_cbar = 'min'\n elif cmax < fld_max:\n extend_cbar = 'max'\n else:\n extend_cbar = 'neither'\n return cmin, cmax, extend_cbar\n",
"step-3": "<mask token>\n\n\ndef global_and_stereo_map(lat, lon, fld, plot_type='pcolormesh', cmap=\n 'YlOrRd', title=None, cmin=None, cmax=None, dpi=100, show_colorbar=True):\n \"\"\"Generate the Robinson and Arctic/Antarctic plot.\n\n Parameters\n ----------\n lat : xarray.DataArray \n\n lon : xarray.DataArray\n\n fld : xarray.DataArray\n\n plot_type : string, optional\n plot type to use, 'pcolormesh', or 'contourf'\n\n cmap : string or colormap object (TBD)\n\n cmin : double, optional\n minimum value for colorbar\n\n cmax : double, optional\n maximum value for colorbar\n\n dpi : int, optiopnal\n plot resolution in dots (pixels) per inch\n\n title,show_colorbar\n \n figsize?\n\n Output\n ------\n\n \"\"\"\n cmin, cmax, extend_cbar = set_colorbar_limits(fld, cmin, cmax)\n plt.figure(figsize=(12, 6), dpi=dpi)\n fig, ax1, p1, cb1 = ecco.plot_proj_to_latlon_grid(lat, lon, fld, cmap=\n cmap, plot_type=plot_type, subplot_grid=[2, 1, 1], projection_type=\n 'robin', show_colorbar=False, cmin=cmin, cmax=cmax, user_lon_0=0)\n fig, ax2, p2, cb2 = ecco.plot_proj_to_latlon_grid(lat, lon, fld, cmap=\n cmap, plot_type=plot_type, subplot_grid=[2, 2, 3], projection_type=\n 'stereo', show_colorbar=False, cmin=cmin, cmax=cmax, lat_lim=50,\n user_lon_0=0)\n fig, ax3, p3, cb3 = ecco.plot_proj_to_latlon_grid(lat, lon, fld, cmap=\n cmap, plot_type=plot_type, subplot_grid=[2, 2, 4], projection_type=\n 'stereo', show_colorbar=False, cmin=cmin, cmax=cmax, lat_lim=-40,\n user_lon_0=180)\n ax1.add_feature(cart.feature.LAND, facecolor='0.7', zorder=2)\n ax2.add_feature(cart.feature.LAND, facecolor='0.7', zorder=2)\n ax3.add_feature(cart.feature.LAND, facecolor='0.7', zorder=2)\n if title is not None:\n fig.suptitle(title, verticalalignment='top', fontsize=24)\n if show_colorbar:\n fig.subplots_adjust(right=0.9)\n cbar_ax = fig.add_axes([0.87, 0.1, 0.025, 0.8])\n fig.colorbar(p3, cax=cbar_ax, extend=extend_cbar)\n return fig, (ax1, ax2, ax3)\n\n\ndef plot_depth_slice(x, depth, fld, stretch_depth=-500, plot_type=\n 'pcolormesh', cmap='YlOrRd', title=None, cmin=None, cmax=None, dpi=100,\n show_colorbar=True):\n \"\"\"2D plot of depth vs some other variable, stretching first 500m of depth.\n\n Parameters\n ----------\n depth : xarray DataArray or numpy array\n depth variable\n x : xarray DataArray or numpy array\n variable for x-axis. 
Likely to be time, latitude, or longitude\n fld : xarray DataArray or numpy array\n 2D field with depth + 1 dim\n stretch_depth : scalar (int or float), optional\n stretch top depth to this limit\n \"\"\"\n if len(x) == fld.shape[0]:\n fld = fld.transpose()\n cmin, cmax, extend_cbar = set_colorbar_limits(fld, cmin, cmax)\n fig = plt.figure(figsize=(12, 6), dpi=dpi)\n ax1 = plt.subplot(2, 1, 1)\n if plot_type == 'pcolormesh':\n p1 = ax1.pcolormesh(x, depth, fld, vmin=cmin, vmax=cmax, cmap=cmap)\n elif plot_type == 'contourf':\n p1 = ax1.contourf(x, depth, fld, vmin=cmin, vmax=cmax, cmap=cmap)\n plt.ylim([stretch_depth, 0])\n ax1.yaxis.axes.set_yticks(np.arange(stretch_depth, 1, 100))\n plt.ylabel('Depth [%s]' % depth.attrs['units'])\n ax1.xaxis.axes.set_xticklabels([])\n ax2 = plt.subplot(2, 1, 2)\n if plot_type == 'pcolormesh':\n p2 = ax2.pcolormesh(x, depth, fld, vmin=cmin, vmax=cmax, cmap=cmap)\n elif plot_type == 'contourf':\n p2 = ax2.contourf(x, depth, fld, vmin=cmin, vmax=cmax, cmap=cmap)\n plt.ylim([depth.min(), stretch_depth])\n yticks = np.flip(np.arange(2 * stretch_depth, depth.min(), -1000))\n ax2.yaxis.axes.set_yticks(yticks)\n plt.ylabel('Depth [%s]' % depth.attrs['units'])\n fig.subplots_adjust(hspace=0.05)\n if title is not None:\n fig.suptitle(title, verticalalignment='top', fontsize=24)\n if show_colorbar:\n fig.subplots_adjust(right=0.83)\n cbar_ax = fig.add_axes([0.87, 0.1, 0.025, 0.8])\n fig.colorbar(p2, cax=cbar_ax, extend=extend_cbar)\n plt.show()\n return fig, ax1, ax2\n\n\ndef set_colorbar_limits(fld, cmin, cmax):\n \"\"\"If unset, compute colorbar limits based on field max/min values, sequential/divergent\n Determine if colorbar needs to be extended\n\n Parameters\n ----------\n fld : xarray.DataArray\n 2D field to be plotted\n\n Output\n ------\n cmin : double \n colorbar min value\n cmax : double \n colorbar max value\n extend_cbar : string \n flag to colorbar extension\n\n \"\"\"\n if cmin is None and cmax is not None:\n raise RuntimeError('Only cmax given, must provide both cmin and cmax')\n elif cmin is not None and cmax is None:\n raise RuntimeError('Only cmin given, must provide both cmin and cmax')\n else:\n if type(cmin) is xr.DataArray:\n cmin = cmin.values()\n elif cmin is not None:\n raise TypeError('Unsure of cmin type: ', type(cmin))\n if type(cmax) is xr.DataArray:\n cmax = cmax.values()\n elif cmax is not None:\n raise TypeError('Unsure of cmax type: ', type(cmax))\n fld_min = fld.min(skipna=True).values\n fld_max = fld.max(skipna=True).values\n if cmin is None and cmax is None:\n cmin = fld_min\n cmax = fld_max\n if fld_max * fld_min < 0 and fld.name is not 'THETA':\n cmax = np.nanmax(np.abs(fld.values))\n cmin = -cmax\n if cmin > fld_min and cmax < fld_max:\n extend_cbar = 'both'\n elif cmin > fld_min:\n extend_cbar = 'min'\n elif cmax < fld_max:\n extend_cbar = 'max'\n else:\n extend_cbar = 'neither'\n return cmin, cmax, extend_cbar\n",
"step-4": "<mask token>\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport cartopy as cart\nimport xarray as xr\nimport ecco_v4_py as ecco\n\n\ndef global_and_stereo_map(lat, lon, fld, plot_type='pcolormesh', cmap=\n 'YlOrRd', title=None, cmin=None, cmax=None, dpi=100, show_colorbar=True):\n \"\"\"Generate the Robinson and Arctic/Antarctic plot.\n\n Parameters\n ----------\n lat : xarray.DataArray \n\n lon : xarray.DataArray\n\n fld : xarray.DataArray\n\n plot_type : string, optional\n plot type to use, 'pcolormesh', or 'contourf'\n\n cmap : string or colormap object (TBD)\n\n cmin : double, optional\n minimum value for colorbar\n\n cmax : double, optional\n maximum value for colorbar\n\n dpi : int, optiopnal\n plot resolution in dots (pixels) per inch\n\n title,show_colorbar\n \n figsize?\n\n Output\n ------\n\n \"\"\"\n cmin, cmax, extend_cbar = set_colorbar_limits(fld, cmin, cmax)\n plt.figure(figsize=(12, 6), dpi=dpi)\n fig, ax1, p1, cb1 = ecco.plot_proj_to_latlon_grid(lat, lon, fld, cmap=\n cmap, plot_type=plot_type, subplot_grid=[2, 1, 1], projection_type=\n 'robin', show_colorbar=False, cmin=cmin, cmax=cmax, user_lon_0=0)\n fig, ax2, p2, cb2 = ecco.plot_proj_to_latlon_grid(lat, lon, fld, cmap=\n cmap, plot_type=plot_type, subplot_grid=[2, 2, 3], projection_type=\n 'stereo', show_colorbar=False, cmin=cmin, cmax=cmax, lat_lim=50,\n user_lon_0=0)\n fig, ax3, p3, cb3 = ecco.plot_proj_to_latlon_grid(lat, lon, fld, cmap=\n cmap, plot_type=plot_type, subplot_grid=[2, 2, 4], projection_type=\n 'stereo', show_colorbar=False, cmin=cmin, cmax=cmax, lat_lim=-40,\n user_lon_0=180)\n ax1.add_feature(cart.feature.LAND, facecolor='0.7', zorder=2)\n ax2.add_feature(cart.feature.LAND, facecolor='0.7', zorder=2)\n ax3.add_feature(cart.feature.LAND, facecolor='0.7', zorder=2)\n if title is not None:\n fig.suptitle(title, verticalalignment='top', fontsize=24)\n if show_colorbar:\n fig.subplots_adjust(right=0.9)\n cbar_ax = fig.add_axes([0.87, 0.1, 0.025, 0.8])\n fig.colorbar(p3, cax=cbar_ax, extend=extend_cbar)\n return fig, (ax1, ax2, ax3)\n\n\ndef plot_depth_slice(x, depth, fld, stretch_depth=-500, plot_type=\n 'pcolormesh', cmap='YlOrRd', title=None, cmin=None, cmax=None, dpi=100,\n show_colorbar=True):\n \"\"\"2D plot of depth vs some other variable, stretching first 500m of depth.\n\n Parameters\n ----------\n depth : xarray DataArray or numpy array\n depth variable\n x : xarray DataArray or numpy array\n variable for x-axis. 
Likely to be time, latitude, or longitude\n fld : xarray DataArray or numpy array\n 2D field with depth + 1 dim\n stretch_depth : scalar (int or float), optional\n stretch top depth to this limit\n \"\"\"\n if len(x) == fld.shape[0]:\n fld = fld.transpose()\n cmin, cmax, extend_cbar = set_colorbar_limits(fld, cmin, cmax)\n fig = plt.figure(figsize=(12, 6), dpi=dpi)\n ax1 = plt.subplot(2, 1, 1)\n if plot_type == 'pcolormesh':\n p1 = ax1.pcolormesh(x, depth, fld, vmin=cmin, vmax=cmax, cmap=cmap)\n elif plot_type == 'contourf':\n p1 = ax1.contourf(x, depth, fld, vmin=cmin, vmax=cmax, cmap=cmap)\n plt.ylim([stretch_depth, 0])\n ax1.yaxis.axes.set_yticks(np.arange(stretch_depth, 1, 100))\n plt.ylabel('Depth [%s]' % depth.attrs['units'])\n ax1.xaxis.axes.set_xticklabels([])\n ax2 = plt.subplot(2, 1, 2)\n if plot_type == 'pcolormesh':\n p2 = ax2.pcolormesh(x, depth, fld, vmin=cmin, vmax=cmax, cmap=cmap)\n elif plot_type == 'contourf':\n p2 = ax2.contourf(x, depth, fld, vmin=cmin, vmax=cmax, cmap=cmap)\n plt.ylim([depth.min(), stretch_depth])\n yticks = np.flip(np.arange(2 * stretch_depth, depth.min(), -1000))\n ax2.yaxis.axes.set_yticks(yticks)\n plt.ylabel('Depth [%s]' % depth.attrs['units'])\n fig.subplots_adjust(hspace=0.05)\n if title is not None:\n fig.suptitle(title, verticalalignment='top', fontsize=24)\n if show_colorbar:\n fig.subplots_adjust(right=0.83)\n cbar_ax = fig.add_axes([0.87, 0.1, 0.025, 0.8])\n fig.colorbar(p2, cax=cbar_ax, extend=extend_cbar)\n plt.show()\n return fig, ax1, ax2\n\n\ndef set_colorbar_limits(fld, cmin, cmax):\n \"\"\"If unset, compute colorbar limits based on field max/min values, sequential/divergent\n Determine if colorbar needs to be extended\n\n Parameters\n ----------\n fld : xarray.DataArray\n 2D field to be plotted\n\n Output\n ------\n cmin : double \n colorbar min value\n cmax : double \n colorbar max value\n extend_cbar : string \n flag to colorbar extension\n\n \"\"\"\n if cmin is None and cmax is not None:\n raise RuntimeError('Only cmax given, must provide both cmin and cmax')\n elif cmin is not None and cmax is None:\n raise RuntimeError('Only cmin given, must provide both cmin and cmax')\n else:\n if type(cmin) is xr.DataArray:\n cmin = cmin.values()\n elif cmin is not None:\n raise TypeError('Unsure of cmin type: ', type(cmin))\n if type(cmax) is xr.DataArray:\n cmax = cmax.values()\n elif cmax is not None:\n raise TypeError('Unsure of cmax type: ', type(cmax))\n fld_min = fld.min(skipna=True).values\n fld_max = fld.max(skipna=True).values\n if cmin is None and cmax is None:\n cmin = fld_min\n cmax = fld_max\n if fld_max * fld_min < 0 and fld.name is not 'THETA':\n cmax = np.nanmax(np.abs(fld.values))\n cmin = -cmax\n if cmin > fld_min and cmax < fld_max:\n extend_cbar = 'both'\n elif cmin > fld_min:\n extend_cbar = 'min'\n elif cmax < fld_max:\n extend_cbar = 'max'\n else:\n extend_cbar = 'neither'\n return cmin, cmax, extend_cbar\n",
"step-5": "\"\"\"\nModule for generic standard analysis plots.\n\"\"\"\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport cartopy as cart\nimport xarray as xr\nimport ecco_v4_py as ecco\n\n\ndef global_and_stereo_map(lat, lon, fld,\n plot_type='pcolormesh',\n cmap='YlOrRd',\n title=None,\n cmin=None,\n cmax=None,\n dpi=100,\n show_colorbar=True):\n\n \"\"\"Generate the Robinson and Arctic/Antarctic plot.\n\n Parameters\n ----------\n lat : xarray.DataArray \n\n lon : xarray.DataArray\n\n fld : xarray.DataArray\n\n plot_type : string, optional\n plot type to use, 'pcolormesh', or 'contourf'\n\n cmap : string or colormap object (TBD)\n\n cmin : double, optional\n minimum value for colorbar\n\n cmax : double, optional\n maximum value for colorbar\n\n dpi : int, optiopnal\n plot resolution in dots (pixels) per inch\n\n title,show_colorbar\n \n figsize?\n\n Output\n ------\n\n \"\"\"\n\n # to do\n # -figsize option?\n # -cmin/cmax defaults handling with plot_proj ... \n # -colorbar defaults with diverging/sequential\n # -number of colors in plot\n # -suppress dask warnings\n # -get the subplot size \"just right\" no matter the figsize\n # -arrows for when colorbar is exceeded\n\n # handle colorbar limits\n cmin, cmax, extend_cbar = set_colorbar_limits(fld,cmin,cmax)\n\n # default figsize which seems to work for a laptop screen\n plt.figure(figsize=(12,6),dpi=dpi)\n\n # the big top global plot\n fig, ax1, p1, cb1 = ecco.plot_proj_to_latlon_grid(\n lat,lon,fld,\n cmap=cmap,\n plot_type=plot_type,\n subplot_grid=[2,1,1],\n projection_type='robin',\n show_colorbar=False,\n cmin=cmin,\n cmax=cmax,\n user_lon_0=0\n )\n\n # Arctic: bottom left\n fig, ax2, p2, cb2 = ecco.plot_proj_to_latlon_grid(\n lat,lon,fld,\n cmap=cmap,\n plot_type=plot_type,\n subplot_grid=[2,2,3],\n projection_type='stereo',\n show_colorbar=False,\n cmin=cmin,\n cmax=cmax,\n lat_lim=50,\n user_lon_0=0\n )\n\n\n # ACC: bottom right\n fig, ax3, p3, cb3 = ecco.plot_proj_to_latlon_grid(\n lat,lon,fld,\n cmap=cmap,\n plot_type=plot_type,\n subplot_grid=[2,2,4],\n projection_type='stereo',\n show_colorbar=False,\n cmin=cmin,\n cmax=cmax,\n lat_lim=-40,\n user_lon_0=180\n )\n\n # Set land color to gray\n ax1.add_feature(cart.feature.LAND,facecolor='0.7',zorder=2)\n ax2.add_feature(cart.feature.LAND,facecolor='0.7',zorder=2)\n ax3.add_feature(cart.feature.LAND,facecolor='0.7',zorder=2)\n\n # Make a single title\n if title is not None:\n fig.suptitle(title,verticalalignment='top',fontsize=24)\n\n # Make an overyling colorbar\n if show_colorbar:\n fig.subplots_adjust(right=0.9)\n cbar_ax = fig.add_axes([0.87, 0.1, 0.025, 0.8])\n fig.colorbar(p3, cax=cbar_ax, extend=extend_cbar)\n\n\n\n return fig, (ax1,ax2,ax3)\n\ndef plot_depth_slice(x, depth, fld, \n stretch_depth=-500,\n plot_type='pcolormesh',\n cmap='YlOrRd',\n title=None,\n cmin=None,\n cmax=None,\n dpi=100,\n show_colorbar=True):\n \"\"\"2D plot of depth vs some other variable, stretching first 500m of depth.\n\n Parameters\n ----------\n depth : xarray DataArray or numpy array\n depth variable\n x : xarray DataArray or numpy array\n variable for x-axis. 
Likely to be time, latitude, or longitude\n fld : xarray DataArray or numpy array\n 2D field with depth + 1 dim\n stretch_depth : scalar (int or float), optional\n stretch top depth to this limit\n \"\"\"\n\n # Ensure negative values \n #if (depth>0).any():\n # depth = -depth\n\n #if stretch_depth > 0:\n # stretch_depth = -stretch_depth\n\n # Handle shape\n if len(x) == fld.shape[0]:\n fld = fld.transpose()\n\n # handle colorbar limits\n cmin, cmax, extend_cbar = set_colorbar_limits(fld,cmin,cmax)\n\n # default figsize which seems to work for a laptop screen\n fig = plt.figure(figsize=(12,6),dpi=dpi)\n\n # Could also use plt.subplots here ...\n\n # First top 500m\n ax1 = plt.subplot(2,1,1)\n if plot_type == 'pcolormesh':\n p1 = ax1.pcolormesh(x,depth,fld,vmin=cmin,vmax=cmax,cmap=cmap)\n\n elif plot_type == 'contourf':\n p1 = ax1.contourf(x,depth,fld,vmin=cmin,vmax=cmax,cmap=cmap)\n\n # Handle y-axis\n plt.ylim([stretch_depth, 0])\n ax1.yaxis.axes.set_yticks(np.arange(stretch_depth,1,100))\n plt.ylabel('Depth [%s]' % depth.attrs['units'])\n\n\n # Remove top plot xtick label\n ax1.xaxis.axes.set_xticklabels([])\n\n # Now the rest ...\n ax2 = plt.subplot(2,1,2)\n if plot_type == 'pcolormesh':\n p2 = ax2.pcolormesh(x,depth,fld,vmin=cmin,vmax=cmax,cmap=cmap)\n\n elif plot_type == 'contourf':\n p2 = ax2.contourf(x,depth,fld,vmin=cmin,vmax=cmax,cmap=cmap)\n\n # Handle y-axis\n plt.ylim([depth.min(), stretch_depth])\n yticks = np.flip(np.arange(2*stretch_depth,depth.min(),-1000))\n ax2.yaxis.axes.set_yticks(yticks)\n plt.ylabel('Depth [%s]' % depth.attrs['units'])\n\n # Reduce space between subplots\n fig.subplots_adjust(hspace=0.05)\n\n # Make a single title\n if title is not None:\n fig.suptitle(title,verticalalignment='top',fontsize=24)\n\n # Make an overyling colorbar\n if show_colorbar:\n fig.subplots_adjust(right=0.83)\n cbar_ax = fig.add_axes([0.87, 0.1, 0.025, 0.8])\n fig.colorbar(p2, cax=cbar_ax, extend=extend_cbar)\n\n plt.show()\n\n return fig,ax1,ax2\n\n\ndef set_colorbar_limits(fld,cmin,cmax):\n \"\"\"If unset, compute colorbar limits based on field max/min values, sequential/divergent\n Determine if colorbar needs to be extended\n\n Parameters\n ----------\n fld : xarray.DataArray\n 2D field to be plotted\n\n Output\n ------\n cmin : double \n colorbar min value\n cmax : double \n colorbar max value\n extend_cbar : string \n flag to colorbar extension\n\n \"\"\"\n\n # handle input\n if (cmin is None) and (cmax is not None):\n raise RuntimeError('Only cmax given, must provide both cmin and cmax')\n elif (cmin is not None) and (cmax is None):\n raise RuntimeError('Only cmin given, must provide both cmin and cmax')\n else:\n # handle colorbar limits accidentally passed as with xarray functions\n if type(cmin) is xr.DataArray:\n cmin = cmin.values()\n elif cmin is not None:\n raise TypeError('Unsure of cmin type: ',type(cmin))\n if type(cmax) is xr.DataArray:\n cmax = cmax.values()\n elif cmax is not None:\n raise TypeError('Unsure of cmax type: ',type(cmax))\n\n # compute fld limits\n fld_min = fld.min(skipna=True).values\n fld_max = fld.max(skipna=True).values\n\n # if cmin/cmax not set, compute\n if (cmin is None) and (cmax is None):\n\n cmin = fld_min\n cmax = fld_max\n\n # determine if divergent colorbar \n # Note: Not making divergent colorbar for temperature\n # in degC because still sequential even though +/-\n if (fld_max*fld_min < 0) and (fld.name is not 'THETA'):\n cmax = np.nanmax(np.abs(fld.values))\n cmin = -cmax\n\n # determine if colorbar needs to be extended\n if (cmin > 
fld_min) and (cmax < fld_max):\n extend_cbar = \"both\"\n elif cmin > fld_min:\n extend_cbar = \"min\"\n elif cmax < fld_max:\n extend_cbar = \"max\"\n else:\n extend_cbar = \"neither\"\n\n return cmin, cmax, extend_cbar\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
#
# struct_test.py
# Nazareno Bruschi <[email protected]>
#
# Copyright (C) 2019-2020 University of Bologna
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
PULPNNInstallPath = cwd = os.getcwd() + "/../"
PULPNNSrcDirs = {'script': PULPNNInstallPath + "scripts/"}
PULPNNInstallPath32bit = cwd = os.getcwd() + "/../32bit/"
PULPNNInstallPath64bit = cwd = os.getcwd() + "/../64bit/"
PULPNNTestFolder32bit = PULPNNInstallPath32bit + "test/"
PULPNNTestFolder64bit = PULPNNInstallPath64bit + "test/"
PULPNNSrcDirs32bit = {'pulp_nn_inc': PULPNNInstallPath32bit + "include/",
'pulp_nn_pointwise_convolution': PULPNNInstallPath32bit + "src/StandardConvolutions/",
'pulp_nn_matmul': PULPNNInstallPath32bit + "src/MatrixMultiplications/",
'pulp_nn_depthwise_convolution': PULPNNInstallPath32bit + "src/DepthwiseConvolutions/",
'pulp_nn_linear_convolution_nq': PULPNNInstallPath32bit + "src/LinearConvolutionsNoQuant/",
'pulp_nn_linear_convolution_q': PULPNNInstallPath32bit + "src/LinearConvolutionsQuant/",
'pulp_nn_support_function': PULPNNInstallPath32bit + "src/SupportFunctions/",
'include': PULPNNTestFolder32bit + "include/",
'src': PULPNNTestFolder32bit + "src/",
'pointwise_convolution': PULPNNTestFolder32bit + "src/StandardConvolutions/",
'matmul': PULPNNTestFolder32bit + "src/MatrixMultiplications/",
'depthwise_convolution': PULPNNTestFolder32bit + "src/DepthwiseConvolutions/",
'linear_convolution_nq': PULPNNTestFolder32bit + "src/LinearConvolutionsNoQuant/",
'linear_convolution_q': PULPNNTestFolder32bit + "src/LinearConvolutionsQuant/",
'support_function': PULPNNTestFolder32bit + "src/SupportFunctions/",
'data_allocation_pw': PULPNNTestFolder32bit + "include/DataAllocationStandardConvolutions/",
'data_allocation_dw': PULPNNTestFolder32bit + "include/DataAllocationDepthwiseConvolutions/",
'data_allocation_ln_nq': PULPNNTestFolder32bit + "include/DataAllocationLinearConvolutionsNoQuant/",
'data_allocation_ln_q': PULPNNTestFolder32bit + "include/DataAllocationLinearConvolutionsQuant/",
'golden_model_pw': PULPNNTestFolder32bit + "include/GoldenModelStandardConvolutions/",
'golden_model_dw': PULPNNTestFolder32bit + "include/GoldenModelDepthwiseConvolutions/",
'golden_model_ln_nq': PULPNNTestFolder32bit + "include/GoldenModelLinearConvolutionsNoQuant/",
'golden_model_ln_q': PULPNNTestFolder32bit + "include/GoldenModelLinearConvolutionsQuant/",
'test': PULPNNTestFolder32bit}
PULPNNSrcDirs64bit = {'pulp_nn_inc': PULPNNInstallPath64bit + "include/",
'pulp_nn_pointwise_convolution': PULPNNInstallPath64bit + "src/StandardConvolutions/",
'pulp_nn_matmul': PULPNNInstallPath64bit + "src/MatrixMultiplications/",
'pulp_nn_depthwise_convolution': PULPNNInstallPath64bit + "src/DepthwiseConvolutions/",
'pulp_nn_linear_convolution_nq': PULPNNInstallPath64bit + "src/LinearConvolutionsNoQuant/",
'pulp_nn_linear_convolution_q': PULPNNInstallPath64bit + "src/LinearConvolutionsQuant/",
'pulp_nn_support_function': PULPNNInstallPath64bit + "src/SupportFunctions/",
'include': PULPNNTestFolder64bit + "include/",
'src': PULPNNTestFolder64bit + "src/",
'pointwise_convolution': PULPNNTestFolder64bit + "src/StandardConvolutions/",
'matmul': PULPNNTestFolder64bit + "src/MatrixMultiplications/",
'depthwise_convolution': PULPNNTestFolder64bit + "src/DepthwiseConvolutions/",
'linear_convolution_nq': PULPNNTestFolder64bit + "src/LinearConvolutionsNoQuant/",
'linear_convolution_q': PULPNNTestFolder64bit + "src/LinearConvolutionsQuant/",
'support_function': PULPNNTestFolder64bit + "src/SupportFunctions/",
'data_allocation_pw': PULPNNTestFolder64bit + "include/DataAllocationStandardConvolutions/",
'data_allocation_dw': PULPNNTestFolder64bit + "include/DataAllocationDepthwiseConvolutions/",
'data_allocation_ln_nq': PULPNNTestFolder64bit + "include/DataAllocationLinearConvolutionsNoQuant/",
'data_allocation_ln_q': PULPNNTestFolder64bit + "include/DataAllocationLinearConvolutionsQuant/",
'golden_model_pw': PULPNNTestFolder64bit + "include/GoldenModelStandardConvolutions/",
'golden_model_dw': PULPNNTestFolder64bit + "include/GoldenModelDepthwiseConvolutions/",
'golden_model_ln_nq': PULPNNTestFolder64bit + "include/GoldenModelLinearConvolutionsNoQuant/",
'golden_model_ln_q': PULPNNTestFolder64bit + "include/GoldenModelLinearConvolutionsQuant/",
'test': PULPNNTestFolder64bit}
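# Minimal sketch of how these path tables might be consumed, e.g. by a setup
# step that creates the 32-bit test tree before a generator writes files into
# it. This helper is illustrative and not part of the original generation flow.
def make_pulp_nn_dirs(src_dirs=PULPNNSrcDirs32bit):
    for path in src_dirs.values():
        os.makedirs(path, exist_ok=True)  # create the folder if it is missing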
|
normal
|
{
"blob_id": "d8d0c181fcfc9e0692369cc7a65259c43a68e931",
"index": 5688,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nPULPNNInstallPath = cwd = os.getcwd() + '/../'\nPULPNNSrcDirs = {'script': PULPNNInstallPath + 'scripts/'}\nPULPNNInstallPath32bit = cwd = os.getcwd() + '/../32bit/'\nPULPNNInstallPath64bit = cwd = os.getcwd() + '/../64bit/'\nPULPNNTestFolder32bit = PULPNNInstallPath32bit + 'test/'\nPULPNNTestFolder64bit = PULPNNInstallPath64bit + 'test/'\nPULPNNSrcDirs32bit = {'pulp_nn_inc': PULPNNInstallPath32bit + 'include/',\n 'pulp_nn_pointwise_convolution': PULPNNInstallPath32bit +\n 'src/StandardConvolutions/', 'pulp_nn_matmul': PULPNNInstallPath32bit +\n 'src/MatrixMultiplications/', 'pulp_nn_depthwise_convolution': \n PULPNNInstallPath32bit + 'src/DepthwiseConvolutions/',\n 'pulp_nn_linear_convolution_nq': PULPNNInstallPath32bit +\n 'src/LinearConvolutionsNoQuant/', 'pulp_nn_linear_convolution_q': \n PULPNNInstallPath32bit + 'src/LinearConvolutionsQuant/',\n 'pulp_nn_support_function': PULPNNInstallPath32bit +\n 'src/SupportFunctions/', 'include': PULPNNTestFolder32bit + 'include/',\n 'src': PULPNNTestFolder32bit + 'src/', 'pointwise_convolution': \n PULPNNTestFolder32bit + 'src/StandardConvolutions/', 'matmul': \n PULPNNTestFolder32bit + 'src/MatrixMultiplications/',\n 'depthwise_convolution': PULPNNTestFolder32bit +\n 'src/DepthwiseConvolutions/', 'linear_convolution_nq': \n PULPNNTestFolder32bit + 'src/LinearConvolutionsNoQuant/',\n 'linear_convolution_q': PULPNNTestFolder32bit +\n 'src/LinearConvolutionsQuant/', 'support_function': \n PULPNNTestFolder32bit + 'src/SupportFunctions/', 'data_allocation_pw': \n PULPNNTestFolder32bit + 'include/DataAllocationStandardConvolutions/',\n 'data_allocation_dw': PULPNNTestFolder32bit +\n 'include/DataAllocationDepthwiseConvolutions/', 'data_allocation_ln_nq':\n PULPNNTestFolder32bit +\n 'include/DataAllocationLinearConvolutionsNoQuant/',\n 'data_allocation_ln_q': PULPNNTestFolder32bit +\n 'include/DataAllocationLinearConvolutionsQuant/', 'golden_model_pw': \n PULPNNTestFolder32bit + 'include/GoldenModelStandardConvolutions/',\n 'golden_model_dw': PULPNNTestFolder32bit +\n 'include/GoldenModelDepthwiseConvolutions/', 'golden_model_ln_nq': \n PULPNNTestFolder32bit + 'include/GoldenModelLinearConvolutionsNoQuant/',\n 'golden_model_ln_q': PULPNNTestFolder32bit +\n 'include/GoldenModelLinearConvolutionsQuant/', 'test':\n PULPNNTestFolder32bit}\nPULPNNSrcDirs64bit = {'pulp_nn_inc': PULPNNInstallPath64bit + 'include/',\n 'pulp_nn_pointwise_convolution': PULPNNInstallPath64bit +\n 'src/StandardConvolutions/', 'pulp_nn_matmul': PULPNNInstallPath64bit +\n 'src/MatrixMultiplications/', 'pulp_nn_depthwise_convolution': \n PULPNNInstallPath64bit + 'src/DepthwiseConvolutions/',\n 'pulp_nn_linear_convolution_nq': PULPNNInstallPath64bit +\n 'src/LinearConvolutionsNoQuant/', 'pulp_nn_linear_convolution_q': \n PULPNNInstallPath64bit + 'src/LinearConvolutionsQuant/',\n 'pulp_nn_support_function': PULPNNInstallPath64bit +\n 'src/SupportFunctions/', 'include': PULPNNTestFolder64bit + 'include/',\n 'src': PULPNNTestFolder64bit + 'src/', 'pointwise_convolution': \n PULPNNTestFolder64bit + 'src/StandardConvolutions/', 'matmul': \n PULPNNTestFolder64bit + 'src/MatrixMultiplications/',\n 'depthwise_convolution': PULPNNTestFolder64bit +\n 'src/DepthwiseConvolutions/', 'linear_convolution_nq': \n PULPNNTestFolder64bit + 'src/LinearConvolutionsNoQuant/',\n 'linear_convolution_q': PULPNNTestFolder64bit +\n 'src/LinearConvolutionsQuant/', 'support_function': \n PULPNNTestFolder64bit + 'src/SupportFunctions/', 'data_allocation_pw': \n PULPNNTestFolder64bit + 
'include/DataAllocationStandardConvolutions/',\n 'data_allocation_dw': PULPNNTestFolder64bit +\n 'include/DataAllocationDepthwiseConvolutions/', 'data_allocation_ln_nq':\n PULPNNTestFolder64bit +\n 'include/DataAllocationLinearConvolutionsNoQuant/',\n 'data_allocation_ln_q': PULPNNTestFolder64bit +\n 'include/DataAllocationLinearConvolutionsQuant/', 'golden_model_pw': \n PULPNNTestFolder64bit + 'include/GoldenModelStandardConvolutions/',\n 'golden_model_dw': PULPNNTestFolder64bit +\n 'include/GoldenModelDepthwiseConvolutions/', 'golden_model_ln_nq': \n PULPNNTestFolder64bit + 'include/GoldenModelLinearConvolutionsNoQuant/',\n 'golden_model_ln_q': PULPNNTestFolder64bit +\n 'include/GoldenModelLinearConvolutionsQuant/', 'test':\n PULPNNTestFolder64bit}\n",
"step-3": "import os\nPULPNNInstallPath = cwd = os.getcwd() + '/../'\nPULPNNSrcDirs = {'script': PULPNNInstallPath + 'scripts/'}\nPULPNNInstallPath32bit = cwd = os.getcwd() + '/../32bit/'\nPULPNNInstallPath64bit = cwd = os.getcwd() + '/../64bit/'\nPULPNNTestFolder32bit = PULPNNInstallPath32bit + 'test/'\nPULPNNTestFolder64bit = PULPNNInstallPath64bit + 'test/'\nPULPNNSrcDirs32bit = {'pulp_nn_inc': PULPNNInstallPath32bit + 'include/',\n 'pulp_nn_pointwise_convolution': PULPNNInstallPath32bit +\n 'src/StandardConvolutions/', 'pulp_nn_matmul': PULPNNInstallPath32bit +\n 'src/MatrixMultiplications/', 'pulp_nn_depthwise_convolution': \n PULPNNInstallPath32bit + 'src/DepthwiseConvolutions/',\n 'pulp_nn_linear_convolution_nq': PULPNNInstallPath32bit +\n 'src/LinearConvolutionsNoQuant/', 'pulp_nn_linear_convolution_q': \n PULPNNInstallPath32bit + 'src/LinearConvolutionsQuant/',\n 'pulp_nn_support_function': PULPNNInstallPath32bit +\n 'src/SupportFunctions/', 'include': PULPNNTestFolder32bit + 'include/',\n 'src': PULPNNTestFolder32bit + 'src/', 'pointwise_convolution': \n PULPNNTestFolder32bit + 'src/StandardConvolutions/', 'matmul': \n PULPNNTestFolder32bit + 'src/MatrixMultiplications/',\n 'depthwise_convolution': PULPNNTestFolder32bit +\n 'src/DepthwiseConvolutions/', 'linear_convolution_nq': \n PULPNNTestFolder32bit + 'src/LinearConvolutionsNoQuant/',\n 'linear_convolution_q': PULPNNTestFolder32bit +\n 'src/LinearConvolutionsQuant/', 'support_function': \n PULPNNTestFolder32bit + 'src/SupportFunctions/', 'data_allocation_pw': \n PULPNNTestFolder32bit + 'include/DataAllocationStandardConvolutions/',\n 'data_allocation_dw': PULPNNTestFolder32bit +\n 'include/DataAllocationDepthwiseConvolutions/', 'data_allocation_ln_nq':\n PULPNNTestFolder32bit +\n 'include/DataAllocationLinearConvolutionsNoQuant/',\n 'data_allocation_ln_q': PULPNNTestFolder32bit +\n 'include/DataAllocationLinearConvolutionsQuant/', 'golden_model_pw': \n PULPNNTestFolder32bit + 'include/GoldenModelStandardConvolutions/',\n 'golden_model_dw': PULPNNTestFolder32bit +\n 'include/GoldenModelDepthwiseConvolutions/', 'golden_model_ln_nq': \n PULPNNTestFolder32bit + 'include/GoldenModelLinearConvolutionsNoQuant/',\n 'golden_model_ln_q': PULPNNTestFolder32bit +\n 'include/GoldenModelLinearConvolutionsQuant/', 'test':\n PULPNNTestFolder32bit}\nPULPNNSrcDirs64bit = {'pulp_nn_inc': PULPNNInstallPath64bit + 'include/',\n 'pulp_nn_pointwise_convolution': PULPNNInstallPath64bit +\n 'src/StandardConvolutions/', 'pulp_nn_matmul': PULPNNInstallPath64bit +\n 'src/MatrixMultiplications/', 'pulp_nn_depthwise_convolution': \n PULPNNInstallPath64bit + 'src/DepthwiseConvolutions/',\n 'pulp_nn_linear_convolution_nq': PULPNNInstallPath64bit +\n 'src/LinearConvolutionsNoQuant/', 'pulp_nn_linear_convolution_q': \n PULPNNInstallPath64bit + 'src/LinearConvolutionsQuant/',\n 'pulp_nn_support_function': PULPNNInstallPath64bit +\n 'src/SupportFunctions/', 'include': PULPNNTestFolder64bit + 'include/',\n 'src': PULPNNTestFolder64bit + 'src/', 'pointwise_convolution': \n PULPNNTestFolder64bit + 'src/StandardConvolutions/', 'matmul': \n PULPNNTestFolder64bit + 'src/MatrixMultiplications/',\n 'depthwise_convolution': PULPNNTestFolder64bit +\n 'src/DepthwiseConvolutions/', 'linear_convolution_nq': \n PULPNNTestFolder64bit + 'src/LinearConvolutionsNoQuant/',\n 'linear_convolution_q': PULPNNTestFolder64bit +\n 'src/LinearConvolutionsQuant/', 'support_function': \n PULPNNTestFolder64bit + 'src/SupportFunctions/', 'data_allocation_pw': \n PULPNNTestFolder64bit + 
'include/DataAllocationStandardConvolutions/',\n 'data_allocation_dw': PULPNNTestFolder64bit +\n 'include/DataAllocationDepthwiseConvolutions/', 'data_allocation_ln_nq':\n PULPNNTestFolder64bit +\n 'include/DataAllocationLinearConvolutionsNoQuant/',\n 'data_allocation_ln_q': PULPNNTestFolder64bit +\n 'include/DataAllocationLinearConvolutionsQuant/', 'golden_model_pw': \n PULPNNTestFolder64bit + 'include/GoldenModelStandardConvolutions/',\n 'golden_model_dw': PULPNNTestFolder64bit +\n 'include/GoldenModelDepthwiseConvolutions/', 'golden_model_ln_nq': \n PULPNNTestFolder64bit + 'include/GoldenModelLinearConvolutionsNoQuant/',\n 'golden_model_ln_q': PULPNNTestFolder64bit +\n 'include/GoldenModelLinearConvolutionsQuant/', 'test':\n PULPNNTestFolder64bit}\n",
"step-4": "#\n# struct_test.py\n# Nazareno Bruschi <[email protected]>\n#\n# Copyright (C) 2019-2020 University of Bologna\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\nimport os\n\nPULPNNInstallPath = cwd = os.getcwd() + \"/../\"\nPULPNNSrcDirs = {'script': PULPNNInstallPath + \"scripts/\"}\nPULPNNInstallPath32bit = cwd = os.getcwd() + \"/../32bit/\"\nPULPNNInstallPath64bit = cwd = os.getcwd() + \"/../64bit/\"\nPULPNNTestFolder32bit = PULPNNInstallPath32bit + \"test/\"\nPULPNNTestFolder64bit = PULPNNInstallPath64bit + \"test/\"\nPULPNNSrcDirs32bit = {'pulp_nn_inc': PULPNNInstallPath32bit + \"include/\",\n 'pulp_nn_pointwise_convolution': PULPNNInstallPath32bit + \"src/StandardConvolutions/\",\n 'pulp_nn_matmul': PULPNNInstallPath32bit + \"src/MatrixMultiplications/\",\n 'pulp_nn_depthwise_convolution': PULPNNInstallPath32bit + \"src/DepthwiseConvolutions/\",\n 'pulp_nn_linear_convolution_nq': PULPNNInstallPath32bit + \"src/LinearConvolutionsNoQuant/\",\n 'pulp_nn_linear_convolution_q': PULPNNInstallPath32bit + \"src/LinearConvolutionsQuant/\",\n 'pulp_nn_support_function': PULPNNInstallPath32bit + \"src/SupportFunctions/\",\n 'include': PULPNNTestFolder32bit + \"include/\",\n 'src': PULPNNTestFolder32bit + \"src/\",\n 'pointwise_convolution': PULPNNTestFolder32bit + \"src/StandardConvolutions/\",\n 'matmul': PULPNNTestFolder32bit + \"src/MatrixMultiplications/\",\n 'depthwise_convolution': PULPNNTestFolder32bit + \"src/DepthwiseConvolutions/\",\n 'linear_convolution_nq': PULPNNTestFolder32bit + \"src/LinearConvolutionsNoQuant/\",\n 'linear_convolution_q': PULPNNTestFolder32bit + \"src/LinearConvolutionsQuant/\",\n 'support_function': PULPNNTestFolder32bit + \"src/SupportFunctions/\",\n 'data_allocation_pw': PULPNNTestFolder32bit + \"include/DataAllocationStandardConvolutions/\",\n 'data_allocation_dw': PULPNNTestFolder32bit + \"include/DataAllocationDepthwiseConvolutions/\",\n 'data_allocation_ln_nq': PULPNNTestFolder32bit + \"include/DataAllocationLinearConvolutionsNoQuant/\",\n 'data_allocation_ln_q': PULPNNTestFolder32bit + \"include/DataAllocationLinearConvolutionsQuant/\",\n 'golden_model_pw': PULPNNTestFolder32bit + \"include/GoldenModelStandardConvolutions/\",\n 'golden_model_dw': PULPNNTestFolder32bit + \"include/GoldenModelDepthwiseConvolutions/\",\n 'golden_model_ln_nq': PULPNNTestFolder32bit + \"include/GoldenModelLinearConvolutionsNoQuant/\",\n 'golden_model_ln_q': PULPNNTestFolder32bit + \"include/GoldenModelLinearConvolutionsQuant/\",\n 'test': PULPNNTestFolder32bit}\nPULPNNSrcDirs64bit = {'pulp_nn_inc': PULPNNInstallPath64bit + \"include/\",\n 'pulp_nn_pointwise_convolution': PULPNNInstallPath64bit + \"src/StandardConvolutions/\",\n 'pulp_nn_matmul': PULPNNInstallPath64bit + \"src/MatrixMultiplications/\",\n 'pulp_nn_depthwise_convolution': PULPNNInstallPath64bit + \"src/DepthwiseConvolutions/\",\n 'pulp_nn_linear_convolution_nq': PULPNNInstallPath64bit + \"src/LinearConvolutionsNoQuant/\",\n 'pulp_nn_linear_convolution_q': PULPNNInstallPath64bit + 
\"src/LinearConvolutionsQuant/\",\n 'pulp_nn_support_function': PULPNNInstallPath64bit + \"src/SupportFunctions/\",\n 'include': PULPNNTestFolder64bit + \"include/\",\n 'src': PULPNNTestFolder64bit + \"src/\",\n 'pointwise_convolution': PULPNNTestFolder64bit + \"src/StandardConvolutions/\",\n 'matmul': PULPNNTestFolder64bit + \"src/MatrixMultiplications/\",\n 'depthwise_convolution': PULPNNTestFolder64bit + \"src/DepthwiseConvolutions/\",\n 'linear_convolution_nq': PULPNNTestFolder64bit + \"src/LinearConvolutionsNoQuant/\",\n 'linear_convolution_q': PULPNNTestFolder64bit + \"src/LinearConvolutionsQuant/\",\n 'support_function': PULPNNTestFolder64bit + \"src/SupportFunctions/\",\n 'data_allocation_pw': PULPNNTestFolder64bit + \"include/DataAllocationStandardConvolutions/\",\n 'data_allocation_dw': PULPNNTestFolder64bit + \"include/DataAllocationDepthwiseConvolutions/\",\n 'data_allocation_ln_nq': PULPNNTestFolder64bit + \"include/DataAllocationLinearConvolutionsNoQuant/\",\n 'data_allocation_ln_q': PULPNNTestFolder64bit + \"include/DataAllocationLinearConvolutionsQuant/\",\n 'golden_model_pw': PULPNNTestFolder64bit + \"include/GoldenModelStandardConvolutions/\",\n 'golden_model_dw': PULPNNTestFolder64bit + \"include/GoldenModelDepthwiseConvolutions/\",\n 'golden_model_ln_nq': PULPNNTestFolder64bit + \"include/GoldenModelLinearConvolutionsNoQuant/\",\n 'golden_model_ln_q': PULPNNTestFolder64bit + \"include/GoldenModelLinearConvolutionsQuant/\",\n 'test': PULPNNTestFolder64bit}",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
'''
Created on 27 Mar 2015
@author: Jon
'''
import matplotlib.pyplot as plt
from numerical_functions import Timer
import numerical_functions.numba_funcs.indexing as indexing
import numpy as np
import unittest
class Test(unittest.TestCase):
def test_take(self):
x = np.linspace( 0, 100 )
idx = np.random.random_integers( 0, 50, 20 )
result = indexing.take( x, idx )
expected = np.take( x, idx )
np.testing.assert_array_equal( expected, result )
def test_take_comparison(self):
x = np.arange( 1e6 )
idx = np.random.random_integers( 0, 1e5, 1e6 )
indexing.take( x, idx )
np.take( x, idx )
with Timer( 'numba' ) as nbtimer:
indexing.take( x, idx )
with Timer( 'numpy' ) as nptimer:
np.take( x, idx )
ratio = nbtimer.interval / nptimer.interval
print( 'numba version of take took %0.2f as long as numpy'%ratio)
def test_square_take(self):
X = np.random.random_integers( 0, 50, 25 ).reshape( 5, 5 )
idx = np.arange( 0, 4, 2 )
result = np.empty( ( idx.shape[0], idx.shape[0] ) )
indexing.square_take_to_out( X, idx, result )
print( result )
expected = X.take( idx, axis=0 ).take( idx, axis=1 )
print( expected )
np.testing.assert_array_equal( expected, result )
def test_square_take_to_out(self):
X = np.arange(25).reshape(5,5)
idx = np.arange( 0, 4, 2 )
result = np.empty( ( idx.shape[0], idx.shape[0] ) )
indexing.square_take_to_out( X, idx, result )
print( result )
expected = X.take( idx, axis=0 ).take( idx, axis=1 )
print( expected )
np.testing.assert_array_equal( expected, result )
def test_square_take_performance(self):
X = np.arange(25).reshape(5,5)
idx = np.arange( 0, 4, 2 )
result = np.empty( ( idx.shape[0], idx.shape[0] ) )
indexing.square_take_to_out( X, idx, result )
result2 = indexing.square_take( X, idx )
np.testing.assert_array_equal( result, result2 )
num_tests = 1000
nbts = []
nbts2 = []
npts = []
ms = ( 10, 20, 40, 80, 160 )#, 320, 640 )
for m in ms:
X = np.arange(m*m).reshape(m,m)
idx = np.random.random_integers( 0, m-1, m//2 )
result = np.empty( ( idx.shape[0], idx.shape[0] ) )
with Timer( 'numba' ) as nbt:
for _ in range( num_tests ):
indexing.square_take_to_out( X, idx, result )
nbts.append( nbt.interval )
with Timer( 'numba2' ) as nbt:
for _ in range( num_tests ):
r=indexing.square_take( X, idx )
nbts2.append( nbt.interval )
with Timer( 'numpy') as npt:
for _ in range(num_tests):
X.take( idx, axis=0 ).take( idx, axis=1 )
npts.append( npt.interval )
plt.plot( ms, nbts, label='nb to out' )
plt.plot( ms, nbts2, label='nb new result')
plt.plot( ms, npts, label='np' )
plt.title( 'square_take_to_out performance test')
plt.legend(loc='center left', bbox_to_anchor=(1, 0.5))
plt.show()
def test_square_and_rect_take_to_out(self):
X = np.arange( 100 ).reshape( (10, 10 ) )
idx0 = np.arange( 0, 4, 2 )
idx1 = np.arange( 4, 6 )
result = np.empty( ( idx0.shape[0], idx0.shape[0]+idx1.shape[0] ) )
indexing.square_and_rect_take_to_out( X, idx0, idx1, result )
np.testing.assert_array_equal( result[:,:2], indexing.square_take( X, idx0 ) )
r2 = np.array( [ [ 4, 5 ], [24, 25 ] ] )
np.testing.assert_array_equal( r2, result[:,2:])
def test_get_resample_indices(self):
raw_index = np.arange( 10 )
resampled_index = np.arange( 1, 10, 2 )
result = indexing.get_resample_indices(raw_index, resampled_index)
expected = np.arange( 0, 10, 2 )
np.testing.assert_array_equal( expected, result )
def test_take_upper_off_diagonal(self):
X = np.array( [[ 1, 2, 3],
[ np.nan, 5, 6],
[ np.nan, np.nan, 9]])
idx = np.array( [ 0, 1 ] )
expected = np.array( [ 2 ] )
actual = indexing.take_upper_off_diagonal( X, idx )
np.testing.assert_array_equal( actual, expected )
idx = np.array( [ 1, 2 ] )
expected = np.array( [ 6 ] )
actual = indexing.take_upper_off_diagonal( X, idx )
np.testing.assert_array_equal( actual, expected )
idx = np.array( [ 0, 2 ] )
expected = np.array( [ 3 ] )
actual = indexing.take_upper_off_diagonal( X, idx )
np.testing.assert_array_equal( actual, expected )
idx = np.array( [ 0, 1, 2 ] )
expected = np.array( [ 2, 3, 6 ] )
actual = indexing.take_upper_off_diagonal( X, idx )
np.testing.assert_array_equal( actual, expected )
if __name__ == "__main__":
#import sys;sys.argv = ['', 'Test.testName']
unittest.main()
|
normal
|
{
"blob_id": "ee80169afd4741854eff8619822a857bbf757575",
"index": 291,
"step-1": "<mask token>\n\n\nclass Test(unittest.TestCase):\n <mask token>\n\n def test_take_comparison(self):\n x = np.arange(1000000.0)\n idx = np.random.random_integers(0, 100000.0, 1000000.0)\n indexing.take(x, idx)\n np.take(x, idx)\n with Timer('numba') as nbtimer:\n indexing.take(x, idx)\n with Timer('numpy') as nptimer:\n np.take(x, idx)\n ratio = nbtimer.interval / nptimer.interval\n print('numba version of take took %0.2f as long as numpy' % ratio)\n\n def test_square_take(self):\n X = np.random.random_integers(0, 50, 25).reshape(5, 5)\n idx = np.arange(0, 4, 2)\n result = np.empty((idx.shape[0], idx.shape[0]))\n indexing.square_take_to_out(X, idx, result)\n print(result)\n expected = X.take(idx, axis=0).take(idx, axis=1)\n print(expected)\n np.testing.assert_array_equal(expected, result)\n\n def test_square_take_to_out(self):\n X = np.arange(25).reshape(5, 5)\n idx = np.arange(0, 4, 2)\n result = np.empty((idx.shape[0], idx.shape[0]))\n indexing.square_take_to_out(X, idx, result)\n print(result)\n expected = X.take(idx, axis=0).take(idx, axis=1)\n print(expected)\n np.testing.assert_array_equal(expected, result)\n <mask token>\n <mask token>\n\n def test_get_resample_indices(self):\n raw_index = np.arange(10)\n resampled_index = np.arange(1, 10, 2)\n result = indexing.get_resample_indices(raw_index, resampled_index)\n expected = np.arange(0, 10, 2)\n np.testing.assert_array_equal(expected, result)\n <mask token>\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass Test(unittest.TestCase):\n\n def test_take(self):\n x = np.linspace(0, 100)\n idx = np.random.random_integers(0, 50, 20)\n result = indexing.take(x, idx)\n expected = np.take(x, idx)\n np.testing.assert_array_equal(expected, result)\n\n def test_take_comparison(self):\n x = np.arange(1000000.0)\n idx = np.random.random_integers(0, 100000.0, 1000000.0)\n indexing.take(x, idx)\n np.take(x, idx)\n with Timer('numba') as nbtimer:\n indexing.take(x, idx)\n with Timer('numpy') as nptimer:\n np.take(x, idx)\n ratio = nbtimer.interval / nptimer.interval\n print('numba version of take took %0.2f as long as numpy' % ratio)\n\n def test_square_take(self):\n X = np.random.random_integers(0, 50, 25).reshape(5, 5)\n idx = np.arange(0, 4, 2)\n result = np.empty((idx.shape[0], idx.shape[0]))\n indexing.square_take_to_out(X, idx, result)\n print(result)\n expected = X.take(idx, axis=0).take(idx, axis=1)\n print(expected)\n np.testing.assert_array_equal(expected, result)\n\n def test_square_take_to_out(self):\n X = np.arange(25).reshape(5, 5)\n idx = np.arange(0, 4, 2)\n result = np.empty((idx.shape[0], idx.shape[0]))\n indexing.square_take_to_out(X, idx, result)\n print(result)\n expected = X.take(idx, axis=0).take(idx, axis=1)\n print(expected)\n np.testing.assert_array_equal(expected, result)\n <mask token>\n <mask token>\n\n def test_get_resample_indices(self):\n raw_index = np.arange(10)\n resampled_index = np.arange(1, 10, 2)\n result = indexing.get_resample_indices(raw_index, resampled_index)\n expected = np.arange(0, 10, 2)\n np.testing.assert_array_equal(expected, result)\n\n def test_take_upper_off_diagonal(self):\n X = np.array([[1, 2, 3], [np.nan, 5, 6], [np.nan, np.nan, 9]])\n idx = np.array([0, 1])\n expected = np.array([2])\n actual = indexing.take_upper_off_diagonal(X, idx)\n np.testing.assert_array_equal(actual, expected)\n idx = np.array([1, 2])\n expected = np.array([6])\n actual = indexing.take_upper_off_diagonal(X, idx)\n np.testing.assert_array_equal(actual, expected)\n idx = np.array([0, 2])\n expected = np.array([3])\n actual = indexing.take_upper_off_diagonal(X, idx)\n np.testing.assert_array_equal(actual, expected)\n idx = np.array([0, 1, 2])\n expected = np.array([2, 3, 6])\n actual = indexing.take_upper_off_diagonal(X, idx)\n np.testing.assert_array_equal(actual, expected)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass Test(unittest.TestCase):\n\n def test_take(self):\n x = np.linspace(0, 100)\n idx = np.random.random_integers(0, 50, 20)\n result = indexing.take(x, idx)\n expected = np.take(x, idx)\n np.testing.assert_array_equal(expected, result)\n\n def test_take_comparison(self):\n x = np.arange(1000000.0)\n idx = np.random.random_integers(0, 100000.0, 1000000.0)\n indexing.take(x, idx)\n np.take(x, idx)\n with Timer('numba') as nbtimer:\n indexing.take(x, idx)\n with Timer('numpy') as nptimer:\n np.take(x, idx)\n ratio = nbtimer.interval / nptimer.interval\n print('numba version of take took %0.2f as long as numpy' % ratio)\n\n def test_square_take(self):\n X = np.random.random_integers(0, 50, 25).reshape(5, 5)\n idx = np.arange(0, 4, 2)\n result = np.empty((idx.shape[0], idx.shape[0]))\n indexing.square_take_to_out(X, idx, result)\n print(result)\n expected = X.take(idx, axis=0).take(idx, axis=1)\n print(expected)\n np.testing.assert_array_equal(expected, result)\n\n def test_square_take_to_out(self):\n X = np.arange(25).reshape(5, 5)\n idx = np.arange(0, 4, 2)\n result = np.empty((idx.shape[0], idx.shape[0]))\n indexing.square_take_to_out(X, idx, result)\n print(result)\n expected = X.take(idx, axis=0).take(idx, axis=1)\n print(expected)\n np.testing.assert_array_equal(expected, result)\n\n def test_square_take_performance(self):\n X = np.arange(25).reshape(5, 5)\n idx = np.arange(0, 4, 2)\n result = np.empty((idx.shape[0], idx.shape[0]))\n indexing.square_take_to_out(X, idx, result)\n result2 = indexing.square_take(X, idx)\n np.testing.assert_array_equal(result, result2)\n num_tests = 1000\n nbts = []\n nbts2 = []\n npts = []\n ms = 10, 20, 40, 80, 160\n for m in ms:\n X = np.arange(m * m).reshape(m, m)\n idx = np.random.random_integers(0, m - 1, m // 2)\n result = np.empty((idx.shape[0], idx.shape[0]))\n with Timer('numba') as nbt:\n for _ in range(num_tests):\n indexing.square_take_to_out(X, idx, result)\n nbts.append(nbt.interval)\n with Timer('numba2') as nbt:\n for _ in range(num_tests):\n r = indexing.square_take(X, idx)\n nbts2.append(nbt.interval)\n with Timer('numpy') as npt:\n for _ in range(num_tests):\n X.take(idx, axis=0).take(idx, axis=1)\n npts.append(npt.interval)\n plt.plot(ms, nbts, label='nb to out')\n plt.plot(ms, nbts2, label='nb new result')\n plt.plot(ms, npts, label='np')\n plt.title('square_take_to_out performance test')\n plt.legend(loc='center left', bbox_to_anchor=(1, 0.5))\n plt.show()\n <mask token>\n\n def test_get_resample_indices(self):\n raw_index = np.arange(10)\n resampled_index = np.arange(1, 10, 2)\n result = indexing.get_resample_indices(raw_index, resampled_index)\n expected = np.arange(0, 10, 2)\n np.testing.assert_array_equal(expected, result)\n\n def test_take_upper_off_diagonal(self):\n X = np.array([[1, 2, 3], [np.nan, 5, 6], [np.nan, np.nan, 9]])\n idx = np.array([0, 1])\n expected = np.array([2])\n actual = indexing.take_upper_off_diagonal(X, idx)\n np.testing.assert_array_equal(actual, expected)\n idx = np.array([1, 2])\n expected = np.array([6])\n actual = indexing.take_upper_off_diagonal(X, idx)\n np.testing.assert_array_equal(actual, expected)\n idx = np.array([0, 2])\n expected = np.array([3])\n actual = indexing.take_upper_off_diagonal(X, idx)\n np.testing.assert_array_equal(actual, expected)\n idx = np.array([0, 1, 2])\n expected = np.array([2, 3, 6])\n actual = indexing.take_upper_off_diagonal(X, idx)\n np.testing.assert_array_equal(actual, expected)\n\n\n<mask token>\n",
"step-4": "<mask token>\nimport matplotlib.pyplot as plt\nfrom numerical_functions import Timer\nimport numerical_functions.numba_funcs.indexing as indexing\nimport numpy as np\nimport unittest\n\n\nclass Test(unittest.TestCase):\n\n def test_take(self):\n x = np.linspace(0, 100)\n idx = np.random.random_integers(0, 50, 20)\n result = indexing.take(x, idx)\n expected = np.take(x, idx)\n np.testing.assert_array_equal(expected, result)\n\n def test_take_comparison(self):\n x = np.arange(1000000.0)\n idx = np.random.random_integers(0, 100000.0, 1000000.0)\n indexing.take(x, idx)\n np.take(x, idx)\n with Timer('numba') as nbtimer:\n indexing.take(x, idx)\n with Timer('numpy') as nptimer:\n np.take(x, idx)\n ratio = nbtimer.interval / nptimer.interval\n print('numba version of take took %0.2f as long as numpy' % ratio)\n\n def test_square_take(self):\n X = np.random.random_integers(0, 50, 25).reshape(5, 5)\n idx = np.arange(0, 4, 2)\n result = np.empty((idx.shape[0], idx.shape[0]))\n indexing.square_take_to_out(X, idx, result)\n print(result)\n expected = X.take(idx, axis=0).take(idx, axis=1)\n print(expected)\n np.testing.assert_array_equal(expected, result)\n\n def test_square_take_to_out(self):\n X = np.arange(25).reshape(5, 5)\n idx = np.arange(0, 4, 2)\n result = np.empty((idx.shape[0], idx.shape[0]))\n indexing.square_take_to_out(X, idx, result)\n print(result)\n expected = X.take(idx, axis=0).take(idx, axis=1)\n print(expected)\n np.testing.assert_array_equal(expected, result)\n\n def test_square_take_performance(self):\n X = np.arange(25).reshape(5, 5)\n idx = np.arange(0, 4, 2)\n result = np.empty((idx.shape[0], idx.shape[0]))\n indexing.square_take_to_out(X, idx, result)\n result2 = indexing.square_take(X, idx)\n np.testing.assert_array_equal(result, result2)\n num_tests = 1000\n nbts = []\n nbts2 = []\n npts = []\n ms = 10, 20, 40, 80, 160\n for m in ms:\n X = np.arange(m * m).reshape(m, m)\n idx = np.random.random_integers(0, m - 1, m // 2)\n result = np.empty((idx.shape[0], idx.shape[0]))\n with Timer('numba') as nbt:\n for _ in range(num_tests):\n indexing.square_take_to_out(X, idx, result)\n nbts.append(nbt.interval)\n with Timer('numba2') as nbt:\n for _ in range(num_tests):\n r = indexing.square_take(X, idx)\n nbts2.append(nbt.interval)\n with Timer('numpy') as npt:\n for _ in range(num_tests):\n X.take(idx, axis=0).take(idx, axis=1)\n npts.append(npt.interval)\n plt.plot(ms, nbts, label='nb to out')\n plt.plot(ms, nbts2, label='nb new result')\n plt.plot(ms, npts, label='np')\n plt.title('square_take_to_out performance test')\n plt.legend(loc='center left', bbox_to_anchor=(1, 0.5))\n plt.show()\n\n def test_square_and_rect_take_to_out(self):\n X = np.arange(100).reshape((10, 10))\n idx0 = np.arange(0, 4, 2)\n idx1 = np.arange(4, 6)\n result = np.empty((idx0.shape[0], idx0.shape[0] + idx1.shape[0]))\n indexing.square_and_rect_take_to_out(X, idx0, idx1, result)\n np.testing.assert_array_equal(result[:, :2], indexing.square_take(X,\n idx0))\n r2 = np.array([[4, 5], [24, 25]])\n np.testing.assert_array_equal(r2, result[:, 2:])\n\n def test_get_resample_indices(self):\n raw_index = np.arange(10)\n resampled_index = np.arange(1, 10, 2)\n result = indexing.get_resample_indices(raw_index, resampled_index)\n expected = np.arange(0, 10, 2)\n np.testing.assert_array_equal(expected, result)\n\n def test_take_upper_off_diagonal(self):\n X = np.array([[1, 2, 3], [np.nan, 5, 6], [np.nan, np.nan, 9]])\n idx = np.array([0, 1])\n expected = np.array([2])\n actual = 
indexing.take_upper_off_diagonal(X, idx)\n np.testing.assert_array_equal(actual, expected)\n idx = np.array([1, 2])\n expected = np.array([6])\n actual = indexing.take_upper_off_diagonal(X, idx)\n np.testing.assert_array_equal(actual, expected)\n idx = np.array([0, 2])\n expected = np.array([3])\n actual = indexing.take_upper_off_diagonal(X, idx)\n np.testing.assert_array_equal(actual, expected)\n idx = np.array([0, 1, 2])\n expected = np.array([2, 3, 6])\n actual = indexing.take_upper_off_diagonal(X, idx)\n np.testing.assert_array_equal(actual, expected)\n\n\nif __name__ == '__main__':\n unittest.main()\n",
"step-5": "'''\nCreated on 27 Mar 2015\n\n@author: Jon\n'''\n\nimport matplotlib.pyplot as plt\nfrom numerical_functions import Timer\nimport numerical_functions.numba_funcs.indexing as indexing\nimport numpy as np\nimport unittest\n\n\nclass Test(unittest.TestCase):\n \n def test_take(self):\n x = np.linspace( 0, 100 )\n idx = np.random.random_integers( 0, 50, 20 )\n result = indexing.take( x, idx )\n expected = np.take( x, idx )\n np.testing.assert_array_equal( expected, result )\n \n def test_take_comparison(self):\n x = np.arange( 1e6 )\n idx = np.random.random_integers( 0, 1e5, 1e6 )\n \n indexing.take( x, idx )\n np.take( x, idx )\n \n with Timer( 'numba' ) as nbtimer:\n indexing.take( x, idx )\n \n with Timer( 'numpy' ) as nptimer:\n np.take( x, idx )\n \n ratio = nbtimer.interval / nptimer.interval\n print( 'numba version of take took %0.2f as long as numpy'%ratio) \n \n \n def test_square_take(self):\n\n X = np.random.random_integers( 0, 50, 25 ).reshape( 5, 5 )\n idx = np.arange( 0, 4, 2 )\n result = np.empty( ( idx.shape[0], idx.shape[0] ) )\n indexing.square_take_to_out( X, idx, result )\n print( result )\n \n expected = X.take( idx, axis=0 ).take( idx, axis=1 )\n print( expected )\n \n np.testing.assert_array_equal( expected, result )\n \n def test_square_take_to_out(self):\n X = np.arange(25).reshape(5,5)\n idx = np.arange( 0, 4, 2 )\n result = np.empty( ( idx.shape[0], idx.shape[0] ) )\n indexing.square_take_to_out( X, idx, result )\n print( result )\n \n expected = X.take( idx, axis=0 ).take( idx, axis=1 )\n print( expected )\n \n np.testing.assert_array_equal( expected, result )\n \n def test_square_take_performance(self):\n X = np.arange(25).reshape(5,5)\n idx = np.arange( 0, 4, 2 )\n result = np.empty( ( idx.shape[0], idx.shape[0] ) )\n indexing.square_take_to_out( X, idx, result )\n \n result2 = indexing.square_take( X, idx )\n \n np.testing.assert_array_equal( result, result2 )\n\n num_tests = 1000\n \n nbts = []\n nbts2 = []\n npts = [] \n \n ms = ( 10, 20, 40, 80, 160 )#, 320, 640 )\n for m in ms:\n X = np.arange(m*m).reshape(m,m)\n idx = np.random.random_integers( 0, m-1, m//2 )\n result = np.empty( ( idx.shape[0], idx.shape[0] ) )\n with Timer( 'numba' ) as nbt:\n for _ in range( num_tests ):\n indexing.square_take_to_out( X, idx, result )\n nbts.append( nbt.interval ) \n \n with Timer( 'numba2' ) as nbt:\n for _ in range( num_tests ):\n r=indexing.square_take( X, idx ) \n nbts2.append( nbt.interval ) \n \n with Timer( 'numpy') as npt:\n for _ in range(num_tests):\n X.take( idx, axis=0 ).take( idx, axis=1 )\n npts.append( npt.interval ) \n \n plt.plot( ms, nbts, label='nb to out' )\n plt.plot( ms, nbts2, label='nb new result')\n plt.plot( ms, npts, label='np' )\n plt.title( 'square_take_to_out performance test')\n plt.legend(loc='center left', bbox_to_anchor=(1, 0.5))\n plt.show()\n \n def test_square_and_rect_take_to_out(self):\n \n X = np.arange( 100 ).reshape( (10, 10 ) )\n idx0 = np.arange( 0, 4, 2 )\n idx1 = np.arange( 4, 6 )\n \n result = np.empty( ( idx0.shape[0], idx0.shape[0]+idx1.shape[0] ) )\n indexing.square_and_rect_take_to_out( X, idx0, idx1, result )\n \n np.testing.assert_array_equal( result[:,:2], indexing.square_take( X, idx0 ) )\n r2 = np.array( [ [ 4, 5 ], [24, 25 ] ] )\n np.testing.assert_array_equal( r2, result[:,2:]) \n\n def test_get_resample_indices(self):\n \n raw_index = np.arange( 10 )\n resampled_index = np.arange( 1, 10, 2 )\n\n result = indexing.get_resample_indices(raw_index, resampled_index)\n expected = np.arange( 0, 10, 2 )\n \n 
np.testing.assert_array_equal( expected, result )\n\n def test_take_upper_off_diagonal(self):\n\n X = np.array( [[ 1, 2, 3],\n [ np.nan, 5, 6],\n [ np.nan, np.nan, 9]])\n\n idx = np.array( [ 0, 1 ] )\n expected = np.array( [ 2 ] )\n actual = indexing.take_upper_off_diagonal( X, idx )\n np.testing.assert_array_equal( actual, expected )\n\n idx = np.array( [ 1, 2 ] )\n expected = np.array( [ 6 ] )\n actual = indexing.take_upper_off_diagonal( X, idx )\n np.testing.assert_array_equal( actual, expected )\n\n idx = np.array( [ 0, 2 ] )\n expected = np.array( [ 3 ] )\n actual = indexing.take_upper_off_diagonal( X, idx )\n np.testing.assert_array_equal( actual, expected )\n\n idx = np.array( [ 0, 1, 2 ] )\n expected = np.array( [ 2, 3, 6 ] )\n actual = indexing.take_upper_off_diagonal( X, idx )\n np.testing.assert_array_equal( actual, expected )\n\n\n\nif __name__ == \"__main__\":\n #import sys;sys.argv = ['', 'Test.testName']\n unittest.main()",
"step-ids": [
5,
7,
8,
11,
12
]
}
|
[
5,
7,
8,
11,
12
] |
#!/usr/bin/python
import gzip
import os
infiles = []
ids=[]
ages=[]
with open('all_C_metadata.txt') as f:
f.readline()
f.readline()
for line in f:
infiles.append(line.split('\t')[0])
ids.append(line.split('\t')[1])
ages.append(line.split('\t')[2])
with open('all_C_samples/diversity.txt', 'w') as of:
    #this stuff is specific to what I used it for before - not sure if you will need it
of.write('sample'+'\t' + 'age' + '\t' + 'd50' + '\n')
for i in range(len(infiles)):
infile = infiles[i]
os.system('gunzip -k %s'%infile)
with open(infile[:-3]) as f:
            print(infile)
d50_not_reached=1
d50_clone=0
clone_count=0
read_count=0
total_clones=0
f.readline()
for line in f:
total_clones+=1
read_count+=float(line.strip().split('\t')[1])
clone_count+=1
if read_count>=.5 and d50_not_reached:
d50_clone=clone_count
d50_not_reached=0
os.system('rm %s'%infile[:-3])
of.write(ids[i] + '\t' + ages[i] + '\t' + str(d50_clone/float(total_clones))+'\n')
def d50(clones, num_Reads):
"""
clones should be a dict of clones
num_Reads is a property of a rep_seq object, so you can just
pass that if you are finding the d50 of the whole repertoire.
However, I don't think it is a property of each VJ pair, but you can pretty
easily calculate it with something like len(Reads_split_by_VJ[the_VJ_pair] )
This function will determine what percent of the top clones
make up 50% of reads (i.e. do the top X% of clones make up
50 % of reads? )
"""
d50_amount = num_Reads/2
read_count=0
for i in clones:
read_count+=clones[i].num_reads
if read_count>=d50_amount:
return i/float(len(clones))
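
# A minimal usage sketch (hypothetical data, not part of the original script):
# it assumes an insertion-ordered dict (Python 3.7+) whose keys are 1-based
# clone ranks and whose values expose a `num_reads` attribute, mimicking the
# clone objects of the rep_seq class mentioned in the docstring.
#
#   from collections import namedtuple
#   Clone = namedtuple('Clone', 'num_reads')
#   clones = {1: Clone(40), 2: Clone(30), 3: Clone(20), 4: Clone(10)}
#   # 40 + 30 reads reach half of 100 at the 2nd clone, so this prints 2/4 = 0.5
#   print(d50(clones, num_Reads=100))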
|
normal
|
{
"blob_id": "c02f46e8d89dd4b141c86df461ecbb8ed608b61b",
"index": 7826,
"step-1": " #!/usr/bin/python\n\nimport gzip\nimport os\n\ninfiles = []\nids=[]\nages=[]\nwith open('all_C_metadata.txt') as f:\n f.readline()\n f.readline()\n for line in f:\n infiles.append(line.split('\\t')[0])\n ids.append(line.split('\\t')[1])\n ages.append(line.split('\\t')[2])\n\nwith open('all_C_samples/diversity.txt', 'w') as of:\n\n #this stuff is specific to what i used if for before - not sure if you will need it\n of.write('sample'+'\\t' + 'age' + '\\t' + 'd50' + '\\n')\n for i in range(len(infiles)):\n infile = infiles[i]\n os.system('gunzip -k %s'%infile)\n\n with open(infile[:-3]) as f:\n print infile\n d50_not_reached=1\n d50_clone=0\n clone_count=0\n read_count=0\n total_clones=0\n f.readline()\n for line in f:\n total_clones+=1\n read_count+=float(line.strip().split('\\t')[1])\n clone_count+=1\n if read_count>=.5 and d50_not_reached:\n d50_clone=clone_count\n d50_not_reached=0\n os.system('rm %s'%infile[:-3])\n of.write(ids[i] + '\\t' + ages[i] + '\\t' + str(d50_clone/float(total_clones))+'\\n')\n\n\n\n\ndef d50(clones, num_Reads):\n \"\"\"\n clones should be a dict of clones\n\n num_Reads is a property of a rep_seq object, so you can just \n pass that if you are finding the d50 of the whole repertoire.\n However, I don't think it is a property of each VJ pair, but you can pretty\n easily calculate it with something like len(Reads_split_by_VJ[the_VJ_pair] )\n\n This function will determine what percent of the top clones\n make up 50% of reads (i.e. do the top X% of clones make up\n 50 % of reads? )\n\n\n \"\"\" \n\n\n d50_amount = num_Reads/2\n read_count=0\n for i in clones:\n read_count+=clones[i].num_reads\n if read_count>=d50_amount:\n return i/float(len(clones))\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
from get_info import parse_matches as pm
def all_match_data(year):
"""
Searches through the parse_matches data for all games in a specific season prints them out with a game ID and
returns the data in a list to the main program
:param year: Specific format YYYY between 2008 - 2017
:return: year_match_data
"""
year_match_data = []
match_year_data = pm()
for count in range(len(match_year_data)):
if year == match_year_data[count][1]:
year_match_data.append(match_year_data[count])
for count in range(len(year_match_data)):
print(
f'Game ID: {count + 1} Match date: {year_match_data[count][3]} {year_match_data[count][4]} vs '
f'{year_match_data[count][5]}')
return year_match_data
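
# A minimal usage sketch (hypothetical call, not part of the original module):
# it assumes parse_matches() yields rows shaped like
# [match_id, season, ..., date, team1, team2, ...] as indexed above, and that
# the season field compares equal to the `year` argument that is passed in.
#
#   if __name__ == '__main__':
#       season_matches = all_match_data('2016')
#       print(f'{len(season_matches)} matches found')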
|
normal
|
{
"blob_id": "bc53af24bb46d2be3122e290c4732b312f4ebdf5",
"index": 5313,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef all_match_data(year):\n \"\"\"\n Searches through the parse_matches data for all games in a specific season prints them out with a game ID and\n returns the data in a list to the main program\n :param year: Specific format YYYY between 2008 - 2017\n :return: year_match_data\n \"\"\"\n year_match_data = []\n match_year_data = pm()\n for count in range(len(match_year_data)):\n if year == match_year_data[count][1]:\n year_match_data.append(match_year_data[count])\n for count in range(len(year_match_data)):\n print(\n f'Game ID: {count + 1} Match date: {year_match_data[count][3]} {year_match_data[count][4]} vs {year_match_data[count][5]}'\n )\n return year_match_data\n",
"step-3": "from get_info import parse_matches as pm\n\n\ndef all_match_data(year):\n \"\"\"\n Searches through the parse_matches data for all games in a specific season prints them out with a game ID and\n returns the data in a list to the main program\n :param year: Specific format YYYY between 2008 - 2017\n :return: year_match_data\n \"\"\"\n year_match_data = []\n match_year_data = pm()\n for count in range(len(match_year_data)):\n if year == match_year_data[count][1]:\n year_match_data.append(match_year_data[count])\n for count in range(len(year_match_data)):\n print(\n f'Game ID: {count + 1} Match date: {year_match_data[count][3]} {year_match_data[count][4]} vs {year_match_data[count][5]}'\n )\n return year_match_data\n",
"step-4": "from get_info import parse_matches as pm\n\n\ndef all_match_data(year):\n \"\"\"\n Searches through the parse_matches data for all games in a specific season prints them out with a game ID and\n returns the data in a list to the main program\n :param year: Specific format YYYY between 2008 - 2017\n :return: year_match_data\n \"\"\"\n year_match_data = []\n match_year_data = pm()\n for count in range(len(match_year_data)):\n if year == match_year_data[count][1]:\n year_match_data.append(match_year_data[count])\n for count in range(len(year_match_data)):\n print(\n f'Game ID: {count + 1} Match date: {year_match_data[count][3]} {year_match_data[count][4]} vs '\n f'{year_match_data[count][5]}')\n\n return year_match_data\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
from datetime import datetime
import whois
def age_domain(url):
try:
w = whois.whois(url)
if(w):
for l in w.expiration_date:
d1 = datetime.date(l)
print(d1)
for l1 in w.creation_date:
d2 = datetime.date(l1)
print(d2)
diff = (d1 - d2).days
print(diff)
if ((diff / 30) < 6):
return 1
else:
return 0
except:
return -1
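
# A minimal usage sketch (hypothetical call, not part of the original module):
# it assumes network access for the WHOIS lookup. A return value of 1 means the
# computed interval is shorter than six months, 0 means longer, and -1 means the
# lookup or date handling failed.
#
#   if __name__ == '__main__':
#       print(age_domain('example.com'))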
|
normal
|
{
"blob_id": "07d574060ded0d98734b4f184dcba7377b3a5480",
"index": 685,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef age_domain(url):\n try:\n w = whois.whois(url)\n if w:\n for l in w.expiration_date:\n d1 = datetime.date(l)\n print(d1)\n for l1 in w.creation_date:\n d2 = datetime.date(l1)\n print(d2)\n diff = (d1 - d2).days\n print(diff)\n if diff / 30 < 6:\n return 1\n else:\n return 0\n except:\n return -1\n",
"step-3": "from datetime import datetime\nimport whois\n\n\ndef age_domain(url):\n try:\n w = whois.whois(url)\n if w:\n for l in w.expiration_date:\n d1 = datetime.date(l)\n print(d1)\n for l1 in w.creation_date:\n d2 = datetime.date(l1)\n print(d2)\n diff = (d1 - d2).days\n print(diff)\n if diff / 30 < 6:\n return 1\n else:\n return 0\n except:\n return -1\n",
"step-4": "from datetime import datetime\r\n\r\nimport whois\r\n\r\n\r\ndef age_domain(url):\r\n try:\r\n w = whois.whois(url)\r\n if(w):\r\n for l in w.expiration_date:\r\n d1 = datetime.date(l)\r\n print(d1)\r\n for l1 in w.creation_date:\r\n d2 = datetime.date(l1)\r\n print(d2)\r\n diff = (d1 - d2).days\r\n print(diff)\r\n if ((diff / 30) < 6):\r\n return 1\r\n else:\r\n return 0\r\n except:\r\n return -1\r\n\r\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"Widget for exporting the data"
import asyncio
from pathlib import Path
from typing import List
from bokeh.models import Div, CustomAction, CustomJS
from view.dialog import FileDialog
from utils.gui import startfile
class SaveFileDialog(FileDialog):
"A file dialog that adds a default save path"
def __init__(self, ctrl):
super().__init__(ctrl, storage = "save")
def _defaultpath(ext, bopen):
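            # Combine the most recent load path (file stem) with the most recent
            # save location (directory and, when set, suffix) to propose a
            # default save path, falling back to whichever of the two exists.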
assert not bopen
pot = [i for i in self.storedpaths(ctrl, "load", ext) if i.exists()]
ope = next((i for i in pot if i.suffix not in ('', '.gr')), None)
if ope is None:
ope = self.firstexistingpath(pot)
pot = self.storedpaths(ctrl, "save", ext)
sav = self.firstexistingparent(pot)
if ope is None:
return sav
if sav is None:
if Path(ope).is_dir():
return ope
sav = Path(ope).with_suffix(ext[0][1])
else:
psa = Path(sav)
if psa.suffix == '':
sav = (psa/Path(ope).stem).with_suffix(ext[0][1])
else:
sav = (psa.parent/Path(ope).stem).with_suffix(psa.suffix)
self.defaultextension = sav.suffix[1:] if sav.suffix != '' else None
return str(sav)
self.__store = self.access[1]
self.access = _defaultpath, None
self.filetypes = "xlsx:*.xlsx"
self.title = "Export plot data to excel"
def store(self, *_):
"store the path"
return self.__store(*_)
class CSVExporter:
"exports all to csv"
@classmethod
def addtodoc(cls, mainviews, ctrl, doc) -> List[Div]:
"creates the widget"
dlg = SaveFileDialog(ctrl)
div = Div(text = "", width = 0, height = 0)
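        # The zero-size Div acts as a hidden trigger: the toolbar action added
        # below appends a space to its text from JavaScript, and the on_change
        # callback registered further down reacts to that change by launching
        # the save dialog in a background task.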
mainview = mainviews[0] if isinstance(mainviews, (list, tuple)) else mainviews
figure = mainview.getfigure()
figure.tools = (
figure.tools
+ [
CustomAction(
action_tooltip = dlg.title,
callback = CustomJS(
code = 'div.text = div.text + " ";',
args = dict(div = div)
)
)
]
)
if isinstance(mainviews, (list, tuple)):
for i in mainviews[1:]:
i.getfigure().tools = i.getfigure().tools + [figure.tools[-1]]
def _cb(attr, old, new):
if new == " " and div.text == ' ':
div.text = ""
asyncio.create_task(cls._run(dlg, mainview, ctrl, doc))
div.on_change("text", _cb)
return [div]
def reset(self, *_):
"reset all"
@staticmethod
async def _run(dlg: SaveFileDialog, mainview, ctrl, doc):
paths = await mainview.threadmethod(dlg.save)
if paths is None:
return
@doc.add_next_tick_callback
def _toolbarsave():
with ctrl.action:
dlg.store(paths, False) # pylint: disable=not-callable
path = paths if isinstance(paths, (str, Path)) else paths[0]
if mainview.export(path) and Path(path).exists():
startfile(path)
|
normal
|
{
"blob_id": "d120172e65f329b1137df38b693e5fe7145bc80d",
"index": 2840,
"step-1": "<mask token>\n\n\nclass CSVExporter:\n <mask token>\n <mask token>\n\n def reset(self, *_):\n \"\"\"reset all\"\"\"\n\n @staticmethod\n async def _run(dlg: SaveFileDialog, mainview, ctrl, doc):\n paths = await mainview.threadmethod(dlg.save)\n if paths is None:\n return\n\n @doc.add_next_tick_callback\n def _toolbarsave():\n with ctrl.action:\n dlg.store(paths, False)\n path = paths if isinstance(paths, (str, Path)) else paths[0]\n if mainview.export(path) and Path(path).exists():\n startfile(path)\n",
"step-2": "<mask token>\n\n\nclass SaveFileDialog(FileDialog):\n <mask token>\n\n def __init__(self, ctrl):\n super().__init__(ctrl, storage='save')\n\n def _defaultpath(ext, bopen):\n assert not bopen\n pot = [i for i in self.storedpaths(ctrl, 'load', ext) if i.exists()\n ]\n ope = next((i for i in pot if i.suffix not in ('', '.gr')), None)\n if ope is None:\n ope = self.firstexistingpath(pot)\n pot = self.storedpaths(ctrl, 'save', ext)\n sav = self.firstexistingparent(pot)\n if ope is None:\n return sav\n if sav is None:\n if Path(ope).is_dir():\n return ope\n sav = Path(ope).with_suffix(ext[0][1])\n else:\n psa = Path(sav)\n if psa.suffix == '':\n sav = (psa / Path(ope).stem).with_suffix(ext[0][1])\n else:\n sav = (psa.parent / Path(ope).stem).with_suffix(psa.suffix)\n self.defaultextension = sav.suffix[1:\n ] if sav.suffix != '' else None\n return str(sav)\n self.__store = self.access[1]\n self.access = _defaultpath, None\n self.filetypes = 'xlsx:*.xlsx'\n self.title = 'Export plot data to excel'\n\n def store(self, *_):\n \"\"\"store the path\"\"\"\n return self.__store(*_)\n\n\nclass CSVExporter:\n \"\"\"exports all to csv\"\"\"\n\n @classmethod\n def addtodoc(cls, mainviews, ctrl, doc) ->List[Div]:\n \"\"\"creates the widget\"\"\"\n dlg = SaveFileDialog(ctrl)\n div = Div(text='', width=0, height=0)\n mainview = mainviews[0] if isinstance(mainviews, (list, tuple)\n ) else mainviews\n figure = mainview.getfigure()\n figure.tools = figure.tools + [CustomAction(action_tooltip=dlg.\n title, callback=CustomJS(code='div.text = div.text + \" \";',\n args=dict(div=div)))]\n if isinstance(mainviews, (list, tuple)):\n for i in mainviews[1:]:\n i.getfigure().tools = i.getfigure().tools + [figure.tools[-1]]\n\n def _cb(attr, old, new):\n if new == ' ' and div.text == ' ':\n div.text = ''\n asyncio.create_task(cls._run(dlg, mainview, ctrl, doc))\n div.on_change('text', _cb)\n return [div]\n\n def reset(self, *_):\n \"\"\"reset all\"\"\"\n\n @staticmethod\n async def _run(dlg: SaveFileDialog, mainview, ctrl, doc):\n paths = await mainview.threadmethod(dlg.save)\n if paths is None:\n return\n\n @doc.add_next_tick_callback\n def _toolbarsave():\n with ctrl.action:\n dlg.store(paths, False)\n path = paths if isinstance(paths, (str, Path)) else paths[0]\n if mainview.export(path) and Path(path).exists():\n startfile(path)\n",
"step-3": "<mask token>\n\n\nclass SaveFileDialog(FileDialog):\n \"\"\"A file dialog that adds a default save path\"\"\"\n\n def __init__(self, ctrl):\n super().__init__(ctrl, storage='save')\n\n def _defaultpath(ext, bopen):\n assert not bopen\n pot = [i for i in self.storedpaths(ctrl, 'load', ext) if i.exists()\n ]\n ope = next((i for i in pot if i.suffix not in ('', '.gr')), None)\n if ope is None:\n ope = self.firstexistingpath(pot)\n pot = self.storedpaths(ctrl, 'save', ext)\n sav = self.firstexistingparent(pot)\n if ope is None:\n return sav\n if sav is None:\n if Path(ope).is_dir():\n return ope\n sav = Path(ope).with_suffix(ext[0][1])\n else:\n psa = Path(sav)\n if psa.suffix == '':\n sav = (psa / Path(ope).stem).with_suffix(ext[0][1])\n else:\n sav = (psa.parent / Path(ope).stem).with_suffix(psa.suffix)\n self.defaultextension = sav.suffix[1:\n ] if sav.suffix != '' else None\n return str(sav)\n self.__store = self.access[1]\n self.access = _defaultpath, None\n self.filetypes = 'xlsx:*.xlsx'\n self.title = 'Export plot data to excel'\n\n def store(self, *_):\n \"\"\"store the path\"\"\"\n return self.__store(*_)\n\n\nclass CSVExporter:\n \"\"\"exports all to csv\"\"\"\n\n @classmethod\n def addtodoc(cls, mainviews, ctrl, doc) ->List[Div]:\n \"\"\"creates the widget\"\"\"\n dlg = SaveFileDialog(ctrl)\n div = Div(text='', width=0, height=0)\n mainview = mainviews[0] if isinstance(mainviews, (list, tuple)\n ) else mainviews\n figure = mainview.getfigure()\n figure.tools = figure.tools + [CustomAction(action_tooltip=dlg.\n title, callback=CustomJS(code='div.text = div.text + \" \";',\n args=dict(div=div)))]\n if isinstance(mainviews, (list, tuple)):\n for i in mainviews[1:]:\n i.getfigure().tools = i.getfigure().tools + [figure.tools[-1]]\n\n def _cb(attr, old, new):\n if new == ' ' and div.text == ' ':\n div.text = ''\n asyncio.create_task(cls._run(dlg, mainview, ctrl, doc))\n div.on_change('text', _cb)\n return [div]\n\n def reset(self, *_):\n \"\"\"reset all\"\"\"\n\n @staticmethod\n async def _run(dlg: SaveFileDialog, mainview, ctrl, doc):\n paths = await mainview.threadmethod(dlg.save)\n if paths is None:\n return\n\n @doc.add_next_tick_callback\n def _toolbarsave():\n with ctrl.action:\n dlg.store(paths, False)\n path = paths if isinstance(paths, (str, Path)) else paths[0]\n if mainview.export(path) and Path(path).exists():\n startfile(path)\n",
"step-4": "<mask token>\nimport asyncio\nfrom pathlib import Path\nfrom typing import List\nfrom bokeh.models import Div, CustomAction, CustomJS\nfrom view.dialog import FileDialog\nfrom utils.gui import startfile\n\n\nclass SaveFileDialog(FileDialog):\n \"\"\"A file dialog that adds a default save path\"\"\"\n\n def __init__(self, ctrl):\n super().__init__(ctrl, storage='save')\n\n def _defaultpath(ext, bopen):\n assert not bopen\n pot = [i for i in self.storedpaths(ctrl, 'load', ext) if i.exists()\n ]\n ope = next((i for i in pot if i.suffix not in ('', '.gr')), None)\n if ope is None:\n ope = self.firstexistingpath(pot)\n pot = self.storedpaths(ctrl, 'save', ext)\n sav = self.firstexistingparent(pot)\n if ope is None:\n return sav\n if sav is None:\n if Path(ope).is_dir():\n return ope\n sav = Path(ope).with_suffix(ext[0][1])\n else:\n psa = Path(sav)\n if psa.suffix == '':\n sav = (psa / Path(ope).stem).with_suffix(ext[0][1])\n else:\n sav = (psa.parent / Path(ope).stem).with_suffix(psa.suffix)\n self.defaultextension = sav.suffix[1:\n ] if sav.suffix != '' else None\n return str(sav)\n self.__store = self.access[1]\n self.access = _defaultpath, None\n self.filetypes = 'xlsx:*.xlsx'\n self.title = 'Export plot data to excel'\n\n def store(self, *_):\n \"\"\"store the path\"\"\"\n return self.__store(*_)\n\n\nclass CSVExporter:\n \"\"\"exports all to csv\"\"\"\n\n @classmethod\n def addtodoc(cls, mainviews, ctrl, doc) ->List[Div]:\n \"\"\"creates the widget\"\"\"\n dlg = SaveFileDialog(ctrl)\n div = Div(text='', width=0, height=0)\n mainview = mainviews[0] if isinstance(mainviews, (list, tuple)\n ) else mainviews\n figure = mainview.getfigure()\n figure.tools = figure.tools + [CustomAction(action_tooltip=dlg.\n title, callback=CustomJS(code='div.text = div.text + \" \";',\n args=dict(div=div)))]\n if isinstance(mainviews, (list, tuple)):\n for i in mainviews[1:]:\n i.getfigure().tools = i.getfigure().tools + [figure.tools[-1]]\n\n def _cb(attr, old, new):\n if new == ' ' and div.text == ' ':\n div.text = ''\n asyncio.create_task(cls._run(dlg, mainview, ctrl, doc))\n div.on_change('text', _cb)\n return [div]\n\n def reset(self, *_):\n \"\"\"reset all\"\"\"\n\n @staticmethod\n async def _run(dlg: SaveFileDialog, mainview, ctrl, doc):\n paths = await mainview.threadmethod(dlg.save)\n if paths is None:\n return\n\n @doc.add_next_tick_callback\n def _toolbarsave():\n with ctrl.action:\n dlg.store(paths, False)\n path = paths if isinstance(paths, (str, Path)) else paths[0]\n if mainview.export(path) and Path(path).exists():\n startfile(path)\n",
"step-5": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"Widget for exporting the data\"\nimport asyncio\nfrom pathlib import Path\nfrom typing import List\nfrom bokeh.models import Div, CustomAction, CustomJS\nfrom view.dialog import FileDialog\nfrom utils.gui import startfile\n\nclass SaveFileDialog(FileDialog):\n \"A file dialog that adds a default save path\"\n def __init__(self, ctrl):\n super().__init__(ctrl, storage = \"save\")\n\n def _defaultpath(ext, bopen):\n assert not bopen\n pot = [i for i in self.storedpaths(ctrl, \"load\", ext) if i.exists()]\n ope = next((i for i in pot if i.suffix not in ('', '.gr')), None)\n if ope is None:\n ope = self.firstexistingpath(pot)\n\n pot = self.storedpaths(ctrl, \"save\", ext)\n sav = self.firstexistingparent(pot)\n\n if ope is None:\n return sav\n\n if sav is None:\n if Path(ope).is_dir():\n return ope\n sav = Path(ope).with_suffix(ext[0][1])\n else:\n psa = Path(sav)\n if psa.suffix == '':\n sav = (psa/Path(ope).stem).with_suffix(ext[0][1])\n else:\n sav = (psa.parent/Path(ope).stem).with_suffix(psa.suffix)\n\n self.defaultextension = sav.suffix[1:] if sav.suffix != '' else None\n return str(sav)\n\n self.__store = self.access[1]\n self.access = _defaultpath, None\n self.filetypes = \"xlsx:*.xlsx\"\n self.title = \"Export plot data to excel\"\n\n def store(self, *_):\n \"store the path\"\n return self.__store(*_)\n\nclass CSVExporter:\n \"exports all to csv\"\n @classmethod\n def addtodoc(cls, mainviews, ctrl, doc) -> List[Div]:\n \"creates the widget\"\n dlg = SaveFileDialog(ctrl)\n div = Div(text = \"\", width = 0, height = 0)\n\n mainview = mainviews[0] if isinstance(mainviews, (list, tuple)) else mainviews\n figure = mainview.getfigure()\n\n figure.tools = (\n figure.tools\n + [\n CustomAction(\n action_tooltip = dlg.title,\n callback = CustomJS(\n code = 'div.text = div.text + \" \";',\n args = dict(div = div)\n )\n )\n ]\n )\n\n if isinstance(mainviews, (list, tuple)):\n for i in mainviews[1:]:\n i.getfigure().tools = i.getfigure().tools + [figure.tools[-1]]\n\n def _cb(attr, old, new):\n if new == \" \" and div.text == ' ':\n div.text = \"\"\n asyncio.create_task(cls._run(dlg, mainview, ctrl, doc))\n\n div.on_change(\"text\", _cb)\n return [div]\n\n def reset(self, *_):\n \"reset all\"\n\n @staticmethod\n async def _run(dlg: SaveFileDialog, mainview, ctrl, doc):\n paths = await mainview.threadmethod(dlg.save)\n if paths is None:\n return\n\n @doc.add_next_tick_callback\n def _toolbarsave():\n with ctrl.action:\n dlg.store(paths, False) # pylint: disable=not-callable\n path = paths if isinstance(paths, (str, Path)) else paths[0]\n if mainview.export(path) and Path(path).exists():\n startfile(path)\n",
"step-ids": [
2,
7,
8,
9,
10
]
}
|
[
2,
7,
8,
9,
10
] |
# -*- coding: utf-8 -*-
#!/bin/python3
import websocket
import json
import time
from loraCrypto import LoRaCrypto
from binascii import hexlify
'''
Unencrypted data
{
cmd: 'tx';
EUI: string;
port: number;
data: string
}
Encrypted data
{
cmd: 'tx';
EUI: string;
port: number;
encdata: string;
seqno: number;
}
'''
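
# For example, a plain (unencrypted) downlink would be serialized roughly as
# (the "data" value below is illustrative only):
#
#   {"cmd": "tx", "EUI": "BE7A0000000005D2", "port": 1, "data": "0102aabb"}
#
# This script builds the encrypted variant instead, filling "encdata" and
# "seqno" for every packet it sends.
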
GATEWAY_ID = "be7a0029"
TOKEN = "7AXCO2-Kkle42YGVVKvmmQ"
# Target device information
EUI = "BE7A0000000005D2"
ADDR = "00aa1174"
LASTEST_SEQ = 4739
APP_SKEY = "2b7e151628aed2a6abf7158809cf4f3c"
# File to be downloaded
FILE_NAME = "lora.bin"
PACKET_SIZE = 50
sendData = {}
def main():
ws = websocket.WebSocket()
ws.connect("wss://www.loriot.io/app?id="+GATEWAY_ID+"&token="+TOKEN)
lc = LoRaCrypto()
with open(FILE_NAME, "rb") as downloadFile:
binData =downloadFile.read()
count = len(binData) // PACKET_SIZE
sendData["cmd"] = "tx"
sendData["EUI"] = EUI
sendData["port"] = 1
seq = LASTEST_SEQ
print("Upload start!")
for i in range(count+1):
packetBin = binData[i*PACKET_SIZE:i*PACKET_SIZE+PACKET_SIZE]
packetStr = hexlify(packetBin).decode()
packetEncStr = lc.PayloadEncrypt(packetStr, APP_SKEY, ADDR, 1, seq)
sendData["encdata"] = packetEncStr
sendData["seqno"] = seq
print("Packet %d:" % i)
print("Before encrypt:")
print(packetStr)
print("After encrypt:")
print(packetEncStr)
print("Sequence is %d" % seq)
ws.send(json.dumps(sendData))
seq += 1
time.sleep(10)
print("Upload finish!")
ws.close()
if __name__ == "__main__":
from server.app.libs.loraencrypto import wrap_data
    print(wrap_data('he', 'BX32903', 20))
|
normal
|
{
"blob_id": "3683b1f799fa315d736e4b62c9c093360afa893f",
"index": 2052,
"step-1": "# -*- coding: utf-8 -*-\n#!/bin/python3\nimport websocket\nimport json\nimport time\nfrom loraCrypto import LoRaCrypto\nfrom binascii import hexlify\n\n'''\n没有加密的数据\n{\n\tcmd: 'tx';\n\tEUI: string;\n\tport: number;\n\tdata: string\n}\n\n加密的数据\n{\n\tcmd: 'tx';\n\tEUI: string;\n\tport: number;\n\tencdata: string;\n\tseqno: number;\n}\n'''\n\nGATEWAY_ID = \"be7a0029\"\nTOKEN = \"7AXCO2-Kkle42YGVVKvmmQ\"\n\n# 目标设备信息\nEUI = \"BE7A0000000005D2\"\nADDR = \"00aa1174\"\nLASTEST_SEQ = 4739 \nAPP_SKEY = \"2b7e151628aed2a6abf7158809cf4f3c\"\n\n# 需要下载的文件\nFILE_NAME = \"lora.bin\" \nPACKET_SIZE = 50\n\n\nsendData = {}\n\ndef main():\n ws = websocket.WebSocket()\n ws.connect(\"wss://www.loriot.io/app?id=\"+GATEWAY_ID+\"&token=\"+TOKEN)\n lc = LoRaCrypto()\n\n with open(FILE_NAME, \"rb\") as downloadFile:\n binData =downloadFile.read()\n\n count = len(binData) // PACKET_SIZE\n\n sendData[\"cmd\"] = \"tx\"\n sendData[\"EUI\"] = EUI\n sendData[\"port\"] = 1\n seq = LASTEST_SEQ\n\n print(\"Upload start!\")\n for i in range(count+1):\n packetBin = binData[i*PACKET_SIZE:i*PACKET_SIZE+PACKET_SIZE]\n packetStr = hexlify(packetBin).decode()\n packetEncStr = lc.PayloadEncrypt(packetStr, APP_SKEY, ADDR, 1, seq)\n sendData[\"encdata\"] = packetEncStr\n sendData[\"seqno\"] = seq\n \t\t\n print(\"Packet %d:\" % i)\n print(\"Before encrypt:\")\n print(packetStr)\n print(\"After encrypt:\")\n print(packetEncStr)\n print(\"Sequence is %d\" % seq)\n ws.send(json.dumps(sendData))\n seq += 1\n time.sleep(10)\n \n print(\"Upload finish!\")\n ws.close()\n\nif __name__ == \"__main__\":\n from server.app.libs.loraencrypto import wrap_data\n print wrap_data('he', 'BX32903', 20)\n\n\n\n\n\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
import math
from historia.utils import unique_id, position_in_range
from historia.pops.models.inventory import Inventory
from historia.economy.enums.resource import Good, NaturalResource
from historia.economy.enums.order_type import OrderType
from historia.economy.models.price_range import PriceRange
from historia.economy.models.order import Order
from historia.pops.enums.pop_job import PopJob
DEBUG = False
class Pop(object):
"""
A simulated unit of population
"""
def __init__(self, province, pop_job, population):
"""
Creates a new Pop.
manager (Historia)
province (SecondaryDivision)
culture (Culture)
religion (Religion)
language (Language)
job (Job)
"""
self.bankrupt_times = 0
self.home = province
self.location = province
self.id = unique_id('po')
self.population = population
self.population_yesterday = 0
self.pop_job = pop_job
# ECONOMY
self.money = pop_job.start_money
self.money_yesterday = 0
self.bankrupt = False
# set inventory and ideal amounts
self.inventory = Inventory(pop_job.inventory_size)
self.give_start_inventory()
self.update_ideal_inventory()
# a dictionary of Goods to PriceRanges
# represents the price range the agent considers valid for each Good
self.price_belief = {}
# a dictionary of Goods to price list
# represents the prices of the good that the Pop has observed
# during the time they have been trading
self.observed_trading_range = {}
self.successful_trades = 0
self.failed_trades = 0
# make some fake initial data
for good in Good.all():
avg_price = self.market.avg_historial_price(good, 15)
# fake trades
self.observed_trading_range[good] = [
avg_price * 0.5,
avg_price * 1.5
]
# generate fake price belief
self.price_belief[good] = PriceRange(avg_price * 0.5, avg_price * 1.5)
# Merchant logic
self.trade_location = None # the province this Pop is traveling to
self.trade_good = None # what good we're trading in right now
self.trade_amount = 0 # amount of trade_good we should be trading
self.trading_days = 0 # number of days waiting to trade
# Generic Pop properties
@property
def social_class(self):
return self.pop_job.social_class
@property
def market(self):
"Get the market instance"
return self.location.market
@property
def profit(self):
"Determine today's profit"
return self.money - self.money_yesterday
@property
def total_trades(self):
"Total number of trades this Pop participated in"
return self.successful_trades + self.failed_trades
@property
def trade_success(self):
"Percent of trades that were successful"
if self.total_trades == 0:
return 0
return (self.successful_trades / self.total_trades) * 100
@property
def is_away(self):
"Is this Pop away from it's home?"
return self.home is not self.location
# Merchant specific logic
def go_to_province(self, province):
"Moves the Pop to another Province"
self.location = province
def decide_trade_plan(self):
"""
Decide what good to trade in and how much.
        Look for the most in-demand good, or the most expensive good, at the home Province
        Find a province near the home province where it's the cheapest and there's inventory
"""
self.trade_amount = 5
most_demanded_goods = self.home.market.goods_demand_ratio(day_range=1)
most_demanded_goods = sorted(most_demanded_goods.items(), key=lambda i: i[1], reverse=True)
# if we already had a trade good, refresh ideal inventory
if self.trade_good:
self.update_ideal_inventory()
if DEBUG: print("Finding a Good to trade:")
for good, demand in most_demanded_goods:
if demand > 0:
# find nearby provinces where this has inventory and the price is lower
price_at_home = self.home.market.mean_price(good)
if DEBUG: print("Good: {}, Demand: {}, Price: ${}".format(good.title, demand, price_at_home))
neighboring_markets = [p.market for p in self.location.owned_neighbors]
neighboring_markets = [m for m in neighboring_markets if m.supply_for(good) > self.trade_amount]
neighboring_markets.sort(key=lambda m: m.supply_for(good), reverse=True)
if len(neighboring_markets) > 0:
# we found places where this good is cheaper and in inventory
target = neighboring_markets[0].location
price_at_target = target.market.mean_price(good)
# only trade with prices where we can make money
if price_at_home > price_at_target:
offset = 0
if good is Good.bread:
offset = 1
self.inventory.set_ideal(good, self.trade_amount + offset)
self.trade_location = target
if DEBUG:
print("\tTarget: {}, Supply: {}, Price: ${}, Price at home: ${}".format(
self.trade_location.name,
self.trade_location.market.supply_for(good),
self.trade_location.market.mean_price(good),
price_at_home)
)
self.trade_good = good
return
else:
if DEBUG: print("\tPrice is higher at target (home: ${} target: ${})".format(price_at_home, price_at_target))
else:
if DEBUG: print("\tNo markets selling {} found".format(good))
# Generic economic logic
def update_ideal_inventory(self):
"Update ideal inventory"
# reset so that the Pop can sell the inventory it doesn't need
for good in Good.all():
self.inventory.set_ideal(good, 0)
# update ideal inventory for new Job
for item in self.pop_job.ideal_inventory:
self.inventory.set_ideal(item['good'], item['amount'])
def give_start_inventory(self):
"Give the Pop the inventory it needs to do its job"
for item in self.pop_job.start_inventory:
self.inventory.add(item['good'], item['amount'])
def change_population(self, trade_success):
"Change the population based off the trade"
self.population_yesterday = self.population
if trade_success:
self.population += round(self.population * 0.01)
else:
self.population -= round(self.population * 0.002)
def handle_bankruptcy(self, pop_job):
"Change job, create money out of thin air, update ideal inventory"
# TODO: stop creating money out of thin air
self.pop_job = pop_job
self.bankrupt_times += 1
self.money = 2
self.update_ideal_inventory()
self.give_start_inventory()
def perform_logic(self):
"Depending on PopJob, perform logic (including production)"
logic = self.pop_job.logic(self)
logic.perform()
def create_buy_order(self, good, limit):
"Create a buy order for a given Good at a determined quantity"
bid_price = self.determine_price_of(good)
ideal = self.determine_buy_quantity(good)
# can't buy more than limit
quantity_to_buy = limit if ideal > limit else ideal
if quantity_to_buy > 0:
return Order(self, OrderType.buy_order, quantity_to_buy, bid_price, good)
return False
def create_sell_order(self, good, limit):
"Create a sell order for a given Good at a determined quantity"
sell_price = self.determine_price_of(good)
ideal = self.determine_sell_quantity(good)
        # can't sell more than limit
        quantity_to_sell = limit if ideal > limit else ideal
if quantity_to_sell > 0:
return Order(self, OrderType.sell_order, quantity_to_sell, sell_price, good)
return False
def price_belief_for(self, good):
"Gets the price belief this agent has for a particular Good"
if good in self.price_belief:
return self.price_belief[good]
def determine_price_of(self, good):
"Determine the price of a particular good"
return self.price_belief_for(good).random()
def trading_range_extremes(self, good):
"Gets the lowest and highst price of a Good this agent has seen"
trading_range = self.observed_trading_range[good]
return PriceRange(min(trading_range), max(trading_range))
def determine_sell_quantity(self, good):
"Determine how much inventory goods to sell based on market conditions"
mean = self.market.avg_historial_price(good, 15)
trading_range = self.trading_range_extremes(good)
favoribility = position_in_range(mean, trading_range.low, trading_range.high)
amount_to_sell = round(favoribility * self.inventory.surplus(good))
if amount_to_sell < 1:
amount_to_sell = 1
return amount_to_sell
def determine_buy_quantity(self, good):
"Determine how much goods to buy based on market conditions"
mean = self.market.avg_historial_price(good, 15)
trading_range = self.trading_range_extremes(good)
favoribility = 1 - position_in_range(mean, trading_range.low, trading_range.high)
amount_to_buy = round(favoribility * self.inventory.shortage(good))
if amount_to_buy < 1:
amount_to_buy = 1
return amount_to_buy
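    # Worked example of the favorability math above (illustrative numbers only,
    # assuming position_in_range returns (value - low) / (high - low) clamped to [0, 1]):
    # if the 15-day mean price is 10 and the observed trading range is 5..15,
    # favoribility = 0.5, so the Pop offers to sell roughly half its surplus and,
    # symmetrically (1 - 0.5), to buy roughly half its shortage.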
def generate_orders(self, good):
"""
If the Pop needs a Good to perform production, buy it
If the Pop has surplus Resources, sell them
"""
surplus = self.inventory.surplus(good)
if surplus >= 1: # sell inventory
            # the original only sold one item here
sell_amount = surplus
order = self.create_sell_order(good, surplus)
if order:
# print('{} sells {} {}'.format(self.pop_job.title, sell_amount, good.name))
self.market.sell(order)
else: # buy more
shortage = self.inventory.shortage(good)
free_space = self.inventory.empty_space
if shortage > 0:
if shortage <= free_space:
# enough space for ideal order
limit = shortage
else:
# not enough space for ideal order
limit = math.floor(free_space / shortage)
if limit > 0:
order = self.create_buy_order(good, limit)
if order:
# print('{} buys {} {}'.format(self.pop_job.title, limit, good.name))
self.market.buy(order)
# else:
# print("{} has no shortage of {} (has shortage: {})".format(self.pop_job.title, good.title, shortage))
def update_price_model(self, good, order_type, is_successful, clearing_price=0):
"""
Update the Pop's price model for the given resource
        good (Good) The Good which was ordered
order_type (OrderType) Which kind of Order this was
is_successful (bool) whether or not the Order was successful
clearing_price (float) The price per unit of the good that was ordered
as defined by the Pop which ordered it
"""
SIGNIFICANT = 0.25 # 25% more or less is "significant"
SIG_IMBALANCE = 0.33
LOW_INVENTORY = 0.1 # 10% of ideal inventory = "LOW"
HIGH_INVENTORY = 2.0 # 200% of ideal inventory = "HIGH"
MIN_PRICE = 0.01 # lowest allowed price of a Good
if is_successful:
# add this trade to the observed trading range
self.observed_trading_range[good].append(clearing_price)
public_mean_price = self.market.mean_price(good)
belief = self.price_belief[good]
mean = belief.mean()
wobble = 0.05 # the degree which the Pop should bid outside the belief
# how different the public mean price is from the price belief
delta_to_mean = mean - public_mean_price
if is_successful:
if order_type is OrderType.buy_order and delta_to_mean > SIGNIFICANT:
# this Pop overpaid, shift belief towards mean
belief.low -= delta_to_mean / 2
belief.high -= delta_to_mean / 2
elif order_type is OrderType.sell_order and delta_to_mean < -SIGNIFICANT:
                # this Pop underpaid, shift belief towards mean
belief.low -= delta_to_mean / 2
belief.high -= delta_to_mean / 2
# increase the belief's certainty
belief.low += wobble * mean
belief.high -= wobble * mean
else:
# shift towards mean
belief.low -= delta_to_mean / 2
belief.high -= delta_to_mean / 2
# check for inventory special cases
stocks = self.inventory.get_amount(good)
ideal = self.inventory.get_ideal(good)
# if we're buying and inventory is too low
# meaning we're desperate to buy
if order_type is OrderType.buy_order and stocks < LOW_INVENTORY * ideal:
wobble *= 2
# if we're selling and inventory is too high
# meaning we're desperate to sell
elif order_type is OrderType.sell_order and stocks > HIGH_INVENTORY * ideal:
wobble *= 2
# all other cases
else:
sells = self.market.history.sell_orders.average(good, 1)
buys = self.market.history.buy_orders.average(good, 1)
# TODO: figure out why this is sometimes 0
if sells + buys > 0:
supply_vs_demand = (sells - buys) / (sells + buys)
if supply_vs_demand > SIG_IMBALANCE or supply_vs_demand < -SIG_IMBALANCE:
                    # too much supply? lower the price to sell faster
# too much demand? raise price to buy faster
new_mean = public_mean_price * (1 - supply_vs_demand)
delta_to_mean = mean - new_mean
# shift the price belief to the new price mean
belief.low -= delta_to_mean / 2
belief.high -= delta_to_mean / 2
# decrease belief's certainty since we've just changed it (we could be wrong)
belief.low -= wobble * mean
belief.high += wobble * mean
# make sure the price belief doesn't decrease below the minimum
if belief.low < MIN_PRICE:
belief.low = MIN_PRICE
elif belief.high < MIN_PRICE:
belief.high = MIN_PRICE
# Python utility methods
def __repr__(self):
return "<Pop: id={} type={}>".format(self.id, self.pop_job.title)
def __eq__(self, other):
return self.id == other.id
def __key__(self):
return self.id
def __hash__(self):
return hash(self.__key__())
def export(self):
model = {
'pop_job': self.pop_job.ref(),
'population': self.population,
'population_yesterday': self.population_yesterday,
'inventory': self.inventory.export(),
'money': self.money,
'money_yesterday': self.money_yesterday,
'successful_trades': self.successful_trades,
'failed_trades': self.failed_trades,
'bankrupt_times': self.bankrupt_times,
}
if self.pop_job is PopJob.merchant:
location_id = None
if self.trade_location:
location_id = self.trade_location.id
model.update({
'location': self.location.id,
'trade_location': location_id,
'trade_good': self.trade_good,
'trade_amount': self.trade_amount
})
return model
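
The belief-update rule in update_price_model is the core of the agent behaviour: each trade pulls the Pop's price belief toward the public mean price, and the belief range tightens after successful trades and widens after failed ones. Below is a minimal, self-contained sketch of that rule, written against a small stand-in class rather than historia's real PriceRange; it only illustrates the general shape of the update (the real method also gates the shift on a significance threshold and reacts to inventory levels and the market's supply/demand imbalance).

# Illustrative sketch only -- SimplePriceRange stands in for historia's PriceRange.
import random


class SimplePriceRange:
    def __init__(self, low, high):
        self.low = low
        self.high = high

    def mean(self):
        return (self.low + self.high) / 2

    def random(self):
        # a bid drawn from inside the believed range
        return random.uniform(self.low, self.high)


def update_belief(belief, public_mean_price, is_successful,
                  wobble=0.05, min_price=0.01):
    """Shift the belief toward the public mean; tighten it after a
    successful trade, widen it after a failed one (mirrors the shape of
    Pop.update_price_model, not its full inventory/imbalance logic)."""
    mean = belief.mean()
    delta_to_mean = mean - public_mean_price
    # shift the whole range toward the observed public mean
    belief.low -= delta_to_mean / 2
    belief.high -= delta_to_mean / 2
    if is_successful:
        # increase certainty: pull the bounds together
        belief.low += wobble * mean
        belief.high -= wobble * mean
    else:
        # decrease certainty: push the bounds apart
        belief.low -= wobble * mean
        belief.high += wobble * mean
    # never let the belief drop below the minimum price
    belief.low = max(belief.low, min_price)
    belief.high = max(belief.high, min_price)
    return belief


if __name__ == '__main__':
    belief = SimplePriceRange(4.0, 6.0)
    update_belief(belief, public_mean_price=7.0, is_successful=True)
    print(belief.low, belief.high)  # range has moved toward 7.0 and narrowed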
|
normal
|
{
"blob_id": "887a39f1eeb81e6472938c2451e57866d3ac4a45",
"index": 661,
"step-1": "<mask token>\n\n\nclass Pop(object):\n <mask token>\n\n def __init__(self, province, pop_job, population):\n \"\"\"\n Creates a new Pop.\n manager (Historia)\n province (SecondaryDivision)\n culture (Culture)\n religion (Religion)\n language (Language)\n job (Job)\n \"\"\"\n self.bankrupt_times = 0\n self.home = province\n self.location = province\n self.id = unique_id('po')\n self.population = population\n self.population_yesterday = 0\n self.pop_job = pop_job\n self.money = pop_job.start_money\n self.money_yesterday = 0\n self.bankrupt = False\n self.inventory = Inventory(pop_job.inventory_size)\n self.give_start_inventory()\n self.update_ideal_inventory()\n self.price_belief = {}\n self.observed_trading_range = {}\n self.successful_trades = 0\n self.failed_trades = 0\n for good in Good.all():\n avg_price = self.market.avg_historial_price(good, 15)\n self.observed_trading_range[good] = [avg_price * 0.5, avg_price *\n 1.5]\n self.price_belief[good] = PriceRange(avg_price * 0.5, avg_price *\n 1.5)\n self.trade_location = None\n self.trade_good = None\n self.trade_amount = 0\n self.trading_days = 0\n <mask token>\n <mask token>\n\n @property\n def profit(self):\n \"\"\"Determine today's profit\"\"\"\n return self.money - self.money_yesterday\n\n @property\n def total_trades(self):\n \"\"\"Total number of trades this Pop participated in\"\"\"\n return self.successful_trades + self.failed_trades\n <mask token>\n\n @property\n def is_away(self):\n \"\"\"Is this Pop away from it's home?\"\"\"\n return self.home is not self.location\n <mask token>\n <mask token>\n\n def update_ideal_inventory(self):\n \"\"\"Update ideal inventory\"\"\"\n for good in Good.all():\n self.inventory.set_ideal(good, 0)\n for item in self.pop_job.ideal_inventory:\n self.inventory.set_ideal(item['good'], item['amount'])\n\n def give_start_inventory(self):\n \"\"\"Give the Pop the inventory it needs to do its job\"\"\"\n for item in self.pop_job.start_inventory:\n self.inventory.add(item['good'], item['amount'])\n <mask token>\n <mask token>\n\n def perform_logic(self):\n \"\"\"Depending on PopJob, perform logic (including production)\"\"\"\n logic = self.pop_job.logic(self)\n logic.perform()\n\n def create_buy_order(self, good, limit):\n \"\"\"Create a buy order for a given Good at a determined quantity\"\"\"\n bid_price = self.determine_price_of(good)\n ideal = self.determine_buy_quantity(good)\n quantity_to_buy = limit if ideal > limit else ideal\n if quantity_to_buy > 0:\n return Order(self, OrderType.buy_order, quantity_to_buy,\n bid_price, good)\n return False\n\n def create_sell_order(self, good, limit):\n \"\"\"Create a sell order for a given Good at a determined quantity\"\"\"\n sell_price = self.determine_price_of(good)\n ideal = self.determine_sell_quantity(good)\n quantity_to_sell = limit if ideal < limit else ideal\n if quantity_to_sell > 0:\n return Order(self, OrderType.sell_order, quantity_to_sell,\n sell_price, good)\n return False\n\n def price_belief_for(self, good):\n \"\"\"Gets the price belief this agent has for a particular Good\"\"\"\n if good in self.price_belief:\n return self.price_belief[good]\n <mask token>\n\n def trading_range_extremes(self, good):\n \"\"\"Gets the lowest and highst price of a Good this agent has seen\"\"\"\n trading_range = self.observed_trading_range[good]\n return PriceRange(min(trading_range), max(trading_range))\n\n def determine_sell_quantity(self, good):\n \"\"\"Determine how much inventory goods to sell based on market conditions\"\"\"\n mean = 
self.market.avg_historial_price(good, 15)\n trading_range = self.trading_range_extremes(good)\n favoribility = position_in_range(mean, trading_range.low,\n trading_range.high)\n amount_to_sell = round(favoribility * self.inventory.surplus(good))\n if amount_to_sell < 1:\n amount_to_sell = 1\n return amount_to_sell\n <mask token>\n\n def generate_orders(self, good):\n \"\"\"\n If the Pop needs a Good to perform production, buy it\n If the Pop has surplus Resources, sell them\n \"\"\"\n surplus = self.inventory.surplus(good)\n if surplus >= 1:\n sell_amount = surplus\n order = self.create_sell_order(good, surplus)\n if order:\n self.market.sell(order)\n else:\n shortage = self.inventory.shortage(good)\n free_space = self.inventory.empty_space\n if shortage > 0:\n if shortage <= free_space:\n limit = shortage\n else:\n limit = math.floor(free_space / shortage)\n if limit > 0:\n order = self.create_buy_order(good, limit)\n if order:\n self.market.buy(order)\n <mask token>\n <mask token>\n <mask token>\n\n def __key__(self):\n return self.id\n <mask token>\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass Pop(object):\n <mask token>\n\n def __init__(self, province, pop_job, population):\n \"\"\"\n Creates a new Pop.\n manager (Historia)\n province (SecondaryDivision)\n culture (Culture)\n religion (Religion)\n language (Language)\n job (Job)\n \"\"\"\n self.bankrupt_times = 0\n self.home = province\n self.location = province\n self.id = unique_id('po')\n self.population = population\n self.population_yesterday = 0\n self.pop_job = pop_job\n self.money = pop_job.start_money\n self.money_yesterday = 0\n self.bankrupt = False\n self.inventory = Inventory(pop_job.inventory_size)\n self.give_start_inventory()\n self.update_ideal_inventory()\n self.price_belief = {}\n self.observed_trading_range = {}\n self.successful_trades = 0\n self.failed_trades = 0\n for good in Good.all():\n avg_price = self.market.avg_historial_price(good, 15)\n self.observed_trading_range[good] = [avg_price * 0.5, avg_price *\n 1.5]\n self.price_belief[good] = PriceRange(avg_price * 0.5, avg_price *\n 1.5)\n self.trade_location = None\n self.trade_good = None\n self.trade_amount = 0\n self.trading_days = 0\n <mask token>\n\n @property\n def market(self):\n \"\"\"Get the market instance\"\"\"\n return self.location.market\n\n @property\n def profit(self):\n \"\"\"Determine today's profit\"\"\"\n return self.money - self.money_yesterday\n\n @property\n def total_trades(self):\n \"\"\"Total number of trades this Pop participated in\"\"\"\n return self.successful_trades + self.failed_trades\n\n @property\n def trade_success(self):\n \"\"\"Percent of trades that were successful\"\"\"\n if self.total_trades == 0:\n return 0\n return self.successful_trades / self.total_trades * 100\n\n @property\n def is_away(self):\n \"\"\"Is this Pop away from it's home?\"\"\"\n return self.home is not self.location\n\n def go_to_province(self, province):\n \"\"\"Moves the Pop to another Province\"\"\"\n self.location = province\n\n def decide_trade_plan(self):\n \"\"\"\n Decide what good to trade in and how much.\n Look for the most in demand good, or the most expensive good at the home Province\n Find a province near home province where its the cheapest and there's inventory\n \"\"\"\n self.trade_amount = 5\n most_demanded_goods = self.home.market.goods_demand_ratio(day_range=1)\n most_demanded_goods = sorted(most_demanded_goods.items(), key=lambda\n i: i[1], reverse=True)\n if self.trade_good:\n self.update_ideal_inventory()\n if DEBUG:\n print('Finding a Good to trade:')\n for good, demand in most_demanded_goods:\n if demand > 0:\n price_at_home = self.home.market.mean_price(good)\n if DEBUG:\n print('Good: {}, Demand: {}, Price: ${}'.format(good.\n title, demand, price_at_home))\n neighboring_markets = [p.market for p in self.location.\n owned_neighbors]\n neighboring_markets = [m for m in neighboring_markets if m.\n supply_for(good) > self.trade_amount]\n neighboring_markets.sort(key=lambda m: m.supply_for(good),\n reverse=True)\n if len(neighboring_markets) > 0:\n target = neighboring_markets[0].location\n price_at_target = target.market.mean_price(good)\n if price_at_home > price_at_target:\n offset = 0\n if good is Good.bread:\n offset = 1\n self.inventory.set_ideal(good, self.trade_amount +\n offset)\n self.trade_location = target\n if DEBUG:\n print(\n '\\tTarget: {}, Supply: {}, Price: ${}, Price at home: ${}'\n .format(self.trade_location.name, self.\n trade_location.market.supply_for(good),\n self.trade_location.market.mean_price(good),\n price_at_home))\n self.trade_good = good\n return\n elif 
DEBUG:\n print(\n '\\tPrice is higher at target (home: ${} target: ${})'\n .format(price_at_home, price_at_target))\n elif DEBUG:\n print('\\tNo markets selling {} found'.format(good))\n\n def update_ideal_inventory(self):\n \"\"\"Update ideal inventory\"\"\"\n for good in Good.all():\n self.inventory.set_ideal(good, 0)\n for item in self.pop_job.ideal_inventory:\n self.inventory.set_ideal(item['good'], item['amount'])\n\n def give_start_inventory(self):\n \"\"\"Give the Pop the inventory it needs to do its job\"\"\"\n for item in self.pop_job.start_inventory:\n self.inventory.add(item['good'], item['amount'])\n\n def change_population(self, trade_success):\n \"\"\"Change the population based off the trade\"\"\"\n self.population_yesterday = self.population\n if trade_success:\n self.population += round(self.population * 0.01)\n else:\n self.population -= round(self.population * 0.002)\n\n def handle_bankruptcy(self, pop_job):\n \"\"\"Change job, create money out of thin air, update ideal inventory\"\"\"\n self.pop_job = pop_job\n self.bankrupt_times += 1\n self.money = 2\n self.update_ideal_inventory()\n self.give_start_inventory()\n\n def perform_logic(self):\n \"\"\"Depending on PopJob, perform logic (including production)\"\"\"\n logic = self.pop_job.logic(self)\n logic.perform()\n\n def create_buy_order(self, good, limit):\n \"\"\"Create a buy order for a given Good at a determined quantity\"\"\"\n bid_price = self.determine_price_of(good)\n ideal = self.determine_buy_quantity(good)\n quantity_to_buy = limit if ideal > limit else ideal\n if quantity_to_buy > 0:\n return Order(self, OrderType.buy_order, quantity_to_buy,\n bid_price, good)\n return False\n\n def create_sell_order(self, good, limit):\n \"\"\"Create a sell order for a given Good at a determined quantity\"\"\"\n sell_price = self.determine_price_of(good)\n ideal = self.determine_sell_quantity(good)\n quantity_to_sell = limit if ideal < limit else ideal\n if quantity_to_sell > 0:\n return Order(self, OrderType.sell_order, quantity_to_sell,\n sell_price, good)\n return False\n\n def price_belief_for(self, good):\n \"\"\"Gets the price belief this agent has for a particular Good\"\"\"\n if good in self.price_belief:\n return self.price_belief[good]\n\n def determine_price_of(self, good):\n \"\"\"Determine the price of a particular good\"\"\"\n return self.price_belief_for(good).random()\n\n def trading_range_extremes(self, good):\n \"\"\"Gets the lowest and highst price of a Good this agent has seen\"\"\"\n trading_range = self.observed_trading_range[good]\n return PriceRange(min(trading_range), max(trading_range))\n\n def determine_sell_quantity(self, good):\n \"\"\"Determine how much inventory goods to sell based on market conditions\"\"\"\n mean = self.market.avg_historial_price(good, 15)\n trading_range = self.trading_range_extremes(good)\n favoribility = position_in_range(mean, trading_range.low,\n trading_range.high)\n amount_to_sell = round(favoribility * self.inventory.surplus(good))\n if amount_to_sell < 1:\n amount_to_sell = 1\n return amount_to_sell\n\n def determine_buy_quantity(self, good):\n \"\"\"Determine how much goods to buy based on market conditions\"\"\"\n mean = self.market.avg_historial_price(good, 15)\n trading_range = self.trading_range_extremes(good)\n favoribility = 1 - position_in_range(mean, trading_range.low,\n trading_range.high)\n amount_to_buy = round(favoribility * self.inventory.shortage(good))\n if amount_to_buy < 1:\n amount_to_buy = 1\n return amount_to_buy\n\n def generate_orders(self, 
good):\n \"\"\"\n If the Pop needs a Good to perform production, buy it\n If the Pop has surplus Resources, sell them\n \"\"\"\n surplus = self.inventory.surplus(good)\n if surplus >= 1:\n sell_amount = surplus\n order = self.create_sell_order(good, surplus)\n if order:\n self.market.sell(order)\n else:\n shortage = self.inventory.shortage(good)\n free_space = self.inventory.empty_space\n if shortage > 0:\n if shortage <= free_space:\n limit = shortage\n else:\n limit = math.floor(free_space / shortage)\n if limit > 0:\n order = self.create_buy_order(good, limit)\n if order:\n self.market.buy(order)\n <mask token>\n\n def __repr__(self):\n return '<Pop: id={} type={}>'.format(self.id, self.pop_job.title)\n <mask token>\n\n def __key__(self):\n return self.id\n\n def __hash__(self):\n return hash(self.__key__())\n\n def export(self):\n model = {'pop_job': self.pop_job.ref(), 'population': self.\n population, 'population_yesterday': self.population_yesterday,\n 'inventory': self.inventory.export(), 'money': self.money,\n 'money_yesterday': self.money_yesterday, 'successful_trades':\n self.successful_trades, 'failed_trades': self.failed_trades,\n 'bankrupt_times': self.bankrupt_times}\n if self.pop_job is PopJob.merchant:\n location_id = None\n if self.trade_location:\n location_id = self.trade_location.id\n model.update({'location': self.location.id, 'trade_location':\n location_id, 'trade_good': self.trade_good, 'trade_amount':\n self.trade_amount})\n return model\n",
"step-3": "<mask token>\n\n\nclass Pop(object):\n <mask token>\n\n def __init__(self, province, pop_job, population):\n \"\"\"\n Creates a new Pop.\n manager (Historia)\n province (SecondaryDivision)\n culture (Culture)\n religion (Religion)\n language (Language)\n job (Job)\n \"\"\"\n self.bankrupt_times = 0\n self.home = province\n self.location = province\n self.id = unique_id('po')\n self.population = population\n self.population_yesterday = 0\n self.pop_job = pop_job\n self.money = pop_job.start_money\n self.money_yesterday = 0\n self.bankrupt = False\n self.inventory = Inventory(pop_job.inventory_size)\n self.give_start_inventory()\n self.update_ideal_inventory()\n self.price_belief = {}\n self.observed_trading_range = {}\n self.successful_trades = 0\n self.failed_trades = 0\n for good in Good.all():\n avg_price = self.market.avg_historial_price(good, 15)\n self.observed_trading_range[good] = [avg_price * 0.5, avg_price *\n 1.5]\n self.price_belief[good] = PriceRange(avg_price * 0.5, avg_price *\n 1.5)\n self.trade_location = None\n self.trade_good = None\n self.trade_amount = 0\n self.trading_days = 0\n\n @property\n def social_class(self):\n return self.pop_job.social_class\n\n @property\n def market(self):\n \"\"\"Get the market instance\"\"\"\n return self.location.market\n\n @property\n def profit(self):\n \"\"\"Determine today's profit\"\"\"\n return self.money - self.money_yesterday\n\n @property\n def total_trades(self):\n \"\"\"Total number of trades this Pop participated in\"\"\"\n return self.successful_trades + self.failed_trades\n\n @property\n def trade_success(self):\n \"\"\"Percent of trades that were successful\"\"\"\n if self.total_trades == 0:\n return 0\n return self.successful_trades / self.total_trades * 100\n\n @property\n def is_away(self):\n \"\"\"Is this Pop away from it's home?\"\"\"\n return self.home is not self.location\n\n def go_to_province(self, province):\n \"\"\"Moves the Pop to another Province\"\"\"\n self.location = province\n\n def decide_trade_plan(self):\n \"\"\"\n Decide what good to trade in and how much.\n Look for the most in demand good, or the most expensive good at the home Province\n Find a province near home province where its the cheapest and there's inventory\n \"\"\"\n self.trade_amount = 5\n most_demanded_goods = self.home.market.goods_demand_ratio(day_range=1)\n most_demanded_goods = sorted(most_demanded_goods.items(), key=lambda\n i: i[1], reverse=True)\n if self.trade_good:\n self.update_ideal_inventory()\n if DEBUG:\n print('Finding a Good to trade:')\n for good, demand in most_demanded_goods:\n if demand > 0:\n price_at_home = self.home.market.mean_price(good)\n if DEBUG:\n print('Good: {}, Demand: {}, Price: ${}'.format(good.\n title, demand, price_at_home))\n neighboring_markets = [p.market for p in self.location.\n owned_neighbors]\n neighboring_markets = [m for m in neighboring_markets if m.\n supply_for(good) > self.trade_amount]\n neighboring_markets.sort(key=lambda m: m.supply_for(good),\n reverse=True)\n if len(neighboring_markets) > 0:\n target = neighboring_markets[0].location\n price_at_target = target.market.mean_price(good)\n if price_at_home > price_at_target:\n offset = 0\n if good is Good.bread:\n offset = 1\n self.inventory.set_ideal(good, self.trade_amount +\n offset)\n self.trade_location = target\n if DEBUG:\n print(\n '\\tTarget: {}, Supply: {}, Price: ${}, Price at home: ${}'\n .format(self.trade_location.name, self.\n trade_location.market.supply_for(good),\n 
self.trade_location.market.mean_price(good),\n price_at_home))\n self.trade_good = good\n return\n elif DEBUG:\n print(\n '\\tPrice is higher at target (home: ${} target: ${})'\n .format(price_at_home, price_at_target))\n elif DEBUG:\n print('\\tNo markets selling {} found'.format(good))\n\n def update_ideal_inventory(self):\n \"\"\"Update ideal inventory\"\"\"\n for good in Good.all():\n self.inventory.set_ideal(good, 0)\n for item in self.pop_job.ideal_inventory:\n self.inventory.set_ideal(item['good'], item['amount'])\n\n def give_start_inventory(self):\n \"\"\"Give the Pop the inventory it needs to do its job\"\"\"\n for item in self.pop_job.start_inventory:\n self.inventory.add(item['good'], item['amount'])\n\n def change_population(self, trade_success):\n \"\"\"Change the population based off the trade\"\"\"\n self.population_yesterday = self.population\n if trade_success:\n self.population += round(self.population * 0.01)\n else:\n self.population -= round(self.population * 0.002)\n\n def handle_bankruptcy(self, pop_job):\n \"\"\"Change job, create money out of thin air, update ideal inventory\"\"\"\n self.pop_job = pop_job\n self.bankrupt_times += 1\n self.money = 2\n self.update_ideal_inventory()\n self.give_start_inventory()\n\n def perform_logic(self):\n \"\"\"Depending on PopJob, perform logic (including production)\"\"\"\n logic = self.pop_job.logic(self)\n logic.perform()\n\n def create_buy_order(self, good, limit):\n \"\"\"Create a buy order for a given Good at a determined quantity\"\"\"\n bid_price = self.determine_price_of(good)\n ideal = self.determine_buy_quantity(good)\n quantity_to_buy = limit if ideal > limit else ideal\n if quantity_to_buy > 0:\n return Order(self, OrderType.buy_order, quantity_to_buy,\n bid_price, good)\n return False\n\n def create_sell_order(self, good, limit):\n \"\"\"Create a sell order for a given Good at a determined quantity\"\"\"\n sell_price = self.determine_price_of(good)\n ideal = self.determine_sell_quantity(good)\n quantity_to_sell = limit if ideal < limit else ideal\n if quantity_to_sell > 0:\n return Order(self, OrderType.sell_order, quantity_to_sell,\n sell_price, good)\n return False\n\n def price_belief_for(self, good):\n \"\"\"Gets the price belief this agent has for a particular Good\"\"\"\n if good in self.price_belief:\n return self.price_belief[good]\n\n def determine_price_of(self, good):\n \"\"\"Determine the price of a particular good\"\"\"\n return self.price_belief_for(good).random()\n\n def trading_range_extremes(self, good):\n \"\"\"Gets the lowest and highst price of a Good this agent has seen\"\"\"\n trading_range = self.observed_trading_range[good]\n return PriceRange(min(trading_range), max(trading_range))\n\n def determine_sell_quantity(self, good):\n \"\"\"Determine how much inventory goods to sell based on market conditions\"\"\"\n mean = self.market.avg_historial_price(good, 15)\n trading_range = self.trading_range_extremes(good)\n favoribility = position_in_range(mean, trading_range.low,\n trading_range.high)\n amount_to_sell = round(favoribility * self.inventory.surplus(good))\n if amount_to_sell < 1:\n amount_to_sell = 1\n return amount_to_sell\n\n def determine_buy_quantity(self, good):\n \"\"\"Determine how much goods to buy based on market conditions\"\"\"\n mean = self.market.avg_historial_price(good, 15)\n trading_range = self.trading_range_extremes(good)\n favoribility = 1 - position_in_range(mean, trading_range.low,\n trading_range.high)\n amount_to_buy = round(favoribility * 
self.inventory.shortage(good))\n if amount_to_buy < 1:\n amount_to_buy = 1\n return amount_to_buy\n\n def generate_orders(self, good):\n \"\"\"\n If the Pop needs a Good to perform production, buy it\n If the Pop has surplus Resources, sell them\n \"\"\"\n surplus = self.inventory.surplus(good)\n if surplus >= 1:\n sell_amount = surplus\n order = self.create_sell_order(good, surplus)\n if order:\n self.market.sell(order)\n else:\n shortage = self.inventory.shortage(good)\n free_space = self.inventory.empty_space\n if shortage > 0:\n if shortage <= free_space:\n limit = shortage\n else:\n limit = math.floor(free_space / shortage)\n if limit > 0:\n order = self.create_buy_order(good, limit)\n if order:\n self.market.buy(order)\n\n def update_price_model(self, good, order_type, is_successful,\n clearing_price=0):\n \"\"\"\n Update the Pop's price model for the given resource\n good (Good) The Good which was orderd\n order_type (OrderType) Which kind of Order this was\n is_successful (bool) whether or not the Order was successful\n clearing_price (float) The price per unit of the good that was ordered\n as defined by the Pop which ordered it\n \"\"\"\n SIGNIFICANT = 0.25\n SIG_IMBALANCE = 0.33\n LOW_INVENTORY = 0.1\n HIGH_INVENTORY = 2.0\n MIN_PRICE = 0.01\n if is_successful:\n self.observed_trading_range[good].append(clearing_price)\n public_mean_price = self.market.mean_price(good)\n belief = self.price_belief[good]\n mean = belief.mean()\n wobble = 0.05\n delta_to_mean = mean - public_mean_price\n if is_successful:\n if (order_type is OrderType.buy_order and delta_to_mean >\n SIGNIFICANT):\n belief.low -= delta_to_mean / 2\n belief.high -= delta_to_mean / 2\n elif order_type is OrderType.sell_order and delta_to_mean < -SIGNIFICANT:\n belief.low -= delta_to_mean / 2\n belief.high -= delta_to_mean / 2\n belief.low += wobble * mean\n belief.high -= wobble * mean\n else:\n belief.low -= delta_to_mean / 2\n belief.high -= delta_to_mean / 2\n stocks = self.inventory.get_amount(good)\n ideal = self.inventory.get_ideal(good)\n if (order_type is OrderType.buy_order and stocks < \n LOW_INVENTORY * ideal):\n wobble *= 2\n elif order_type is OrderType.sell_order and stocks > HIGH_INVENTORY * ideal:\n wobble *= 2\n else:\n sells = self.market.history.sell_orders.average(good, 1)\n buys = self.market.history.buy_orders.average(good, 1)\n if sells + buys > 0:\n supply_vs_demand = (sells - buys) / (sells + buys)\n if (supply_vs_demand > SIG_IMBALANCE or \n supply_vs_demand < -SIG_IMBALANCE):\n new_mean = public_mean_price * (1 - supply_vs_demand)\n delta_to_mean = mean - new_mean\n belief.low -= delta_to_mean / 2\n belief.high -= delta_to_mean / 2\n belief.low -= wobble * mean\n belief.high += wobble * mean\n if belief.low < MIN_PRICE:\n belief.low = MIN_PRICE\n elif belief.high < MIN_PRICE:\n belief.high = MIN_PRICE\n\n def __repr__(self):\n return '<Pop: id={} type={}>'.format(self.id, self.pop_job.title)\n <mask token>\n\n def __key__(self):\n return self.id\n\n def __hash__(self):\n return hash(self.__key__())\n\n def export(self):\n model = {'pop_job': self.pop_job.ref(), 'population': self.\n population, 'population_yesterday': self.population_yesterday,\n 'inventory': self.inventory.export(), 'money': self.money,\n 'money_yesterday': self.money_yesterday, 'successful_trades':\n self.successful_trades, 'failed_trades': self.failed_trades,\n 'bankrupt_times': self.bankrupt_times}\n if self.pop_job is PopJob.merchant:\n location_id = None\n if self.trade_location:\n location_id = 
self.trade_location.id\n model.update({'location': self.location.id, 'trade_location':\n location_id, 'trade_good': self.trade_good, 'trade_amount':\n self.trade_amount})\n return model\n",
"step-4": "import math\nfrom historia.utils import unique_id, position_in_range\nfrom historia.pops.models.inventory import Inventory\nfrom historia.economy.enums.resource import Good, NaturalResource\nfrom historia.economy.enums.order_type import OrderType\nfrom historia.economy.models.price_range import PriceRange\nfrom historia.economy.models.order import Order\nfrom historia.pops.enums.pop_job import PopJob\nDEBUG = False\n\n\nclass Pop(object):\n \"\"\"\n A simulated unit of population\n \"\"\"\n\n def __init__(self, province, pop_job, population):\n \"\"\"\n Creates a new Pop.\n manager (Historia)\n province (SecondaryDivision)\n culture (Culture)\n religion (Religion)\n language (Language)\n job (Job)\n \"\"\"\n self.bankrupt_times = 0\n self.home = province\n self.location = province\n self.id = unique_id('po')\n self.population = population\n self.population_yesterday = 0\n self.pop_job = pop_job\n self.money = pop_job.start_money\n self.money_yesterday = 0\n self.bankrupt = False\n self.inventory = Inventory(pop_job.inventory_size)\n self.give_start_inventory()\n self.update_ideal_inventory()\n self.price_belief = {}\n self.observed_trading_range = {}\n self.successful_trades = 0\n self.failed_trades = 0\n for good in Good.all():\n avg_price = self.market.avg_historial_price(good, 15)\n self.observed_trading_range[good] = [avg_price * 0.5, avg_price *\n 1.5]\n self.price_belief[good] = PriceRange(avg_price * 0.5, avg_price *\n 1.5)\n self.trade_location = None\n self.trade_good = None\n self.trade_amount = 0\n self.trading_days = 0\n\n @property\n def social_class(self):\n return self.pop_job.social_class\n\n @property\n def market(self):\n \"\"\"Get the market instance\"\"\"\n return self.location.market\n\n @property\n def profit(self):\n \"\"\"Determine today's profit\"\"\"\n return self.money - self.money_yesterday\n\n @property\n def total_trades(self):\n \"\"\"Total number of trades this Pop participated in\"\"\"\n return self.successful_trades + self.failed_trades\n\n @property\n def trade_success(self):\n \"\"\"Percent of trades that were successful\"\"\"\n if self.total_trades == 0:\n return 0\n return self.successful_trades / self.total_trades * 100\n\n @property\n def is_away(self):\n \"\"\"Is this Pop away from it's home?\"\"\"\n return self.home is not self.location\n\n def go_to_province(self, province):\n \"\"\"Moves the Pop to another Province\"\"\"\n self.location = province\n\n def decide_trade_plan(self):\n \"\"\"\n Decide what good to trade in and how much.\n Look for the most in demand good, or the most expensive good at the home Province\n Find a province near home province where its the cheapest and there's inventory\n \"\"\"\n self.trade_amount = 5\n most_demanded_goods = self.home.market.goods_demand_ratio(day_range=1)\n most_demanded_goods = sorted(most_demanded_goods.items(), key=lambda\n i: i[1], reverse=True)\n if self.trade_good:\n self.update_ideal_inventory()\n if DEBUG:\n print('Finding a Good to trade:')\n for good, demand in most_demanded_goods:\n if demand > 0:\n price_at_home = self.home.market.mean_price(good)\n if DEBUG:\n print('Good: {}, Demand: {}, Price: ${}'.format(good.\n title, demand, price_at_home))\n neighboring_markets = [p.market for p in self.location.\n owned_neighbors]\n neighboring_markets = [m for m in neighboring_markets if m.\n supply_for(good) > self.trade_amount]\n neighboring_markets.sort(key=lambda m: m.supply_for(good),\n reverse=True)\n if len(neighboring_markets) > 0:\n target = neighboring_markets[0].location\n 
price_at_target = target.market.mean_price(good)\n if price_at_home > price_at_target:\n offset = 0\n if good is Good.bread:\n offset = 1\n self.inventory.set_ideal(good, self.trade_amount +\n offset)\n self.trade_location = target\n if DEBUG:\n print(\n '\\tTarget: {}, Supply: {}, Price: ${}, Price at home: ${}'\n .format(self.trade_location.name, self.\n trade_location.market.supply_for(good),\n self.trade_location.market.mean_price(good),\n price_at_home))\n self.trade_good = good\n return\n elif DEBUG:\n print(\n '\\tPrice is higher at target (home: ${} target: ${})'\n .format(price_at_home, price_at_target))\n elif DEBUG:\n print('\\tNo markets selling {} found'.format(good))\n\n def update_ideal_inventory(self):\n \"\"\"Update ideal inventory\"\"\"\n for good in Good.all():\n self.inventory.set_ideal(good, 0)\n for item in self.pop_job.ideal_inventory:\n self.inventory.set_ideal(item['good'], item['amount'])\n\n def give_start_inventory(self):\n \"\"\"Give the Pop the inventory it needs to do its job\"\"\"\n for item in self.pop_job.start_inventory:\n self.inventory.add(item['good'], item['amount'])\n\n def change_population(self, trade_success):\n \"\"\"Change the population based off the trade\"\"\"\n self.population_yesterday = self.population\n if trade_success:\n self.population += round(self.population * 0.01)\n else:\n self.population -= round(self.population * 0.002)\n\n def handle_bankruptcy(self, pop_job):\n \"\"\"Change job, create money out of thin air, update ideal inventory\"\"\"\n self.pop_job = pop_job\n self.bankrupt_times += 1\n self.money = 2\n self.update_ideal_inventory()\n self.give_start_inventory()\n\n def perform_logic(self):\n \"\"\"Depending on PopJob, perform logic (including production)\"\"\"\n logic = self.pop_job.logic(self)\n logic.perform()\n\n def create_buy_order(self, good, limit):\n \"\"\"Create a buy order for a given Good at a determined quantity\"\"\"\n bid_price = self.determine_price_of(good)\n ideal = self.determine_buy_quantity(good)\n quantity_to_buy = limit if ideal > limit else ideal\n if quantity_to_buy > 0:\n return Order(self, OrderType.buy_order, quantity_to_buy,\n bid_price, good)\n return False\n\n def create_sell_order(self, good, limit):\n \"\"\"Create a sell order for a given Good at a determined quantity\"\"\"\n sell_price = self.determine_price_of(good)\n ideal = self.determine_sell_quantity(good)\n quantity_to_sell = limit if ideal < limit else ideal\n if quantity_to_sell > 0:\n return Order(self, OrderType.sell_order, quantity_to_sell,\n sell_price, good)\n return False\n\n def price_belief_for(self, good):\n \"\"\"Gets the price belief this agent has for a particular Good\"\"\"\n if good in self.price_belief:\n return self.price_belief[good]\n\n def determine_price_of(self, good):\n \"\"\"Determine the price of a particular good\"\"\"\n return self.price_belief_for(good).random()\n\n def trading_range_extremes(self, good):\n \"\"\"Gets the lowest and highst price of a Good this agent has seen\"\"\"\n trading_range = self.observed_trading_range[good]\n return PriceRange(min(trading_range), max(trading_range))\n\n def determine_sell_quantity(self, good):\n \"\"\"Determine how much inventory goods to sell based on market conditions\"\"\"\n mean = self.market.avg_historial_price(good, 15)\n trading_range = self.trading_range_extremes(good)\n favoribility = position_in_range(mean, trading_range.low,\n trading_range.high)\n amount_to_sell = round(favoribility * self.inventory.surplus(good))\n if amount_to_sell < 1:\n 
amount_to_sell = 1\n return amount_to_sell\n\n def determine_buy_quantity(self, good):\n \"\"\"Determine how much goods to buy based on market conditions\"\"\"\n mean = self.market.avg_historial_price(good, 15)\n trading_range = self.trading_range_extremes(good)\n favoribility = 1 - position_in_range(mean, trading_range.low,\n trading_range.high)\n amount_to_buy = round(favoribility * self.inventory.shortage(good))\n if amount_to_buy < 1:\n amount_to_buy = 1\n return amount_to_buy\n\n def generate_orders(self, good):\n \"\"\"\n If the Pop needs a Good to perform production, buy it\n If the Pop has surplus Resources, sell them\n \"\"\"\n surplus = self.inventory.surplus(good)\n if surplus >= 1:\n sell_amount = surplus\n order = self.create_sell_order(good, surplus)\n if order:\n self.market.sell(order)\n else:\n shortage = self.inventory.shortage(good)\n free_space = self.inventory.empty_space\n if shortage > 0:\n if shortage <= free_space:\n limit = shortage\n else:\n limit = math.floor(free_space / shortage)\n if limit > 0:\n order = self.create_buy_order(good, limit)\n if order:\n self.market.buy(order)\n\n def update_price_model(self, good, order_type, is_successful,\n clearing_price=0):\n \"\"\"\n Update the Pop's price model for the given resource\n good (Good) The Good which was orderd\n order_type (OrderType) Which kind of Order this was\n is_successful (bool) whether or not the Order was successful\n clearing_price (float) The price per unit of the good that was ordered\n as defined by the Pop which ordered it\n \"\"\"\n SIGNIFICANT = 0.25\n SIG_IMBALANCE = 0.33\n LOW_INVENTORY = 0.1\n HIGH_INVENTORY = 2.0\n MIN_PRICE = 0.01\n if is_successful:\n self.observed_trading_range[good].append(clearing_price)\n public_mean_price = self.market.mean_price(good)\n belief = self.price_belief[good]\n mean = belief.mean()\n wobble = 0.05\n delta_to_mean = mean - public_mean_price\n if is_successful:\n if (order_type is OrderType.buy_order and delta_to_mean >\n SIGNIFICANT):\n belief.low -= delta_to_mean / 2\n belief.high -= delta_to_mean / 2\n elif order_type is OrderType.sell_order and delta_to_mean < -SIGNIFICANT:\n belief.low -= delta_to_mean / 2\n belief.high -= delta_to_mean / 2\n belief.low += wobble * mean\n belief.high -= wobble * mean\n else:\n belief.low -= delta_to_mean / 2\n belief.high -= delta_to_mean / 2\n stocks = self.inventory.get_amount(good)\n ideal = self.inventory.get_ideal(good)\n if (order_type is OrderType.buy_order and stocks < \n LOW_INVENTORY * ideal):\n wobble *= 2\n elif order_type is OrderType.sell_order and stocks > HIGH_INVENTORY * ideal:\n wobble *= 2\n else:\n sells = self.market.history.sell_orders.average(good, 1)\n buys = self.market.history.buy_orders.average(good, 1)\n if sells + buys > 0:\n supply_vs_demand = (sells - buys) / (sells + buys)\n if (supply_vs_demand > SIG_IMBALANCE or \n supply_vs_demand < -SIG_IMBALANCE):\n new_mean = public_mean_price * (1 - supply_vs_demand)\n delta_to_mean = mean - new_mean\n belief.low -= delta_to_mean / 2\n belief.high -= delta_to_mean / 2\n belief.low -= wobble * mean\n belief.high += wobble * mean\n if belief.low < MIN_PRICE:\n belief.low = MIN_PRICE\n elif belief.high < MIN_PRICE:\n belief.high = MIN_PRICE\n\n def __repr__(self):\n return '<Pop: id={} type={}>'.format(self.id, self.pop_job.title)\n\n def __eq__(self, other):\n return self.id == other.id\n\n def __key__(self):\n return self.id\n\n def __hash__(self):\n return hash(self.__key__())\n\n def export(self):\n model = {'pop_job': self.pop_job.ref(), 
'population': self.\n population, 'population_yesterday': self.population_yesterday,\n 'inventory': self.inventory.export(), 'money': self.money,\n 'money_yesterday': self.money_yesterday, 'successful_trades':\n self.successful_trades, 'failed_trades': self.failed_trades,\n 'bankrupt_times': self.bankrupt_times}\n if self.pop_job is PopJob.merchant:\n location_id = None\n if self.trade_location:\n location_id = self.trade_location.id\n model.update({'location': self.location.id, 'trade_location':\n location_id, 'trade_good': self.trade_good, 'trade_amount':\n self.trade_amount})\n return model\n",
"step-5": "import math\nfrom historia.utils import unique_id, position_in_range\nfrom historia.pops.models.inventory import Inventory\nfrom historia.economy.enums.resource import Good, NaturalResource\nfrom historia.economy.enums.order_type import OrderType\nfrom historia.economy.models.price_range import PriceRange\nfrom historia.economy.models.order import Order\nfrom historia.pops.enums.pop_job import PopJob\n\nDEBUG = False\n\nclass Pop(object):\n \"\"\"\n A simulated unit of population\n \"\"\"\n\n def __init__(self, province, pop_job, population):\n \"\"\"\n Creates a new Pop.\n manager (Historia)\n province (SecondaryDivision)\n culture (Culture)\n religion (Religion)\n language (Language)\n job (Job)\n \"\"\"\n self.bankrupt_times = 0\n self.home = province\n self.location = province\n self.id = unique_id('po')\n\n self.population = population\n self.population_yesterday = 0\n\n self.pop_job = pop_job\n\n # ECONOMY\n self.money = pop_job.start_money\n self.money_yesterday = 0\n self.bankrupt = False\n\n # set inventory and ideal amounts\n self.inventory = Inventory(pop_job.inventory_size)\n self.give_start_inventory()\n\n self.update_ideal_inventory()\n\n # a dictionary of Goods to PriceRanges\n # represents the price range the agent considers valid for each Good\n self.price_belief = {}\n\n # a dictionary of Goods to price list\n # represents the prices of the good that the Pop has observed\n # during the time they have been trading\n self.observed_trading_range = {}\n\n self.successful_trades = 0\n self.failed_trades = 0\n\n # make some fake initial data\n for good in Good.all():\n avg_price = self.market.avg_historial_price(good, 15)\n # fake trades\n self.observed_trading_range[good] = [\n avg_price * 0.5,\n avg_price * 1.5\n ]\n # generate fake price belief\n self.price_belief[good] = PriceRange(avg_price * 0.5, avg_price * 1.5)\n\n # Merchant logic\n self.trade_location = None # the province this Pop is traveling to\n self.trade_good = None # what good we're trading in right now\n self.trade_amount = 0 # amount of trade_good we should be trading\n self.trading_days = 0 # number of days waiting to trade\n\n # Generic Pop properties\n @property\n def social_class(self):\n return self.pop_job.social_class\n\n @property\n def market(self):\n \"Get the market instance\"\n return self.location.market\n\n @property\n def profit(self):\n \"Determine today's profit\"\n return self.money - self.money_yesterday\n\n @property\n def total_trades(self):\n \"Total number of trades this Pop participated in\"\n return self.successful_trades + self.failed_trades\n\n @property\n def trade_success(self):\n \"Percent of trades that were successful\"\n if self.total_trades == 0:\n return 0\n return (self.successful_trades / self.total_trades) * 100\n\n @property\n def is_away(self):\n \"Is this Pop away from it's home?\"\n return self.home is not self.location\n\n # Merchant specific logic\n def go_to_province(self, province):\n \"Moves the Pop to another Province\"\n self.location = province\n\n def decide_trade_plan(self):\n \"\"\"\n Decide what good to trade in and how much.\n Look for the most in demand good, or the most expensive good at the home Province\n Find a province near home province where its the cheapest and there's inventory\n \"\"\"\n self.trade_amount = 5\n most_demanded_goods = self.home.market.goods_demand_ratio(day_range=1)\n most_demanded_goods = sorted(most_demanded_goods.items(), key=lambda i: i[1], reverse=True)\n\n # if we already had a trade good, refresh ideal 
inventory\n if self.trade_good:\n self.update_ideal_inventory()\n\n if DEBUG: print(\"Finding a Good to trade:\")\n\n for good, demand in most_demanded_goods:\n if demand > 0:\n # find nearby provinces where this has inventory and the price is lower\n price_at_home = self.home.market.mean_price(good)\n if DEBUG: print(\"Good: {}, Demand: {}, Price: ${}\".format(good.title, demand, price_at_home))\n neighboring_markets = [p.market for p in self.location.owned_neighbors]\n neighboring_markets = [m for m in neighboring_markets if m.supply_for(good) > self.trade_amount]\n neighboring_markets.sort(key=lambda m: m.supply_for(good), reverse=True)\n\n if len(neighboring_markets) > 0:\n # we found places where this good is cheaper and in inventory\n target = neighboring_markets[0].location\n price_at_target = target.market.mean_price(good)\n\n # only trade with prices where we can make money\n if price_at_home > price_at_target:\n offset = 0\n if good is Good.bread:\n offset = 1\n self.inventory.set_ideal(good, self.trade_amount + offset)\n self.trade_location = target\n if DEBUG:\n print(\"\\tTarget: {}, Supply: {}, Price: ${}, Price at home: ${}\".format(\n self.trade_location.name,\n self.trade_location.market.supply_for(good),\n self.trade_location.market.mean_price(good),\n price_at_home)\n )\n self.trade_good = good\n return\n else:\n if DEBUG: print(\"\\tPrice is higher at target (home: ${} target: ${})\".format(price_at_home, price_at_target))\n else:\n if DEBUG: print(\"\\tNo markets selling {} found\".format(good))\n\n\n # Generic economic logic\n def update_ideal_inventory(self):\n \"Update ideal inventory\"\n # reset so that the Pop can sell the inventory it doesn't need\n for good in Good.all():\n self.inventory.set_ideal(good, 0)\n\n # update ideal inventory for new Job\n for item in self.pop_job.ideal_inventory:\n self.inventory.set_ideal(item['good'], item['amount'])\n\n def give_start_inventory(self):\n \"Give the Pop the inventory it needs to do its job\"\n for item in self.pop_job.start_inventory:\n self.inventory.add(item['good'], item['amount'])\n\n def change_population(self, trade_success):\n \"Change the population based off the trade\"\n self.population_yesterday = self.population\n if trade_success:\n self.population += round(self.population * 0.01)\n else:\n self.population -= round(self.population * 0.002)\n\n def handle_bankruptcy(self, pop_job):\n \"Change job, create money out of thin air, update ideal inventory\"\n # TODO: stop creating money out of thin air\n self.pop_job = pop_job\n self.bankrupt_times += 1\n self.money = 2\n self.update_ideal_inventory()\n self.give_start_inventory()\n\n def perform_logic(self):\n \"Depending on PopJob, perform logic (including production)\"\n logic = self.pop_job.logic(self)\n logic.perform()\n\n def create_buy_order(self, good, limit):\n \"Create a buy order for a given Good at a determined quantity\"\n bid_price = self.determine_price_of(good)\n ideal = self.determine_buy_quantity(good)\n\n # can't buy more than limit\n quantity_to_buy = limit if ideal > limit else ideal\n if quantity_to_buy > 0:\n return Order(self, OrderType.buy_order, quantity_to_buy, bid_price, good)\n return False\n\n def create_sell_order(self, good, limit):\n \"Create a sell order for a given Good at a determined quantity\"\n sell_price = self.determine_price_of(good)\n ideal = self.determine_sell_quantity(good)\n\n # can't buy more than limit\n quantity_to_sell = limit if ideal < limit else ideal\n if quantity_to_sell > 0:\n return Order(self, 
OrderType.sell_order, quantity_to_sell, sell_price, good)\n return False\n\n def price_belief_for(self, good):\n \"Gets the price belief this agent has for a particular Good\"\n if good in self.price_belief:\n return self.price_belief[good]\n\n def determine_price_of(self, good):\n \"Determine the price of a particular good\"\n return self.price_belief_for(good).random()\n\n def trading_range_extremes(self, good):\n \"Gets the lowest and highst price of a Good this agent has seen\"\n trading_range = self.observed_trading_range[good]\n return PriceRange(min(trading_range), max(trading_range))\n\n def determine_sell_quantity(self, good):\n \"Determine how much inventory goods to sell based on market conditions\"\n mean = self.market.avg_historial_price(good, 15)\n trading_range = self.trading_range_extremes(good)\n\n favoribility = position_in_range(mean, trading_range.low, trading_range.high)\n amount_to_sell = round(favoribility * self.inventory.surplus(good))\n if amount_to_sell < 1:\n amount_to_sell = 1\n return amount_to_sell\n\n def determine_buy_quantity(self, good):\n \"Determine how much goods to buy based on market conditions\"\n mean = self.market.avg_historial_price(good, 15)\n trading_range = self.trading_range_extremes(good)\n\n favoribility = 1 - position_in_range(mean, trading_range.low, trading_range.high)\n amount_to_buy = round(favoribility * self.inventory.shortage(good))\n if amount_to_buy < 1:\n amount_to_buy = 1\n return amount_to_buy\n\n def generate_orders(self, good):\n \"\"\"\n If the Pop needs a Good to perform production, buy it\n If the Pop has surplus Resources, sell them\n \"\"\"\n surplus = self.inventory.surplus(good)\n if surplus >= 1: # sell inventory\n # the original only old one item here\n sell_amount = surplus\n order = self.create_sell_order(good, surplus)\n if order:\n # print('{} sells {} {}'.format(self.pop_job.title, sell_amount, good.name))\n self.market.sell(order)\n else: # buy more\n shortage = self.inventory.shortage(good)\n free_space = self.inventory.empty_space\n\n if shortage > 0:\n if shortage <= free_space:\n # enough space for ideal order\n limit = shortage\n else:\n # not enough space for ideal order\n limit = math.floor(free_space / shortage)\n\n if limit > 0:\n order = self.create_buy_order(good, limit)\n if order:\n # print('{} buys {} {}'.format(self.pop_job.title, limit, good.name))\n self.market.buy(order)\n # else:\n # print(\"{} has no shortage of {} (has shortage: {})\".format(self.pop_job.title, good.title, shortage))\n\n\n\n def update_price_model(self, good, order_type, is_successful, clearing_price=0):\n \"\"\"\n Update the Pop's price model for the given resource\n good (Good) The Good which was orderd\n order_type (OrderType) Which kind of Order this was\n is_successful (bool) whether or not the Order was successful\n clearing_price (float) The price per unit of the good that was ordered\n as defined by the Pop which ordered it\n \"\"\"\n\n SIGNIFICANT = 0.25 # 25% more or less is \"significant\"\n SIG_IMBALANCE = 0.33\n LOW_INVENTORY = 0.1 # 10% of ideal inventory = \"LOW\"\n HIGH_INVENTORY = 2.0 # 200% of ideal inventory = \"HIGH\"\n MIN_PRICE = 0.01 # lowest allowed price of a Good\n\n if is_successful:\n # add this trade to the observed trading range\n self.observed_trading_range[good].append(clearing_price)\n\n public_mean_price = self.market.mean_price(good)\n belief = self.price_belief[good]\n mean = belief.mean()\n wobble = 0.05 # the degree which the Pop should bid outside the belief\n\n # how different the 
public mean price is from the price belief\n delta_to_mean = mean - public_mean_price\n\n if is_successful:\n if order_type is OrderType.buy_order and delta_to_mean > SIGNIFICANT:\n # this Pop overpaid, shift belief towards mean\n belief.low -= delta_to_mean / 2\n belief.high -= delta_to_mean / 2\n elif order_type is OrderType.sell_order and delta_to_mean < -SIGNIFICANT:\n # this Pop underpaid!, shift belief towards mean\n belief.low -= delta_to_mean / 2\n belief.high -= delta_to_mean / 2\n\n # increase the belief's certainty\n belief.low += wobble * mean\n belief.high -= wobble * mean\n\n else:\n # shift towards mean\n belief.low -= delta_to_mean / 2\n belief.high -= delta_to_mean / 2\n\n # check for inventory special cases\n stocks = self.inventory.get_amount(good)\n ideal = self.inventory.get_ideal(good)\n\n # if we're buying and inventory is too low\n # meaning we're desperate to buy\n if order_type is OrderType.buy_order and stocks < LOW_INVENTORY * ideal:\n wobble *= 2\n\n # if we're selling and inventory is too high\n # meaning we're desperate to sell\n elif order_type is OrderType.sell_order and stocks > HIGH_INVENTORY * ideal:\n wobble *= 2\n # all other cases\n else:\n sells = self.market.history.sell_orders.average(good, 1)\n buys = self.market.history.buy_orders.average(good, 1)\n\n # TODO: figure out why this is sometimes 0\n if sells + buys > 0:\n\n supply_vs_demand = (sells - buys) / (sells + buys)\n\n if supply_vs_demand > SIG_IMBALANCE or supply_vs_demand < -SIG_IMBALANCE:\n # too much supply? lower bid lower to sell faster\n # too much demand? raise price to buy faster\n\n new_mean = public_mean_price * (1 - supply_vs_demand)\n delta_to_mean = mean - new_mean\n\n # shift the price belief to the new price mean\n belief.low -= delta_to_mean / 2\n belief.high -= delta_to_mean / 2\n\n\n # decrease belief's certainty since we've just changed it (we could be wrong)\n belief.low -= wobble * mean\n belief.high += wobble * mean\n\n # make sure the price belief doesn't decrease below the minimum\n if belief.low < MIN_PRICE:\n belief.low = MIN_PRICE\n elif belief.high < MIN_PRICE:\n belief.high = MIN_PRICE\n\n # Python utility methods\n def __repr__(self):\n return \"<Pop: id={} type={}>\".format(self.id, self.pop_job.title)\n\n def __eq__(self, other):\n return self.id == other.id\n\n def __key__(self):\n return self.id\n\n def __hash__(self):\n return hash(self.__key__())\n\n def export(self):\n model = {\n 'pop_job': self.pop_job.ref(),\n 'population': self.population,\n 'population_yesterday': self.population_yesterday,\n 'inventory': self.inventory.export(),\n 'money': self.money,\n 'money_yesterday': self.money_yesterday,\n 'successful_trades': self.successful_trades,\n 'failed_trades': self.failed_trades,\n 'bankrupt_times': self.bankrupt_times,\n }\n if self.pop_job is PopJob.merchant:\n location_id = None\n if self.trade_location:\n location_id = self.trade_location.id\n model.update({\n 'location': self.location.id,\n 'trade_location': location_id,\n 'trade_good': self.trade_good,\n 'trade_amount': self.trade_amount\n })\n return model\n",
"step-ids": [
15,
26,
28,
32,
33
]
}
|
[
15,
26,
28,
32,
33
] |
import torch
from torchvision import datasets, transforms
import numpy as np
import torch.nn as nn
import torch.nn.functional as F
import torchvision.models as models
from PIL import Image
import requests
from io import BytesIO
from net import Net
class predict_guitar():
def __init__(self):
"""Model is loaded on init of the class"""
self.model = Net()
if torch.cuda.is_available():
map_location=torch.device('cuda')
else:
map_location=torch.device('cpu')
# load parameters
self.model.load_state_dict(torch.load('model.pt',
map_location=map_location))
if torch.cuda.is_available():
self.model.cuda()
else:
self.model.cpu()
self.model.eval()
def softmax(self, vector):
"""Softmax function for calculating probs"""
e = np.exp(vector)
return e / e.sum()
def predict(self,url):
"""Generating prediction of image url"""
# get image
response = requests.get(url)
img = Image.open(BytesIO(response.content))
transform = transforms.Compose([transforms.Grayscale(),
transforms.Resize((128,128)),
transforms.ToTensor()])
img = transform(img).unsqueeze(0)
if torch.cuda.is_available():
img = img.cuda()
out = self.model(img)
classes = ['Jazzmaster','Les Paul', 'Mustang', 'PRS SE', 'SG',
'Stratocaster','Telecaster']
if torch.cuda.is_available():
logs = out.cpu().data.numpy()
else:
logs = out.data.numpy()
return [classes[logs.argmax()]]
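
# Usage sketch: a minimal example, assuming the trained weights 'model.pt' and the
# Net class from net.py are available locally; the URL below is a made-up placeholder.
if __name__ == '__main__':
    classifier = predict_guitar()
    example_url = 'https://example.com/guitar.jpg'  # hypothetical image URL
    print(classifier.predict(example_url))  # e.g. ['Stratocaster']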
|
normal
|
{
"blob_id": "8743be809953f59bd14431e509042c4c51d9fab4",
"index": 4175,
"step-1": "<mask token>\n\n\nclass predict_guitar:\n <mask token>\n\n def softmax(self, vector):\n \"\"\"Softmax function for calculating probs\"\"\"\n e = np.exp(vector)\n return e / e.sum()\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass predict_guitar:\n <mask token>\n\n def softmax(self, vector):\n \"\"\"Softmax function for calculating probs\"\"\"\n e = np.exp(vector)\n return e / e.sum()\n\n def predict(self, url):\n \"\"\"Generating prediction of image url\"\"\"\n response = requests.get(url)\n img = Image.open(BytesIO(response.content))\n transform = transforms.Compose([transforms.Grayscale(), transforms.\n Resize((128, 128)), transforms.ToTensor()])\n img = transform(img).unsqueeze(0)\n if torch.cuda.is_available():\n img = img.cuda()\n out = self.model(img)\n classes = ['Jazzmaster', 'Les Paul', 'Mustang', 'PRS SE', 'SG',\n 'Stratocaster', 'Telecaster']\n if torch.cuda.is_available():\n logs = out.cpu().data.numpy()\n else:\n logs = out.data.numpy()\n return [classes[logs.argmax()]]\n",
"step-3": "<mask token>\n\n\nclass predict_guitar:\n\n def __init__(self):\n \"\"\"Model is loaded on init of the class\"\"\"\n self.model = Net()\n if torch.cuda.is_available():\n map_location = torch.device('cuda')\n else:\n map_location = torch.device('cpu')\n self.model.load_state_dict(torch.load('model.pt', map_location=\n map_location))\n if torch.cuda.is_available():\n self.model.cuda()\n else:\n self.model.cpu()\n self.model.eval()\n\n def softmax(self, vector):\n \"\"\"Softmax function for calculating probs\"\"\"\n e = np.exp(vector)\n return e / e.sum()\n\n def predict(self, url):\n \"\"\"Generating prediction of image url\"\"\"\n response = requests.get(url)\n img = Image.open(BytesIO(response.content))\n transform = transforms.Compose([transforms.Grayscale(), transforms.\n Resize((128, 128)), transforms.ToTensor()])\n img = transform(img).unsqueeze(0)\n if torch.cuda.is_available():\n img = img.cuda()\n out = self.model(img)\n classes = ['Jazzmaster', 'Les Paul', 'Mustang', 'PRS SE', 'SG',\n 'Stratocaster', 'Telecaster']\n if torch.cuda.is_available():\n logs = out.cpu().data.numpy()\n else:\n logs = out.data.numpy()\n return [classes[logs.argmax()]]\n",
"step-4": "import torch\nfrom torchvision import datasets, transforms\nimport numpy as np\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torchvision.models as models\nfrom PIL import Image\nimport requests\nfrom io import BytesIO\nfrom net import Net\n\n\nclass predict_guitar:\n\n def __init__(self):\n \"\"\"Model is loaded on init of the class\"\"\"\n self.model = Net()\n if torch.cuda.is_available():\n map_location = torch.device('cuda')\n else:\n map_location = torch.device('cpu')\n self.model.load_state_dict(torch.load('model.pt', map_location=\n map_location))\n if torch.cuda.is_available():\n self.model.cuda()\n else:\n self.model.cpu()\n self.model.eval()\n\n def softmax(self, vector):\n \"\"\"Softmax function for calculating probs\"\"\"\n e = np.exp(vector)\n return e / e.sum()\n\n def predict(self, url):\n \"\"\"Generating prediction of image url\"\"\"\n response = requests.get(url)\n img = Image.open(BytesIO(response.content))\n transform = transforms.Compose([transforms.Grayscale(), transforms.\n Resize((128, 128)), transforms.ToTensor()])\n img = transform(img).unsqueeze(0)\n if torch.cuda.is_available():\n img = img.cuda()\n out = self.model(img)\n classes = ['Jazzmaster', 'Les Paul', 'Mustang', 'PRS SE', 'SG',\n 'Stratocaster', 'Telecaster']\n if torch.cuda.is_available():\n logs = out.cpu().data.numpy()\n else:\n logs = out.data.numpy()\n return [classes[logs.argmax()]]\n",
"step-5": "import torch\nfrom torchvision import datasets, transforms\nimport numpy as np\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torchvision.models as models\nfrom PIL import Image\nimport requests\nfrom io import BytesIO\nfrom net import Net\n\nclass predict_guitar():\n\n def __init__(self):\n \"\"\"Model is loaded on init of the class\"\"\"\n \n self.model = Net()\n\n if torch.cuda.is_available():\n map_location=torch.device('cuda')\n else:\n map_location=torch.device('cpu')\n\n # load parameters\n self.model.load_state_dict(torch.load('model.pt',\n map_location=map_location)) \n \n if torch.cuda.is_available():\n self.model.cuda()\n else:\n self.model.cpu()\n \n self.model.eval()\n\n def softmax(self, vector):\n \"\"\"Softmax function for calculating probs\"\"\"\n e = np.exp(vector)\n return e / e.sum()\n\n def predict(self,url):\n \"\"\"Generating prediction of image url\"\"\"\n\n # get image\n response = requests.get(url)\n \n img = Image.open(BytesIO(response.content))\n\n transform = transforms.Compose([transforms.Grayscale(),\n transforms.Resize((128,128)),\n transforms.ToTensor()])\n\n img = transform(img).unsqueeze(0)\n\n if torch.cuda.is_available(): \n img = img.cuda() \n\n out = self.model(img)\n\n classes = ['Jazzmaster','Les Paul', 'Mustang', 'PRS SE', 'SG',\n 'Stratocaster','Telecaster']\n\n if torch.cuda.is_available():\n\n logs = out.cpu().data.numpy()\n \n else:\n\n logs = out.data.numpy()\n \n return [classes[logs.argmax()]]\n",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
#!/usr/bin/env python
import sys
def solve(n, k):
wrap = 2 ** n
snaps_that_matter = k % wrap
return snaps_that_matter == wrap - 1
def main():
lines = sys.stdin.readlines()
T = int(lines[0])
for i, line in enumerate(lines[1:]):
N, K = line.split(' ')
on = solve(int(N), int(K))
str_on = 'OFF'
if on:
str_on = 'ON'
        print('Case #%d: %s' % (i+1, str_on))
if __name__ == '__main__': main()
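# Sanity check sketch: solve() is periodic in k with period 2**n and is True only
# when k % 2**n == 2**n - 1, e.g. for n = 2:
#   assert solve(2, 3) and solve(2, 7) and not solve(2, 4)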
|
normal
|
{
"blob_id": "1803f634c8e833f4a92ae35bcfafb04dfd1d2305",
"index": 7661,
"step-1": "#!/usr/bin/env python\n\nimport sys\n\ndef solve(n, k):\n wrap = 2 ** n\n snaps_that_matter = k % wrap\n return snaps_that_matter == wrap - 1\n\ndef main():\n lines = sys.stdin.readlines()\n T = int(lines[0])\n \n for i, line in enumerate(lines[1:]):\n N, K = line.split(' ')\n on = solve(int(N), int(K))\n str_on = 'OFF'\n if on:\n str_on = 'ON'\n print 'Case #%d: %s' % (i+1, str_on)\n\nif __name__ == '__main__': main()\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
# -*- coding: utf-8 -*-
"""
Created on Fri Nov 2 16:07:25 2018
@author: Yigao
"""
import re
from nltk.tokenize import TweetTokenizer
from nltk.corpus import stopwords
from wordcloud import WordCloud
import matplotlib.pyplot as plt
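# Note: stopwords.words() below needs the NLTK stopwords corpus to be installed
# once beforehand, e.g. via:
#   import nltk; nltk.download('stopwords')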
## create a tokenizer
hfilename = "file.txt"
linecount=0
hashcount=0
wordcount=0
BagOfWords=[]
BagOfHashes=[]
BagOfLinks=[]
with open(hfilename, "r") as file:
for line in file:
#print(line,"\n")
tweetSplitter = TweetTokenizer(strip_handles=True, reduce_len=True)
WordList=tweetSplitter.tokenize(line)
#WordList2=word_tokenize(line)
#linecount=linecount+1
#print(WordList)
#print(len(WordList))
#print(WordList[0])
#print(WordList2)
#print(len(WordList2))
#print(WordList2[3:6])
#print("NEXT..........\n")
regex1=re.compile('^#.+')
regex2=re.compile('[^\W\d]') #no numbers
regex3=re.compile('^http*')
regex4=re.compile('.+\..+')
for item in WordList:
if(len(item)>2):
if((re.match(regex1,item))):
#print(item)
newitem=item[1:] #remove the hash
BagOfHashes.append(newitem)
hashcount=hashcount+1
elif(re.match(regex2,item)):
if(re.match(regex3,item) or re.match(regex4,item)):
BagOfLinks.append(item)
else:
BagOfWords.append(item)
wordcount=wordcount+1
else:
pass
else:
pass
#print(linecount)
#print(BagOfWords)
#print(BagOfHashes)
#print(BagOfLinks)
BigBag=BagOfWords+BagOfHashes
## create Word Cloud
IgnoreThese=[] #other irrelevant words
filtered_words = [] #list of words ready for wordcloud
for word in BigBag:
if (word.lower() not in stopwords.words()) and (word.lower() not in IgnoreThese):
filtered_words.append(word.lower())
word_string = " ".join(filtered_words)
with open("wordcloud.txt", "w") as f:
f.write(word_string)
with open("tableau.txt", "w") as f:
for s in filtered_words:
f.write("%s\n" % s)
TwitterWordCloud = WordCloud(width = 800, height = 800, background_color = "white", stopwords = None,
min_font_size = 10).generate(word_string)
plt.figure(figsize = (8,8), facecolor = None)
plt.imshow(TwitterWordCloud)
plt.axis("off")
plt.tight_layout(pad = 0)
plt.show()
|
normal
|
{
"blob_id": "fd04f6f4a03fdbe40e400d04e5759ef9ef30f974",
"index": 6634,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nwith open(hfilename, 'r') as file:\n for line in file:\n tweetSplitter = TweetTokenizer(strip_handles=True, reduce_len=True)\n WordList = tweetSplitter.tokenize(line)\n regex1 = re.compile('^#.+')\n regex2 = re.compile('[^\\\\W\\\\d]')\n regex3 = re.compile('^http*')\n regex4 = re.compile('.+\\\\..+')\n for item in WordList:\n if len(item) > 2:\n if re.match(regex1, item):\n newitem = item[1:]\n BagOfHashes.append(newitem)\n hashcount = hashcount + 1\n elif re.match(regex2, item):\n if re.match(regex3, item) or re.match(regex4, item):\n BagOfLinks.append(item)\n else:\n BagOfWords.append(item)\n wordcount = wordcount + 1\n else:\n pass\n else:\n pass\n<mask token>\nfor word in BigBag:\n if word.lower() not in stopwords.words() and word.lower(\n ) not in IgnoreThese:\n filtered_words.append(word.lower())\n<mask token>\nwith open('wordcloud.txt', 'w') as f:\n f.write(word_string)\nwith open('tableau.txt', 'w') as f:\n for s in filtered_words:\n f.write('%s\\n' % s)\n<mask token>\nplt.figure(figsize=(8, 8), facecolor=None)\nplt.imshow(TwitterWordCloud)\nplt.axis('off')\nplt.tight_layout(pad=0)\nplt.show()\n",
"step-3": "<mask token>\nhfilename = 'file.txt'\nlinecount = 0\nhashcount = 0\nwordcount = 0\nBagOfWords = []\nBagOfHashes = []\nBagOfLinks = []\nwith open(hfilename, 'r') as file:\n for line in file:\n tweetSplitter = TweetTokenizer(strip_handles=True, reduce_len=True)\n WordList = tweetSplitter.tokenize(line)\n regex1 = re.compile('^#.+')\n regex2 = re.compile('[^\\\\W\\\\d]')\n regex3 = re.compile('^http*')\n regex4 = re.compile('.+\\\\..+')\n for item in WordList:\n if len(item) > 2:\n if re.match(regex1, item):\n newitem = item[1:]\n BagOfHashes.append(newitem)\n hashcount = hashcount + 1\n elif re.match(regex2, item):\n if re.match(regex3, item) or re.match(regex4, item):\n BagOfLinks.append(item)\n else:\n BagOfWords.append(item)\n wordcount = wordcount + 1\n else:\n pass\n else:\n pass\nBigBag = BagOfWords + BagOfHashes\nIgnoreThese = []\nfiltered_words = []\nfor word in BigBag:\n if word.lower() not in stopwords.words() and word.lower(\n ) not in IgnoreThese:\n filtered_words.append(word.lower())\nword_string = ' '.join(filtered_words)\nwith open('wordcloud.txt', 'w') as f:\n f.write(word_string)\nwith open('tableau.txt', 'w') as f:\n for s in filtered_words:\n f.write('%s\\n' % s)\nTwitterWordCloud = WordCloud(width=800, height=800, background_color=\n 'white', stopwords=None, min_font_size=10).generate(word_string)\nplt.figure(figsize=(8, 8), facecolor=None)\nplt.imshow(TwitterWordCloud)\nplt.axis('off')\nplt.tight_layout(pad=0)\nplt.show()\n",
"step-4": "<mask token>\nimport re\nfrom nltk.tokenize import TweetTokenizer\nfrom nltk.corpus import stopwords\nfrom wordcloud import WordCloud\nimport matplotlib.pyplot as plt\nhfilename = 'file.txt'\nlinecount = 0\nhashcount = 0\nwordcount = 0\nBagOfWords = []\nBagOfHashes = []\nBagOfLinks = []\nwith open(hfilename, 'r') as file:\n for line in file:\n tweetSplitter = TweetTokenizer(strip_handles=True, reduce_len=True)\n WordList = tweetSplitter.tokenize(line)\n regex1 = re.compile('^#.+')\n regex2 = re.compile('[^\\\\W\\\\d]')\n regex3 = re.compile('^http*')\n regex4 = re.compile('.+\\\\..+')\n for item in WordList:\n if len(item) > 2:\n if re.match(regex1, item):\n newitem = item[1:]\n BagOfHashes.append(newitem)\n hashcount = hashcount + 1\n elif re.match(regex2, item):\n if re.match(regex3, item) or re.match(regex4, item):\n BagOfLinks.append(item)\n else:\n BagOfWords.append(item)\n wordcount = wordcount + 1\n else:\n pass\n else:\n pass\nBigBag = BagOfWords + BagOfHashes\nIgnoreThese = []\nfiltered_words = []\nfor word in BigBag:\n if word.lower() not in stopwords.words() and word.lower(\n ) not in IgnoreThese:\n filtered_words.append(word.lower())\nword_string = ' '.join(filtered_words)\nwith open('wordcloud.txt', 'w') as f:\n f.write(word_string)\nwith open('tableau.txt', 'w') as f:\n for s in filtered_words:\n f.write('%s\\n' % s)\nTwitterWordCloud = WordCloud(width=800, height=800, background_color=\n 'white', stopwords=None, min_font_size=10).generate(word_string)\nplt.figure(figsize=(8, 8), facecolor=None)\nplt.imshow(TwitterWordCloud)\nplt.axis('off')\nplt.tight_layout(pad=0)\nplt.show()\n",
"step-5": "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Nov 2 16:07:25 2018\n\n@author: Yigao\n\"\"\"\n\nimport re\nfrom nltk.tokenize import TweetTokenizer\nfrom nltk.corpus import stopwords\nfrom wordcloud import WordCloud\nimport matplotlib.pyplot as plt\n\n## create a tokenizer\nhfilename = \"file.txt\"\nlinecount=0\nhashcount=0\nwordcount=0\nBagOfWords=[]\nBagOfHashes=[]\nBagOfLinks=[]\nwith open(hfilename, \"r\") as file:\n for line in file:\n #print(line,\"\\n\")\n tweetSplitter = TweetTokenizer(strip_handles=True, reduce_len=True)\n WordList=tweetSplitter.tokenize(line)\n #WordList2=word_tokenize(line)\n #linecount=linecount+1\n #print(WordList)\n #print(len(WordList))\n #print(WordList[0])\n #print(WordList2)\n #print(len(WordList2))\n #print(WordList2[3:6])\n #print(\"NEXT..........\\n\")\n regex1=re.compile('^#.+')\n regex2=re.compile('[^\\W\\d]') #no numbers\n regex3=re.compile('^http*')\n regex4=re.compile('.+\\..+')\n for item in WordList:\n if(len(item)>2):\n if((re.match(regex1,item))):\n #print(item)\n newitem=item[1:] #remove the hash\n BagOfHashes.append(newitem)\n hashcount=hashcount+1\n elif(re.match(regex2,item)):\n if(re.match(regex3,item) or re.match(regex4,item)):\n BagOfLinks.append(item)\n else:\n BagOfWords.append(item)\n wordcount=wordcount+1\n else:\n pass\n else:\n pass\n#print(linecount) \n#print(BagOfWords)\n#print(BagOfHashes)\n#print(BagOfLinks)\nBigBag=BagOfWords+BagOfHashes\n\n## create Word Cloud\nIgnoreThese=[] #other irrelevant words\nfiltered_words = [] #list of words ready for wordcloud\nfor word in BigBag:\n if (word.lower() not in stopwords.words()) and (word.lower() not in IgnoreThese):\n filtered_words.append(word.lower())\nword_string = \" \".join(filtered_words)\nwith open(\"wordcloud.txt\", \"w\") as f:\n f.write(word_string)\nwith open(\"tableau.txt\", \"w\") as f:\n for s in filtered_words:\n f.write(\"%s\\n\" % s)\nTwitterWordCloud = WordCloud(width = 800, height = 800, background_color = \"white\", stopwords = None,\n min_font_size = 10).generate(word_string)\nplt.figure(figsize = (8,8), facecolor = None)\nplt.imshow(TwitterWordCloud)\nplt.axis(\"off\")\nplt.tight_layout(pad = 0)\nplt.show()",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
from time import perf_counter_ns
from anthony.utility.distance import compare, compare_info
from icecream import ic
start = perf_counter_ns()
ic(compare("tranpsosed", "transposed"))
print(f"Example Time: {(perf_counter_ns() - start)/1e+9} Seconds")
ic(compare_info("momther", "mother"))
|
normal
|
{
"blob_id": "98b0e42f3ed1a234f63c4d3aa76ceb9fce7c041d",
"index": 3631,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nic(compare('tranpsosed', 'transposed'))\nprint(f'Example Time: {(perf_counter_ns() - start) / 1000000000.0} Seconds')\nic(compare_info('momther', 'mother'))\n",
"step-3": "<mask token>\nstart = perf_counter_ns()\nic(compare('tranpsosed', 'transposed'))\nprint(f'Example Time: {(perf_counter_ns() - start) / 1000000000.0} Seconds')\nic(compare_info('momther', 'mother'))\n",
"step-4": "from time import perf_counter_ns\nfrom anthony.utility.distance import compare, compare_info\nfrom icecream import ic\nstart = perf_counter_ns()\nic(compare('tranpsosed', 'transposed'))\nprint(f'Example Time: {(perf_counter_ns() - start) / 1000000000.0} Seconds')\nic(compare_info('momther', 'mother'))\n",
"step-5": "from time import perf_counter_ns\n\nfrom anthony.utility.distance import compare, compare_info\nfrom icecream import ic\n\nstart = perf_counter_ns()\nic(compare(\"tranpsosed\", \"transposed\"))\nprint(f\"Example Time: {(perf_counter_ns() - start)/1e+9} Seconds\")\n\nic(compare_info(\"momther\", \"mother\"))\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
# This file is Copyright (c) 2020 LambdaConcept <[email protected]>
# License: BSD
from math import log2
from nmigen import *
from nmigen.utils import log2_int
from nmigen_soc import wishbone
from nmigen_soc.memory import MemoryMap
from lambdasoc.periph import Peripheral
class gramWishbone(Peripheral, Elaboratable):
def __init__(self, core, data_width=32, granularity=8):
super().__init__(name="wishbone")
self.native_port = core.crossbar.get_native_port()
self.ratio = self.native_port.data_width//data_width
addr_width = log2_int(core.size//(self.native_port.data_width//data_width))
self.bus = wishbone.Interface(addr_width=addr_width+log2_int(self.ratio),
data_width=data_width, granularity=granularity)
map = MemoryMap(addr_width=addr_width+log2_int(self.ratio)+log2_int(data_width//granularity),
data_width=granularity)
self.bus.memory_map = map
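
    # Worked example of the geometry above (hypothetical sizes): with a 128-bit
    # native port, a 32-bit Wishbone bus and 8-bit granularity, ratio = 128 // 32 = 4.
    # If core.size // ratio == 2**16, then bus.addr_width = 16 + log2_int(4) = 18
    # and the memory map is 18 + log2_int(32 // 8) = 20 bits wide.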
def elaborate(self, platform):
m = Module()
# Write datapath
m.d.comb += [
self.native_port.wdata.valid.eq(self.bus.cyc & self.bus.stb & self.bus.we),
]
ratio_bitmask = Repl(1, log2_int(self.ratio))
sel = Signal.like(self.bus.sel)
with m.If(self.bus.sel == 0):
m.d.comb += sel.eq(Repl(1, sel.width))
with m.Else():
m.d.comb += sel.eq(self.bus.sel)
with m.Switch(self.bus.adr & ratio_bitmask):
for i in range(self.ratio):
with m.Case(i):
m.d.comb += self.native_port.wdata.we.eq(Repl(sel, self.bus.granularity//8) << (self.ratio*i))
with m.Switch(self.bus.adr & ratio_bitmask):
for i in range(self.ratio):
with m.Case(i):
m.d.comb += self.native_port.wdata.data.eq(self.bus.dat_w << (self.bus.data_width*i))
# Read datapath
m.d.comb += [
self.native_port.rdata.ready.eq(1),
]
with m.Switch(self.bus.adr & ratio_bitmask):
for i in range(self.ratio):
with m.Case(i):
m.d.comb += self.bus.dat_r.eq(self.native_port.rdata.data >> (self.bus.data_width*i))
with m.FSM():
with m.State("Send-Cmd"):
m.d.comb += [
self.native_port.cmd.valid.eq(self.bus.cyc & self.bus.stb),
self.native_port.cmd.we.eq(self.bus.we),
self.native_port.cmd.addr.eq(self.bus.adr >> log2_int(self.bus.data_width//self.bus.granularity)),
]
with m.If(self.native_port.cmd.valid & self.native_port.cmd.ready):
with m.If(self.bus.we):
m.next = "Wait-Write"
with m.Else():
m.next = "Wait-Read"
with m.State("Wait-Read"):
with m.If(self.native_port.rdata.valid):
m.d.comb += self.bus.ack.eq(1)
m.next = "Send-Cmd"
with m.State("Wait-Write"):
with m.If(self.native_port.wdata.ready):
m.d.comb += self.bus.ack.eq(1)
m.next = "Send-Cmd"
return m
|
normal
|
{
"blob_id": "3775ba538d6fab13e35e2f0761a1cacbe087f339",
"index": 4723,
"step-1": "<mask token>\n\n\nclass gramWishbone(Peripheral, Elaboratable):\n <mask token>\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass gramWishbone(Peripheral, Elaboratable):\n\n def __init__(self, core, data_width=32, granularity=8):\n super().__init__(name='wishbone')\n self.native_port = core.crossbar.get_native_port()\n self.ratio = self.native_port.data_width // data_width\n addr_width = log2_int(core.size // (self.native_port.data_width //\n data_width))\n self.bus = wishbone.Interface(addr_width=addr_width + log2_int(self\n .ratio), data_width=data_width, granularity=granularity)\n map = MemoryMap(addr_width=addr_width + log2_int(self.ratio) +\n log2_int(data_width // granularity), data_width=granularity)\n self.bus.memory_map = map\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass gramWishbone(Peripheral, Elaboratable):\n\n def __init__(self, core, data_width=32, granularity=8):\n super().__init__(name='wishbone')\n self.native_port = core.crossbar.get_native_port()\n self.ratio = self.native_port.data_width // data_width\n addr_width = log2_int(core.size // (self.native_port.data_width //\n data_width))\n self.bus = wishbone.Interface(addr_width=addr_width + log2_int(self\n .ratio), data_width=data_width, granularity=granularity)\n map = MemoryMap(addr_width=addr_width + log2_int(self.ratio) +\n log2_int(data_width // granularity), data_width=granularity)\n self.bus.memory_map = map\n\n def elaborate(self, platform):\n m = Module()\n m.d.comb += [self.native_port.wdata.valid.eq(self.bus.cyc & self.\n bus.stb & self.bus.we)]\n ratio_bitmask = Repl(1, log2_int(self.ratio))\n sel = Signal.like(self.bus.sel)\n with m.If(self.bus.sel == 0):\n m.d.comb += sel.eq(Repl(1, sel.width))\n with m.Else():\n m.d.comb += sel.eq(self.bus.sel)\n with m.Switch(self.bus.adr & ratio_bitmask):\n for i in range(self.ratio):\n with m.Case(i):\n m.d.comb += self.native_port.wdata.we.eq(Repl(sel, self\n .bus.granularity // 8) << self.ratio * i)\n with m.Switch(self.bus.adr & ratio_bitmask):\n for i in range(self.ratio):\n with m.Case(i):\n m.d.comb += self.native_port.wdata.data.eq(self.bus.\n dat_w << self.bus.data_width * i)\n m.d.comb += [self.native_port.rdata.ready.eq(1)]\n with m.Switch(self.bus.adr & ratio_bitmask):\n for i in range(self.ratio):\n with m.Case(i):\n m.d.comb += self.bus.dat_r.eq(self.native_port.rdata.\n data >> self.bus.data_width * i)\n with m.FSM():\n with m.State('Send-Cmd'):\n m.d.comb += [self.native_port.cmd.valid.eq(self.bus.cyc &\n self.bus.stb), self.native_port.cmd.we.eq(self.bus.we),\n self.native_port.cmd.addr.eq(self.bus.adr >> log2_int(\n self.bus.data_width // self.bus.granularity))]\n with m.If(self.native_port.cmd.valid & self.native_port.cmd\n .ready):\n with m.If(self.bus.we):\n m.next = 'Wait-Write'\n with m.Else():\n m.next = 'Wait-Read'\n with m.State('Wait-Read'):\n with m.If(self.native_port.rdata.valid):\n m.d.comb += self.bus.ack.eq(1)\n m.next = 'Send-Cmd'\n with m.State('Wait-Write'):\n with m.If(self.native_port.wdata.ready):\n m.d.comb += self.bus.ack.eq(1)\n m.next = 'Send-Cmd'\n return m\n",
"step-4": "from math import log2\nfrom nmigen import *\nfrom nmigen.utils import log2_int\nfrom nmigen_soc import wishbone\nfrom nmigen_soc.memory import MemoryMap\nfrom lambdasoc.periph import Peripheral\n\n\nclass gramWishbone(Peripheral, Elaboratable):\n\n def __init__(self, core, data_width=32, granularity=8):\n super().__init__(name='wishbone')\n self.native_port = core.crossbar.get_native_port()\n self.ratio = self.native_port.data_width // data_width\n addr_width = log2_int(core.size // (self.native_port.data_width //\n data_width))\n self.bus = wishbone.Interface(addr_width=addr_width + log2_int(self\n .ratio), data_width=data_width, granularity=granularity)\n map = MemoryMap(addr_width=addr_width + log2_int(self.ratio) +\n log2_int(data_width // granularity), data_width=granularity)\n self.bus.memory_map = map\n\n def elaborate(self, platform):\n m = Module()\n m.d.comb += [self.native_port.wdata.valid.eq(self.bus.cyc & self.\n bus.stb & self.bus.we)]\n ratio_bitmask = Repl(1, log2_int(self.ratio))\n sel = Signal.like(self.bus.sel)\n with m.If(self.bus.sel == 0):\n m.d.comb += sel.eq(Repl(1, sel.width))\n with m.Else():\n m.d.comb += sel.eq(self.bus.sel)\n with m.Switch(self.bus.adr & ratio_bitmask):\n for i in range(self.ratio):\n with m.Case(i):\n m.d.comb += self.native_port.wdata.we.eq(Repl(sel, self\n .bus.granularity // 8) << self.ratio * i)\n with m.Switch(self.bus.adr & ratio_bitmask):\n for i in range(self.ratio):\n with m.Case(i):\n m.d.comb += self.native_port.wdata.data.eq(self.bus.\n dat_w << self.bus.data_width * i)\n m.d.comb += [self.native_port.rdata.ready.eq(1)]\n with m.Switch(self.bus.adr & ratio_bitmask):\n for i in range(self.ratio):\n with m.Case(i):\n m.d.comb += self.bus.dat_r.eq(self.native_port.rdata.\n data >> self.bus.data_width * i)\n with m.FSM():\n with m.State('Send-Cmd'):\n m.d.comb += [self.native_port.cmd.valid.eq(self.bus.cyc &\n self.bus.stb), self.native_port.cmd.we.eq(self.bus.we),\n self.native_port.cmd.addr.eq(self.bus.adr >> log2_int(\n self.bus.data_width // self.bus.granularity))]\n with m.If(self.native_port.cmd.valid & self.native_port.cmd\n .ready):\n with m.If(self.bus.we):\n m.next = 'Wait-Write'\n with m.Else():\n m.next = 'Wait-Read'\n with m.State('Wait-Read'):\n with m.If(self.native_port.rdata.valid):\n m.d.comb += self.bus.ack.eq(1)\n m.next = 'Send-Cmd'\n with m.State('Wait-Write'):\n with m.If(self.native_port.wdata.ready):\n m.d.comb += self.bus.ack.eq(1)\n m.next = 'Send-Cmd'\n return m\n",
"step-5": "# This file is Copyright (c) 2020 LambdaConcept <[email protected]>\n# License: BSD\n\nfrom math import log2\n\nfrom nmigen import *\nfrom nmigen.utils import log2_int\n\nfrom nmigen_soc import wishbone\nfrom nmigen_soc.memory import MemoryMap\nfrom lambdasoc.periph import Peripheral\n\n\nclass gramWishbone(Peripheral, Elaboratable):\n def __init__(self, core, data_width=32, granularity=8):\n super().__init__(name=\"wishbone\")\n\n self.native_port = core.crossbar.get_native_port()\n\n self.ratio = self.native_port.data_width//data_width\n\n addr_width = log2_int(core.size//(self.native_port.data_width//data_width))\n self.bus = wishbone.Interface(addr_width=addr_width+log2_int(self.ratio),\n data_width=data_width, granularity=granularity)\n\n map = MemoryMap(addr_width=addr_width+log2_int(self.ratio)+log2_int(data_width//granularity),\n data_width=granularity)\n self.bus.memory_map = map\n\n def elaborate(self, platform):\n m = Module()\n\n # Write datapath\n m.d.comb += [\n self.native_port.wdata.valid.eq(self.bus.cyc & self.bus.stb & self.bus.we),\n ]\n\n ratio_bitmask = Repl(1, log2_int(self.ratio))\n\n sel = Signal.like(self.bus.sel)\n with m.If(self.bus.sel == 0):\n m.d.comb += sel.eq(Repl(1, sel.width))\n with m.Else():\n m.d.comb += sel.eq(self.bus.sel)\n\n with m.Switch(self.bus.adr & ratio_bitmask):\n for i in range(self.ratio):\n with m.Case(i):\n m.d.comb += self.native_port.wdata.we.eq(Repl(sel, self.bus.granularity//8) << (self.ratio*i))\n\n with m.Switch(self.bus.adr & ratio_bitmask):\n for i in range(self.ratio):\n with m.Case(i):\n m.d.comb += self.native_port.wdata.data.eq(self.bus.dat_w << (self.bus.data_width*i))\n\n # Read datapath\n m.d.comb += [\n self.native_port.rdata.ready.eq(1),\n ]\n\n with m.Switch(self.bus.adr & ratio_bitmask):\n for i in range(self.ratio):\n with m.Case(i):\n m.d.comb += self.bus.dat_r.eq(self.native_port.rdata.data >> (self.bus.data_width*i))\n\n with m.FSM():\n with m.State(\"Send-Cmd\"):\n m.d.comb += [\n self.native_port.cmd.valid.eq(self.bus.cyc & self.bus.stb),\n self.native_port.cmd.we.eq(self.bus.we),\n self.native_port.cmd.addr.eq(self.bus.adr >> log2_int(self.bus.data_width//self.bus.granularity)),\n ]\n\n with m.If(self.native_port.cmd.valid & self.native_port.cmd.ready):\n with m.If(self.bus.we):\n m.next = \"Wait-Write\"\n with m.Else():\n m.next = \"Wait-Read\"\n\n with m.State(\"Wait-Read\"):\n with m.If(self.native_port.rdata.valid):\n m.d.comb += self.bus.ack.eq(1)\n m.next = \"Send-Cmd\"\n\n with m.State(\"Wait-Write\"):\n with m.If(self.native_port.wdata.ready):\n m.d.comb += self.bus.ack.eq(1)\n m.next = \"Send-Cmd\"\n\n return m\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
"""
Test the OOD-detection capabilities of models by scaling a random feature for all sample in the data set.
"""
# STD
import os
import pickle
from copy import deepcopy
from collections import defaultdict
import argparse
from typing import Tuple, Dict, List
# EXT
import numpy as np
from tqdm import tqdm
import torch
# PROJECT
from uncertainty_estimation.utils.model_init import AVAILABLE_MODELS
from uncertainty_estimation.utils.model_init import init_models
from uncertainty_estimation.utils.datahandler import DataHandler
from uncertainty_estimation.utils.novelty_analyzer import NoveltyAnalyzer
# CONST
SCALES = [10, 100, 1000, 10000]
N_FEATURES = 100
RESULT_DIR = "../../data/results"
def run_perturbation_experiment(
nov_an: NoveltyAnalyzer, X_test: np.ndarray, scoring_func: str = None
) -> Tuple[Dict[str, List[float]], Dict[str, List[float]]]:
"""Runs the perturbation experiment for a single novelty estimator.
Parameters
----------
nov_an: NoveltyAnalyzer
The novelty analyzer (handles scaling, imputation, evaluation)
X_test: np.ndarray
The test data to use
scoring_func: str
Which kind of novelty to evaluate (used for NN ensemble, where you can choose between
        'std' and 'entropy')
Returns
-------
aucs_dict: dict
        a dictionary of lists of OOD detection AUCs for different scales. The list contains the
detection AUCs for the same scale but different features.
recall_dict: dict
        a dictionary of lists of recalled OOD fractions using the 95th percentile cutoff. The
list contains the recalls for the same scale but different features.
"""
aucs_dict = defaultdict(list)
recall_dict = defaultdict(list)
for scale_adjustment in tqdm(SCALES):
random_sample = np.random.choice(
np.arange(0, X_test.shape[1]), N_FEATURES, replace=False
)
for r in random_sample:
X_test_adjusted = deepcopy(nov_an.X_test)
X_test_adjusted[:, r] = X_test_adjusted[:, r] * scale_adjustment
nov_an.set_ood(X_test_adjusted, impute_and_scale=False)
nov_an.calculate_novelty(scoring_func=scoring_func)
aucs_dict[scale_adjustment] += [nov_an.get_ood_detection_auc()]
recall_dict[scale_adjustment] += [nov_an.get_ood_recall()]
return aucs_dict, recall_dict
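
# With the constants above, one call to run_perturbation_experiment evaluates
# len(SCALES) * N_FEATURES = 4 * 100 = 400 perturbed copies of the test set per
# scoring function, one scaled feature at a time.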
if __name__ == "__main__":
np.random.seed(123)
torch.manual_seed(123)
parser = argparse.ArgumentParser()
parser.add_argument(
"--data_origin", type=str, default="MIMIC", help="Which data to use"
)
parser.add_argument(
"--models",
type=str,
nargs="+",
default=AVAILABLE_MODELS,
choices=AVAILABLE_MODELS,
help="Determine the models which are being used for this experiment.",
)
parser.add_argument(
"--result_dir",
type=str,
default=RESULT_DIR,
help="Define the directory that results should be saved to.",
)
args = parser.parse_args()
# Loading the data
dh = DataHandler(args.data_origin)
feature_names = dh.load_feature_names()
train_data, test_data, val_data = dh.load_data_splits()
y_name = dh.load_target_name()
for ne, scoring_funcs, name in init_models(
input_dim=len(feature_names), selection=args.models, origin=args.data_origin
):
print(name)
nov_an = NoveltyAnalyzer(
ne,
train_data[feature_names].values,
test_data[feature_names].values,
val_data[feature_names].values,
train_data[y_name].values,
test_data[y_name].values,
val_data[y_name].values,
)
nov_an.train()
for scoring_func in scoring_funcs:
aucs_dict, recall_dict = run_perturbation_experiment(
nov_an, test_data[feature_names], scoring_func=scoring_func
)
dir_name = os.path.join(
args.result_dir,
args.data_origin,
"perturbation",
name,
"detection",
scoring_func,
)
if not os.path.exists(dir_name):
os.makedirs(dir_name)
with open(os.path.join(dir_name, "recall.pkl"), "wb") as f:
pickle.dump(recall_dict, f)
with open(os.path.join(dir_name, "detect_auc.pkl"), "wb") as f:
pickle.dump(aucs_dict, f)
|
normal
|
{
"blob_id": "bf3e7f1aa9fd20b69e751da9ac8970c88b1144eb",
"index": 9363,
"step-1": "<mask token>\n\n\ndef run_perturbation_experiment(nov_an: NoveltyAnalyzer, X_test: np.ndarray,\n scoring_func: str=None) ->Tuple[Dict[str, List[float]], Dict[str, List[\n float]]]:\n \"\"\"Runs the perturbation experiment for a single novelty estimator.\n\n Parameters\n ----------\n nov_an: NoveltyAnalyzer\n The novelty analyzer (handles scaling, imputation, evaluation)\n X_test: np.ndarray\n The test data to use\n scoring_func: str\n Which kind of novelty to evaluate (used for NN ensemble, where you can choose between\n 'std' and 'entropy'\n\n Returns\n -------\n aucs_dict: dict\n a dictionary of lists of OOD detection AUCS for different scales. The list contains the\n detection AUCs for the same scale but different features.\n recall_dict: dict\n a dictionary of lists of recalled OOD fractions using the 95th percentile cutoff.The\n list contains the recalls for the same scale but different features.\n\n \"\"\"\n aucs_dict = defaultdict(list)\n recall_dict = defaultdict(list)\n for scale_adjustment in tqdm(SCALES):\n random_sample = np.random.choice(np.arange(0, X_test.shape[1]),\n N_FEATURES, replace=False)\n for r in random_sample:\n X_test_adjusted = deepcopy(nov_an.X_test)\n X_test_adjusted[:, r] = X_test_adjusted[:, r] * scale_adjustment\n nov_an.set_ood(X_test_adjusted, impute_and_scale=False)\n nov_an.calculate_novelty(scoring_func=scoring_func)\n aucs_dict[scale_adjustment] += [nov_an.get_ood_detection_auc()]\n recall_dict[scale_adjustment] += [nov_an.get_ood_recall()]\n return aucs_dict, recall_dict\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef run_perturbation_experiment(nov_an: NoveltyAnalyzer, X_test: np.ndarray,\n scoring_func: str=None) ->Tuple[Dict[str, List[float]], Dict[str, List[\n float]]]:\n \"\"\"Runs the perturbation experiment for a single novelty estimator.\n\n Parameters\n ----------\n nov_an: NoveltyAnalyzer\n The novelty analyzer (handles scaling, imputation, evaluation)\n X_test: np.ndarray\n The test data to use\n scoring_func: str\n Which kind of novelty to evaluate (used for NN ensemble, where you can choose between\n 'std' and 'entropy'\n\n Returns\n -------\n aucs_dict: dict\n a dictionary of lists of OOD detection AUCS for different scales. The list contains the\n detection AUCs for the same scale but different features.\n recall_dict: dict\n a dictionary of lists of recalled OOD fractions using the 95th percentile cutoff.The\n list contains the recalls for the same scale but different features.\n\n \"\"\"\n aucs_dict = defaultdict(list)\n recall_dict = defaultdict(list)\n for scale_adjustment in tqdm(SCALES):\n random_sample = np.random.choice(np.arange(0, X_test.shape[1]),\n N_FEATURES, replace=False)\n for r in random_sample:\n X_test_adjusted = deepcopy(nov_an.X_test)\n X_test_adjusted[:, r] = X_test_adjusted[:, r] * scale_adjustment\n nov_an.set_ood(X_test_adjusted, impute_and_scale=False)\n nov_an.calculate_novelty(scoring_func=scoring_func)\n aucs_dict[scale_adjustment] += [nov_an.get_ood_detection_auc()]\n recall_dict[scale_adjustment] += [nov_an.get_ood_recall()]\n return aucs_dict, recall_dict\n\n\nif __name__ == '__main__':\n np.random.seed(123)\n torch.manual_seed(123)\n parser = argparse.ArgumentParser()\n parser.add_argument('--data_origin', type=str, default='MIMIC', help=\n 'Which data to use')\n parser.add_argument('--models', type=str, nargs='+', default=\n AVAILABLE_MODELS, choices=AVAILABLE_MODELS, help=\n 'Determine the models which are being used for this experiment.')\n parser.add_argument('--result_dir', type=str, default=RESULT_DIR, help=\n 'Define the directory that results should be saved to.')\n args = parser.parse_args()\n dh = DataHandler(args.data_origin)\n feature_names = dh.load_feature_names()\n train_data, test_data, val_data = dh.load_data_splits()\n y_name = dh.load_target_name()\n for ne, scoring_funcs, name in init_models(input_dim=len(feature_names),\n selection=args.models, origin=args.data_origin):\n print(name)\n nov_an = NoveltyAnalyzer(ne, train_data[feature_names].values,\n test_data[feature_names].values, val_data[feature_names].values,\n train_data[y_name].values, test_data[y_name].values, val_data[\n y_name].values)\n nov_an.train()\n for scoring_func in scoring_funcs:\n aucs_dict, recall_dict = run_perturbation_experiment(nov_an,\n test_data[feature_names], scoring_func=scoring_func)\n dir_name = os.path.join(args.result_dir, args.data_origin,\n 'perturbation', name, 'detection', scoring_func)\n if not os.path.exists(dir_name):\n os.makedirs(dir_name)\n with open(os.path.join(dir_name, 'recall.pkl'), 'wb') as f:\n pickle.dump(recall_dict, f)\n with open(os.path.join(dir_name, 'detect_auc.pkl'), 'wb') as f:\n pickle.dump(aucs_dict, f)\n",
"step-3": "<mask token>\nSCALES = [10, 100, 1000, 10000]\nN_FEATURES = 100\nRESULT_DIR = '../../data/results'\n\n\ndef run_perturbation_experiment(nov_an: NoveltyAnalyzer, X_test: np.ndarray,\n scoring_func: str=None) ->Tuple[Dict[str, List[float]], Dict[str, List[\n float]]]:\n \"\"\"Runs the perturbation experiment for a single novelty estimator.\n\n Parameters\n ----------\n nov_an: NoveltyAnalyzer\n The novelty analyzer (handles scaling, imputation, evaluation)\n X_test: np.ndarray\n The test data to use\n scoring_func: str\n Which kind of novelty to evaluate (used for NN ensemble, where you can choose between\n 'std' and 'entropy'\n\n Returns\n -------\n aucs_dict: dict\n a dictionary of lists of OOD detection AUCS for different scales. The list contains the\n detection AUCs for the same scale but different features.\n recall_dict: dict\n a dictionary of lists of recalled OOD fractions using the 95th percentile cutoff.The\n list contains the recalls for the same scale but different features.\n\n \"\"\"\n aucs_dict = defaultdict(list)\n recall_dict = defaultdict(list)\n for scale_adjustment in tqdm(SCALES):\n random_sample = np.random.choice(np.arange(0, X_test.shape[1]),\n N_FEATURES, replace=False)\n for r in random_sample:\n X_test_adjusted = deepcopy(nov_an.X_test)\n X_test_adjusted[:, r] = X_test_adjusted[:, r] * scale_adjustment\n nov_an.set_ood(X_test_adjusted, impute_and_scale=False)\n nov_an.calculate_novelty(scoring_func=scoring_func)\n aucs_dict[scale_adjustment] += [nov_an.get_ood_detection_auc()]\n recall_dict[scale_adjustment] += [nov_an.get_ood_recall()]\n return aucs_dict, recall_dict\n\n\nif __name__ == '__main__':\n np.random.seed(123)\n torch.manual_seed(123)\n parser = argparse.ArgumentParser()\n parser.add_argument('--data_origin', type=str, default='MIMIC', help=\n 'Which data to use')\n parser.add_argument('--models', type=str, nargs='+', default=\n AVAILABLE_MODELS, choices=AVAILABLE_MODELS, help=\n 'Determine the models which are being used for this experiment.')\n parser.add_argument('--result_dir', type=str, default=RESULT_DIR, help=\n 'Define the directory that results should be saved to.')\n args = parser.parse_args()\n dh = DataHandler(args.data_origin)\n feature_names = dh.load_feature_names()\n train_data, test_data, val_data = dh.load_data_splits()\n y_name = dh.load_target_name()\n for ne, scoring_funcs, name in init_models(input_dim=len(feature_names),\n selection=args.models, origin=args.data_origin):\n print(name)\n nov_an = NoveltyAnalyzer(ne, train_data[feature_names].values,\n test_data[feature_names].values, val_data[feature_names].values,\n train_data[y_name].values, test_data[y_name].values, val_data[\n y_name].values)\n nov_an.train()\n for scoring_func in scoring_funcs:\n aucs_dict, recall_dict = run_perturbation_experiment(nov_an,\n test_data[feature_names], scoring_func=scoring_func)\n dir_name = os.path.join(args.result_dir, args.data_origin,\n 'perturbation', name, 'detection', scoring_func)\n if not os.path.exists(dir_name):\n os.makedirs(dir_name)\n with open(os.path.join(dir_name, 'recall.pkl'), 'wb') as f:\n pickle.dump(recall_dict, f)\n with open(os.path.join(dir_name, 'detect_auc.pkl'), 'wb') as f:\n pickle.dump(aucs_dict, f)\n",
"step-4": "<mask token>\nimport os\nimport pickle\nfrom copy import deepcopy\nfrom collections import defaultdict\nimport argparse\nfrom typing import Tuple, Dict, List\nimport numpy as np\nfrom tqdm import tqdm\nimport torch\nfrom uncertainty_estimation.utils.model_init import AVAILABLE_MODELS\nfrom uncertainty_estimation.utils.model_init import init_models\nfrom uncertainty_estimation.utils.datahandler import DataHandler\nfrom uncertainty_estimation.utils.novelty_analyzer import NoveltyAnalyzer\nSCALES = [10, 100, 1000, 10000]\nN_FEATURES = 100\nRESULT_DIR = '../../data/results'\n\n\ndef run_perturbation_experiment(nov_an: NoveltyAnalyzer, X_test: np.ndarray,\n scoring_func: str=None) ->Tuple[Dict[str, List[float]], Dict[str, List[\n float]]]:\n \"\"\"Runs the perturbation experiment for a single novelty estimator.\n\n Parameters\n ----------\n nov_an: NoveltyAnalyzer\n The novelty analyzer (handles scaling, imputation, evaluation)\n X_test: np.ndarray\n The test data to use\n scoring_func: str\n Which kind of novelty to evaluate (used for NN ensemble, where you can choose between\n 'std' and 'entropy'\n\n Returns\n -------\n aucs_dict: dict\n a dictionary of lists of OOD detection AUCS for different scales. The list contains the\n detection AUCs for the same scale but different features.\n recall_dict: dict\n a dictionary of lists of recalled OOD fractions using the 95th percentile cutoff.The\n list contains the recalls for the same scale but different features.\n\n \"\"\"\n aucs_dict = defaultdict(list)\n recall_dict = defaultdict(list)\n for scale_adjustment in tqdm(SCALES):\n random_sample = np.random.choice(np.arange(0, X_test.shape[1]),\n N_FEATURES, replace=False)\n for r in random_sample:\n X_test_adjusted = deepcopy(nov_an.X_test)\n X_test_adjusted[:, r] = X_test_adjusted[:, r] * scale_adjustment\n nov_an.set_ood(X_test_adjusted, impute_and_scale=False)\n nov_an.calculate_novelty(scoring_func=scoring_func)\n aucs_dict[scale_adjustment] += [nov_an.get_ood_detection_auc()]\n recall_dict[scale_adjustment] += [nov_an.get_ood_recall()]\n return aucs_dict, recall_dict\n\n\nif __name__ == '__main__':\n np.random.seed(123)\n torch.manual_seed(123)\n parser = argparse.ArgumentParser()\n parser.add_argument('--data_origin', type=str, default='MIMIC', help=\n 'Which data to use')\n parser.add_argument('--models', type=str, nargs='+', default=\n AVAILABLE_MODELS, choices=AVAILABLE_MODELS, help=\n 'Determine the models which are being used for this experiment.')\n parser.add_argument('--result_dir', type=str, default=RESULT_DIR, help=\n 'Define the directory that results should be saved to.')\n args = parser.parse_args()\n dh = DataHandler(args.data_origin)\n feature_names = dh.load_feature_names()\n train_data, test_data, val_data = dh.load_data_splits()\n y_name = dh.load_target_name()\n for ne, scoring_funcs, name in init_models(input_dim=len(feature_names),\n selection=args.models, origin=args.data_origin):\n print(name)\n nov_an = NoveltyAnalyzer(ne, train_data[feature_names].values,\n test_data[feature_names].values, val_data[feature_names].values,\n train_data[y_name].values, test_data[y_name].values, val_data[\n y_name].values)\n nov_an.train()\n for scoring_func in scoring_funcs:\n aucs_dict, recall_dict = run_perturbation_experiment(nov_an,\n test_data[feature_names], scoring_func=scoring_func)\n dir_name = os.path.join(args.result_dir, args.data_origin,\n 'perturbation', name, 'detection', scoring_func)\n if not os.path.exists(dir_name):\n os.makedirs(dir_name)\n with 
open(os.path.join(dir_name, 'recall.pkl'), 'wb') as f:\n pickle.dump(recall_dict, f)\n with open(os.path.join(dir_name, 'detect_auc.pkl'), 'wb') as f:\n pickle.dump(aucs_dict, f)\n",
"step-5": "\"\"\"\nTest the OOD-detection capabilities of models by scaling a random feature for all sample in the data set.\n\"\"\"\n\n# STD\nimport os\nimport pickle\nfrom copy import deepcopy\nfrom collections import defaultdict\nimport argparse\nfrom typing import Tuple, Dict, List\n\n# EXT\nimport numpy as np\nfrom tqdm import tqdm\nimport torch\n\n# PROJECT\nfrom uncertainty_estimation.utils.model_init import AVAILABLE_MODELS\nfrom uncertainty_estimation.utils.model_init import init_models\nfrom uncertainty_estimation.utils.datahandler import DataHandler\nfrom uncertainty_estimation.utils.novelty_analyzer import NoveltyAnalyzer\n\n# CONST\nSCALES = [10, 100, 1000, 10000]\nN_FEATURES = 100\nRESULT_DIR = \"../../data/results\"\n\n\ndef run_perturbation_experiment(\n nov_an: NoveltyAnalyzer, X_test: np.ndarray, scoring_func: str = None\n) -> Tuple[Dict[str, List[float]], Dict[str, List[float]]]:\n \"\"\"Runs the perturbation experiment for a single novelty estimator.\n\n Parameters\n ----------\n nov_an: NoveltyAnalyzer\n The novelty analyzer (handles scaling, imputation, evaluation)\n X_test: np.ndarray\n The test data to use\n scoring_func: str\n Which kind of novelty to evaluate (used for NN ensemble, where you can choose between\n 'std' and 'entropy'\n\n Returns\n -------\n aucs_dict: dict\n a dictionary of lists of OOD detection AUCS for different scales. The list contains the\n detection AUCs for the same scale but different features.\n recall_dict: dict\n a dictionary of lists of recalled OOD fractions using the 95th percentile cutoff.The\n list contains the recalls for the same scale but different features.\n\n \"\"\"\n aucs_dict = defaultdict(list)\n recall_dict = defaultdict(list)\n\n for scale_adjustment in tqdm(SCALES):\n random_sample = np.random.choice(\n np.arange(0, X_test.shape[1]), N_FEATURES, replace=False\n )\n\n for r in random_sample:\n X_test_adjusted = deepcopy(nov_an.X_test)\n X_test_adjusted[:, r] = X_test_adjusted[:, r] * scale_adjustment\n nov_an.set_ood(X_test_adjusted, impute_and_scale=False)\n nov_an.calculate_novelty(scoring_func=scoring_func)\n aucs_dict[scale_adjustment] += [nov_an.get_ood_detection_auc()]\n recall_dict[scale_adjustment] += [nov_an.get_ood_recall()]\n\n return aucs_dict, recall_dict\n\n\nif __name__ == \"__main__\":\n np.random.seed(123)\n torch.manual_seed(123)\n parser = argparse.ArgumentParser()\n parser.add_argument(\n \"--data_origin\", type=str, default=\"MIMIC\", help=\"Which data to use\"\n )\n parser.add_argument(\n \"--models\",\n type=str,\n nargs=\"+\",\n default=AVAILABLE_MODELS,\n choices=AVAILABLE_MODELS,\n help=\"Determine the models which are being used for this experiment.\",\n )\n parser.add_argument(\n \"--result_dir\",\n type=str,\n default=RESULT_DIR,\n help=\"Define the directory that results should be saved to.\",\n )\n args = parser.parse_args()\n\n # Loading the data\n dh = DataHandler(args.data_origin)\n feature_names = dh.load_feature_names()\n train_data, test_data, val_data = dh.load_data_splits()\n y_name = dh.load_target_name()\n\n for ne, scoring_funcs, name in init_models(\n input_dim=len(feature_names), selection=args.models, origin=args.data_origin\n ):\n print(name)\n nov_an = NoveltyAnalyzer(\n ne,\n train_data[feature_names].values,\n test_data[feature_names].values,\n val_data[feature_names].values,\n train_data[y_name].values,\n test_data[y_name].values,\n val_data[y_name].values,\n )\n nov_an.train()\n\n for scoring_func in scoring_funcs:\n aucs_dict, recall_dict = run_perturbation_experiment(\n 
nov_an, test_data[feature_names], scoring_func=scoring_func\n )\n\n dir_name = os.path.join(\n args.result_dir,\n args.data_origin,\n \"perturbation\",\n name,\n \"detection\",\n scoring_func,\n )\n\n if not os.path.exists(dir_name):\n os.makedirs(dir_name)\n\n with open(os.path.join(dir_name, \"recall.pkl\"), \"wb\") as f:\n pickle.dump(recall_dict, f)\n\n with open(os.path.join(dir_name, \"detect_auc.pkl\"), \"wb\") as f:\n pickle.dump(aucs_dict, f)\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
#
# * Python 57, Correct Lineup
# * Easy
# * For the opening ceremony of the upcoming sports event an even number of
# * athletes were picked. They formed a correct lineup, i.e. such a lineup in
# * which no two boys or two girls stand together. The first person in the lineup
# * was a girl. As a part of the performance, adjacent pairs of athletes (i.e.
# * the first one together with the second one, the third one together with the
# * fourth one, etc.) had to swap positions with each other.
# * Given a list of athletes, return the list of athletes after the changes, i.e.
# * after each adjacent pair of athletes is swapped.
# * Example
# For athletes = [1, 2, 3, 4, 5, 6], the output should be
# correctLineup(athletes) = [2, 1, 4, 3, 6, 5].
# * Input/Output
# [execution time limit] 4 seconds (py3)
# [input] array.integer athletes
# A list of even length representing the athletes, where each athlete is given
# by the number written on their back.
# Guaranteed constraints:
# 2 ≤ athletes.length ≤ 20,
# 1 ≤ athletes[i] ≤ 100.
# [output] array.integer
# Array of athletes with each pair of adjacent elements swapped.
#%%
# * Solution 1
def correctLineup1(athletes:list)-> list:
return [athletes[i+1] if i%2==0 else athletes[i-1] for i in range(len(athletes))]
# * Solution 2
# ! bitwise operator ^: i ^ 1 flips the lowest bit of the index, pairing each
# ! even index i with i + 1 and each odd index with i - 1.
def correctLineup2(athletes:list)-> list:
    return [athletes[i^1] for i in range(len(athletes))]

a1 = [1, 2, 3, 4, 5, 6]
r1 = correctLineup1(a1)
print(r1)  # [2, 1, 4, 3, 6, 5]
r2 = correctLineup2(a1)
print(r2)  # [2, 1, 4, 3, 6, 5]
# %%
|
normal
|
{
"blob_id": "6c5f60e7a122e3da5e6705bfacf73a361f6c1362",
"index": 1120,
"step-1": "def correctLineup1(athletes: list) ->list:\n return [(athletes[i + 1] if i % 2 == 0 else athletes[i - 1]) for i in\n range(len(athletes))]\n\n\n<mask token>\n",
"step-2": "def correctLineup1(athletes: list) ->list:\n return [(athletes[i + 1] if i % 2 == 0 else athletes[i - 1]) for i in\n range(len(athletes))]\n\n\ndef correctLineup1(athletes: list) ->list:\n return [athletes[i ^ 1] for i in range(len(athletes))]\n\n\n<mask token>\n",
"step-3": "def correctLineup1(athletes: list) ->list:\n return [(athletes[i + 1] if i % 2 == 0 else athletes[i - 1]) for i in\n range(len(athletes))]\n\n\ndef correctLineup1(athletes: list) ->list:\n return [athletes[i ^ 1] for i in range(len(athletes))]\n\n\n<mask token>\nprint(r1)\n",
"step-4": "def correctLineup1(athletes: list) ->list:\n return [(athletes[i + 1] if i % 2 == 0 else athletes[i - 1]) for i in\n range(len(athletes))]\n\n\ndef correctLineup1(athletes: list) ->list:\n return [athletes[i ^ 1] for i in range(len(athletes))]\n\n\na1 = [1, 2, 3, 4, 5, 6]\nr1 = correctLineup1(a1)\nprint(r1)\n",
"step-5": "#\n# * Python 57, Correct Lineup\n# * Easy\n\n# * For the opening ceremony of the upcoming sports event an even number of \n# * athletes were picked. They formed a correct lineup, i.e. such a lineup in \n# * which no two boys or two girls stand together. The first person in the lineup \n# * was a girl. As a part of the performance, adjacent pairs of athletes (i.e. \n# * the first one together with the second one, the third one together with the \n# * fourth one, etc.) had to swap positions with each other.\n\n# * Given a list of athletes, return the list of athletes after the changes, i.e. \n# * after each adjacent pair of athletes is swapped.\n\n# * Example\n\n# For athletes = [1, 2, 3, 4, 5, 6], the output should be\n# correctLineup(athletes) = [2, 1, 4, 3, 6, 5].\n\n# * Input/Output\n\n# [execution time limit] 4 seconds (py3)\n\n# [input] array.integer athletes\n\n# A list of even length representing the athletes, where each athlete is given \n# by the number written on their back.\n\n# Guaranteed constraints:\n# 2 ≤ athletes.length ≤ 20,\n# 1 ≤ athletes[i] ≤ 100.\n\n# [output] array.integer\n\n# Array of athletes with each pair of adjacent elements swapped.\n\n#%%\n\n# * Solution 1\ndef correctLineup1(athletes:list)-> list:\n return [athletes[i+1] if i%2==0 else athletes[i-1] for i in range(len(athletes))]\n\n\n# * Solution 2\n# ! bitwise operator ^. \ndef correctLineup1(athletes:list)-> list:\n return [athletes[i^1] for i in range(len(athletes))]\n\n\na1 = [1, 2, 3, 4, 5, 6]\nr1 = correctLineup1(a1)\nprint(r1)\n\n\n# %%\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
import smtplib
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
from email.header import Header
class SENDMAIL(object):
def __init__(self):
self.smtpserver = 'smtp.qq.com'
self.username = '[email protected]' # 比如QQ邮箱
self.password = 'xxxxxxxxxxxxxxxx' # 生成授权码
self.sender = '[email protected]'
def sendmail(self, receiver, lch, type, cfsj):
subject = '【 抢票提醒通知 】'
receiver = ['%s' % receiver]
msg = MIMEMultipart('mixed')
msg['Subject'] = subject
msg['From'] = 'Ncoreqp-Server <[email protected]>'
msg['To'] = ";".join(receiver)
        # Build the plain-text body of the message
text = """Hi!\n
十万火急, 探子来报! \n
目前, %s号列车, %s当前有票! - 出发时间为:[ %s ]
快去12306网站支付买票吧!! 快速通道链接https://www.12306.cn/index/\n
http://www.northcorezh.com\n
北芯众合, 改变生活!
""" % (lch, type, cfsj)
text_plain = MIMEText(text, 'plain', 'utf-8')
msg.attach(text_plain)
        # Send the email
smtp = smtplib.SMTP()
smtp.connect('smtp.qq.com')
        # set_debuglevel(1) would print every message exchanged with the SMTP server.
# smtp.set_debuglevel(1)
smtp.login(self.username, self.password)
smtp.sendmail(self.sender, receiver, msg.as_string())
smtp.quit()
print('邮件发送成功 !!!')
def send_email_by_smtp(self):
        # Mailbox used to send the email. Replace with your own address.
        sender_email_address = "[email protected]"
        # Password (authorization code) of the sending mailbox. Replace with your own.
        sender_email_password = "xxxxxxxxxxxxxxxxxx"
        # SMTP server of the sending mailbox; an IP address works as well.
        # Replace with your own mailbox's SMTP server address; no change needed for QQ mail.
        smtp_server_host = "smtp.qq.com"
        # Port the mailbox's SMTP server listens on; no change needed for QQ mail.
        smtp_server_port = 465
        # Mailbox to send to.
        receiver_email = "[email protected]"
        # Subject of the email to send.
        message_subject = "Python smtp测试邮件"
        # Body of the email to send.
        message_context = "这是一封通过Python smtp发送的测试邮件..."
        # Email object used to build the message.
        message = MIMEText(message_context, 'plain', 'utf-8')
        # Set the (claimed) sender.
        message["From"] = Header(sender_email_address, "utf-8")
        # Set the (claimed) recipient.
        message["To"] = Header(receiver_email, "utf-8")
        # Set the subject.
        message["Subject"] = Header(message_subject, "utf-8")
        # Connect to the SMTP server. If SSL is not used, change SMTP_SSL() to SMTP(); nothing else needs to change.
        email_client = smtplib.SMTP_SSL(smtp_server_host, smtp_server_port)
try:
# Verify that the mailbox and password are correct
email_client.login(sender_email_address, sender_email_password)
print(f"smtp----login success, now will send an email to {receiver_email}")
except Exception:
print("smtp----sorry, username or password not correct or another problem occur")
else:
# Send the email
email_client.sendmail(sender_email_address, receiver_email, message.as_string())
print(f"smtp----send email to {receiver_email} finish")
finally:
# Close the connection
email_client.close()
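# --- Usage sketch (added for illustration; addresses, credentials and train details
# below are placeholders, and a valid QQ-mail authorization code must be filled in
# above before either path will actually send). ---
if __name__ == '__main__':
    mailer = SENDMAIL()
    # Ticket-alert path: recipient, train number, seat type, departure time.
    mailer.sendmail('[email protected]', 'G1234', 'second-class seat', '2021-05-01 08:00')
    # Plain test-mail path over SMTP_SSL (port 465).
    mailer.send_email_by_smtp()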
|
normal
|
{
"blob_id": "bcab83e0ae6ee4925393b50bdefdfeb85c42ad2c",
"index": 1914,
"step-1": "<mask token>\n\n\nclass SENDMAIL(object):\n <mask token>\n <mask token>\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass SENDMAIL(object):\n\n def __init__(self):\n self.smtpserver = 'smtp.qq.com'\n self.username = '[email protected]'\n self.password = 'xxxxxxxxxxxxxxxx'\n self.sender = '[email protected]'\n\n def sendmail(self, receiver, lch, type, cfsj):\n subject = '【 抢票提醒通知 】'\n receiver = ['%s' % receiver]\n msg = MIMEMultipart('mixed')\n msg['Subject'] = subject\n msg['From'] = 'Ncoreqp-Server <[email protected]>'\n msg['To'] = ';'.join(receiver)\n text = (\n \"\"\"Hi!\n\n 十万火急, 探子来报! \n\n \n 目前, %s号列车, %s当前有票! - 出发时间为:[ %s ]\n 快去12306网站支付买票吧!! 快速通道链接https://www.12306.cn/index/\n\n \n http://www.northcorezh.com\n\n 北芯众合, 改变生活!\n \"\"\"\n % (lch, type, cfsj))\n text_plain = MIMEText(text, 'plain', 'utf-8')\n msg.attach(text_plain)\n smtp = smtplib.SMTP()\n smtp.connect('smtp.qq.com')\n smtp.login(self.username, self.password)\n smtp.sendmail(self.sender, receiver, msg.as_string())\n smtp.quit()\n print('邮件发送成功 !!!')\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass SENDMAIL(object):\n\n def __init__(self):\n self.smtpserver = 'smtp.qq.com'\n self.username = '[email protected]'\n self.password = 'xxxxxxxxxxxxxxxx'\n self.sender = '[email protected]'\n\n def sendmail(self, receiver, lch, type, cfsj):\n subject = '【 抢票提醒通知 】'\n receiver = ['%s' % receiver]\n msg = MIMEMultipart('mixed')\n msg['Subject'] = subject\n msg['From'] = 'Ncoreqp-Server <[email protected]>'\n msg['To'] = ';'.join(receiver)\n text = (\n \"\"\"Hi!\n\n 十万火急, 探子来报! \n\n \n 目前, %s号列车, %s当前有票! - 出发时间为:[ %s ]\n 快去12306网站支付买票吧!! 快速通道链接https://www.12306.cn/index/\n\n \n http://www.northcorezh.com\n\n 北芯众合, 改变生活!\n \"\"\"\n % (lch, type, cfsj))\n text_plain = MIMEText(text, 'plain', 'utf-8')\n msg.attach(text_plain)\n smtp = smtplib.SMTP()\n smtp.connect('smtp.qq.com')\n smtp.login(self.username, self.password)\n smtp.sendmail(self.sender, receiver, msg.as_string())\n smtp.quit()\n print('邮件发送成功 !!!')\n\n def send_email_by_smtp(self):\n sender_email_address = '[email protected]'\n sender_email_password = 'xxxxxxxxxxxxxxxxxx'\n smtp_server_host = 'smtp.qq.com'\n smtp_server_port = 465\n receiver_email = '[email protected]'\n message_subject = 'Python smtp测试邮件'\n message_context = '这是一封通过Python smtp发送的测试邮件...'\n message = MIMEText(message_context, 'plain', 'utf-8')\n message['From'] = Header(sender_email_address, 'utf-8')\n message['To'] = Header(receiver_email, 'utf-8')\n message['Subject'] = Header(message_subject, 'utf-8')\n email_client = smtplib.SMTP_SSL(smtp_server_host, smtp_server_port)\n try:\n email_client.login(sender_email_address, sender_email_password)\n print(\n 'smtp----login success, now will send an email to {receiver_email}'\n )\n except Exception:\n print(\n 'smtp----sorry, username or password not correct or another problem occur'\n )\n else:\n email_client.sendmail(sender_email_address, receiver_email,\n message.as_string())\n print(f'smtp----send email to {receiver_email} finish')\n finally:\n email_client.close()\n",
"step-4": "import smtplib\nfrom email.mime.multipart import MIMEMultipart\nfrom email.mime.text import MIMEText\nfrom email.header import Header\n\n\nclass SENDMAIL(object):\n\n def __init__(self):\n self.smtpserver = 'smtp.qq.com'\n self.username = '[email protected]'\n self.password = 'xxxxxxxxxxxxxxxx'\n self.sender = '[email protected]'\n\n def sendmail(self, receiver, lch, type, cfsj):\n subject = '【 抢票提醒通知 】'\n receiver = ['%s' % receiver]\n msg = MIMEMultipart('mixed')\n msg['Subject'] = subject\n msg['From'] = 'Ncoreqp-Server <[email protected]>'\n msg['To'] = ';'.join(receiver)\n text = (\n \"\"\"Hi!\n\n 十万火急, 探子来报! \n\n \n 目前, %s号列车, %s当前有票! - 出发时间为:[ %s ]\n 快去12306网站支付买票吧!! 快速通道链接https://www.12306.cn/index/\n\n \n http://www.northcorezh.com\n\n 北芯众合, 改变生活!\n \"\"\"\n % (lch, type, cfsj))\n text_plain = MIMEText(text, 'plain', 'utf-8')\n msg.attach(text_plain)\n smtp = smtplib.SMTP()\n smtp.connect('smtp.qq.com')\n smtp.login(self.username, self.password)\n smtp.sendmail(self.sender, receiver, msg.as_string())\n smtp.quit()\n print('邮件发送成功 !!!')\n\n def send_email_by_smtp(self):\n sender_email_address = '[email protected]'\n sender_email_password = 'xxxxxxxxxxxxxxxxxx'\n smtp_server_host = 'smtp.qq.com'\n smtp_server_port = 465\n receiver_email = '[email protected]'\n message_subject = 'Python smtp测试邮件'\n message_context = '这是一封通过Python smtp发送的测试邮件...'\n message = MIMEText(message_context, 'plain', 'utf-8')\n message['From'] = Header(sender_email_address, 'utf-8')\n message['To'] = Header(receiver_email, 'utf-8')\n message['Subject'] = Header(message_subject, 'utf-8')\n email_client = smtplib.SMTP_SSL(smtp_server_host, smtp_server_port)\n try:\n email_client.login(sender_email_address, sender_email_password)\n print(\n 'smtp----login success, now will send an email to {receiver_email}'\n )\n except Exception:\n print(\n 'smtp----sorry, username or password not correct or another problem occur'\n )\n else:\n email_client.sendmail(sender_email_address, receiver_email,\n message.as_string())\n print(f'smtp----send email to {receiver_email} finish')\n finally:\n email_client.close()\n",
"step-5": "import smtplib\nfrom email.mime.multipart import MIMEMultipart\nfrom email.mime.text import MIMEText\nfrom email.header import Header\n\nclass SENDMAIL(object):\n\n def __init__(self):\n self.smtpserver = 'smtp.qq.com'\n self.username = '[email protected]' # 比如QQ邮箱\n self.password = 'xxxxxxxxxxxxxxxx' # 生成授权码\n self.sender = '[email protected]'\n\n def sendmail(self, receiver, lch, type, cfsj):\n\n subject = '【 抢票提醒通知 】'\n receiver = ['%s' % receiver]\n\n msg = MIMEMultipart('mixed')\n msg['Subject'] = subject\n msg['From'] = 'Ncoreqp-Server <[email protected]>'\n\n msg['To'] = \";\".join(receiver)\n\n # 构造文字内容\n text = \"\"\"Hi!\\n\n 十万火急, 探子来报! \\n\n \n 目前, %s号列车, %s当前有票! - 出发时间为:[ %s ]\n 快去12306网站支付买票吧!! 快速通道链接https://www.12306.cn/index/\\n\n \n http://www.northcorezh.com\\n\n 北芯众合, 改变生活!\n \"\"\" % (lch, type, cfsj)\n text_plain = MIMEText(text, 'plain', 'utf-8')\n msg.attach(text_plain)\n\n # 发送邮件\n smtp = smtplib.SMTP()\n smtp.connect('smtp.qq.com')\n\n # 我们用set_debuglevel(1)就可以打印出和SMTP服务器交互的所有信息。\n # smtp.set_debuglevel(1)\n smtp.login(self.username, self.password)\n smtp.sendmail(self.sender, receiver, msg.as_string())\n smtp.quit()\n\n print('邮件发送成功 !!!')\n\n def send_email_by_smtp(self):\n # 用于发送邮件的邮箱。修改成自己的邮箱\n sender_email_address = \"[email protected]\"\n # 用于发送邮件的邮箱的密码。修改成自己的邮箱的密码\n sender_email_password = \"xxxxxxxxxxxxxxxxxx\"\n # 用于发送邮件的邮箱的smtp服务器,也可以直接是IP地址\n # 修改成自己邮箱的sntp服务器地址;qq邮箱不需要修改此值\n smtp_server_host = \"smtp.qq.com\"\n # 修改成自己邮箱的sntp服务器监听的端口;qq邮箱不需要修改此值\n smtp_server_port = 465\n # 要发往的邮箱\n receiver_email = \"[email protected]\"\n # 要发送的邮件主题\n message_subject = \"Python smtp测试邮件\"\n # 要发送的邮件内容\n message_context = \"这是一封通过Python smtp发送的测试邮件...\"\n\n # 邮件对象,用于构建邮件\n message = MIMEText(message_context, 'plain', 'utf-8')\n # 设置发件人(声称的)\n message[\"From\"] = Header(sender_email_address, \"utf-8\")\n # 设置收件人(声称的)\n message[\"To\"] = Header(receiver_email, \"utf-8\")\n # 设置邮件主题\n message[\"Subject\"] = Header(message_subject, \"utf-8\")\n\n # 连接smtp服务器。如果没有使用SSL,将SMTP_SSL()改成SMTP()即可其他都不需要做改动\n email_client = smtplib.SMTP_SSL(smtp_server_host, smtp_server_port)\n try:\n # 验证邮箱及密码是否正确\n email_client.login(sender_email_address, sender_email_password)\n print(\"smtp----login success, now will send an email to {receiver_email}\")\n\n except Exception:\n print(\"smtp----sorry, username or password not correct or another problem occur\")\n\n else:\n # 发送邮件\n email_client.sendmail(sender_email_address, receiver_email, message.as_string())\n print(f\"smtp----send email to {receiver_email} finish\")\n finally:\n # 关闭连接\n email_client.close()\n\n",
"step-ids": [
1,
3,
4,
5,
6
]
}
|
[
1,
3,
4,
5,
6
] |
from MyFeistel import MyFeistel, LengthPreservingCipher
import pytest
import base64
import os
class TestMyFeistel:
def test_Functionality(self):
key = base64.urlsafe_b64encode(os.urandom(16))
feistel = MyFeistel(key, 10)
# decrypt(encrypt(msg)) == msg
for i in xrange(20):
msg = os.urandom(6)
assert feistel.decrypt(feistel.encrypt(msg)) == msg
def test_OddLengthMessage(self):
pass
class TestLengthPreservingCipher:
def test_Functionality(self):
key = base64.urlsafe_b64encode(os.urandom(16))
lpc = LengthPreservingCipher(key, 10)
# decrypt(encrypt(msg)) == msg
for i in xrange(20):
msg = os.urandom(6)
assert lpc.decrypt(lpc.encrypt(msg)) == msg
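# --- Illustrative extra test (assumption: LengthPreservingCipher accepts messages of
# odd/arbitrary byte length, as its name suggests). A sketch, in the spirit of the empty
# test_OddLengthMessage above, checking that the ciphertext keeps the plaintext length
# and that the round trip is lossless for odd sizes.
class TestOddLengths:
    def test_RoundTripAndLength(self):
        key = base64.urlsafe_b64encode(os.urandom(16))
        lpc = LengthPreservingCipher(key, 10)
        for length in [1, 3, 5, 7, 15]:
            msg = os.urandom(length)
            ct = lpc.encrypt(msg)
            assert len(ct) == len(msg)
            assert lpc.decrypt(ct) == msg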
|
normal
|
{
"blob_id": "2464da1c4d2ddab3a053f0a14e3cc9a8beabe031",
"index": 6031,
"step-1": "<mask token>\n\n\nclass TestLengthPreservingCipher:\n\n def test_Functionality(self):\n key = base64.urlsafe_b64encode(os.urandom(16))\n lpc = LengthPreservingCipher(key, 10)\n for i in xrange(20):\n msg = os.urandom(6)\n assert lpc.decrypt(lpc.encrypt(msg)) == msg\n",
"step-2": "<mask token>\n\n\nclass TestMyFeistel:\n\n def test_Functionality(self):\n key = base64.urlsafe_b64encode(os.urandom(16))\n feistel = MyFeistel(key, 10)\n for i in xrange(20):\n msg = os.urandom(6)\n assert feistel.decrypt(feistel.encrypt(msg)) == msg\n <mask token>\n\n\nclass TestLengthPreservingCipher:\n\n def test_Functionality(self):\n key = base64.urlsafe_b64encode(os.urandom(16))\n lpc = LengthPreservingCipher(key, 10)\n for i in xrange(20):\n msg = os.urandom(6)\n assert lpc.decrypt(lpc.encrypt(msg)) == msg\n",
"step-3": "<mask token>\n\n\nclass TestMyFeistel:\n\n def test_Functionality(self):\n key = base64.urlsafe_b64encode(os.urandom(16))\n feistel = MyFeistel(key, 10)\n for i in xrange(20):\n msg = os.urandom(6)\n assert feistel.decrypt(feistel.encrypt(msg)) == msg\n\n def test_OddLengthMessage(self):\n pass\n\n\nclass TestLengthPreservingCipher:\n\n def test_Functionality(self):\n key = base64.urlsafe_b64encode(os.urandom(16))\n lpc = LengthPreservingCipher(key, 10)\n for i in xrange(20):\n msg = os.urandom(6)\n assert lpc.decrypt(lpc.encrypt(msg)) == msg\n",
"step-4": "from MyFeistel import MyFeistel, LengthPreservingCipher\nimport pytest\nimport base64\nimport os\n\n\nclass TestMyFeistel:\n\n def test_Functionality(self):\n key = base64.urlsafe_b64encode(os.urandom(16))\n feistel = MyFeistel(key, 10)\n for i in xrange(20):\n msg = os.urandom(6)\n assert feistel.decrypt(feistel.encrypt(msg)) == msg\n\n def test_OddLengthMessage(self):\n pass\n\n\nclass TestLengthPreservingCipher:\n\n def test_Functionality(self):\n key = base64.urlsafe_b64encode(os.urandom(16))\n lpc = LengthPreservingCipher(key, 10)\n for i in xrange(20):\n msg = os.urandom(6)\n assert lpc.decrypt(lpc.encrypt(msg)) == msg\n",
"step-5": "from MyFeistel import MyFeistel, LengthPreservingCipher\nimport pytest\nimport base64\nimport os\n\nclass TestMyFeistel:\n def test_Functionality(self):\n key = base64.urlsafe_b64encode(os.urandom(16))\n feistel = MyFeistel(key, 10)\n\n # decrypt(encrypt(msg)) == msg\n for i in xrange(20):\n msg = os.urandom(6)\n assert feistel.decrypt(feistel.encrypt(msg)) == msg\n def test_OddLengthMessage(self):\n pass\n\n\n\nclass TestLengthPreservingCipher:\n def test_Functionality(self):\n key = base64.urlsafe_b64encode(os.urandom(16))\n lpc = LengthPreservingCipher(key, 10)\n\n # decrypt(encrypt(msg)) == msg\n for i in xrange(20):\n msg = os.urandom(6)\n assert lpc.decrypt(lpc.encrypt(msg)) == msg\n\n",
"step-ids": [
2,
4,
5,
6,
7
]
}
|
[
2,
4,
5,
6,
7
] |
# Copyright (c) 2011-2014 by California Institute of Technology
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# 3. Neither the name of the California Institute of Technology nor
# the names of its contributors may be used to endorse or promote
# products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL CALTECH
# OR THE CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
# USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
# OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
# SUCH DAMAGE.
r"""Equality Set Projection (ESP).
Non-vertex polytope projection method from
- https://web.archive.org/web/20150103142532/
https://www-control.eng.cam.ac.uk/~cnj22/research/projection.html
- https://infoscience.epfl.ch/record/169768
Very unstable, cannot handle complex polytopes.
Reference
=========
\cite{Jones04}
"""
# Created by P. Nilsson, 8/2/11
import pickle
import numpy as np
from scipy import io as sio
from scipy import linalg
from polytope import solvers
class Ridge(object):
"""A ridge.
Attributes:
- `E_r`: Equality set of a facet
- `ar, br`: Affine hull of the facet
s.t. P_{E_0} = P intersection {x | ar x = br}.
"""
def __init__(self, E, a, b):
self.E_r = E
self.ar = a
self.br = b
class Ridge_Facet(object):
"""A ridge facet.
Attributes:
- `E_r`: Equality set of a ridge
- `ar,br`: Affine hull of the ridge s.t.
P_{E_f} intersection {x | ar x = br}
defines the ridge, where E_f is the
equality set of the facet.
- `E_0`: Equality set of a facet
- `af,bf`: Affine hull of the facet.
"""
def __init__(self, E_r, ar, br, E_0, af, bf):
self.E_r = E_r
self.ar = ar
self.br = br
self.E_0 = E_0
self.af = af
self.bf = bf
def esp(CC, DD, bb, centered=False, abs_tol=1e-10, verbose=0):
"""Project polytope [C D] x <= b onto C coordinates.
Projects the polytope [C D] x <= b onto the
coordinates that correspond to C. The projection of the polytope
P = {[C D]x <= b}, where C is M x d and D is M x k, is
defined as proj(P) = {x in R^d | there exists y in R^k s.t. Cx + Dy <= b}
"""
if 'glpk' not in solvers.installed_solvers:
raise Exception(
"projection_esp error:"
" Equality set projection requires `cvxopt.glpk` to run.")
# Remove zero columns and rows
nonzerorows = np.nonzero(
np.sum(np.abs(np.hstack([CC, DD])), axis=1) > abs_tol)[0]
nonzeroxcols = np.nonzero(np.sum(np.abs(CC), axis=0) > abs_tol)[0]
nonzeroycols = np.nonzero(np.sum(np.abs(DD), axis=0) > abs_tol)[0]
C = CC[nonzerorows, :].copy()
D = DD[nonzerorows, :].copy()
C = C[:, nonzeroxcols]
D = D[:, nonzeroycols]
b = bb[nonzerorows].copy()
# Make sure origo is inside polytope
if not centered:
xc0, yc0, trans = cheby_center(C, D, b)
if trans:
b = b - np.dot(C, xc0).flatten() - np.dot(D, yc0).flatten()
else:
b = b
else:
trans = False
d = C.shape[1]
k = D.shape[1]
if verbose > 0:
print("Projecting from dim " + str(d + k) + " to " + str(d))
if k == 0:
# Not projecting
return C, bb, []
if d == 1:
# Projection to 1D
c = np.zeros(d + k)
c[0] = 1
G = np.hstack([C, D])
sol = solvers.lpsolve(c, G, b, solver='glpk')
if sol['status'] != "optimal":
raise Exception(
"esp: projection to 1D is not full-dimensional, "
"LP returned status " + str(sol['status']))
min_sol = np.array(sol['x']).flatten()
min_dual_sol = np.array(sol['z']).flatten()
sol = solvers.lpsolve(-c, G, b, solver='glpk')
if sol['status'] != "optimal":
raise Exception(
"esp: projection to 1D is not full-dimensional, " +
"LP returned status " + str(sol['status']))
max_sol = np.array(sol['x']).flatten()
max_dual_sol = np.array(sol['z']).flatten()
# min, max
x_min = min_sol[0]
x_max = max_sol[0]
y_min = min_sol[range(1, k + 1)]
y_max = max_sol[range(1, k + 1)]
if is_dual_degenerate(c, G, b, None, None, min_sol, min_dual_sol):
# Min case, relax constraint a little to avoid infeasibility
E_min = unique_equalityset(
C, D, b, np.array([1.]), x_min + abs_tol / 3, abs_tol=abs_tol)
else:
E_min = np.nonzero(np.abs(np.dot(G, min_sol) - b) < abs_tol)[0]
if is_dual_degenerate(c, G, b, None, None, max_sol, max_dual_sol):
# Max case, relax constraint a little to avoid infeasibility
E_max = unique_equalityset(
C, D, b, np.array([1.]), x_max - abs_tol / 3, abs_tol=abs_tol)
else:
E_max = np.nonzero(np.abs(np.dot(G, max_sol) - b) < abs_tol)[0]
G = np.array([[1.], [-1.]])
g = np.array([x_max, -x_min])
# Relocate
if trans:
g = g + np.dot(G, xc0)
# Return zero cols/rows
E_max = nonzerorows[E_max]
E_min = nonzerorows[E_min]
if verbose > 0:
print(
"Returning projection from dim " +
str(d + k) + " to dim 1 \n")
return G, g, [E_max, E_min]
E = []
L = []
E_0, af, bf = shoot(C, D, b, abs_tol=abs_tol)
ridge_list = ridge(C, D, b, E_0, af, bf, abs_tol=abs_tol, verbose=verbose)
for i in range(len(ridge_list)):
r = ridge_list[i]
L.append(Ridge_Facet(r.E_r, r.ar, r.br, E_0, af, bf))
G = af.T
g = bf
if verbose > 0:
print("\nStarting eq set " + str(E_0) + "\nStarting ridges ")
for rr in L:
print(str(rr.E_r))
E.append(E_0)
while len(L) > 0:
rid_fac1 = L[0]
if verbose > 0:
print("\nLooking for neighbors to " + str(rid_fac1.E_0) +
" and " + str(rid_fac1.E_r) + " ..")
E_adj, a_adj, b_adj = adjacent(C, D, b, rid_fac1, abs_tol=abs_tol)
if verbose > 0:
print("found neighbor " + str(E_adj) +
". \n\nLooking for ridges of neighbor..")
ridge_list = ridge(
C, D, b, E_adj, a_adj, b_adj,
abs_tol=abs_tol, verbose=verbose)
if verbose > 0:
print("found " + str(len(ridge_list)) + " ridges\n")
found_org = False
for i in range(len(ridge_list)):
r = ridge_list[i]
E_r = r.E_r
ar = r.ar
br = r.br
found = False
for j in range(len(L)):
rid_fac2 = L[j]
A_r = rid_fac2.E_r
if len(A_r) != len(E_r):
continue
t1 = np.sort(np.array(A_r))
t2 = np.sort(np.array(E_r))
if np.sum(np.abs(t1 - t2)) < abs_tol:
found = True
break
if found:
if verbose > 0:
print("Ridge " + str(E_r) +
" already visited, removing from L..")
if rid_fac2 == rid_fac1:
found_org = True
L.remove(rid_fac2)
else:
if verbose > 0:
print("Adding ridge-facet " + str(E_adj) +
" " + str(E_r) + "")
L.append(Ridge_Facet(E_r, ar, br, E_adj, a_adj, b_adj))
if not found_org:
print("Expected ridge " + str(rid_fac1.E_r))
print("but got ridges ")
for rid in ridge_list:
print(rid.E_r)
raise Exception(
"esp: ridge did not return neighboring ridge as expected")
G = np.vstack([G, a_adj])
g = np.hstack([g, b_adj])
E.append(E_adj)
# Restore center
if trans:
g = g + np.dot(G, xc0)
# Return zero rows
for Ef in E:
Ef = nonzerorows[Ef]
return G, g, E
def shoot(C, D, b, maxiter=1000, abs_tol=1e-7):
"""Return random equality set of P that projects on a projection facet.
Returns randomly selected equality set E_0 of P such
that the projection of the equality set is a facet of the projection.
@param C: Matrix defining the polytope Cx+Dy <= b
@param D: Matrix defining the polytope Cx+Dy <= b
@param b: Vector defining the polytope Cx+Dy <= b
@return: `E_0,af,bf`: Equality set and affine hull
"""
d = C.shape[1]
k = D.shape[1]
iter = 0
while True:
if iter > maxiter:
raise Exception(
"shoot: could not find starting equality set")
gamma = np.random.rand(d) - 0.5
c = np.zeros(k + 1)
c[0] = -1
G = np.hstack([np.array([np.dot(C, gamma)]).T, D])
sol = solvers.lpsolve(c, G, b, solver='glpk')
opt_sol = np.array(sol['x']).flatten()
opt_dual = np.array(sol['z']).flatten()
r_opt = opt_sol[0]
y_opt = np.array(opt_sol[range(1, len(opt_sol))]).flatten()
x_opt = r_opt * gamma
E_0 = np.nonzero(
np.abs(np.dot(C, x_opt) + np.dot(D, y_opt) - b) < abs_tol)[0]
DE0 = D[E_0, :]
CE0 = C[E_0, :]
b0 = b[E_0]
if rank(np.dot(null_space(DE0.T).T, CE0)) == 1:
break
iter += 1
af, bf = proj_aff(CE0, DE0, b0, abs_tol=abs_tol)
if is_dual_degenerate(c, G, b, None, None, opt_sol,
opt_dual, abs_tol=abs_tol):
E_0 = unique_equalityset(C, D, b, af, bf, abs_tol=abs_tol)
af, bf = proj_aff(C[E_0, :], D[E_0, :], b[E_0])
if len(bf) > 1:
raise Exception("shoot: wrong dimension of affine hull")
return E_0, af.flatten(), bf
def ridge(C, D, b, E, af, bf, abs_tol=1e-7, verbose=0):
"""Compute all ridges of a facet in the projection.
Input:
`C,D,b`: Original polytope data
`E,af,bf`: Equality set and affine hull of a facet in the projection
Output:
`ridge_list`: A list containing all the ridges of
the facet as Ridge objects
"""
d = C.shape[1]
k = D.shape[1]
Er_list = []
q = C.shape[0]
E_c = np.setdiff1d(range(q), E)
# E slices
C_E = C[E, :]
D_E = D[E, :]
b_E = b[E, :]
# E_c slices
C_Ec = C[E_c, :]
D_Ec = D[E_c, :]
b_Ec = b[E_c]
# dots
S = C_Ec - np.dot(np.dot(D_Ec, linalg.pinv(D_E)), C_E)
L = np.dot(D_Ec, null_space(D_E))
t = b_Ec - np.dot(D_Ec, np.dot(linalg.pinv(D_E), b_E))
if rank(np.hstack([C_E, D_E])) < k + 1:
if verbose > 1:
print("Doing recursive ESP call")
u, s, v = linalg.svd(np.array([af]), full_matrices=1)
sigma = s[0]
v = v.T * u[0, 0] # Correct sign
V_hat = v[:, [0]]
V_tilde = v[:, range(1, v.shape[1])]
Cnew = np.dot(S, V_tilde)
Dnew = L
bnew = t - np.dot(S, V_hat).flatten() * bf / sigma
Anew = np.hstack([Cnew, Dnew])
xc2, yc2, cen2 = cheby_center(Cnew, Dnew, bnew)
bnew = bnew - np.dot(Cnew, xc2).flatten() - np.dot(Dnew, yc2).flatten()
Gt, gt, E_t = esp(
Cnew, Dnew, bnew,
centered=True, abs_tol=abs_tol, verbose=0)
if (len(E_t[0]) == 0) or (len(E_t[1]) == 0):
raise Exception(
"ridge: recursive call did not return any equality sets")
for i in range(len(E_t)):
E_f = E_t[i]
er = np.sort(np.hstack([E, E_c[E_f]]))
ar = np.dot(Gt[i, :], V_tilde.T).flatten()
br0 = gt[i].flatten()
# Make orthogonal to facet
ar = ar - af * np.dot(af.flatten(), ar.flatten())
br = br0 - bf * np.dot(af.flatten(), ar.flatten())
# Normalize and make ridge equation point outwards
norm = np.sqrt(np.sum(ar * ar))
ar = ar * np.sign(br) / norm
br = br * np.sign(br) / norm
# Restore center
br = br + np.dot(Gt[i, :], xc2) / norm
if len(ar) > d:
raise Exception("ridge: wrong length of new ridge!")
Er_list.append(Ridge(er, ar, br))
else:
if verbose > 0:
print("Doing direct calculation of ridges")
X = np.arange(S.shape[0])
while len(X) > 0:
i = X[0]
X = np.setdiff1d(X, i)
if np.linalg.norm(S[i, :]) < abs_tol:
continue
Si = S[i, :]
Si = Si / np.linalg.norm(Si)
if np.linalg.norm(af - np.dot(Si, af) * Si) > abs_tol:
test1 = null_space(
np.vstack([
np.hstack([af, bf]),
np.hstack([S[i, :], t[i]])]),
nonempty=True)
test2 = np.hstack([S, np.array([t]).T])
test = np.dot(test1.T, test2.T)
test = np.sum(np.abs(test), 0)
Q_i = np.nonzero(test > abs_tol)[0]
Q = np.nonzero(test < abs_tol)[0]
X = np.setdiff1d(X, Q)
# Have Q_i
Sq = S[Q_i, :]
tq = t[Q_i]
c = np.zeros(d + 1)
c[0] = 1
Gup = np.hstack([-np.ones([Sq.shape[0], 1]), Sq])
Gdo = np.hstack([-1, np.zeros(Sq.shape[1])])
G = np.vstack([Gup, Gdo])
h = np.hstack([tq, 1])
Al = np.zeros([2, 1])
Ar = np.vstack([af, S[i, :]])
A = np.hstack([Al, Ar])
bb = np.hstack([bf, t[i]])
sol = solvers._solve_lp_using_cvxopt(
c, G, h, A=A, b=bb)
if sol['status'] == 'optimal':
tau = sol['x'][0]
if tau < -abs_tol:
ar = np.array([S[i, :]]).flatten()
br = t[i].flatten()
# Make orthogonal to facet
ar = ar - af * np.dot(af.flatten(), ar.flatten())
br = br - bf * np.dot(af.flatten(), ar.flatten())
# Normalize and make ridge equation point outwards
norm = np.sqrt(np.sum(ar * ar))
ar = ar / norm
br = br / norm
# accumulate
Er_list.append(
Ridge(np.sort(np.hstack([E, E_c[Q]])), ar, br))
return Er_list
def adjacent(C, D, b, rid_fac, abs_tol=1e-7):
"""Compute the (unique) adjacent facet.
@param rid_fac: A Ridge_Facet object containing the parameters for
a facet and one of its ridges.
@return: (E_adj,a_adj,b_adj): The equality set and parameters for
the adjacent facet such that::
P_{E_adj} = P intersection {x | a_adj x = b_adj}
"""
E = rid_fac.E_0
af = rid_fac.af
bf = rid_fac.bf
#
E_r = rid_fac.E_r
ar = rid_fac.ar
br = rid_fac.br
# shape
d = C.shape[1]
k = D.shape[1]
# E_r slices
C_er = C[E_r, :]
D_er = D[E_r, :]
b_er = b[E_r]
# stack
c = -np.hstack([ar, np.zeros(k)])
G = np.hstack([C_er, D_er])
h = b_er
A = np.hstack([af, np.zeros(k)])
sol = solvers._solve_lp_using_cvxopt(
c, G, h, A=A.T, b=bf * (1 - 0.01))
if sol['status'] != "optimal":
print(G)
print(h)
print(af)
print(bf)
print(ar)
print(br)
print(np.dot(af, ar))
data = {}
data["C"] = C
data["D"] = D
data["b"] = b
sio.savemat("matlabdata", data)
with open('polytope.p', 'wb') as f:
pickle.dump(data, f)
raise Exception(
"adjacent: Lp returned status " + str(sol['status']))
opt_sol = np.array(sol['x']).flatten()
dual_opt_sol = np.array(sol['z']).flatten()
x_opt = opt_sol[range(d)]
y_opt = opt_sol[range(d, d + k)]
if is_dual_degenerate(
c.flatten(), G, h, A, bf * (1 - 0.01),
opt_sol, dual_opt_sol, abs_tol=abs_tol):
# If degenerate, compute affine hull and take preimage
E_temp = np.nonzero(np.abs(np.dot(G, opt_sol) - h) < abs_tol)[0]
a_temp, b_temp = proj_aff(
C_er[E_temp, :], D_er[E_temp, :], b_er[E_temp],
expected_dim=1, abs_tol=abs_tol)
E_adj = unique_equalityset(C, D, b, a_temp, b_temp, abs_tol=abs_tol)
if len(E_adj) == 0:
data = {}
data["C"] = C
data["D"] = D
data["b"] = b
data["Er"] = E_r + 1
data["ar"] = ar
data["br"] = br
data["Ef"] = E + 1
data["af"] = af
data["bf"] = bf
sio.savemat("matlabdata", data)
raise Exception(
"adjacent: equality set computation returned empty set")
else:
r = np.abs(np.dot(C, x_opt) + np.dot(D, y_opt) - b) < abs_tol
E_adj = np.nonzero(r)[0]
C_eadj = C[E_adj, :]
D_eadj = D[E_adj, :]
b_eadj = b[E_adj]
af_adj, bf_adj = proj_aff(C_eadj, D_eadj, b_eadj, abs_tol=abs_tol)
return E_adj, af_adj, bf_adj
def proj_aff(Ce, De, be, expected_dim=None, abs_tol=1e-7):
"""Affine projection.
Compute the set aff = {x | Ce x + De y = be} on the form
aff = ({x | a x = b} intersection {Ce x + De y < be}).
Input: Polytope parameters Ce, De and be
Output: Constants a and b
"""
# Remove zero columns
ind = np.nonzero(np.sum(np.abs(De), axis=0) > abs_tol)[0]
D = De[:, ind]
if D.shape[1] == 0:
a = Ce
b = be
a_n, b_n = normalize(a, b)
if expected_dim is not None:
if expected_dim != b_n.size:
raise Exception(
"proj_aff: wrong dimension calculated in 1")
return a_n.flatten(), b_n
sh = np.shape(D.T)
m = sh[0]
n = sh[1]
nDe = null_space(D.T)
a = np.dot(nDe.T, Ce)
b = np.dot(nDe.T, be)
a_n, b_n = normalize(a, b)
if expected_dim is not None:
if expected_dim != b_n.size:
raise Exception("proj_aff: wrong dimension calculated in 2")
return a_n, b_n
def is_dual_degenerate(c, G, h, A, b, x_opt, z_opt, abs_tol=1e-7):
"""Return `True` if pair of dual problems is dual degenerate.
Checks if the pair of dual problems::
    (P) : min  c'x              (D) : max  h'z + b'y
          s.t. Gx <= h                s.t. G'z + A'y = c
               Ax  = b                     z <= 0
is dual degenerate, i.e. if (P) has several optimal solutions.
Optimal solutions x* and z* are required.
Input:
`G,h,A,b`: Parameters of (P)
`x_opt`: One optimal solution to (P)
`z_opt`: The optimal solution to (D) corresponding to
_inequality constraints_ in (P)
Output:
`dual`: Boolean indicating whether (P) has many optimal solutions.
"""
D = - G
d = - h.flatten()
mu = - z_opt.flatten() # mu >= 0
# Active constraints
I = np.nonzero(np.abs(np.dot(D, x_opt).flatten() - d) < abs_tol)[0]
# Positive elements in dual opt
J = np.nonzero(mu > abs_tol)[0]
# i, j
i = mu < abs_tol # Zero elements in dual opt
i = i.astype(int)
j = np.zeros(len(mu), dtype=int)
j[I] = 1 # 1 if active
# Indices where active constraints have 0 dual opt
L = np.nonzero(i + j == 2)[0]
# sizes
nI = len(I)
nJ = len(J)
nL = len(L)
# constraints
DI = D[I, :] # Active constraints
DJ = D[J, :] # Constraints with positive lagrange mult
DL = D[L, :] # Active constraints with zero dual opt
dual = 0
if A is None:
test = DI
else:
test = np.vstack([DI, A])
if rank(test) < np.amin(DI.shape):
return True
else:
if len(L) > 0:
if A is None:
Ae = DJ
else:
Ae = np.vstack([DJ, A])
be = np.zeros(Ae.shape[0])
Ai = - DL
bi = np.zeros(nL)
sol = solvers._solve_lp_using_cvxopt(
c= - np.sum(DL, axis=0), G=Ai,
h=bi, A=Ae, b=be)
if sol['status'] == "dual infeasible":
# Dual infeasible -> primal unbounded -> value>epsilon
return True
if sol['primal objective'] > abs_tol:
return True
return False
def unique_equalityset(C, D, b, af, bf, abs_tol=1e-7, verbose=0):
"""Return equality set E with the following property:
P_E = {x | af x = bf} intersection P
where P is the polytope C x + D y < b
The inequalities have to be satisfied with equality everywhere on
the face defined by af and bf.
"""
if D is not None:
A = np.hstack([C, D])
a = np.hstack([af, np.zeros(D.shape[1])])
else:
A = C
a = af
E = []
for i in range(A.shape[0]):
A_i = np.array(A[i, :])
b_i = b[i]
sol = solvers._solve_lp_using_cvxopt(
c=A_i, G=A, h=b,
A=a.T, b=bf)
if sol['status'] != "optimal":
raise Exception(
"unique_equalityset: LP returned status " +
str(sol['status']))
if np.abs(sol['primal objective'] - b_i) < abs_tol:
# Constraint is active everywhere
E.append(i)
if len(E) == 0:
raise Exception("unique_equalityset: empty E")
return np.array(E)
def unique_equalityset2(C, D, b, opt_sol, abs_tol=1e-7):
A = np.hstack([C, D])
E0 = np.nonzero(np.abs(np.dot(A, opt_sol) - b) < abs_tol)[0]
af, bf = proj_aff(C[E0, :], D[E0, :], b[E0], expected_dim=1)
# stack
ineq = np.hstack([af, np.zeros(D.shape[1])])
G = np.vstack([A, np.vstack([ineq, -ineq])])
h = np.hstack([b, np.hstack([bf, -bf])])
# shape
m = G.shape[0]
n = G.shape[1]
# ht
e = 1e-3
v = np.vstack([np.zeros([1, n]), np.eye(n)]).T
v = v - np.array([np.mean(v, axis=1)]).T
v = v * e
ht = h + np.amin(-np.dot(G, v), axis=1)
# stack
H1 = np.hstack([G, -np.eye(m)])
H2 = np.hstack([G, np.zeros([m, m])])
H3 = np.hstack([np.zeros([m, n]), -np.eye(m)])
H = np.vstack([H1, np.vstack([H2, H3])])
h = np.hstack([ht, np.hstack([h, np.zeros(m)])])
c = np.hstack([np.zeros(n), np.ones(m)])
sol = solvers.lpsolve(c, H, h, solver='glpk')
if not sol['status'] == "optimal":
raise Exception(
"unique_equalityset: LP returned status " +
str(sol['status']))
opt_sol2 = np.array(sol['x']).flatten()
x = opt_sol2[range(n)]
s = opt_sol2[range(n, len(opt_sol2))]
E = np.nonzero(s > abs_tol)[0]
print(E)
E = np.sort(E[np.nonzero(E < C.shape[0])])
# Check that they define the same projection
at, bt = proj_aff(C[E, :], D[E, :], b[E])
if bt.size != 1 or np.sum(np.abs(at - af)) + np.abs(bt - bf) > abs_tol:
raise Exception("unique_equalityset2: affine hulls not the same")
return E
def cheby_center(C, D, b):
"""Calculate Chebyshev center for the polytope `C x + D y <= b`.
Input:
`C, D, b`: Polytope parameters
Output:
`x_0, y_0`: The Chebyshev center
`boolean`: True if a point could be found, False otherwise.
"""
d = C.shape[1]
k = D.shape[1]
A = np.hstack([C, D])
dim = np.shape(A)[1]
c = - np.r_[np.zeros(dim), 1]
norm2 = np.sqrt(np.sum(A * A, axis=1))
G = np.c_[A, norm2]
sol = solvers.lpsolve(c, G, h=b, solver='glpk')
if sol['status'] == "optimal":
opt = np.array(sol['x'][0:-1]).flatten()
return opt[range(d)], opt[range(d, d + k)], True
else:
return np.zeros(d), np.zeros(k), False
def normalize(AA, bb, abs_tol=1e-7):
"""Normalize `A x = b` such that `A'A = 1` and `b > 0`.
Also, remove duplicate lines.
"""
if AA.size == 0:
return AA, bb
dim = AA.size / bb.size
A = AA.copy().reshape(bb.size, dim)
b = bb.copy().reshape(bb.size, 1)
# Remove zero lines
keepind = np.nonzero(
np.sum(np.abs(np.hstack([A, b])), axis=1) > abs_tol)[0]
A = A[keepind, :]
b = b[keepind]
# Normalize
anorm = np.sqrt(np.sum(A * A, axis=1))
for i in range(len(anorm)):
A[i, :] = A[i, :] * np.sign(b[i, 0]) / anorm[i]
b[i, 0] = np.sign(b[i, 0]) * b[i, 0] / anorm[i]
# Remove duplicate rows
keep_row = []
for i in range(len(anorm)):
unique = True
for j in range(i + 1, len(anorm)):
test = (np.sum(np.abs(A[i, :] - A[j, :])) +
np.abs(b[i, 0] - b[j, 0]))
if test < abs_tol:
unique = False
break
if unique:
keep_row.append(i)
A_n = A[keep_row, :]
b_n = b[keep_row, 0]
# Return flat A if only one row
if A_n.size == dim:
A_n = A_n.flatten()
return A_n, b_n.flatten()
def rank(A, eps=1e-15):
u, s, vh = linalg.svd(A)
m = A.shape[0]
n = A.shape[1]
tol = np.amax([m, n]) * np.amax(s) * eps
return np.sum(s > tol)
def null_space(A, eps=1e-15, nonempty=False):
"""Returns the null space N_A to matrix A such that A N_A = 0."""
u, s, v = linalg.svd(A, full_matrices=1)
m = A.shape[0]
n = A.shape[1]
tol = np.amax([m, n]) * np.amax(s) * eps
rank = np.sum(s > tol)
N_space = v[range(rank, n), :].T
if nonempty and (len(N_space) == 0):
N_space = v[range(np.amax(n - 1, 1), n), :]
return N_space
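# ---------------------------------------------------------------------------
# Minimal usage sketch (added for illustration; requires `cvxopt` with the GLPK
# solver installed). The box -1 <= x1, x2, y <= 1 is written as [C D][x; y] <= b
# and projected onto (x1, x2); the expected projection is the square
# -1 <= x1, x2 <= 1. The module is documented above as unstable, so treat this
# purely as an example of the call signature, not as a test of robustness.
if __name__ == '__main__':
    C_box = np.vstack([np.eye(2), -np.eye(2), np.zeros((2, 2))])
    D_box = np.vstack([np.zeros((4, 1)), np.array([[1.0], [-1.0]])])
    b_box = np.ones(6)
    G_proj, g_proj, eq_sets = esp(C_box, D_box, b_box, verbose=1)
    # G_proj x <= g_proj describes proj(P); eq_sets lists the equality set per facet.
    print(G_proj)
    print(g_proj)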
|
normal
|
{
"blob_id": "707c83bc83f606b570af973094574e6675cfc83f",
"index": 8793,
"step-1": "<mask token>\n\n\nclass Ridge(object):\n \"\"\"A ridge.\n\n Attributes:\n\n - `E_r`: Equality set of a facet\n\n - `ar, br`: Affine hull of the facet\n s.t. P_{E_0} = P intersection {x | ar x = br}.\n \"\"\"\n\n def __init__(self, E, a, b):\n self.E_r = E\n self.ar = a\n self.br = b\n\n\nclass Ridge_Facet(object):\n \"\"\"A ridge facet.\n\n Attributes:\n\n - `E_r`: Equality set of a ridge\n\n - `ar,br`: Affine hull of the ridge s.t.\n P_{E_f} intersection {x | ar x = br}\n defines the ridge, where E_f is the\n equality set of the facet.\n\n - `E_0`: Equality set of a facet\n\n - `af,bf`: Affine hull of the facet.\n \"\"\"\n\n def __init__(self, E_r, ar, br, E_0, af, bf):\n self.E_r = E_r\n self.ar = ar\n self.br = br\n self.E_0 = E_0\n self.af = af\n self.bf = bf\n\n\n<mask token>\n\n\ndef unique_equalityset(C, D, b, af, bf, abs_tol=1e-07, verbose=0):\n \"\"\"Return equality set E with the following property:\n\n P_E = {x | af x = bf} intersection P\n\n where P is the polytope C x + D y < b\n\n The inequalities have to be satisfied with equality everywhere on\n the face defined by af and bf.\n \"\"\"\n if D is not None:\n A = np.hstack([C, D])\n a = np.hstack([af, np.zeros(D.shape[1])])\n else:\n A = C\n a = af\n E = []\n for i in range(A.shape[0]):\n A_i = np.array(A[i, :])\n b_i = b[i]\n sol = solvers._solve_lp_using_cvxopt(c=A_i, G=A, h=b, A=a.T, b=bf)\n if sol['status'] != 'optimal':\n raise Exception('unique_equalityset: LP returned status ' + str\n (sol['status']))\n if np.abs(sol['primal objective'] - b_i) < abs_tol:\n E.append(i)\n if len(E) == 0:\n raise Exception('unique_equalityset: empty E')\n return np.array(E)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass Ridge(object):\n \"\"\"A ridge.\n\n Attributes:\n\n - `E_r`: Equality set of a facet\n\n - `ar, br`: Affine hull of the facet\n s.t. P_{E_0} = P intersection {x | ar x = br}.\n \"\"\"\n\n def __init__(self, E, a, b):\n self.E_r = E\n self.ar = a\n self.br = b\n\n\nclass Ridge_Facet(object):\n \"\"\"A ridge facet.\n\n Attributes:\n\n - `E_r`: Equality set of a ridge\n\n - `ar,br`: Affine hull of the ridge s.t.\n P_{E_f} intersection {x | ar x = br}\n defines the ridge, where E_f is the\n equality set of the facet.\n\n - `E_0`: Equality set of a facet\n\n - `af,bf`: Affine hull of the facet.\n \"\"\"\n\n def __init__(self, E_r, ar, br, E_0, af, bf):\n self.E_r = E_r\n self.ar = ar\n self.br = br\n self.E_0 = E_0\n self.af = af\n self.bf = bf\n\n\n<mask token>\n\n\ndef unique_equalityset(C, D, b, af, bf, abs_tol=1e-07, verbose=0):\n \"\"\"Return equality set E with the following property:\n\n P_E = {x | af x = bf} intersection P\n\n where P is the polytope C x + D y < b\n\n The inequalities have to be satisfied with equality everywhere on\n the face defined by af and bf.\n \"\"\"\n if D is not None:\n A = np.hstack([C, D])\n a = np.hstack([af, np.zeros(D.shape[1])])\n else:\n A = C\n a = af\n E = []\n for i in range(A.shape[0]):\n A_i = np.array(A[i, :])\n b_i = b[i]\n sol = solvers._solve_lp_using_cvxopt(c=A_i, G=A, h=b, A=a.T, b=bf)\n if sol['status'] != 'optimal':\n raise Exception('unique_equalityset: LP returned status ' + str\n (sol['status']))\n if np.abs(sol['primal objective'] - b_i) < abs_tol:\n E.append(i)\n if len(E) == 0:\n raise Exception('unique_equalityset: empty E')\n return np.array(E)\n\n\ndef unique_equalityset2(C, D, b, opt_sol, abs_tol=1e-07):\n A = np.hstack([C, D])\n E0 = np.nonzero(np.abs(np.dot(A, opt_sol) - b) < abs_tol)[0]\n af, bf = proj_aff(C[E0, :], D[E0, :], b[E0], expected_dim=1)\n ineq = np.hstack([af, np.zeros(D.shape[1])])\n G = np.vstack([A, np.vstack([ineq, -ineq])])\n h = np.hstack([b, np.hstack([bf, -bf])])\n m = G.shape[0]\n n = G.shape[1]\n e = 0.001\n v = np.vstack([np.zeros([1, n]), np.eye(n)]).T\n v = v - np.array([np.mean(v, axis=1)]).T\n v = v * e\n ht = h + np.amin(-np.dot(G, v), axis=1)\n H1 = np.hstack([G, -np.eye(m)])\n H2 = np.hstack([G, np.zeros([m, m])])\n H3 = np.hstack([np.zeros([m, n]), -np.eye(m)])\n H = np.vstack([H1, np.vstack([H2, H3])])\n h = np.hstack([ht, np.hstack([h, np.zeros(m)])])\n c = np.hstack([np.zeros(n), np.ones(m)])\n sol = solvers.lpsolve(c, H, h, solver='glpk')\n if not sol['status'] == 'optimal':\n raise Exception('unique_equalityset: LP returned status ' + str(sol\n ['status']))\n opt_sol2 = np.array(sol['x']).flatten()\n x = opt_sol2[range(n)]\n s = opt_sol2[range(n, len(opt_sol2))]\n E = np.nonzero(s > abs_tol)[0]\n print(E)\n E = np.sort(E[np.nonzero(E < C.shape[0])])\n at, bt = proj_aff(C[E, :], D[E, :], b[E])\n if bt.size != 1 or np.sum(np.abs(at - af)) + np.abs(bt - bf) > abs_tol:\n raise Exception('unique_equalityset2: affine hulls not the same')\n return E\n\n\ndef cheby_center(C, D, b):\n \"\"\"Calculate Chebyshev center for the polytope `C x + D y <= b`.\n\n Input:\n `C, D, b`: Polytope parameters\n\n Output:\n `x_0, y_0`: The chebyshev centra\n `boolean`: True if a point could be found, False otherwise.\n \"\"\"\n d = C.shape[1]\n k = D.shape[1]\n A = np.hstack([C, D])\n dim = np.shape(A)[1]\n c = -np.r_[np.zeros(dim), 1]\n norm2 = np.sqrt(np.sum(A * A, axis=1))\n G = np.c_[A, norm2]\n sol = solvers.lpsolve(c, G, h=b, solver='glpk')\n if sol['status'] == 'optimal':\n 
opt = np.array(sol['x'][0:-1]).flatten()\n return opt[range(d)], opt[range(d, d + k)], True\n else:\n return np.zeros(d), np.zeros(k), False\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass Ridge(object):\n \"\"\"A ridge.\n\n Attributes:\n\n - `E_r`: Equality set of a facet\n\n - `ar, br`: Affine hull of the facet\n s.t. P_{E_0} = P intersection {x | ar x = br}.\n \"\"\"\n\n def __init__(self, E, a, b):\n self.E_r = E\n self.ar = a\n self.br = b\n\n\nclass Ridge_Facet(object):\n \"\"\"A ridge facet.\n\n Attributes:\n\n - `E_r`: Equality set of a ridge\n\n - `ar,br`: Affine hull of the ridge s.t.\n P_{E_f} intersection {x | ar x = br}\n defines the ridge, where E_f is the\n equality set of the facet.\n\n - `E_0`: Equality set of a facet\n\n - `af,bf`: Affine hull of the facet.\n \"\"\"\n\n def __init__(self, E_r, ar, br, E_0, af, bf):\n self.E_r = E_r\n self.ar = ar\n self.br = br\n self.E_0 = E_0\n self.af = af\n self.bf = bf\n\n\ndef esp(CC, DD, bb, centered=False, abs_tol=1e-10, verbose=0):\n \"\"\"Project polytope [C D] x <= b onto C coordinates.\n\n Projects the polytope [C D] x <= b onto the\n coordinates that correspond to C. The projection of the polytope\n P = {[C D]x <= b} where C is M x D and D is M x K is\n defined as proj(P) = {x in R^d | exist y in R^k s.t Cx + Dy < b}\n \"\"\"\n if 'glpk' not in solvers.installed_solvers:\n raise Exception(\n 'projection_esp error: Equality set projection requires `cvxopt.glpk` to run.'\n )\n nonzerorows = np.nonzero(np.sum(np.abs(np.hstack([CC, DD])), axis=1) >\n abs_tol)[0]\n nonzeroxcols = np.nonzero(np.sum(np.abs(CC), axis=0) > abs_tol)[0]\n nonzeroycols = np.nonzero(np.sum(np.abs(DD), axis=0) > abs_tol)[0]\n C = CC[nonzerorows, :].copy()\n D = DD[nonzerorows, :].copy()\n C = C[:, nonzeroxcols]\n D = D[:, nonzeroycols]\n b = bb[nonzerorows].copy()\n if not centered:\n xc0, yc0, trans = cheby_center(C, D, b)\n if trans:\n b = b - np.dot(C, xc0).flatten() - np.dot(D, yc0).flatten()\n else:\n b = b\n else:\n trans = False\n d = C.shape[1]\n k = D.shape[1]\n if verbose > 0:\n print('Projecting from dim ' + str(d + k) + ' to ' + str(d))\n if k == 0:\n return C, bb, []\n if d == 1:\n c = np.zeros(d + k)\n c[0] = 1\n G = np.hstack([C, D])\n sol = solvers.lpsolve(c, G, b, solver='glpk')\n if sol['status'] != 'optimal':\n raise Exception(\n 'esp: projection to 1D is not full-dimensional, LP returned status '\n + str(sol['status']))\n min_sol = np.array(sol['x']).flatten()\n min_dual_sol = np.array(sol['z']).flatten()\n sol = solvers.lpsolve(-c, G, b, solver='glpk')\n if sol['status'] != 'optimal':\n raise Exception(\n 'esp: projection to 1D is not full-dimensional, ' +\n 'LP returned status ' + str(sol['status']))\n max_sol = np.array(sol['x']).flatten()\n max_dual_sol = np.array(sol['z']).flatten()\n x_min = min_sol[0]\n x_max = max_sol[0]\n y_min = min_sol[range(1, k + 1)]\n y_max = max_sol[range(1, k + 1)]\n if is_dual_degenerate(c, G, b, None, None, min_sol, min_dual_sol):\n E_min = unique_equalityset(C, D, b, np.array([1.0]), x_min + \n abs_tol / 3, abs_tol=abs_tol)\n else:\n E_min = np.nonzero(np.abs(np.dot(G, min_sol) - b) < abs_tol)[0]\n if is_dual_degenerate(c, G, b, None, None, max_sol, max_dual_sol):\n E_max = unique_equalityset(C, D, b, np.array([1.0]), x_max - \n abs_tol / 3, abs_tol=abs_tol)\n else:\n E_max = np.nonzero(np.abs(np.dot(G, max_sol) - b) < abs_tol)[0]\n G = np.array([[1.0], [-1.0]])\n g = np.array([x_max, -x_min])\n if trans:\n g = g + np.dot(G, xc0)\n E_max = nonzerorows[E_max]\n E_min = nonzerorows[E_min]\n if verbose > 0:\n print('Returning projection from dim ' + str(d + k) +\n ' to dim 1 \\n')\n return G, g, [E_max, E_min]\n E = []\n L = []\n E_0, af, bf = shoot(C, D, 
b, abs_tol=abs_tol)\n ridge_list = ridge(C, D, b, E_0, af, bf, abs_tol=abs_tol, verbose=verbose)\n for i in range(len(ridge_list)):\n r = ridge_list[i]\n L.append(Ridge_Facet(r.E_r, r.ar, r.br, E_0, af, bf))\n G = af.T\n g = bf\n if verbose > 0:\n print('\\nStarting eq set ' + str(E_0) + '\\nStarting ridges ')\n for rr in L:\n print(str(rr.E_r))\n E.append(E_0)\n while len(L) > 0:\n rid_fac1 = L[0]\n if verbose > 0:\n print('\\nLooking for neighbors to ' + str(rid_fac1.E_0) +\n ' and ' + str(rid_fac1.E_r) + ' ..')\n E_adj, a_adj, b_adj = adjacent(C, D, b, rid_fac1, abs_tol=abs_tol)\n if verbose > 0:\n print('found neighbor ' + str(E_adj) +\n '. \\n\\nLooking for ridges of neighbor..')\n ridge_list = ridge(C, D, b, E_adj, a_adj, b_adj, abs_tol=abs_tol,\n verbose=verbose)\n if verbose > 0:\n print('found ' + str(len(ridge_list)) + ' ridges\\n')\n found_org = False\n for i in range(len(ridge_list)):\n r = ridge_list[i]\n E_r = r.E_r\n ar = r.ar\n br = r.br\n found = False\n for j in range(len(L)):\n rid_fac2 = L[j]\n A_r = rid_fac2.E_r\n if len(A_r) != len(E_r):\n continue\n t1 = np.sort(np.array(A_r))\n t2 = np.sort(np.array(E_r))\n if np.sum(np.abs(t1 - t2)) < abs_tol:\n found = True\n break\n if found:\n if verbose > 0:\n print('Ridge ' + str(E_r) +\n ' already visited, removing from L..')\n if rid_fac2 == rid_fac1:\n found_org = True\n L.remove(rid_fac2)\n else:\n if verbose > 0:\n print('Adding ridge-facet ' + str(E_adj) + ' ' + str(\n E_r) + '')\n L.append(Ridge_Facet(E_r, ar, br, E_adj, a_adj, b_adj))\n if not found_org:\n print('Expected ridge ' + str(rid_fac1.E_r))\n print('but got ridges ')\n for rid in ridge_list:\n print(rid.E_r)\n raise Exception(\n 'esp: ridge did not return neighboring ridge as expected')\n G = np.vstack([G, a_adj])\n g = np.hstack([g, b_adj])\n E.append(E_adj)\n if trans:\n g = g + np.dot(G, xc0)\n for Ef in E:\n Ef = nonzerorows[Ef]\n return G, g, E\n\n\ndef shoot(C, D, b, maxiter=1000, abs_tol=1e-07):\n \"\"\"Return random equality set of P that projects on a projection facet.\n\n Returns randomly selected equality set E_0 of P such\n that the projection of the equality set is a facet of the projection.\n\n @param C: Matrix defining the polytope Cx+Dy <= b\n @param D: Matrix defining the polytope Cx+Dy <= b\n @param b: Vector defining the polytope Cx+Dy <= b\n\n @return: `E_0,af,bf`: Equality set and affine hull\n \"\"\"\n d = C.shape[1]\n k = D.shape[1]\n iter = 0\n while True:\n if iter > maxiter:\n raise Exception('shoot: could not find starting equality set')\n gamma = np.random.rand(d) - 0.5\n c = np.zeros(k + 1)\n c[0] = -1\n G = np.hstack([np.array([np.dot(C, gamma)]).T, D])\n sol = solvers.lpsolve(c, G, b, solver='glpk')\n opt_sol = np.array(sol['x']).flatten()\n opt_dual = np.array(sol['z']).flatten()\n r_opt = opt_sol[0]\n y_opt = np.array(opt_sol[range(1, len(opt_sol))]).flatten()\n x_opt = r_opt * gamma\n E_0 = np.nonzero(np.abs(np.dot(C, x_opt) + np.dot(D, y_opt) - b) <\n abs_tol)[0]\n DE0 = D[E_0, :]\n CE0 = C[E_0, :]\n b0 = b[E_0]\n if rank(np.dot(null_space(DE0.T).T, CE0)) == 1:\n break\n iter += 1\n af, bf = proj_aff(CE0, DE0, b0, abs_tol=abs_tol)\n if is_dual_degenerate(c, G, b, None, None, opt_sol, opt_dual, abs_tol=\n abs_tol):\n E_0 = unique_equalityset(C, D, b, af, bf, abs_tol=abs_tol)\n af, bf = proj_aff(C[E_0, :], D[E_0, :], b[E_0])\n if len(bf) > 1:\n raise Exception('shoot: wrong dimension of affine hull')\n return E_0, af.flatten(), bf\n\n\ndef ridge(C, D, b, E, af, bf, abs_tol=1e-07, verbose=0):\n \"\"\"Compute all ridges of a 
facet in the projection.\n\n Input:\n `C,D,b`: Original polytope data\n `E,af,bf`: Equality set and affine hull of a facet in the projection\n\n Output:\n `ridge_list`: A list containing all the ridges of\n the facet as Ridge objects\n \"\"\"\n d = C.shape[1]\n k = D.shape[1]\n Er_list = []\n q = C.shape[0]\n E_c = np.setdiff1d(range(q), E)\n C_E = C[E, :]\n D_E = D[E, :]\n b_E = b[E, :]\n C_Ec = C[E_c, :]\n D_Ec = D[E_c, :]\n b_Ec = b[E_c]\n S = C_Ec - np.dot(np.dot(D_Ec, linalg.pinv(D_E)), C_E)\n L = np.dot(D_Ec, null_space(D_E))\n t = b_Ec - np.dot(D_Ec, np.dot(linalg.pinv(D_E), b_E))\n if rank(np.hstack([C_E, D_E])) < k + 1:\n if verbose > 1:\n print('Doing recursive ESP call')\n u, s, v = linalg.svd(np.array([af]), full_matrices=1)\n sigma = s[0]\n v = v.T * u[0, 0]\n V_hat = v[:, [0]]\n V_tilde = v[:, range(1, v.shape[1])]\n Cnew = np.dot(S, V_tilde)\n Dnew = L\n bnew = t - np.dot(S, V_hat).flatten() * bf / sigma\n Anew = np.hstack([Cnew, Dnew])\n xc2, yc2, cen2 = cheby_center(Cnew, Dnew, bnew)\n bnew = bnew - np.dot(Cnew, xc2).flatten() - np.dot(Dnew, yc2).flatten()\n Gt, gt, E_t = esp(Cnew, Dnew, bnew, centered=True, abs_tol=abs_tol,\n verbose=0)\n if len(E_t[0]) == 0 or len(E_t[1]) == 0:\n raise Exception(\n 'ridge: recursive call did not return any equality sets')\n for i in range(len(E_t)):\n E_f = E_t[i]\n er = np.sort(np.hstack([E, E_c[E_f]]))\n ar = np.dot(Gt[i, :], V_tilde.T).flatten()\n br0 = gt[i].flatten()\n ar = ar - af * np.dot(af.flatten(), ar.flatten())\n br = br0 - bf * np.dot(af.flatten(), ar.flatten())\n norm = np.sqrt(np.sum(ar * ar))\n ar = ar * np.sign(br) / norm\n br = br * np.sign(br) / norm\n br = br + np.dot(Gt[i, :], xc2) / norm\n if len(ar) > d:\n raise Exception('ridge: wrong length of new ridge!')\n Er_list.append(Ridge(er, ar, br))\n else:\n if verbose > 0:\n print('Doing direct calculation of ridges')\n X = np.arange(S.shape[0])\n while len(X) > 0:\n i = X[0]\n X = np.setdiff1d(X, i)\n if np.linalg.norm(S[i, :]) < abs_tol:\n continue\n Si = S[i, :]\n Si = Si / np.linalg.norm(Si)\n if np.linalg.norm(af - np.dot(Si, af) * Si) > abs_tol:\n test1 = null_space(np.vstack([np.hstack([af, bf]), np.\n hstack([S[i, :], t[i]])]), nonempty=True)\n test2 = np.hstack([S, np.array([t]).T])\n test = np.dot(test1.T, test2.T)\n test = np.sum(np.abs(test), 0)\n Q_i = np.nonzero(test > abs_tol)[0]\n Q = np.nonzero(test < abs_tol)[0]\n X = np.setdiff1d(X, Q)\n Sq = S[Q_i, :]\n tq = t[Q_i]\n c = np.zeros(d + 1)\n c[0] = 1\n Gup = np.hstack([-np.ones([Sq.shape[0], 1]), Sq])\n Gdo = np.hstack([-1, np.zeros(Sq.shape[1])])\n G = np.vstack([Gup, Gdo])\n h = np.hstack([tq, 1])\n Al = np.zeros([2, 1])\n Ar = np.vstack([af, S[i, :]])\n A = np.hstack([Al, Ar])\n bb = np.hstack([bf, t[i]])\n sol = solvers._solve_lp_using_cvxopt(c, G, h, A=A, b=bb)\n if sol['status'] == 'optimal':\n tau = sol['x'][0]\n if tau < -abs_tol:\n ar = np.array([S[i, :]]).flatten()\n br = t[i].flatten()\n ar = ar - af * np.dot(af.flatten(), ar.flatten())\n br = br - bf * np.dot(af.flatten(), ar.flatten())\n norm = np.sqrt(np.sum(ar * ar))\n ar = ar / norm\n br = br / norm\n Er_list.append(Ridge(np.sort(np.hstack([E, E_c[Q]])\n ), ar, br))\n return Er_list\n\n\ndef adjacent(C, D, b, rid_fac, abs_tol=1e-07):\n \"\"\"Compute the (unique) adjacent facet.\n\n @param rid_fac: A Ridge_Facet object containing the parameters for\n a facet and one of its ridges.\n\n @return: (E_adj,a_adj,b_adj): The equality set and parameters for\n the adjacent facet such that::\n\n P_{E_adj} = P intersection {x | a_adj x = b_adj}\n 
\"\"\"\n E = rid_fac.E_0\n af = rid_fac.af\n bf = rid_fac.bf\n E_r = rid_fac.E_r\n ar = rid_fac.ar\n br = rid_fac.br\n d = C.shape[1]\n k = D.shape[1]\n C_er = C[E_r, :]\n D_er = D[E_r, :]\n b_er = b[E_r]\n c = -np.hstack([ar, np.zeros(k)])\n G = np.hstack([C_er, D_er])\n h = b_er\n A = np.hstack([af, np.zeros(k)])\n sol = solvers._solve_lp_using_cvxopt(c, G, h, A=A.T, b=bf * (1 - 0.01))\n if sol['status'] != 'optimal':\n print(G)\n print(h)\n print(af)\n print(bf)\n print(ar)\n print(br)\n print(np.dot(af, ar))\n data = {}\n data['C'] = C\n data['D'] = D\n data['b'] = b\n sio.savemat('matlabdata', data)\n with open('polytope.p', 'wb') as f:\n pickle.dump(data, f)\n raise Exception('adjacent: Lp returned status ' + str(sol['status']))\n opt_sol = np.array(sol['x']).flatten()\n dual_opt_sol = np.array(sol['z']).flatten()\n x_opt = opt_sol[range(d)]\n y_opt = opt_sol[range(d, d + k)]\n if is_dual_degenerate(c.flatten(), G, h, A, bf * (1 - 0.01), opt_sol,\n dual_opt_sol, abs_tol=abs_tol):\n E_temp = np.nonzero(np.abs(np.dot(G, opt_sol) - h) < abs_tol)[0]\n a_temp, b_temp = proj_aff(C_er[E_temp, :], D_er[E_temp, :], b_er[\n E_temp], expected_dim=1, abs_tol=abs_tol)\n E_adj = unique_equalityset(C, D, b, a_temp, b_temp, abs_tol=abs_tol)\n if len(E_adj) == 0:\n data = {}\n data['C'] = C\n data['D'] = D\n data['b'] = b\n data['Er'] = E_r + 1\n data['ar'] = ar\n data['br'] = br\n data['Ef'] = E + 1\n data['af'] = af\n data['bf'] = bf\n sio.savemat('matlabdata', data)\n raise Exception(\n 'adjacent: equality set computation returned empty set')\n else:\n r = np.abs(np.dot(C, x_opt) + np.dot(D, y_opt) - b) < abs_tol\n E_adj = np.nonzero(r)[0]\n C_eadj = C[E_adj, :]\n D_eadj = D[E_adj, :]\n b_eadj = b[E_adj]\n af_adj, bf_adj = proj_aff(C_eadj, D_eadj, b_eadj, abs_tol=abs_tol)\n return E_adj, af_adj, bf_adj\n\n\ndef proj_aff(Ce, De, be, expected_dim=None, abs_tol=1e-07):\n \"\"\"Affine projection.\n\n Compute the set aff = {x | Ce x + De y = be} on the form\n aff = ({x | a x = b} intersection {Ce x + De y < be}).\n\n Input: Polytope parameters Ce, De and be\n\n Output: Constants a and b\n \"\"\"\n ind = np.nonzero(np.sum(np.abs(De), axis=0) > abs_tol)[0]\n D = De[:, ind]\n if D.shape[1] == 0:\n a = Ce\n b = be\n a_n, b_n = normalize(a, b)\n if expected_dim is not None:\n if expected_dim != b_n.size:\n raise Exception('proj_aff: wrong dimension calculated in 1')\n return a_n.flatten(), b_n\n sh = np.shape(D.T)\n m = sh[0]\n n = sh[1]\n nDe = null_space(D.T)\n a = np.dot(nDe.T, Ce)\n b = np.dot(nDe.T, be)\n a_n, b_n = normalize(a, b)\n if expected_dim is not None:\n if expected_dim != b_n.size:\n raise Exception('proj_aff: wrong dimension calculated in 2')\n return a_n, b_n\n\n\n<mask token>\n\n\ndef unique_equalityset(C, D, b, af, bf, abs_tol=1e-07, verbose=0):\n \"\"\"Return equality set E with the following property:\n\n P_E = {x | af x = bf} intersection P\n\n where P is the polytope C x + D y < b\n\n The inequalities have to be satisfied with equality everywhere on\n the face defined by af and bf.\n \"\"\"\n if D is not None:\n A = np.hstack([C, D])\n a = np.hstack([af, np.zeros(D.shape[1])])\n else:\n A = C\n a = af\n E = []\n for i in range(A.shape[0]):\n A_i = np.array(A[i, :])\n b_i = b[i]\n sol = solvers._solve_lp_using_cvxopt(c=A_i, G=A, h=b, A=a.T, b=bf)\n if sol['status'] != 'optimal':\n raise Exception('unique_equalityset: LP returned status ' + str\n (sol['status']))\n if np.abs(sol['primal objective'] - b_i) < abs_tol:\n E.append(i)\n if len(E) == 0:\n raise Exception('unique_equalityset: 
empty E')\n return np.array(E)\n\n\ndef unique_equalityset2(C, D, b, opt_sol, abs_tol=1e-07):\n A = np.hstack([C, D])\n E0 = np.nonzero(np.abs(np.dot(A, opt_sol) - b) < abs_tol)[0]\n af, bf = proj_aff(C[E0, :], D[E0, :], b[E0], expected_dim=1)\n ineq = np.hstack([af, np.zeros(D.shape[1])])\n G = np.vstack([A, np.vstack([ineq, -ineq])])\n h = np.hstack([b, np.hstack([bf, -bf])])\n m = G.shape[0]\n n = G.shape[1]\n e = 0.001\n v = np.vstack([np.zeros([1, n]), np.eye(n)]).T\n v = v - np.array([np.mean(v, axis=1)]).T\n v = v * e\n ht = h + np.amin(-np.dot(G, v), axis=1)\n H1 = np.hstack([G, -np.eye(m)])\n H2 = np.hstack([G, np.zeros([m, m])])\n H3 = np.hstack([np.zeros([m, n]), -np.eye(m)])\n H = np.vstack([H1, np.vstack([H2, H3])])\n h = np.hstack([ht, np.hstack([h, np.zeros(m)])])\n c = np.hstack([np.zeros(n), np.ones(m)])\n sol = solvers.lpsolve(c, H, h, solver='glpk')\n if not sol['status'] == 'optimal':\n raise Exception('unique_equalityset: LP returned status ' + str(sol\n ['status']))\n opt_sol2 = np.array(sol['x']).flatten()\n x = opt_sol2[range(n)]\n s = opt_sol2[range(n, len(opt_sol2))]\n E = np.nonzero(s > abs_tol)[0]\n print(E)\n E = np.sort(E[np.nonzero(E < C.shape[0])])\n at, bt = proj_aff(C[E, :], D[E, :], b[E])\n if bt.size != 1 or np.sum(np.abs(at - af)) + np.abs(bt - bf) > abs_tol:\n raise Exception('unique_equalityset2: affine hulls not the same')\n return E\n\n\ndef cheby_center(C, D, b):\n \"\"\"Calculate Chebyshev center for the polytope `C x + D y <= b`.\n\n Input:\n `C, D, b`: Polytope parameters\n\n Output:\n `x_0, y_0`: The chebyshev centra\n `boolean`: True if a point could be found, False otherwise.\n \"\"\"\n d = C.shape[1]\n k = D.shape[1]\n A = np.hstack([C, D])\n dim = np.shape(A)[1]\n c = -np.r_[np.zeros(dim), 1]\n norm2 = np.sqrt(np.sum(A * A, axis=1))\n G = np.c_[A, norm2]\n sol = solvers.lpsolve(c, G, h=b, solver='glpk')\n if sol['status'] == 'optimal':\n opt = np.array(sol['x'][0:-1]).flatten()\n return opt[range(d)], opt[range(d, d + k)], True\n else:\n return np.zeros(d), np.zeros(k), False\n\n\n<mask token>\n\n\ndef rank(A, eps=1e-15):\n u, s, vh = linalg.svd(A)\n m = A.shape[0]\n n = A.shape[1]\n tol = np.amax([m, n]) * np.amax(s) * eps\n return np.sum(s > tol)\n\n\ndef null_space(A, eps=1e-15, nonempty=False):\n \"\"\"Returns the null space N_A to matrix A such that A N_A = 0.\"\"\"\n u, s, v = linalg.svd(A, full_matrices=1)\n m = A.shape[0]\n n = A.shape[1]\n tol = np.amax([m, n]) * np.amax(s) * eps\n rank = np.sum(s > tol)\n N_space = v[range(rank, n), :].T\n if nonempty and len(N_space) == 0:\n N_space = v[range(np.amax(n - 1, 1), n), :]\n return N_space\n",
"step-4": "<mask token>\n\n\nclass Ridge(object):\n \"\"\"A ridge.\n\n Attributes:\n\n - `E_r`: Equality set of a facet\n\n - `ar, br`: Affine hull of the facet\n s.t. P_{E_0} = P intersection {x | ar x = br}.\n \"\"\"\n\n def __init__(self, E, a, b):\n self.E_r = E\n self.ar = a\n self.br = b\n\n\nclass Ridge_Facet(object):\n \"\"\"A ridge facet.\n\n Attributes:\n\n - `E_r`: Equality set of a ridge\n\n - `ar,br`: Affine hull of the ridge s.t.\n P_{E_f} intersection {x | ar x = br}\n defines the ridge, where E_f is the\n equality set of the facet.\n\n - `E_0`: Equality set of a facet\n\n - `af,bf`: Affine hull of the facet.\n \"\"\"\n\n def __init__(self, E_r, ar, br, E_0, af, bf):\n self.E_r = E_r\n self.ar = ar\n self.br = br\n self.E_0 = E_0\n self.af = af\n self.bf = bf\n\n\ndef esp(CC, DD, bb, centered=False, abs_tol=1e-10, verbose=0):\n \"\"\"Project polytope [C D] x <= b onto C coordinates.\n\n Projects the polytope [C D] x <= b onto the\n coordinates that correspond to C. The projection of the polytope\n P = {[C D]x <= b} where C is M x D and D is M x K is\n defined as proj(P) = {x in R^d | exist y in R^k s.t Cx + Dy < b}\n \"\"\"\n if 'glpk' not in solvers.installed_solvers:\n raise Exception(\n 'projection_esp error: Equality set projection requires `cvxopt.glpk` to run.'\n )\n nonzerorows = np.nonzero(np.sum(np.abs(np.hstack([CC, DD])), axis=1) >\n abs_tol)[0]\n nonzeroxcols = np.nonzero(np.sum(np.abs(CC), axis=0) > abs_tol)[0]\n nonzeroycols = np.nonzero(np.sum(np.abs(DD), axis=0) > abs_tol)[0]\n C = CC[nonzerorows, :].copy()\n D = DD[nonzerorows, :].copy()\n C = C[:, nonzeroxcols]\n D = D[:, nonzeroycols]\n b = bb[nonzerorows].copy()\n if not centered:\n xc0, yc0, trans = cheby_center(C, D, b)\n if trans:\n b = b - np.dot(C, xc0).flatten() - np.dot(D, yc0).flatten()\n else:\n b = b\n else:\n trans = False\n d = C.shape[1]\n k = D.shape[1]\n if verbose > 0:\n print('Projecting from dim ' + str(d + k) + ' to ' + str(d))\n if k == 0:\n return C, bb, []\n if d == 1:\n c = np.zeros(d + k)\n c[0] = 1\n G = np.hstack([C, D])\n sol = solvers.lpsolve(c, G, b, solver='glpk')\n if sol['status'] != 'optimal':\n raise Exception(\n 'esp: projection to 1D is not full-dimensional, LP returned status '\n + str(sol['status']))\n min_sol = np.array(sol['x']).flatten()\n min_dual_sol = np.array(sol['z']).flatten()\n sol = solvers.lpsolve(-c, G, b, solver='glpk')\n if sol['status'] != 'optimal':\n raise Exception(\n 'esp: projection to 1D is not full-dimensional, ' +\n 'LP returned status ' + str(sol['status']))\n max_sol = np.array(sol['x']).flatten()\n max_dual_sol = np.array(sol['z']).flatten()\n x_min = min_sol[0]\n x_max = max_sol[0]\n y_min = min_sol[range(1, k + 1)]\n y_max = max_sol[range(1, k + 1)]\n if is_dual_degenerate(c, G, b, None, None, min_sol, min_dual_sol):\n E_min = unique_equalityset(C, D, b, np.array([1.0]), x_min + \n abs_tol / 3, abs_tol=abs_tol)\n else:\n E_min = np.nonzero(np.abs(np.dot(G, min_sol) - b) < abs_tol)[0]\n if is_dual_degenerate(c, G, b, None, None, max_sol, max_dual_sol):\n E_max = unique_equalityset(C, D, b, np.array([1.0]), x_max - \n abs_tol / 3, abs_tol=abs_tol)\n else:\n E_max = np.nonzero(np.abs(np.dot(G, max_sol) - b) < abs_tol)[0]\n G = np.array([[1.0], [-1.0]])\n g = np.array([x_max, -x_min])\n if trans:\n g = g + np.dot(G, xc0)\n E_max = nonzerorows[E_max]\n E_min = nonzerorows[E_min]\n if verbose > 0:\n print('Returning projection from dim ' + str(d + k) +\n ' to dim 1 \\n')\n return G, g, [E_max, E_min]\n E = []\n L = []\n E_0, af, bf = shoot(C, D, 
b, abs_tol=abs_tol)\n ridge_list = ridge(C, D, b, E_0, af, bf, abs_tol=abs_tol, verbose=verbose)\n for i in range(len(ridge_list)):\n r = ridge_list[i]\n L.append(Ridge_Facet(r.E_r, r.ar, r.br, E_0, af, bf))\n G = af.T\n g = bf\n if verbose > 0:\n print('\\nStarting eq set ' + str(E_0) + '\\nStarting ridges ')\n for rr in L:\n print(str(rr.E_r))\n E.append(E_0)\n while len(L) > 0:\n rid_fac1 = L[0]\n if verbose > 0:\n print('\\nLooking for neighbors to ' + str(rid_fac1.E_0) +\n ' and ' + str(rid_fac1.E_r) + ' ..')\n E_adj, a_adj, b_adj = adjacent(C, D, b, rid_fac1, abs_tol=abs_tol)\n if verbose > 0:\n print('found neighbor ' + str(E_adj) +\n '. \\n\\nLooking for ridges of neighbor..')\n ridge_list = ridge(C, D, b, E_adj, a_adj, b_adj, abs_tol=abs_tol,\n verbose=verbose)\n if verbose > 0:\n print('found ' + str(len(ridge_list)) + ' ridges\\n')\n found_org = False\n for i in range(len(ridge_list)):\n r = ridge_list[i]\n E_r = r.E_r\n ar = r.ar\n br = r.br\n found = False\n for j in range(len(L)):\n rid_fac2 = L[j]\n A_r = rid_fac2.E_r\n if len(A_r) != len(E_r):\n continue\n t1 = np.sort(np.array(A_r))\n t2 = np.sort(np.array(E_r))\n if np.sum(np.abs(t1 - t2)) < abs_tol:\n found = True\n break\n if found:\n if verbose > 0:\n print('Ridge ' + str(E_r) +\n ' already visited, removing from L..')\n if rid_fac2 == rid_fac1:\n found_org = True\n L.remove(rid_fac2)\n else:\n if verbose > 0:\n print('Adding ridge-facet ' + str(E_adj) + ' ' + str(\n E_r) + '')\n L.append(Ridge_Facet(E_r, ar, br, E_adj, a_adj, b_adj))\n if not found_org:\n print('Expected ridge ' + str(rid_fac1.E_r))\n print('but got ridges ')\n for rid in ridge_list:\n print(rid.E_r)\n raise Exception(\n 'esp: ridge did not return neighboring ridge as expected')\n G = np.vstack([G, a_adj])\n g = np.hstack([g, b_adj])\n E.append(E_adj)\n if trans:\n g = g + np.dot(G, xc0)\n for Ef in E:\n Ef = nonzerorows[Ef]\n return G, g, E\n\n\ndef shoot(C, D, b, maxiter=1000, abs_tol=1e-07):\n \"\"\"Return random equality set of P that projects on a projection facet.\n\n Returns randomly selected equality set E_0 of P such\n that the projection of the equality set is a facet of the projection.\n\n @param C: Matrix defining the polytope Cx+Dy <= b\n @param D: Matrix defining the polytope Cx+Dy <= b\n @param b: Vector defining the polytope Cx+Dy <= b\n\n @return: `E_0,af,bf`: Equality set and affine hull\n \"\"\"\n d = C.shape[1]\n k = D.shape[1]\n iter = 0\n while True:\n if iter > maxiter:\n raise Exception('shoot: could not find starting equality set')\n gamma = np.random.rand(d) - 0.5\n c = np.zeros(k + 1)\n c[0] = -1\n G = np.hstack([np.array([np.dot(C, gamma)]).T, D])\n sol = solvers.lpsolve(c, G, b, solver='glpk')\n opt_sol = np.array(sol['x']).flatten()\n opt_dual = np.array(sol['z']).flatten()\n r_opt = opt_sol[0]\n y_opt = np.array(opt_sol[range(1, len(opt_sol))]).flatten()\n x_opt = r_opt * gamma\n E_0 = np.nonzero(np.abs(np.dot(C, x_opt) + np.dot(D, y_opt) - b) <\n abs_tol)[0]\n DE0 = D[E_0, :]\n CE0 = C[E_0, :]\n b0 = b[E_0]\n if rank(np.dot(null_space(DE0.T).T, CE0)) == 1:\n break\n iter += 1\n af, bf = proj_aff(CE0, DE0, b0, abs_tol=abs_tol)\n if is_dual_degenerate(c, G, b, None, None, opt_sol, opt_dual, abs_tol=\n abs_tol):\n E_0 = unique_equalityset(C, D, b, af, bf, abs_tol=abs_tol)\n af, bf = proj_aff(C[E_0, :], D[E_0, :], b[E_0])\n if len(bf) > 1:\n raise Exception('shoot: wrong dimension of affine hull')\n return E_0, af.flatten(), bf\n\n\ndef ridge(C, D, b, E, af, bf, abs_tol=1e-07, verbose=0):\n \"\"\"Compute all ridges of a 
facet in the projection.\n\n Input:\n `C,D,b`: Original polytope data\n `E,af,bf`: Equality set and affine hull of a facet in the projection\n\n Output:\n `ridge_list`: A list containing all the ridges of\n the facet as Ridge objects\n \"\"\"\n d = C.shape[1]\n k = D.shape[1]\n Er_list = []\n q = C.shape[0]\n E_c = np.setdiff1d(range(q), E)\n C_E = C[E, :]\n D_E = D[E, :]\n b_E = b[E, :]\n C_Ec = C[E_c, :]\n D_Ec = D[E_c, :]\n b_Ec = b[E_c]\n S = C_Ec - np.dot(np.dot(D_Ec, linalg.pinv(D_E)), C_E)\n L = np.dot(D_Ec, null_space(D_E))\n t = b_Ec - np.dot(D_Ec, np.dot(linalg.pinv(D_E), b_E))\n if rank(np.hstack([C_E, D_E])) < k + 1:\n if verbose > 1:\n print('Doing recursive ESP call')\n u, s, v = linalg.svd(np.array([af]), full_matrices=1)\n sigma = s[0]\n v = v.T * u[0, 0]\n V_hat = v[:, [0]]\n V_tilde = v[:, range(1, v.shape[1])]\n Cnew = np.dot(S, V_tilde)\n Dnew = L\n bnew = t - np.dot(S, V_hat).flatten() * bf / sigma\n Anew = np.hstack([Cnew, Dnew])\n xc2, yc2, cen2 = cheby_center(Cnew, Dnew, bnew)\n bnew = bnew - np.dot(Cnew, xc2).flatten() - np.dot(Dnew, yc2).flatten()\n Gt, gt, E_t = esp(Cnew, Dnew, bnew, centered=True, abs_tol=abs_tol,\n verbose=0)\n if len(E_t[0]) == 0 or len(E_t[1]) == 0:\n raise Exception(\n 'ridge: recursive call did not return any equality sets')\n for i in range(len(E_t)):\n E_f = E_t[i]\n er = np.sort(np.hstack([E, E_c[E_f]]))\n ar = np.dot(Gt[i, :], V_tilde.T).flatten()\n br0 = gt[i].flatten()\n ar = ar - af * np.dot(af.flatten(), ar.flatten())\n br = br0 - bf * np.dot(af.flatten(), ar.flatten())\n norm = np.sqrt(np.sum(ar * ar))\n ar = ar * np.sign(br) / norm\n br = br * np.sign(br) / norm\n br = br + np.dot(Gt[i, :], xc2) / norm\n if len(ar) > d:\n raise Exception('ridge: wrong length of new ridge!')\n Er_list.append(Ridge(er, ar, br))\n else:\n if verbose > 0:\n print('Doing direct calculation of ridges')\n X = np.arange(S.shape[0])\n while len(X) > 0:\n i = X[0]\n X = np.setdiff1d(X, i)\n if np.linalg.norm(S[i, :]) < abs_tol:\n continue\n Si = S[i, :]\n Si = Si / np.linalg.norm(Si)\n if np.linalg.norm(af - np.dot(Si, af) * Si) > abs_tol:\n test1 = null_space(np.vstack([np.hstack([af, bf]), np.\n hstack([S[i, :], t[i]])]), nonempty=True)\n test2 = np.hstack([S, np.array([t]).T])\n test = np.dot(test1.T, test2.T)\n test = np.sum(np.abs(test), 0)\n Q_i = np.nonzero(test > abs_tol)[0]\n Q = np.nonzero(test < abs_tol)[0]\n X = np.setdiff1d(X, Q)\n Sq = S[Q_i, :]\n tq = t[Q_i]\n c = np.zeros(d + 1)\n c[0] = 1\n Gup = np.hstack([-np.ones([Sq.shape[0], 1]), Sq])\n Gdo = np.hstack([-1, np.zeros(Sq.shape[1])])\n G = np.vstack([Gup, Gdo])\n h = np.hstack([tq, 1])\n Al = np.zeros([2, 1])\n Ar = np.vstack([af, S[i, :]])\n A = np.hstack([Al, Ar])\n bb = np.hstack([bf, t[i]])\n sol = solvers._solve_lp_using_cvxopt(c, G, h, A=A, b=bb)\n if sol['status'] == 'optimal':\n tau = sol['x'][0]\n if tau < -abs_tol:\n ar = np.array([S[i, :]]).flatten()\n br = t[i].flatten()\n ar = ar - af * np.dot(af.flatten(), ar.flatten())\n br = br - bf * np.dot(af.flatten(), ar.flatten())\n norm = np.sqrt(np.sum(ar * ar))\n ar = ar / norm\n br = br / norm\n Er_list.append(Ridge(np.sort(np.hstack([E, E_c[Q]])\n ), ar, br))\n return Er_list\n\n\ndef adjacent(C, D, b, rid_fac, abs_tol=1e-07):\n \"\"\"Compute the (unique) adjacent facet.\n\n @param rid_fac: A Ridge_Facet object containing the parameters for\n a facet and one of its ridges.\n\n @return: (E_adj,a_adj,b_adj): The equality set and parameters for\n the adjacent facet such that::\n\n P_{E_adj} = P intersection {x | a_adj x = b_adj}\n 
\"\"\"\n E = rid_fac.E_0\n af = rid_fac.af\n bf = rid_fac.bf\n E_r = rid_fac.E_r\n ar = rid_fac.ar\n br = rid_fac.br\n d = C.shape[1]\n k = D.shape[1]\n C_er = C[E_r, :]\n D_er = D[E_r, :]\n b_er = b[E_r]\n c = -np.hstack([ar, np.zeros(k)])\n G = np.hstack([C_er, D_er])\n h = b_er\n A = np.hstack([af, np.zeros(k)])\n sol = solvers._solve_lp_using_cvxopt(c, G, h, A=A.T, b=bf * (1 - 0.01))\n if sol['status'] != 'optimal':\n print(G)\n print(h)\n print(af)\n print(bf)\n print(ar)\n print(br)\n print(np.dot(af, ar))\n data = {}\n data['C'] = C\n data['D'] = D\n data['b'] = b\n sio.savemat('matlabdata', data)\n with open('polytope.p', 'wb') as f:\n pickle.dump(data, f)\n raise Exception('adjacent: Lp returned status ' + str(sol['status']))\n opt_sol = np.array(sol['x']).flatten()\n dual_opt_sol = np.array(sol['z']).flatten()\n x_opt = opt_sol[range(d)]\n y_opt = opt_sol[range(d, d + k)]\n if is_dual_degenerate(c.flatten(), G, h, A, bf * (1 - 0.01), opt_sol,\n dual_opt_sol, abs_tol=abs_tol):\n E_temp = np.nonzero(np.abs(np.dot(G, opt_sol) - h) < abs_tol)[0]\n a_temp, b_temp = proj_aff(C_er[E_temp, :], D_er[E_temp, :], b_er[\n E_temp], expected_dim=1, abs_tol=abs_tol)\n E_adj = unique_equalityset(C, D, b, a_temp, b_temp, abs_tol=abs_tol)\n if len(E_adj) == 0:\n data = {}\n data['C'] = C\n data['D'] = D\n data['b'] = b\n data['Er'] = E_r + 1\n data['ar'] = ar\n data['br'] = br\n data['Ef'] = E + 1\n data['af'] = af\n data['bf'] = bf\n sio.savemat('matlabdata', data)\n raise Exception(\n 'adjacent: equality set computation returned empty set')\n else:\n r = np.abs(np.dot(C, x_opt) + np.dot(D, y_opt) - b) < abs_tol\n E_adj = np.nonzero(r)[0]\n C_eadj = C[E_adj, :]\n D_eadj = D[E_adj, :]\n b_eadj = b[E_adj]\n af_adj, bf_adj = proj_aff(C_eadj, D_eadj, b_eadj, abs_tol=abs_tol)\n return E_adj, af_adj, bf_adj\n\n\ndef proj_aff(Ce, De, be, expected_dim=None, abs_tol=1e-07):\n \"\"\"Affine projection.\n\n Compute the set aff = {x | Ce x + De y = be} on the form\n aff = ({x | a x = b} intersection {Ce x + De y < be}).\n\n Input: Polytope parameters Ce, De and be\n\n Output: Constants a and b\n \"\"\"\n ind = np.nonzero(np.sum(np.abs(De), axis=0) > abs_tol)[0]\n D = De[:, ind]\n if D.shape[1] == 0:\n a = Ce\n b = be\n a_n, b_n = normalize(a, b)\n if expected_dim is not None:\n if expected_dim != b_n.size:\n raise Exception('proj_aff: wrong dimension calculated in 1')\n return a_n.flatten(), b_n\n sh = np.shape(D.T)\n m = sh[0]\n n = sh[1]\n nDe = null_space(D.T)\n a = np.dot(nDe.T, Ce)\n b = np.dot(nDe.T, be)\n a_n, b_n = normalize(a, b)\n if expected_dim is not None:\n if expected_dim != b_n.size:\n raise Exception('proj_aff: wrong dimension calculated in 2')\n return a_n, b_n\n\n\ndef is_dual_degenerate(c, G, h, A, b, x_opt, z_opt, abs_tol=1e-07):\n \"\"\"Return `True` if pair of dual problems is dual degenerate.\n\n Checks if the pair of dual problems::\n\n (P): min c'x (D): max h'z + b'y\n s.t Gx <= h s.t G'z + A'y = c\n Ax = b z <= 0\n\n is dual degenerate, i.e. 
if (P) has several optimal solutions.\n Optimal solutions x* and z* are required.\n\n Input:\n\n `G,h,A,b`: Parameters of (P)\n `x_opt`: One optimal solution to (P)\n `z_opt`: The optimal solution to (D) corresponding to\n _inequality constraints_ in (P)\n\n Output:\n `dual`: Boolean indicating whether (P) has many optimal solutions.\n \"\"\"\n D = -G\n d = -h.flatten()\n mu = -z_opt.flatten()\n I = np.nonzero(np.abs(np.dot(D, x_opt).flatten() - d) < abs_tol)[0]\n J = np.nonzero(mu > abs_tol)[0]\n i = mu < abs_tol\n i = i.astype(int)\n j = np.zeros(len(mu), dtype=int)\n j[I] = 1\n L = np.nonzero(i + j == 2)[0]\n nI = len(I)\n nJ = len(J)\n nL = len(L)\n DI = D[I, :]\n DJ = D[J, :]\n DL = D[L, :]\n dual = 0\n if A is None:\n test = DI\n else:\n test = np.vstack([DI, A])\n if rank(test) < np.amin(DI.shape):\n return True\n elif len(L) > 0:\n if A is None:\n Ae = DJ\n else:\n Ae = np.vstack([DJ, A])\n be = np.zeros(Ae.shape[0])\n Ai = -DL\n bi = np.zeros(nL)\n sol = solvers._solve_lp_using_cvxopt(c=-np.sum(DL, axis=0), G=Ai, h\n =bi, A=Ae, b=be)\n if sol['status'] == 'dual infeasible':\n return True\n if sol['primal objective'] > abs_tol:\n return True\n return False\n\n\ndef unique_equalityset(C, D, b, af, bf, abs_tol=1e-07, verbose=0):\n \"\"\"Return equality set E with the following property:\n\n P_E = {x | af x = bf} intersection P\n\n where P is the polytope C x + D y < b\n\n The inequalities have to be satisfied with equality everywhere on\n the face defined by af and bf.\n \"\"\"\n if D is not None:\n A = np.hstack([C, D])\n a = np.hstack([af, np.zeros(D.shape[1])])\n else:\n A = C\n a = af\n E = []\n for i in range(A.shape[0]):\n A_i = np.array(A[i, :])\n b_i = b[i]\n sol = solvers._solve_lp_using_cvxopt(c=A_i, G=A, h=b, A=a.T, b=bf)\n if sol['status'] != 'optimal':\n raise Exception('unique_equalityset: LP returned status ' + str\n (sol['status']))\n if np.abs(sol['primal objective'] - b_i) < abs_tol:\n E.append(i)\n if len(E) == 0:\n raise Exception('unique_equalityset: empty E')\n return np.array(E)\n\n\ndef unique_equalityset2(C, D, b, opt_sol, abs_tol=1e-07):\n A = np.hstack([C, D])\n E0 = np.nonzero(np.abs(np.dot(A, opt_sol) - b) < abs_tol)[0]\n af, bf = proj_aff(C[E0, :], D[E0, :], b[E0], expected_dim=1)\n ineq = np.hstack([af, np.zeros(D.shape[1])])\n G = np.vstack([A, np.vstack([ineq, -ineq])])\n h = np.hstack([b, np.hstack([bf, -bf])])\n m = G.shape[0]\n n = G.shape[1]\n e = 0.001\n v = np.vstack([np.zeros([1, n]), np.eye(n)]).T\n v = v - np.array([np.mean(v, axis=1)]).T\n v = v * e\n ht = h + np.amin(-np.dot(G, v), axis=1)\n H1 = np.hstack([G, -np.eye(m)])\n H2 = np.hstack([G, np.zeros([m, m])])\n H3 = np.hstack([np.zeros([m, n]), -np.eye(m)])\n H = np.vstack([H1, np.vstack([H2, H3])])\n h = np.hstack([ht, np.hstack([h, np.zeros(m)])])\n c = np.hstack([np.zeros(n), np.ones(m)])\n sol = solvers.lpsolve(c, H, h, solver='glpk')\n if not sol['status'] == 'optimal':\n raise Exception('unique_equalityset: LP returned status ' + str(sol\n ['status']))\n opt_sol2 = np.array(sol['x']).flatten()\n x = opt_sol2[range(n)]\n s = opt_sol2[range(n, len(opt_sol2))]\n E = np.nonzero(s > abs_tol)[0]\n print(E)\n E = np.sort(E[np.nonzero(E < C.shape[0])])\n at, bt = proj_aff(C[E, :], D[E, :], b[E])\n if bt.size != 1 or np.sum(np.abs(at - af)) + np.abs(bt - bf) > abs_tol:\n raise Exception('unique_equalityset2: affine hulls not the same')\n return E\n\n\ndef cheby_center(C, D, b):\n \"\"\"Calculate Chebyshev center for the polytope `C x + D y <= b`.\n\n Input:\n `C, D, b`: Polytope 
parameters\n\n Output:\n `x_0, y_0`: The chebyshev centra\n `boolean`: True if a point could be found, False otherwise.\n \"\"\"\n d = C.shape[1]\n k = D.shape[1]\n A = np.hstack([C, D])\n dim = np.shape(A)[1]\n c = -np.r_[np.zeros(dim), 1]\n norm2 = np.sqrt(np.sum(A * A, axis=1))\n G = np.c_[A, norm2]\n sol = solvers.lpsolve(c, G, h=b, solver='glpk')\n if sol['status'] == 'optimal':\n opt = np.array(sol['x'][0:-1]).flatten()\n return opt[range(d)], opt[range(d, d + k)], True\n else:\n return np.zeros(d), np.zeros(k), False\n\n\ndef normalize(AA, bb, abs_tol=1e-07):\n \"\"\"Normalize `A x = b` such that `A'A = 1` and `b > 0`.\n\n Also, remove duplicate lines.\n \"\"\"\n if AA.size == 0:\n return AA, bb\n dim = AA.size / bb.size\n A = AA.copy().reshape(bb.size, dim)\n b = bb.copy().reshape(bb.size, 1)\n keepind = np.nonzero(np.sum(np.abs(np.hstack([A, b])), axis=1) > abs_tol)[0\n ]\n A = A[keepind, :]\n b = b[keepind]\n anorm = np.sqrt(np.sum(A * A, axis=1))\n for i in range(len(anorm)):\n A[i, :] = A[i, :] * np.sign(b[i, 0]) / anorm[i]\n b[i, 0] = np.sign(b[i, 0]) * b[i, 0] / anorm[i]\n keep_row = []\n for i in range(len(anorm)):\n unique = True\n for j in range(i + 1, len(anorm)):\n test = np.sum(np.abs(A[i, :] - A[j, :])) + np.abs(b[i, 0] - b[j, 0]\n )\n if test < abs_tol:\n unique = False\n break\n if unique:\n keep_row.append(i)\n A_n = A[keep_row, :]\n b_n = b[keep_row, 0]\n if A_n.size == dim:\n A_n = A_n.flatten()\n return A_n, b_n.flatten()\n\n\ndef rank(A, eps=1e-15):\n u, s, vh = linalg.svd(A)\n m = A.shape[0]\n n = A.shape[1]\n tol = np.amax([m, n]) * np.amax(s) * eps\n return np.sum(s > tol)\n\n\ndef null_space(A, eps=1e-15, nonempty=False):\n \"\"\"Returns the null space N_A to matrix A such that A N_A = 0.\"\"\"\n u, s, v = linalg.svd(A, full_matrices=1)\n m = A.shape[0]\n n = A.shape[1]\n tol = np.amax([m, n]) * np.amax(s) * eps\n rank = np.sum(s > tol)\n N_space = v[range(rank, n), :].T\n if nonempty and len(N_space) == 0:\n N_space = v[range(np.amax(n - 1, 1), n), :]\n return N_space\n",
"step-5": "# Copyright (c) 2011-2014 by California Institute of Technology\n# All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions\n# are met:\n#\n# 1. Redistributions of source code must retain the above copyright\n# notice, this list of conditions and the following disclaimer.\n#\n# 2. Redistributions in binary form must reproduce the above copyright\n# notice, this list of conditions and the following disclaimer in the\n# documentation and/or other materials provided with the distribution.\n#\n# 3. Neither the name of the California Institute of Technology nor\n# the names of its contributors may be used to endorse or promote\n# products derived from this software without specific prior\n# written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n# \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS\n# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL CALTECH\n# OR THE CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF\n# USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND\n# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,\n# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT\n# OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF\n# SUCH DAMAGE.\nr\"\"\"Equality Set Projection (ESP).\n\nNon-vertex polytope projection method from\n- https://web.archive.org/web/20150103142532/\n https://www-control.eng.cam.ac.uk/~cnj22/research/projection.html\n- https://infoscience.epfl.ch/record/169768\n\nVery unstable, can not handle complex polytopes.\n\n\nReference\n=========\n\n\\cite{Jones04}\n\"\"\"\n# Created by P. Nilsson, 8/2/11\nimport pickle\n\nimport numpy as np\nfrom scipy import io as sio\nfrom scipy import linalg\n\nfrom polytope import solvers\n\n\nclass Ridge(object):\n \"\"\"A ridge.\n\n Attributes:\n\n - `E_r`: Equality set of a facet\n\n - `ar, br`: Affine hull of the facet\n s.t. P_{E_0} = P intersection {x | ar x = br}.\n \"\"\"\n\n def __init__(self, E, a, b):\n self.E_r = E\n self.ar = a\n self.br = b\n\n\nclass Ridge_Facet(object):\n \"\"\"A ridge facet.\n\n Attributes:\n\n - `E_r`: Equality set of a ridge\n\n - `ar,br`: Affine hull of the ridge s.t.\n P_{E_f} intersection {x | ar x = br}\n defines the ridge, where E_f is the\n equality set of the facet.\n\n - `E_0`: Equality set of a facet\n\n - `af,bf`: Affine hull of the facet.\n \"\"\"\n\n def __init__(self, E_r, ar, br, E_0, af, bf):\n self.E_r = E_r\n self.ar = ar\n self.br = br\n self.E_0 = E_0\n self.af = af\n self.bf = bf\n\n\ndef esp(CC, DD, bb, centered=False, abs_tol=1e-10, verbose=0):\n \"\"\"Project polytope [C D] x <= b onto C coordinates.\n\n Projects the polytope [C D] x <= b onto the\n coordinates that correspond to C. 
The projection of the polytope\n P = {[C D]x <= b} where C is M x D and D is M x K is\n defined as proj(P) = {x in R^d | exist y in R^k s.t Cx + Dy < b}\n \"\"\"\n if 'glpk' not in solvers.installed_solvers:\n raise Exception(\n \"projection_esp error:\"\n \" Equality set projection requires `cvxopt.glpk` to run.\")\n # Remove zero columns and rows\n nonzerorows = np.nonzero(\n np.sum(np.abs(np.hstack([CC, DD])), axis=1) > abs_tol)[0]\n nonzeroxcols = np.nonzero(np.sum(np.abs(CC), axis=0) > abs_tol)[0]\n nonzeroycols = np.nonzero(np.sum(np.abs(DD), axis=0) > abs_tol)[0]\n C = CC[nonzerorows, :].copy()\n D = DD[nonzerorows, :].copy()\n C = C[:, nonzeroxcols]\n D = D[:, nonzeroycols]\n b = bb[nonzerorows].copy()\n # Make sure origo is inside polytope\n if not centered:\n xc0, yc0, trans = cheby_center(C, D, b)\n if trans:\n b = b - np.dot(C, xc0).flatten() - np.dot(D, yc0).flatten()\n else:\n b = b\n else:\n trans = False\n d = C.shape[1]\n k = D.shape[1]\n if verbose > 0:\n print(\"Projecting from dim \" + str(d + k) + \" to \" + str(d))\n if k == 0:\n # Not projecting\n return C, bb, []\n if d == 1:\n # Projection to 1D\n c = np.zeros(d + k)\n c[0] = 1\n G = np.hstack([C, D])\n sol = solvers.lpsolve(c, G, b, solver='glpk')\n if sol['status'] != \"optimal\":\n raise Exception(\n \"esp: projection to 1D is not full-dimensional, \"\n \"LP returned status \" + str(sol['status']))\n min_sol = np.array(sol['x']).flatten()\n min_dual_sol = np.array(sol['z']).flatten()\n sol = solvers.lpsolve(-c, G, b, solver='glpk')\n if sol['status'] != \"optimal\":\n raise Exception(\n \"esp: projection to 1D is not full-dimensional, \" +\n \"LP returned status \" + str(sol['status']))\n max_sol = np.array(sol['x']).flatten()\n max_dual_sol = np.array(sol['z']).flatten()\n # min, max\n x_min = min_sol[0]\n x_max = max_sol[0]\n y_min = min_sol[range(1, k + 1)]\n y_max = max_sol[range(1, k + 1)]\n if is_dual_degenerate(c, G, b, None, None, min_sol, min_dual_sol):\n # Min case, relax constraint a little to avoid infeasibility\n E_min = unique_equalityset(\n C, D, b, np.array([1.]), x_min + abs_tol / 3, abs_tol=abs_tol)\n else:\n E_min = np.nonzero(np.abs(np.dot(G, min_sol) - b) < abs_tol)[0]\n if is_dual_degenerate(c, G, b, None, None, max_sol, max_dual_sol):\n # Max case, relax constraint a little to avoid infeasibility\n E_max = unique_equalityset(\n C, D, b, np.array([1.]), x_max - abs_tol / 3, abs_tol=abs_tol)\n else:\n E_max = np.nonzero(np.abs(np.dot(G, max_sol) - b) < abs_tol)[0]\n G = np.array([[1.], [-1.]])\n g = np.array([x_max, -x_min])\n # Relocate\n if trans:\n g = g + np.dot(G, xc0)\n # Return zero cols/rows\n E_max = nonzerorows[E_max]\n E_min = nonzerorows[E_min]\n if verbose > 0:\n print(\n \"Returning projection from dim \" +\n str(d + k) + \" to dim 1 \\n\")\n return G, g, [E_max, E_min]\n E = []\n L = []\n E_0, af, bf = shoot(C, D, b, abs_tol=abs_tol)\n ridge_list = ridge(C, D, b, E_0, af, bf, abs_tol=abs_tol, verbose=verbose)\n for i in range(len(ridge_list)):\n r = ridge_list[i]\n L.append(Ridge_Facet(r.E_r, r.ar, r.br, E_0, af, bf))\n G = af.T\n g = bf\n if verbose > 0:\n print(\"\\nStarting eq set \" + str(E_0) + \"\\nStarting ridges \")\n for rr in L:\n print(str(rr.E_r))\n E.append(E_0)\n while len(L) > 0:\n rid_fac1 = L[0]\n if verbose > 0:\n print(\"\\nLooking for neighbors to \" + str(rid_fac1.E_0) +\n \" and \" + str(rid_fac1.E_r) + \" ..\")\n E_adj, a_adj, b_adj = adjacent(C, D, b, rid_fac1, abs_tol=abs_tol)\n if verbose > 0:\n print(\"found neighbor \" + str(E_adj) +\n \". 
\\n\\nLooking for ridges of neighbor..\")\n ridge_list = ridge(\n C, D, b, E_adj, a_adj, b_adj,\n abs_tol=abs_tol, verbose=verbose)\n if verbose > 0:\n print(\"found \" + str(len(ridge_list)) + \" ridges\\n\")\n found_org = False\n for i in range(len(ridge_list)):\n r = ridge_list[i]\n E_r = r.E_r\n ar = r.ar\n br = r.br\n found = False\n for j in range(len(L)):\n rid_fac2 = L[j]\n A_r = rid_fac2.E_r\n if len(A_r) != len(E_r):\n continue\n t1 = np.sort(np.array(A_r))\n t2 = np.sort(np.array(E_r))\n if np.sum(np.abs(t1 - t2)) < abs_tol:\n found = True\n break\n if found:\n if verbose > 0:\n print(\"Ridge \" + str(E_r) +\n \" already visited, removing from L..\")\n if rid_fac2 == rid_fac1:\n found_org = True\n L.remove(rid_fac2)\n else:\n if verbose > 0:\n print(\"Adding ridge-facet \" + str(E_adj) +\n \" \" + str(E_r) + \"\")\n L.append(Ridge_Facet(E_r, ar, br, E_adj, a_adj, b_adj))\n if not found_org:\n print(\"Expected ridge \" + str(rid_fac1.E_r))\n print(\"but got ridges \")\n for rid in ridge_list:\n print(rid.E_r)\n raise Exception(\n \"esp: ridge did not return neighboring ridge as expected\")\n G = np.vstack([G, a_adj])\n g = np.hstack([g, b_adj])\n E.append(E_adj)\n # Restore center\n if trans:\n g = g + np.dot(G, xc0)\n # Return zero rows\n for Ef in E:\n Ef = nonzerorows[Ef]\n return G, g, E\n\n\ndef shoot(C, D, b, maxiter=1000, abs_tol=1e-7):\n \"\"\"Return random equality set of P that projects on a projection facet.\n\n Returns randomly selected equality set E_0 of P such\n that the projection of the equality set is a facet of the projection.\n\n @param C: Matrix defining the polytope Cx+Dy <= b\n @param D: Matrix defining the polytope Cx+Dy <= b\n @param b: Vector defining the polytope Cx+Dy <= b\n\n @return: `E_0,af,bf`: Equality set and affine hull\n \"\"\"\n d = C.shape[1]\n k = D.shape[1]\n iter = 0\n while True:\n if iter > maxiter:\n raise Exception(\n \"shoot: could not find starting equality set\")\n gamma = np.random.rand(d) - 0.5\n c = np.zeros(k + 1)\n c[0] = -1\n G = np.hstack([np.array([np.dot(C, gamma)]).T, D])\n sol = solvers.lpsolve(c, G, b, solver='glpk')\n opt_sol = np.array(sol['x']).flatten()\n opt_dual = np.array(sol['z']).flatten()\n r_opt = opt_sol[0]\n y_opt = np.array(opt_sol[range(1, len(opt_sol))]).flatten()\n x_opt = r_opt * gamma\n E_0 = np.nonzero(\n np.abs(np.dot(C, x_opt) + np.dot(D, y_opt) - b) < abs_tol)[0]\n DE0 = D[E_0, :]\n CE0 = C[E_0, :]\n b0 = b[E_0]\n if rank(np.dot(null_space(DE0.T).T, CE0)) == 1:\n break\n iter += 1\n af, bf = proj_aff(CE0, DE0, b0, abs_tol=abs_tol)\n if is_dual_degenerate(c, G, b, None, None, opt_sol,\n opt_dual, abs_tol=abs_tol):\n E_0 = unique_equalityset(C, D, b, af, bf, abs_tol=abs_tol)\n af, bf = proj_aff(C[E_0, :], D[E_0, :], b[E_0])\n if len(bf) > 1:\n raise Exception(\"shoot: wrong dimension of affine hull\")\n return E_0, af.flatten(), bf\n\n\ndef ridge(C, D, b, E, af, bf, abs_tol=1e-7, verbose=0):\n \"\"\"Compute all ridges of a facet in the projection.\n\n Input:\n `C,D,b`: Original polytope data\n `E,af,bf`: Equality set and affine hull of a facet in the projection\n\n Output:\n `ridge_list`: A list containing all the ridges of\n the facet as Ridge objects\n \"\"\"\n d = C.shape[1]\n k = D.shape[1]\n Er_list = []\n q = C.shape[0]\n E_c = np.setdiff1d(range(q), E)\n # E slices\n C_E = C[E, :]\n D_E = D[E, :]\n b_E = b[E, :]\n # E_c slices\n C_Ec = C[E_c, :]\n D_Ec = D[E_c, :]\n b_Ec = b[E_c]\n # dots\n S = C_Ec - np.dot(np.dot(D_Ec, linalg.pinv(D_E)), C_E)\n L = np.dot(D_Ec, null_space(D_E))\n t = b_Ec - 
np.dot(D_Ec, np.dot(linalg.pinv(D_E), b_E))\n if rank(np.hstack([C_E, D_E])) < k + 1:\n if verbose > 1:\n print(\"Doing recursive ESP call\")\n u, s, v = linalg.svd(np.array([af]), full_matrices=1)\n sigma = s[0]\n v = v.T * u[0, 0] # Correct sign\n V_hat = v[:, [0]]\n V_tilde = v[:, range(1, v.shape[1])]\n Cnew = np.dot(S, V_tilde)\n Dnew = L\n bnew = t - np.dot(S, V_hat).flatten() * bf / sigma\n Anew = np.hstack([Cnew, Dnew])\n xc2, yc2, cen2 = cheby_center(Cnew, Dnew, bnew)\n bnew = bnew - np.dot(Cnew, xc2).flatten() - np.dot(Dnew, yc2).flatten()\n Gt, gt, E_t = esp(\n Cnew, Dnew, bnew,\n centered=True, abs_tol=abs_tol, verbose=0)\n if (len(E_t[0]) == 0) or (len(E_t[1]) == 0):\n raise Exception(\n \"ridge: recursive call did not return any equality sets\")\n for i in range(len(E_t)):\n E_f = E_t[i]\n er = np.sort(np.hstack([E, E_c[E_f]]))\n ar = np.dot(Gt[i, :], V_tilde.T).flatten()\n br0 = gt[i].flatten()\n # Make orthogonal to facet\n ar = ar - af * np.dot(af.flatten(), ar.flatten())\n br = br0 - bf * np.dot(af.flatten(), ar.flatten())\n # Normalize and make ridge equation point outwards\n norm = np.sqrt(np.sum(ar * ar))\n ar = ar * np.sign(br) / norm\n br = br * np.sign(br) / norm\n # Restore center\n br = br + np.dot(Gt[i, :], xc2) / norm\n if len(ar) > d:\n raise Exception(\"ridge: wrong length of new ridge!\")\n Er_list.append(Ridge(er, ar, br))\n else:\n if verbose > 0:\n print(\"Doing direct calculation of ridges\")\n X = np.arange(S.shape[0])\n while len(X) > 0:\n i = X[0]\n X = np.setdiff1d(X, i)\n if np.linalg.norm(S[i, :]) < abs_tol:\n continue\n Si = S[i, :]\n Si = Si / np.linalg.norm(Si)\n if np.linalg.norm(af - np.dot(Si, af) * Si) > abs_tol:\n test1 = null_space(\n np.vstack([\n np.hstack([af, bf]),\n np.hstack([S[i, :], t[i]])]),\n nonempty=True)\n test2 = np.hstack([S, np.array([t]).T])\n test = np.dot(test1.T, test2.T)\n test = np.sum(np.abs(test), 0)\n Q_i = np.nonzero(test > abs_tol)[0]\n Q = np.nonzero(test < abs_tol)[0]\n X = np.setdiff1d(X, Q)\n # Have Q_i\n Sq = S[Q_i, :]\n tq = t[Q_i]\n c = np.zeros(d + 1)\n c[0] = 1\n Gup = np.hstack([-np.ones([Sq.shape[0], 1]), Sq])\n Gdo = np.hstack([-1, np.zeros(Sq.shape[1])])\n G = np.vstack([Gup, Gdo])\n h = np.hstack([tq, 1])\n Al = np.zeros([2, 1])\n Ar = np.vstack([af, S[i, :]])\n A = np.hstack([Al, Ar])\n bb = np.hstack([bf, t[i]])\n sol = solvers._solve_lp_using_cvxopt(\n c, G, h, A=A, b=bb)\n if sol['status'] == 'optimal':\n tau = sol['x'][0]\n if tau < -abs_tol:\n ar = np.array([S[i, :]]).flatten()\n br = t[i].flatten()\n # Make orthogonal to facet\n ar = ar - af * np.dot(af.flatten(), ar.flatten())\n br = br - bf * np.dot(af.flatten(), ar.flatten())\n # Normalize and make ridge equation point outwards\n norm = np.sqrt(np.sum(ar * ar))\n ar = ar / norm\n br = br / norm\n # accumulate\n Er_list.append(\n Ridge(np.sort(np.hstack([E, E_c[Q]])), ar, br))\n return Er_list\n\n\ndef adjacent(C, D, b, rid_fac, abs_tol=1e-7):\n \"\"\"Compute the (unique) adjacent facet.\n\n @param rid_fac: A Ridge_Facet object containing the parameters for\n a facet and one of its ridges.\n\n @return: (E_adj,a_adj,b_adj): The equality set and parameters for\n the adjacent facet such that::\n\n P_{E_adj} = P intersection {x | a_adj x = b_adj}\n \"\"\"\n E = rid_fac.E_0\n af = rid_fac.af\n bf = rid_fac.bf\n #\n E_r = rid_fac.E_r\n ar = rid_fac.ar\n br = rid_fac.br\n # shape\n d = C.shape[1]\n k = D.shape[1]\n # E_r slices\n C_er = C[E_r, :]\n D_er = D[E_r, :]\n b_er = b[E_r]\n # stack\n c = -np.hstack([ar, np.zeros(k)])\n G = np.hstack([C_er, 
D_er])\n h = b_er\n A = np.hstack([af, np.zeros(k)])\n sol = solvers._solve_lp_using_cvxopt(\n c, G, h, A=A.T, b=bf * (1 - 0.01))\n if sol['status'] != \"optimal\":\n print(G)\n print(h)\n print(af)\n print(bf)\n print(ar)\n print(br)\n print(np.dot(af, ar))\n data = {}\n data[\"C\"] = C\n data[\"D\"] = D\n data[\"b\"] = b\n sio.savemat(\"matlabdata\", data)\n with open('polytope.p', 'wb') as f:\n pickle.dump(data, f)\n raise Exception(\n \"adjacent: Lp returned status \" + str(sol['status']))\n opt_sol = np.array(sol['x']).flatten()\n dual_opt_sol = np.array(sol['z']).flatten()\n x_opt = opt_sol[range(d)]\n y_opt = opt_sol[range(d, d + k)]\n if is_dual_degenerate(\n c.flatten(), G, h, A, bf * (1 - 0.01),\n opt_sol, dual_opt_sol, abs_tol=abs_tol):\n # If degenerate, compute affine hull and take preimage\n E_temp = np.nonzero(np.abs(np.dot(G, opt_sol) - h) < abs_tol)[0]\n a_temp, b_temp = proj_aff(\n C_er[E_temp, :], D_er[E_temp, :], b_er[E_temp],\n expected_dim=1, abs_tol=abs_tol)\n E_adj = unique_equalityset(C, D, b, a_temp, b_temp, abs_tol=abs_tol)\n if len(E_adj) == 0:\n data = {}\n data[\"C\"] = C\n data[\"D\"] = D\n data[\"b\"] = b\n data[\"Er\"] = E_r + 1\n data[\"ar\"] = ar\n data[\"br\"] = br\n data[\"Ef\"] = E + 1\n data[\"af\"] = af\n data[\"bf\"] = bf\n sio.savemat(\"matlabdata\", data)\n raise Exception(\n \"adjacent: equality set computation returned empty set\")\n else:\n r = np.abs(np.dot(C, x_opt) + np.dot(D, y_opt) - b) < abs_tol\n E_adj = np.nonzero(r)[0]\n C_eadj = C[E_adj, :]\n D_eadj = D[E_adj, :]\n b_eadj = b[E_adj]\n af_adj, bf_adj = proj_aff(C_eadj, D_eadj, b_eadj, abs_tol=abs_tol)\n return E_adj, af_adj, bf_adj\n\n\ndef proj_aff(Ce, De, be, expected_dim=None, abs_tol=1e-7):\n \"\"\"Affine projection.\n\n Compute the set aff = {x | Ce x + De y = be} on the form\n aff = ({x | a x = b} intersection {Ce x + De y < be}).\n\n Input: Polytope parameters Ce, De and be\n\n Output: Constants a and b\n \"\"\"\n # Remove zero columns\n ind = np.nonzero(np.sum(np.abs(De), axis=0) > abs_tol)[0]\n D = De[:, ind]\n if D.shape[1] == 0:\n a = Ce\n b = be\n a_n, b_n = normalize(a, b)\n if expected_dim is not None:\n if expected_dim != b_n.size:\n raise Exception(\n \"proj_aff: wrong dimension calculated in 1\")\n return a_n.flatten(), b_n\n sh = np.shape(D.T)\n m = sh[0]\n n = sh[1]\n nDe = null_space(D.T)\n a = np.dot(nDe.T, Ce)\n b = np.dot(nDe.T, be)\n a_n, b_n = normalize(a, b)\n if expected_dim is not None:\n if expected_dim != b_n.size:\n raise Exception(\"proj_aff: wrong dimension calculated in 2\")\n return a_n, b_n\n\n\ndef is_dual_degenerate(c, G, h, A, b, x_opt, z_opt, abs_tol=1e-7):\n \"\"\"Return `True` if pair of dual problems is dual degenerate.\n\n Checks if the pair of dual problems::\n\n (P): min c'x (D): max h'z + b'y\n s.t Gx <= h s.t G'z + A'y = c\n Ax = b z <= 0\n\n is dual degenerate, i.e. 
if (P) has several optimal solutions.\n Optimal solutions x* and z* are required.\n\n Input:\n\n `G,h,A,b`: Parameters of (P)\n `x_opt`: One optimal solution to (P)\n `z_opt`: The optimal solution to (D) corresponding to\n _inequality constraints_ in (P)\n\n Output:\n `dual`: Boolean indicating whether (P) has many optimal solutions.\n \"\"\"\n D = - G\n d = - h.flatten()\n mu = - z_opt.flatten() # mu >= 0\n # Active constraints\n I = np.nonzero(np.abs(np.dot(D, x_opt).flatten() - d) < abs_tol)[0]\n # Positive elements in dual opt\n J = np.nonzero(mu > abs_tol)[0]\n # i, j\n i = mu < abs_tol # Zero elements in dual opt\n i = i.astype(int)\n j = np.zeros(len(mu), dtype=int)\n j[I] = 1 # 1 if active\n # Indices where active constraints have 0 dual opt\n L = np.nonzero(i + j == 2)[0]\n # sizes\n nI = len(I)\n nJ = len(J)\n nL = len(L)\n # constraints\n DI = D[I, :] # Active constraints\n DJ = D[J, :] # Constraints with positive lagrange mult\n DL = D[L, :] # Active constraints with zero dual opt\n dual = 0\n if A is None:\n test = DI\n else:\n test = np.vstack([DI, A])\n if rank(test) < np.amin(DI.shape):\n return True\n else:\n if len(L) > 0:\n if A is None:\n Ae = DJ\n else:\n Ae = np.vstack([DJ, A])\n be = np.zeros(Ae.shape[0])\n Ai = - DL\n bi = np.zeros(nL)\n sol = solvers._solve_lp_using_cvxopt(\n c= - np.sum(DL, axis=0), G=Ai,\n h=bi, A=Ae, b=be)\n if sol['status'] == \"dual infeasible\":\n # Dual infeasible -> primal unbounded -> value>epsilon\n return True\n if sol['primal objective'] > abs_tol:\n return True\n return False\n\n\ndef unique_equalityset(C, D, b, af, bf, abs_tol=1e-7, verbose=0):\n \"\"\"Return equality set E with the following property:\n\n P_E = {x | af x = bf} intersection P\n\n where P is the polytope C x + D y < b\n\n The inequalities have to be satisfied with equality everywhere on\n the face defined by af and bf.\n \"\"\"\n if D is not None:\n A = np.hstack([C, D])\n a = np.hstack([af, np.zeros(D.shape[1])])\n else:\n A = C\n a = af\n E = []\n for i in range(A.shape[0]):\n A_i = np.array(A[i, :])\n b_i = b[i]\n sol = solvers._solve_lp_using_cvxopt(\n c=A_i, G=A, h=b,\n A=a.T, b=bf)\n if sol['status'] != \"optimal\":\n raise Exception(\n \"unique_equalityset: LP returned status \" +\n str(sol['status']))\n if np.abs(sol['primal objective'] - b_i) < abs_tol:\n # Constraint is active everywhere\n E.append(i)\n if len(E) == 0:\n raise Exception(\"unique_equalityset: empty E\")\n return np.array(E)\n\n\ndef unique_equalityset2(C, D, b, opt_sol, abs_tol=1e-7):\n A = np.hstack([C, D])\n E0 = np.nonzero(np.abs(np.dot(A, opt_sol) - b) < abs_tol)[0]\n af, bf = proj_aff(C[E0, :], D[E0, :], b[E0], expected_dim=1)\n # stack\n ineq = np.hstack([af, np.zeros(D.shape[1])])\n G = np.vstack([A, np.vstack([ineq, -ineq])])\n h = np.hstack([b, np.hstack([bf, -bf])])\n # shape\n m = G.shape[0]\n n = G.shape[1]\n # ht\n e = 1e-3\n v = np.vstack([np.zeros([1, n]), np.eye(n)]).T\n v = v - np.array([np.mean(v, axis=1)]).T\n v = v * e\n ht = h + np.amin(-np.dot(G, v), axis=1)\n # stack\n H1 = np.hstack([G, -np.eye(m)])\n H2 = np.hstack([G, np.zeros([m, m])])\n H3 = np.hstack([np.zeros([m, n]), -np.eye(m)])\n H = np.vstack([H1, np.vstack([H2, H3])])\n h = np.hstack([ht, np.hstack([h, np.zeros(m)])])\n c = np.hstack([np.zeros(n), np.ones(m)])\n sol = solvers.lpsolve(c, H, h, solver='glpk')\n if not sol['status'] == \"optimal\":\n raise Exception(\n \"unique_equalityset: LP returned status \" +\n str(sol['status']))\n opt_sol2 = np.array(sol['x']).flatten()\n x = opt_sol2[range(n)]\n s = 
opt_sol2[range(n, len(opt_sol2))]\n E = np.nonzero(s > abs_tol)[0]\n print(E)\n E = np.sort(E[np.nonzero(E < C.shape[0])])\n # Check that they define the same projection\n at, bt = proj_aff(C[E, :], D[E, :], b[E])\n if bt.size != 1 or np.sum(np.abs(at - af)) + np.abs(bt - bf) > abs_tol:\n raise Exception(\"unique_equalityset2: affine hulls not the same\")\n return E\n\n\ndef cheby_center(C, D, b):\n \"\"\"Calculate Chebyshev center for the polytope `C x + D y <= b`.\n\n Input:\n `C, D, b`: Polytope parameters\n\n Output:\n `x_0, y_0`: The chebyshev centra\n `boolean`: True if a point could be found, False otherwise.\n \"\"\"\n d = C.shape[1]\n k = D.shape[1]\n A = np.hstack([C, D])\n dim = np.shape(A)[1]\n c = - np.r_[np.zeros(dim), 1]\n norm2 = np.sqrt(np.sum(A * A, axis=1))\n G = np.c_[A, norm2]\n sol = solvers.lpsolve(c, G, h=b, solver='glpk')\n if sol['status'] == \"optimal\":\n opt = np.array(sol['x'][0:-1]).flatten()\n return opt[range(d)], opt[range(d, d + k)], True\n else:\n return np.zeros(d), np.zeros(k), False\n\n\ndef normalize(AA, bb, abs_tol=1e-7):\n \"\"\"Normalize `A x = b` such that `A'A = 1` and `b > 0`.\n\n Also, remove duplicate lines.\n \"\"\"\n if AA.size == 0:\n return AA, bb\n dim = AA.size / bb.size\n A = AA.copy().reshape(bb.size, dim)\n b = bb.copy().reshape(bb.size, 1)\n # Remove zero lines\n keepind = np.nonzero(\n np.sum(np.abs(np.hstack([A, b])), axis=1) > abs_tol)[0]\n A = A[keepind, :]\n b = b[keepind]\n # Normalize\n anorm = np.sqrt(np.sum(A * A, axis=1))\n for i in range(len(anorm)):\n A[i, :] = A[i, :] * np.sign(b[i, 0]) / anorm[i]\n b[i, 0] = np.sign(b[i, 0]) * b[i, 0] / anorm[i]\n # Remove duplicate rows\n keep_row = []\n for i in range(len(anorm)):\n unique = True\n for j in range(i + 1, len(anorm)):\n test = (np.sum(np.abs(A[i, :] - A[j, :])) +\n np.abs(b[i, 0] - b[j, 0]))\n if test < abs_tol:\n unique = False\n break\n if unique:\n keep_row.append(i)\n A_n = A[keep_row, :]\n b_n = b[keep_row, 0]\n # Return flat A if only one row\n if A_n.size == dim:\n A_n = A_n.flatten()\n return A_n, b_n.flatten()\n\n\ndef rank(A, eps=1e-15):\n u, s, vh = linalg.svd(A)\n m = A.shape[0]\n n = A.shape[1]\n tol = np.amax([m, n]) * np.amax(s) * eps\n return np.sum(s > tol)\n\n\ndef null_space(A, eps=1e-15, nonempty=False):\n \"\"\"Returns the null space N_A to matrix A such that A N_A = 0.\"\"\"\n u, s, v = linalg.svd(A, full_matrices=1)\n m = A.shape[0]\n n = A.shape[1]\n tol = np.amax([m, n]) * np.amax(s) * eps\n rank = np.sum(s > tol)\n N_space = v[range(rank, n), :].T\n if nonempty and (len(N_space) == 0):\n N_space = v[range(np.amax(n - 1, 1), n), :]\n return N_space\n",
"step-ids": [
7,
9,
16,
18,
20
]
}
|
[
7,
9,
16,
18,
20
] |
#!/usr/bin/env pytest
# -*- coding: utf-8 -*-
###############################################################################
# $Id$
#
# Project: GDAL/OGR Test Suite
# Purpose: TopoJSON driver test suite.
# Author: Even Rouault
#
###############################################################################
# Copyright (c) 2020, Even Rouault <even dot rouault at spatialys.com>
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
###############################################################################
import ogrtest
import pytest
from osgeo import ogr
###############################################################################
# Test TopoJSON
def test_ogr_toposjon_objects_is_array():
ds = ogr.Open("data/topojson/topojson1.topojson")
lyr = ds.GetLayer(0)
assert lyr.GetName() == "a_layer"
feat = lyr.GetNextFeature()
ogrtest.check_feature_geometry(feat, "LINESTRING (100 1000,110 1000,110 1100)")
lyr = ds.GetLayer(1)
assert lyr.GetName() == "TopoJSON"
assert lyr.GetLayerDefn().GetFieldCount() == 2
assert lyr.GetLayerDefn().GetFieldDefn(0).GetName() == "id"
assert lyr.GetLayerDefn().GetFieldDefn(1).GetName() == "name"
expected_results = [
("foo", None, "POINT EMPTY"),
(None, None, "POINT EMPTY"),
(None, None, "POINT EMPTY"),
(None, None, "POINT (100 1010)"),
(None, None, "LINESTRING EMPTY"),
(None, None, "LINESTRING EMPTY"),
(None, None, "LINESTRING EMPTY"),
(None, None, "LINESTRING EMPTY"),
(None, None, "LINESTRING EMPTY"),
(None, None, "LINESTRING EMPTY"),
(None, None, "LINESTRING EMPTY"),
(None, None, "LINESTRING EMPTY"),
(None, "0", "LINESTRING EMPTY"),
(None, "foo", "LINESTRING EMPTY"),
("1", None, "LINESTRING (100 1000,110 1000,110 1100)"),
("2", None, "LINESTRING (110 1100,110 1000,100 1000)"),
(None, None, "POLYGON EMPTY"),
(None, None, "POLYGON EMPTY"),
(None, None, "POLYGON EMPTY"),
(
None,
None,
"POLYGON ((100 1000,110 1000,110 1100,100 1100,100 1000),(101 1010,101 1090,109 1090,109 1010,101 1010))",
),
(
None,
None,
"POLYGON ((110 1100,110 1000,100 1000,100 1100,110 1100),(101 1010,109 1010,109 1090,101 1090,101 1010))",
),
(None, None, "MULTIPOINT EMPTY"),
(None, None, "MULTIPOINT EMPTY"),
(None, None, "MULTIPOINT EMPTY"),
(None, None, "MULTIPOINT EMPTY"),
(None, None, "MULTIPOINT (100 1010,101 1020)"),
(None, None, "MULTIPOLYGON EMPTY"),
(None, None, "MULTIPOLYGON EMPTY"),
(None, None, "MULTIPOLYGON EMPTY"),
(
None,
None,
"MULTIPOLYGON (((110 1100,110 1000,100 1000,100 1100,110 1100)),((101 1010,109 1010,109 1090,101 1090,101 1010)))",
),
(None, None, "MULTILINESTRING EMPTY"),
(None, None, "MULTILINESTRING EMPTY"),
(None, None, "MULTILINESTRING ((100 1000,110 1000,110 1100))"),
(
None,
None,
"MULTILINESTRING ((100 1000,110 1000,110 1100,100 1100,100 1000))",
),
(
None,
None,
"MULTILINESTRING ((100 1000,110 1000,110 1100,100 1100,100 1000),(101 1010,101 1090,109 1090,109 1010,101 1010))",
),
]
assert lyr.GetFeatureCount() == len(expected_results)
for i, exp_result in enumerate(expected_results):
feat = lyr.GetNextFeature()
if (
feat.GetField("id") != exp_result[0]
or feat.GetField("name") != exp_result[1]
or feat.GetGeometryRef().ExportToWkt() != exp_result[2]
):
feat.DumpReadable()
print(exp_result)
print(feat.GetField("name"))
pytest.fail("failure at feat index %d" % i)
ds = None
def test_ogr_toposjon_objects_is_dict():
ds = ogr.Open("data/topojson/topojson2.topojson")
lyr = ds.GetLayer(0)
assert lyr.GetName() == "a_layer"
assert lyr.GetLayerDefn().GetFieldCount() == 2
assert lyr.GetLayerDefn().GetFieldDefn(0).GetName() == "id"
assert lyr.GetLayerDefn().GetFieldDefn(1).GetName() == "name"
feat = lyr.GetNextFeature()
assert feat["id"] == "foo"
assert feat["name"] == "line"
ogrtest.check_feature_geometry(feat, "LINESTRING (100 1000,110 1000,110 1100)")
lyr = ds.GetLayer(1)
assert lyr.GetName() == "TopoJSON"
feat = lyr.GetNextFeature()
ogrtest.check_feature_geometry(feat, "LINESTRING (100 1000,110 1000,110 1100)")
ds = None
def test_ogr_toposjon_no_transform():
ds = ogr.Open("data/topojson/topojson3.topojson")
lyr = ds.GetLayer(0)
assert lyr.GetName() == "a_layer"
feat = lyr.GetNextFeature()
ogrtest.check_feature_geometry(feat, "LINESTRING (0 0,10 0,0 10,10 0,0 0)")
lyr = ds.GetLayer(1)
assert lyr.GetName() == "TopoJSON"
feat = lyr.GetNextFeature()
ogrtest.check_feature_geometry(feat, "LINESTRING (0 0,10 0,0 10,10 0,0 0)")
ds = None
|
normal
|
{
"blob_id": "270dba92af583e37c35ed5365f764adfdc2f947d",
"index": 2112,
"step-1": "<mask token>\n\n\ndef test_ogr_toposjon_objects_is_dict():\n ds = ogr.Open('data/topojson/topojson2.topojson')\n lyr = ds.GetLayer(0)\n assert lyr.GetName() == 'a_layer'\n assert lyr.GetLayerDefn().GetFieldCount() == 2\n assert lyr.GetLayerDefn().GetFieldDefn(0).GetName() == 'id'\n assert lyr.GetLayerDefn().GetFieldDefn(1).GetName() == 'name'\n feat = lyr.GetNextFeature()\n assert feat['id'] == 'foo'\n assert feat['name'] == 'line'\n ogrtest.check_feature_geometry(feat,\n 'LINESTRING (100 1000,110 1000,110 1100)')\n lyr = ds.GetLayer(1)\n assert lyr.GetName() == 'TopoJSON'\n feat = lyr.GetNextFeature()\n ogrtest.check_feature_geometry(feat,\n 'LINESTRING (100 1000,110 1000,110 1100)')\n ds = None\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef test_ogr_toposjon_objects_is_dict():\n ds = ogr.Open('data/topojson/topojson2.topojson')\n lyr = ds.GetLayer(0)\n assert lyr.GetName() == 'a_layer'\n assert lyr.GetLayerDefn().GetFieldCount() == 2\n assert lyr.GetLayerDefn().GetFieldDefn(0).GetName() == 'id'\n assert lyr.GetLayerDefn().GetFieldDefn(1).GetName() == 'name'\n feat = lyr.GetNextFeature()\n assert feat['id'] == 'foo'\n assert feat['name'] == 'line'\n ogrtest.check_feature_geometry(feat,\n 'LINESTRING (100 1000,110 1000,110 1100)')\n lyr = ds.GetLayer(1)\n assert lyr.GetName() == 'TopoJSON'\n feat = lyr.GetNextFeature()\n ogrtest.check_feature_geometry(feat,\n 'LINESTRING (100 1000,110 1000,110 1100)')\n ds = None\n\n\ndef test_ogr_toposjon_no_transform():\n ds = ogr.Open('data/topojson/topojson3.topojson')\n lyr = ds.GetLayer(0)\n assert lyr.GetName() == 'a_layer'\n feat = lyr.GetNextFeature()\n ogrtest.check_feature_geometry(feat, 'LINESTRING (0 0,10 0,0 10,10 0,0 0)')\n lyr = ds.GetLayer(1)\n assert lyr.GetName() == 'TopoJSON'\n feat = lyr.GetNextFeature()\n ogrtest.check_feature_geometry(feat, 'LINESTRING (0 0,10 0,0 10,10 0,0 0)')\n ds = None\n",
"step-3": "<mask token>\n\n\ndef test_ogr_toposjon_objects_is_array():\n ds = ogr.Open('data/topojson/topojson1.topojson')\n lyr = ds.GetLayer(0)\n assert lyr.GetName() == 'a_layer'\n feat = lyr.GetNextFeature()\n ogrtest.check_feature_geometry(feat,\n 'LINESTRING (100 1000,110 1000,110 1100)')\n lyr = ds.GetLayer(1)\n assert lyr.GetName() == 'TopoJSON'\n assert lyr.GetLayerDefn().GetFieldCount() == 2\n assert lyr.GetLayerDefn().GetFieldDefn(0).GetName() == 'id'\n assert lyr.GetLayerDefn().GetFieldDefn(1).GetName() == 'name'\n expected_results = [('foo', None, 'POINT EMPTY'), (None, None,\n 'POINT EMPTY'), (None, None, 'POINT EMPTY'), (None, None,\n 'POINT (100 1010)'), (None, None, 'LINESTRING EMPTY'), (None, None,\n 'LINESTRING EMPTY'), (None, None, 'LINESTRING EMPTY'), (None, None,\n 'LINESTRING EMPTY'), (None, None, 'LINESTRING EMPTY'), (None, None,\n 'LINESTRING EMPTY'), (None, None, 'LINESTRING EMPTY'), (None, None,\n 'LINESTRING EMPTY'), (None, '0', 'LINESTRING EMPTY'), (None, 'foo',\n 'LINESTRING EMPTY'), ('1', None,\n 'LINESTRING (100 1000,110 1000,110 1100)'), ('2', None,\n 'LINESTRING (110 1100,110 1000,100 1000)'), (None, None,\n 'POLYGON EMPTY'), (None, None, 'POLYGON EMPTY'), (None, None,\n 'POLYGON EMPTY'), (None, None,\n 'POLYGON ((100 1000,110 1000,110 1100,100 1100,100 1000),(101 1010,101 1090,109 1090,109 1010,101 1010))'\n ), (None, None,\n 'POLYGON ((110 1100,110 1000,100 1000,100 1100,110 1100),(101 1010,109 1010,109 1090,101 1090,101 1010))'\n ), (None, None, 'MULTIPOINT EMPTY'), (None, None,\n 'MULTIPOINT EMPTY'), (None, None, 'MULTIPOINT EMPTY'), (None, None,\n 'MULTIPOINT EMPTY'), (None, None, 'MULTIPOINT (100 1010,101 1020)'),\n (None, None, 'MULTIPOLYGON EMPTY'), (None, None,\n 'MULTIPOLYGON EMPTY'), (None, None, 'MULTIPOLYGON EMPTY'), (None,\n None,\n 'MULTIPOLYGON (((110 1100,110 1000,100 1000,100 1100,110 1100)),((101 1010,109 1010,109 1090,101 1090,101 1010)))'\n ), (None, None, 'MULTILINESTRING EMPTY'), (None, None,\n 'MULTILINESTRING EMPTY'), (None, None,\n 'MULTILINESTRING ((100 1000,110 1000,110 1100))'), (None, None,\n 'MULTILINESTRING ((100 1000,110 1000,110 1100,100 1100,100 1000))'),\n (None, None,\n 'MULTILINESTRING ((100 1000,110 1000,110 1100,100 1100,100 1000),(101 1010,101 1090,109 1090,109 1010,101 1010))'\n )]\n assert lyr.GetFeatureCount() == len(expected_results)\n for i, exp_result in enumerate(expected_results):\n feat = lyr.GetNextFeature()\n if feat.GetField('id') != exp_result[0] or feat.GetField('name'\n ) != exp_result[1] or feat.GetGeometryRef().ExportToWkt(\n ) != exp_result[2]:\n feat.DumpReadable()\n print(exp_result)\n print(feat.GetField('name'))\n pytest.fail('failure at feat index %d' % i)\n ds = None\n\n\ndef test_ogr_toposjon_objects_is_dict():\n ds = ogr.Open('data/topojson/topojson2.topojson')\n lyr = ds.GetLayer(0)\n assert lyr.GetName() == 'a_layer'\n assert lyr.GetLayerDefn().GetFieldCount() == 2\n assert lyr.GetLayerDefn().GetFieldDefn(0).GetName() == 'id'\n assert lyr.GetLayerDefn().GetFieldDefn(1).GetName() == 'name'\n feat = lyr.GetNextFeature()\n assert feat['id'] == 'foo'\n assert feat['name'] == 'line'\n ogrtest.check_feature_geometry(feat,\n 'LINESTRING (100 1000,110 1000,110 1100)')\n lyr = ds.GetLayer(1)\n assert lyr.GetName() == 'TopoJSON'\n feat = lyr.GetNextFeature()\n ogrtest.check_feature_geometry(feat,\n 'LINESTRING (100 1000,110 1000,110 1100)')\n ds = None\n\n\ndef test_ogr_toposjon_no_transform():\n ds = ogr.Open('data/topojson/topojson3.topojson')\n lyr = ds.GetLayer(0)\n assert lyr.GetName() == 
'a_layer'\n feat = lyr.GetNextFeature()\n ogrtest.check_feature_geometry(feat, 'LINESTRING (0 0,10 0,0 10,10 0,0 0)')\n lyr = ds.GetLayer(1)\n assert lyr.GetName() == 'TopoJSON'\n feat = lyr.GetNextFeature()\n ogrtest.check_feature_geometry(feat, 'LINESTRING (0 0,10 0,0 10,10 0,0 0)')\n ds = None\n",
"step-4": "import ogrtest\nimport pytest\nfrom osgeo import ogr\n\n\ndef test_ogr_toposjon_objects_is_array():\n ds = ogr.Open('data/topojson/topojson1.topojson')\n lyr = ds.GetLayer(0)\n assert lyr.GetName() == 'a_layer'\n feat = lyr.GetNextFeature()\n ogrtest.check_feature_geometry(feat,\n 'LINESTRING (100 1000,110 1000,110 1100)')\n lyr = ds.GetLayer(1)\n assert lyr.GetName() == 'TopoJSON'\n assert lyr.GetLayerDefn().GetFieldCount() == 2\n assert lyr.GetLayerDefn().GetFieldDefn(0).GetName() == 'id'\n assert lyr.GetLayerDefn().GetFieldDefn(1).GetName() == 'name'\n expected_results = [('foo', None, 'POINT EMPTY'), (None, None,\n 'POINT EMPTY'), (None, None, 'POINT EMPTY'), (None, None,\n 'POINT (100 1010)'), (None, None, 'LINESTRING EMPTY'), (None, None,\n 'LINESTRING EMPTY'), (None, None, 'LINESTRING EMPTY'), (None, None,\n 'LINESTRING EMPTY'), (None, None, 'LINESTRING EMPTY'), (None, None,\n 'LINESTRING EMPTY'), (None, None, 'LINESTRING EMPTY'), (None, None,\n 'LINESTRING EMPTY'), (None, '0', 'LINESTRING EMPTY'), (None, 'foo',\n 'LINESTRING EMPTY'), ('1', None,\n 'LINESTRING (100 1000,110 1000,110 1100)'), ('2', None,\n 'LINESTRING (110 1100,110 1000,100 1000)'), (None, None,\n 'POLYGON EMPTY'), (None, None, 'POLYGON EMPTY'), (None, None,\n 'POLYGON EMPTY'), (None, None,\n 'POLYGON ((100 1000,110 1000,110 1100,100 1100,100 1000),(101 1010,101 1090,109 1090,109 1010,101 1010))'\n ), (None, None,\n 'POLYGON ((110 1100,110 1000,100 1000,100 1100,110 1100),(101 1010,109 1010,109 1090,101 1090,101 1010))'\n ), (None, None, 'MULTIPOINT EMPTY'), (None, None,\n 'MULTIPOINT EMPTY'), (None, None, 'MULTIPOINT EMPTY'), (None, None,\n 'MULTIPOINT EMPTY'), (None, None, 'MULTIPOINT (100 1010,101 1020)'),\n (None, None, 'MULTIPOLYGON EMPTY'), (None, None,\n 'MULTIPOLYGON EMPTY'), (None, None, 'MULTIPOLYGON EMPTY'), (None,\n None,\n 'MULTIPOLYGON (((110 1100,110 1000,100 1000,100 1100,110 1100)),((101 1010,109 1010,109 1090,101 1090,101 1010)))'\n ), (None, None, 'MULTILINESTRING EMPTY'), (None, None,\n 'MULTILINESTRING EMPTY'), (None, None,\n 'MULTILINESTRING ((100 1000,110 1000,110 1100))'), (None, None,\n 'MULTILINESTRING ((100 1000,110 1000,110 1100,100 1100,100 1000))'),\n (None, None,\n 'MULTILINESTRING ((100 1000,110 1000,110 1100,100 1100,100 1000),(101 1010,101 1090,109 1090,109 1010,101 1010))'\n )]\n assert lyr.GetFeatureCount() == len(expected_results)\n for i, exp_result in enumerate(expected_results):\n feat = lyr.GetNextFeature()\n if feat.GetField('id') != exp_result[0] or feat.GetField('name'\n ) != exp_result[1] or feat.GetGeometryRef().ExportToWkt(\n ) != exp_result[2]:\n feat.DumpReadable()\n print(exp_result)\n print(feat.GetField('name'))\n pytest.fail('failure at feat index %d' % i)\n ds = None\n\n\ndef test_ogr_toposjon_objects_is_dict():\n ds = ogr.Open('data/topojson/topojson2.topojson')\n lyr = ds.GetLayer(0)\n assert lyr.GetName() == 'a_layer'\n assert lyr.GetLayerDefn().GetFieldCount() == 2\n assert lyr.GetLayerDefn().GetFieldDefn(0).GetName() == 'id'\n assert lyr.GetLayerDefn().GetFieldDefn(1).GetName() == 'name'\n feat = lyr.GetNextFeature()\n assert feat['id'] == 'foo'\n assert feat['name'] == 'line'\n ogrtest.check_feature_geometry(feat,\n 'LINESTRING (100 1000,110 1000,110 1100)')\n lyr = ds.GetLayer(1)\n assert lyr.GetName() == 'TopoJSON'\n feat = lyr.GetNextFeature()\n ogrtest.check_feature_geometry(feat,\n 'LINESTRING (100 1000,110 1000,110 1100)')\n ds = None\n\n\ndef test_ogr_toposjon_no_transform():\n ds = ogr.Open('data/topojson/topojson3.topojson')\n lyr = 
ds.GetLayer(0)\n assert lyr.GetName() == 'a_layer'\n feat = lyr.GetNextFeature()\n ogrtest.check_feature_geometry(feat, 'LINESTRING (0 0,10 0,0 10,10 0,0 0)')\n lyr = ds.GetLayer(1)\n assert lyr.GetName() == 'TopoJSON'\n feat = lyr.GetNextFeature()\n ogrtest.check_feature_geometry(feat, 'LINESTRING (0 0,10 0,0 10,10 0,0 0)')\n ds = None\n",
"step-5": "#!/usr/bin/env pytest\n# -*- coding: utf-8 -*-\n###############################################################################\n# $Id$\n#\n# Project: GDAL/OGR Test Suite\n# Purpose: TopJSON driver test suite.\n# Author: Even Rouault\n#\n###############################################################################\n# Copyright (c) 2020, Even Rouault <even dot rouault at spatialys.com>\n#\n# Permission is hereby granted, free of charge, to any person obtaining a\n# copy of this software and associated documentation files (the \"Software\"),\n# to deal in the Software without restriction, including without limitation\n# the rights to use, copy, modify, merge, publish, distribute, sublicense,\n# and/or sell copies of the Software, and to permit persons to whom the\n# Software is furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included\n# in all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS\n# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL\n# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER\n# DEALINGS IN THE SOFTWARE.\n###############################################################################\n\nimport ogrtest\nimport pytest\n\nfrom osgeo import ogr\n\n###############################################################################\n# Test TopoJSON\n\n\ndef test_ogr_toposjon_objects_is_array():\n\n ds = ogr.Open(\"data/topojson/topojson1.topojson\")\n lyr = ds.GetLayer(0)\n assert lyr.GetName() == \"a_layer\"\n feat = lyr.GetNextFeature()\n ogrtest.check_feature_geometry(feat, \"LINESTRING (100 1000,110 1000,110 1100)\")\n\n lyr = ds.GetLayer(1)\n assert lyr.GetName() == \"TopoJSON\"\n assert lyr.GetLayerDefn().GetFieldCount() == 2\n assert lyr.GetLayerDefn().GetFieldDefn(0).GetName() == \"id\"\n assert lyr.GetLayerDefn().GetFieldDefn(1).GetName() == \"name\"\n expected_results = [\n (\"foo\", None, \"POINT EMPTY\"),\n (None, None, \"POINT EMPTY\"),\n (None, None, \"POINT EMPTY\"),\n (None, None, \"POINT (100 1010)\"),\n (None, None, \"LINESTRING EMPTY\"),\n (None, None, \"LINESTRING EMPTY\"),\n (None, None, \"LINESTRING EMPTY\"),\n (None, None, \"LINESTRING EMPTY\"),\n (None, None, \"LINESTRING EMPTY\"),\n (None, None, \"LINESTRING EMPTY\"),\n (None, None, \"LINESTRING EMPTY\"),\n (None, None, \"LINESTRING EMPTY\"),\n (None, \"0\", \"LINESTRING EMPTY\"),\n (None, \"foo\", \"LINESTRING EMPTY\"),\n (\"1\", None, \"LINESTRING (100 1000,110 1000,110 1100)\"),\n (\"2\", None, \"LINESTRING (110 1100,110 1000,100 1000)\"),\n (None, None, \"POLYGON EMPTY\"),\n (None, None, \"POLYGON EMPTY\"),\n (None, None, \"POLYGON EMPTY\"),\n (\n None,\n None,\n \"POLYGON ((100 1000,110 1000,110 1100,100 1100,100 1000),(101 1010,101 1090,109 1090,109 1010,101 1010))\",\n ),\n (\n None,\n None,\n \"POLYGON ((110 1100,110 1000,100 1000,100 1100,110 1100),(101 1010,109 1010,109 1090,101 1090,101 1010))\",\n ),\n (None, None, \"MULTIPOINT EMPTY\"),\n (None, None, \"MULTIPOINT EMPTY\"),\n (None, None, \"MULTIPOINT EMPTY\"),\n (None, None, \"MULTIPOINT EMPTY\"),\n (None, None, \"MULTIPOINT (100 1010,101 1020)\"),\n (None, None, \"MULTIPOLYGON EMPTY\"),\n (None, None, 
\"MULTIPOLYGON EMPTY\"),\n (None, None, \"MULTIPOLYGON EMPTY\"),\n (\n None,\n None,\n \"MULTIPOLYGON (((110 1100,110 1000,100 1000,100 1100,110 1100)),((101 1010,109 1010,109 1090,101 1090,101 1010)))\",\n ),\n (None, None, \"MULTILINESTRING EMPTY\"),\n (None, None, \"MULTILINESTRING EMPTY\"),\n (None, None, \"MULTILINESTRING ((100 1000,110 1000,110 1100))\"),\n (\n None,\n None,\n \"MULTILINESTRING ((100 1000,110 1000,110 1100,100 1100,100 1000))\",\n ),\n (\n None,\n None,\n \"MULTILINESTRING ((100 1000,110 1000,110 1100,100 1100,100 1000),(101 1010,101 1090,109 1090,109 1010,101 1010))\",\n ),\n ]\n assert lyr.GetFeatureCount() == len(expected_results)\n for i, exp_result in enumerate(expected_results):\n feat = lyr.GetNextFeature()\n if (\n feat.GetField(\"id\") != exp_result[0]\n or feat.GetField(\"name\") != exp_result[1]\n or feat.GetGeometryRef().ExportToWkt() != exp_result[2]\n ):\n feat.DumpReadable()\n print(exp_result)\n print(feat.GetField(\"name\"))\n pytest.fail(\"failure at feat index %d\" % i)\n ds = None\n\n\ndef test_ogr_toposjon_objects_is_dict():\n\n ds = ogr.Open(\"data/topojson/topojson2.topojson\")\n lyr = ds.GetLayer(0)\n assert lyr.GetName() == \"a_layer\"\n assert lyr.GetLayerDefn().GetFieldCount() == 2\n assert lyr.GetLayerDefn().GetFieldDefn(0).GetName() == \"id\"\n assert lyr.GetLayerDefn().GetFieldDefn(1).GetName() == \"name\"\n feat = lyr.GetNextFeature()\n assert feat[\"id\"] == \"foo\"\n assert feat[\"name\"] == \"line\"\n ogrtest.check_feature_geometry(feat, \"LINESTRING (100 1000,110 1000,110 1100)\")\n\n lyr = ds.GetLayer(1)\n assert lyr.GetName() == \"TopoJSON\"\n feat = lyr.GetNextFeature()\n ogrtest.check_feature_geometry(feat, \"LINESTRING (100 1000,110 1000,110 1100)\")\n\n ds = None\n\n\ndef test_ogr_toposjon_no_transform():\n\n ds = ogr.Open(\"data/topojson/topojson3.topojson\")\n lyr = ds.GetLayer(0)\n assert lyr.GetName() == \"a_layer\"\n feat = lyr.GetNextFeature()\n ogrtest.check_feature_geometry(feat, \"LINESTRING (0 0,10 0,0 10,10 0,0 0)\")\n\n lyr = ds.GetLayer(1)\n assert lyr.GetName() == \"TopoJSON\"\n feat = lyr.GetNextFeature()\n ogrtest.check_feature_geometry(feat, \"LINESTRING (0 0,10 0,0 10,10 0,0 0)\")\n ds = None\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
import torch
import tarfile
import pickle
import pandas
import json
import argparse
from pathlib import Path
import numpy as np
import shutil
from shutil import copyfile
import os
import re
import pandas as pd
import sys
from numpy import asarray
from numpy import savetxt
sys.path.append("..")
def parse_arguments():
parser = argparse.ArgumentParser()
parser.add_argument('--data_dir', type=str, required=True, help='dir holding sequences as separate files')
parser.add_argument('--maxlen', type=int, default=500, help='maximum length of sequence')
    parser.add_argument('--ext', type=str, default='tar.gz', help='extension of files with sequences')
    parser.add_argument('--datetime', type=bool, default=False, help='whether time values in event sequences are represented in datetime format')
parser.add_argument('--save_dir', type=str, default = './', help='path to save results')
parser.add_argument('--maxsize', type=int, default=None, help='max number of sequences')
args = parser.parse_args()
return args
def tranform_data(args):
"""
Loads the sequences saved in the given directory.
Args:
data_dir (str, Path) - directory containing sequences
save_dir - directory for saving transform data
maxsize (int) - maximum number of sequences to load
maxlen (int) - maximum length of sequence, the sequences longer than maxlen will be truncated
ext (str) - extension of files in data_dir directory
        datetime (bool) - whether time values in the files are represented in datetime format
"""
data_dir = args.data_dir
save_dir = args.save_dir
    os.makedirs(save_dir, exist_ok=True)
maxsize = args.maxsize
maxlen = args.maxlen
ext = args.ext
datetime = args.datetime
classes = set()
nb_files = 0
time_col = 'time'
event_col = 'event'
gt_ids = None
if args.ext == "pkl":
with open(Path(args.data_dir, "fx_labels"), "rb") as fp:
gt_ids = pickle.load(fp)[:maxsize]
labels = np.unique(gt_ids)
gt_data = []
for i in range (len(gt_ids)):
gt_data.append(int(np.nonzero(gt_ids[i] == labels)[0]))
gt = {'cluster_id': gt_data}
print(gt_data)
gt_table = pd.DataFrame(data=gt)
gt_table.to_csv(Path(save_dir, 'clusters.csv'))
if Path(args.data_dir, 'clusters.csv').exists():
gt_ids = pd.read_csv(Path(args.data_dir, 'clusters.csv'))[:(maxsize)]
gt_ids.to_csv(Path(save_dir, 'clusters.csv'))
args = parse_arguments()
print(args)
tranform_data(args)
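For context only: the script above is driven entirely by its command-line flags. A hypothetical invocation might look like the lines below; the script name, every path and every option value is an illustrative assumption, not something taken from the original record.
# Hypothetical command line (script name and paths are made up):
#   python transform_data.py --data_dir ./sequences --ext pkl --save_dir ./out --maxsize 100
#
# The same arguments can be assembled programmatically, e.g. when testing tranform_data():
import argparse
example_args = argparse.Namespace(
    data_dir='./sequences',   # directory holding the raw sequence files
    maxlen=500,               # mirrors the parser defaults above
    ext='pkl',                # exercises the pickle label-loading branch
    datetime=False,
    save_dir='./out',
    maxsize=100,
)
# tranform_data(example_args)  # would create ./out and write clusters.csv there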
|
normal
|
{
"blob_id": "da55d9a6534525e58b6c1d2db997e90a1c9b0f36",
"index": 1427,
"step-1": "<mask token>\n\n\ndef parse_arguments():\n parser = argparse.ArgumentParser()\n parser.add_argument('--data_dir', type=str, required=True, help=\n 'dir holding sequences as separate files')\n parser.add_argument('--maxlen', type=int, default=500, help=\n 'maximum length of sequence')\n parser.add_argument('--ext', type=str, default='tar.gz', help=\n 'extention of files with sequences')\n parser.add_argument('--datetime', type=bool, default=False, help=\n 'if time values in event sequences are represented in datetime format')\n parser.add_argument('--save_dir', type=str, default='./', help=\n 'path to save results')\n parser.add_argument('--maxsize', type=int, default=None, help=\n 'max number of sequences')\n args = parser.parse_args()\n return args\n\n\ndef tranform_data(args):\n \"\"\"\n Loads the sequences saved in the given directory.\n Args:\n data_dir (str, Path) - directory containing sequences\n save_dir - directory for saving transform data\n maxsize (int) - maximum number of sequences to load\n maxlen (int) - maximum length of sequence, the sequences longer than maxlen will be truncated\n ext (str) - extension of files in data_dir directory\n datetime (bool) - variable meaning if time values in files are represented in datetime format\n \n \"\"\"\n data_dir = args.data_dir\n save_dir = args.save_dir\n os.makedirs(save_dir)\n maxsize = args.maxsize\n maxlen = args.maxlen\n ext = args.ext\n datetime = args.datetime\n classes = set()\n nb_files = 0\n time_col = 'time'\n event_col = 'event'\n gt_ids = None\n if args.ext == 'pkl':\n with open(Path(args.data_dir, 'fx_labels'), 'rb') as fp:\n gt_ids = pickle.load(fp)[:maxsize]\n labels = np.unique(gt_ids)\n gt_data = []\n for i in range(len(gt_ids)):\n gt_data.append(int(np.nonzero(gt_ids[i] == labels)[0]))\n gt = {'cluster_id': gt_data}\n print(gt_data)\n gt_table = pd.DataFrame(data=gt)\n gt_table.to_csv(Path(save_dir, 'clusters.csv'))\n if Path(args.data_dir, 'clusters.csv').exists():\n gt_ids = pd.read_csv(Path(args.data_dir, 'clusters.csv'))[:maxsize]\n gt_ids.to_csv(Path(save_dir, 'clusters.csv'))\n\n\n<mask token>\n",
"step-2": "<mask token>\nsys.path.append('..')\n\n\ndef parse_arguments():\n parser = argparse.ArgumentParser()\n parser.add_argument('--data_dir', type=str, required=True, help=\n 'dir holding sequences as separate files')\n parser.add_argument('--maxlen', type=int, default=500, help=\n 'maximum length of sequence')\n parser.add_argument('--ext', type=str, default='tar.gz', help=\n 'extention of files with sequences')\n parser.add_argument('--datetime', type=bool, default=False, help=\n 'if time values in event sequences are represented in datetime format')\n parser.add_argument('--save_dir', type=str, default='./', help=\n 'path to save results')\n parser.add_argument('--maxsize', type=int, default=None, help=\n 'max number of sequences')\n args = parser.parse_args()\n return args\n\n\ndef tranform_data(args):\n \"\"\"\n Loads the sequences saved in the given directory.\n Args:\n data_dir (str, Path) - directory containing sequences\n save_dir - directory for saving transform data\n maxsize (int) - maximum number of sequences to load\n maxlen (int) - maximum length of sequence, the sequences longer than maxlen will be truncated\n ext (str) - extension of files in data_dir directory\n datetime (bool) - variable meaning if time values in files are represented in datetime format\n \n \"\"\"\n data_dir = args.data_dir\n save_dir = args.save_dir\n os.makedirs(save_dir)\n maxsize = args.maxsize\n maxlen = args.maxlen\n ext = args.ext\n datetime = args.datetime\n classes = set()\n nb_files = 0\n time_col = 'time'\n event_col = 'event'\n gt_ids = None\n if args.ext == 'pkl':\n with open(Path(args.data_dir, 'fx_labels'), 'rb') as fp:\n gt_ids = pickle.load(fp)[:maxsize]\n labels = np.unique(gt_ids)\n gt_data = []\n for i in range(len(gt_ids)):\n gt_data.append(int(np.nonzero(gt_ids[i] == labels)[0]))\n gt = {'cluster_id': gt_data}\n print(gt_data)\n gt_table = pd.DataFrame(data=gt)\n gt_table.to_csv(Path(save_dir, 'clusters.csv'))\n if Path(args.data_dir, 'clusters.csv').exists():\n gt_ids = pd.read_csv(Path(args.data_dir, 'clusters.csv'))[:maxsize]\n gt_ids.to_csv(Path(save_dir, 'clusters.csv'))\n\n\n<mask token>\nprint(args)\ntranform_data(args)\n",
"step-3": "<mask token>\nsys.path.append('..')\n\n\ndef parse_arguments():\n parser = argparse.ArgumentParser()\n parser.add_argument('--data_dir', type=str, required=True, help=\n 'dir holding sequences as separate files')\n parser.add_argument('--maxlen', type=int, default=500, help=\n 'maximum length of sequence')\n parser.add_argument('--ext', type=str, default='tar.gz', help=\n 'extention of files with sequences')\n parser.add_argument('--datetime', type=bool, default=False, help=\n 'if time values in event sequences are represented in datetime format')\n parser.add_argument('--save_dir', type=str, default='./', help=\n 'path to save results')\n parser.add_argument('--maxsize', type=int, default=None, help=\n 'max number of sequences')\n args = parser.parse_args()\n return args\n\n\ndef tranform_data(args):\n \"\"\"\n Loads the sequences saved in the given directory.\n Args:\n data_dir (str, Path) - directory containing sequences\n save_dir - directory for saving transform data\n maxsize (int) - maximum number of sequences to load\n maxlen (int) - maximum length of sequence, the sequences longer than maxlen will be truncated\n ext (str) - extension of files in data_dir directory\n datetime (bool) - variable meaning if time values in files are represented in datetime format\n \n \"\"\"\n data_dir = args.data_dir\n save_dir = args.save_dir\n os.makedirs(save_dir)\n maxsize = args.maxsize\n maxlen = args.maxlen\n ext = args.ext\n datetime = args.datetime\n classes = set()\n nb_files = 0\n time_col = 'time'\n event_col = 'event'\n gt_ids = None\n if args.ext == 'pkl':\n with open(Path(args.data_dir, 'fx_labels'), 'rb') as fp:\n gt_ids = pickle.load(fp)[:maxsize]\n labels = np.unique(gt_ids)\n gt_data = []\n for i in range(len(gt_ids)):\n gt_data.append(int(np.nonzero(gt_ids[i] == labels)[0]))\n gt = {'cluster_id': gt_data}\n print(gt_data)\n gt_table = pd.DataFrame(data=gt)\n gt_table.to_csv(Path(save_dir, 'clusters.csv'))\n if Path(args.data_dir, 'clusters.csv').exists():\n gt_ids = pd.read_csv(Path(args.data_dir, 'clusters.csv'))[:maxsize]\n gt_ids.to_csv(Path(save_dir, 'clusters.csv'))\n\n\nargs = parse_arguments()\nprint(args)\ntranform_data(args)\n",
"step-4": "import torch\nimport tarfile\nimport pickle\nimport pandas\nimport json\nimport argparse\nfrom pathlib import Path\nimport numpy as np\nimport shutil\nfrom shutil import copyfile\nimport os\nimport re\nimport pandas as pd\nimport sys\nfrom numpy import asarray\nfrom numpy import savetxt\nsys.path.append('..')\n\n\ndef parse_arguments():\n parser = argparse.ArgumentParser()\n parser.add_argument('--data_dir', type=str, required=True, help=\n 'dir holding sequences as separate files')\n parser.add_argument('--maxlen', type=int, default=500, help=\n 'maximum length of sequence')\n parser.add_argument('--ext', type=str, default='tar.gz', help=\n 'extention of files with sequences')\n parser.add_argument('--datetime', type=bool, default=False, help=\n 'if time values in event sequences are represented in datetime format')\n parser.add_argument('--save_dir', type=str, default='./', help=\n 'path to save results')\n parser.add_argument('--maxsize', type=int, default=None, help=\n 'max number of sequences')\n args = parser.parse_args()\n return args\n\n\ndef tranform_data(args):\n \"\"\"\n Loads the sequences saved in the given directory.\n Args:\n data_dir (str, Path) - directory containing sequences\n save_dir - directory for saving transform data\n maxsize (int) - maximum number of sequences to load\n maxlen (int) - maximum length of sequence, the sequences longer than maxlen will be truncated\n ext (str) - extension of files in data_dir directory\n datetime (bool) - variable meaning if time values in files are represented in datetime format\n \n \"\"\"\n data_dir = args.data_dir\n save_dir = args.save_dir\n os.makedirs(save_dir)\n maxsize = args.maxsize\n maxlen = args.maxlen\n ext = args.ext\n datetime = args.datetime\n classes = set()\n nb_files = 0\n time_col = 'time'\n event_col = 'event'\n gt_ids = None\n if args.ext == 'pkl':\n with open(Path(args.data_dir, 'fx_labels'), 'rb') as fp:\n gt_ids = pickle.load(fp)[:maxsize]\n labels = np.unique(gt_ids)\n gt_data = []\n for i in range(len(gt_ids)):\n gt_data.append(int(np.nonzero(gt_ids[i] == labels)[0]))\n gt = {'cluster_id': gt_data}\n print(gt_data)\n gt_table = pd.DataFrame(data=gt)\n gt_table.to_csv(Path(save_dir, 'clusters.csv'))\n if Path(args.data_dir, 'clusters.csv').exists():\n gt_ids = pd.read_csv(Path(args.data_dir, 'clusters.csv'))[:maxsize]\n gt_ids.to_csv(Path(save_dir, 'clusters.csv'))\n\n\nargs = parse_arguments()\nprint(args)\ntranform_data(args)\n",
"step-5": "import torch\nimport tarfile\nimport pickle\nimport pandas\nimport json\nimport argparse\nfrom pathlib import Path\nimport numpy as np\nimport shutil\nfrom shutil import copyfile\nimport os\nimport re\nimport pandas as pd\nimport sys\nfrom numpy import asarray\nfrom numpy import savetxt\nsys.path.append(\"..\")\ndef parse_arguments():\n parser = argparse.ArgumentParser()\n parser.add_argument('--data_dir', type=str, required=True, help='dir holding sequences as separate files')\n parser.add_argument('--maxlen', type=int, default=500, help='maximum length of sequence')\n parser.add_argument('--ext', type=str, default='tar.gz', help='extention of files with sequences')\n parser.add_argument('--datetime', type=bool, default=False, help='if time values in event sequences are represented in datetime format')\n parser.add_argument('--save_dir', type=str, default = './', help='path to save results')\n parser.add_argument('--maxsize', type=int, default=None, help='max number of sequences')\n args = parser.parse_args()\n return args\ndef tranform_data(args):\n \"\"\"\n Loads the sequences saved in the given directory.\n Args:\n data_dir (str, Path) - directory containing sequences\n save_dir - directory for saving transform data\n maxsize (int) - maximum number of sequences to load\n maxlen (int) - maximum length of sequence, the sequences longer than maxlen will be truncated\n ext (str) - extension of files in data_dir directory\n datetime (bool) - variable meaning if time values in files are represented in datetime format\n \n \"\"\"\n data_dir = args.data_dir\n save_dir = args.save_dir\n os.makedirs(save_dir)\n maxsize = args.maxsize\n maxlen = args.maxlen \n ext = args.ext\n datetime = args.datetime\n classes = set()\n nb_files = 0\n time_col = 'time'\n event_col = 'event'\n gt_ids = None\n if args.ext == \"pkl\":\n with open(Path(args.data_dir, \"fx_labels\"), \"rb\") as fp:\n gt_ids = pickle.load(fp)[:maxsize]\n labels = np.unique(gt_ids)\n gt_data = []\n for i in range (len(gt_ids)):\n gt_data.append(int(np.nonzero(gt_ids[i] == labels)[0]))\n gt = {'cluster_id': gt_data}\n print(gt_data)\n gt_table = pd.DataFrame(data=gt)\n gt_table.to_csv(Path(save_dir, 'clusters.csv'))\n if Path(args.data_dir, 'clusters.csv').exists():\n gt_ids = pd.read_csv(Path(args.data_dir, 'clusters.csv'))[:(maxsize)]\n gt_ids.to_csv(Path(save_dir, 'clusters.csv'))\n \n\n\n\nargs = parse_arguments()\nprint(args)\ntranform_data(args)",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
# coding: utf-8
from django.test.client import Client
from django.contrib.contenttypes.models import ContentType
from main.models import Descriptor, ResourceThematic, ThematicArea
from utils.tests import BaseTestCase
from models import *
def minimal_form_data():
'''
    Define the minimal set of fields needed to submit a media form
'''
form_data = {
'status': '0',
'title': 'Foto 1',
'description': 'Foto 1',
'media_type' : '1',
'main-descriptor-content_type-object_id-TOTAL_FORMS': '0',
'main-descriptor-content_type-object_id-INITIAL_FORMS': '0',
'main-keyword-content_type-object_id-TOTAL_FORMS': '0',
'main-keyword-content_type-object_id-INITIAL_FORMS': '0',
'main-resourcethematic-content_type-object_id-TOTAL_FORMS': '0',
'main-resourcethematic-content_type-object_id-INITIAL_FORMS': '0',
}
return form_data
def complete_form_data():
'''
    Define the missing fields for a valid submission of a media object
'''
missing_fields = {
'link' : 'http://www.youtube.com',
'publication_date' : '01/12/2015',
'main-descriptor-content_type-object_id-TOTAL_FORMS' : '1',
'main-descriptor-content_type-object_id-0-id' : '',
'main-descriptor-content_type-object_id-0-text' : 'malaria',
'main-descriptor-content_type-object_id-0-code' : '^d8462',
'main-descriptor-content_type-object_id-0-status' : '0',
'main-resourcethematic-content_type-object_id-TOTAL_FORMS' : '1',
'main-resourcethematic-content_type-object_id-0-thematic_area' : '1',
'main-resourcethematic-content_type-object_id-0-status' : '0',
}
complete_form_data = minimal_form_data()
complete_form_data.update(missing_fields)
return complete_form_data
def create_media_object():
'''
Create media object for tests
'''
# Create a Media object and test that is present on list
media1 = Media.objects.create(status=0,title='Midia de teste (BR1.1)',
media_type_id=1, link='http://bvsalud.org', created_by_id=1,
cooperative_center_code='BR1.1')
media_ct = ContentType.objects.get_for_model(media1)
descriptor = Descriptor.objects.create(object_id=1, content_type=media_ct, text='malaria')
thematic = ResourceThematic.objects.create(object_id=1, content_type=media_ct, thematic_area_id=1)
media2 = Media.objects.create(status=0,title='Media de prueba (PY3.1)',
media_type_id=1, link='http://bvsalud.org', created_by_id=2,
cooperative_center_code='PY3.1')
class MultimediaTest(BaseTestCase):
"""
Tests for multimedia app
"""
def setUp(self):
super(MultimediaTest, self).setUp()
# create auxiliary models used on tests
media_type = MediaType.objects.create(acronym='video', name='Video')
thematic_area = ThematicArea.objects.create(acronym='LISBR1.1', name='Teste')
def test_list_media(self):
"""
Test list media
"""
self.login_editor()
create_media_object()
response = self.client.get('/multimedia/')
self.assertContains(response, "Midia de teste (BR1.1")
# list only medias from user cooperative center (BR1.1)
self.assertNotContains(response, "Media de prueba (PY3.1)")
def test_add_media(self):
"""
Tests create media
"""
self.login_editor()
# invalid submission with missing required fields
form_data = minimal_form_data()
response = self.client.post('/multimedia/new', form_data )
self.assertContains(response,'Por favor verifique os campos obrigatórios')
self.assertContains(response,'Você precisa inserir pelo menos um descritor de assunto')
self.assertContains(response,'Você precisa selecionar pelo menos uma área temática')
# complete form_data with required fields and re-submit form
form_data = complete_form_data()
# test valid submission
# after submit a valid content the view will redirect to /multimedia and list the objects
# follow=True will allow check if the new data is on the list
response = self.client.post('/multimedia/new', form_data, follow=True)
self.assertRedirects(response, '/multimedia/')
self.assertContains(response, "Foto 1")
# check if is set cooperative center code of user (editor = BR1.1)
self.assertEquals(Media.objects.all()[0].cooperative_center_code, "BR1.1")
def test_edit_media(self):
"""
Tests edit media
"""
self.login_editor()
create_media_object()
media_test = Media.objects.all()[0]
url = '/multimedia/edit/{0}'.format(media_test.id)
response = self.client.get(url)
# Test if return form with fields
self.assertContains(response, media_test.title)
# Test changes values and submit
form_data = complete_form_data()
form_data['status'] = '1'
response = self.client.post(url, form_data)
# check for validation of descriptor and thematic area for status = Admitted
self.assertContains(response, "é necessário ter pelo menos um descritor")
# check for normal edition
form_data['status'] = '0'
response = self.client.post(url, form_data, follow=True)
self.assertRedirects(response, '/multimedia/')
self.assertContains(response, "Foto 1")
def test_delete_media(self):
"""
Tests delete media
"""
self.login_editor()
create_media_object()
response = self.client.get('/multimedia/delete/1')
self.assertContains(response, "Você tem certeza que deseja apagar?")
response = self.client.post('/multimedia/delete/1')
self.assertTrue(Media.objects.filter(id=1).count() == 0)
self.assertTrue(Descriptor.objects.filter(object_id=1).count() == 0)
self.assertTrue(ResourceThematic.objects.filter(object_id=1).count() == 0)
self.assertRedirects(response, '/multimedia/')
def test_list_media_type(self):
"""
Tests list media type
"""
# check if documentalist has access to list media-types
self.login_documentalist()
response = self.client.get('/multimedia/media-types/' )
# 403 = unauthorized
self.assertEqual(response.status_code, 403)
self.client.logout()
self.login_admin()
response = self.client.get('/multimedia/media-types/')
self.assertContains(response, "Video")
def test_add_media_type(self):
"""
Tests create media type
"""
# check if documentalist has access to create new media-types
self.login_documentalist()
response = self.client.get('/multimedia/media-type/new' )
# 403 = unauthorized
self.assertEqual(response.status_code, 403)
self.client.logout()
self.login_admin()
form_data = {
'status': '0',
'acronym': 'foto',
'name': 'Foto',
'language' : 'pt-br',
'mediatypelocal_set-TOTAL_FORMS': '0',
'mediatypelocal_set-INITIAL_FORMS': '0',
}
response = self.client.post('/multimedia/media-type/new', form_data, follow=True )
self.assertRedirects(response, '/multimedia/media-types')
self.assertContains(response, "Foto")
def test_list_media_collection(self):
"""
Tests list of media collection
"""
self.login_editor()
# Create a media collection object and test that is present on list
MediaCollection.objects.create(name='Coleção 1',
description='Coleção de teste 1',
created_by_id=1, cooperative_center_code='BR1.1')
MediaCollection.objects.create(name='Coleção 2',
description='Coleção de teste 2',
created_by_id=2, cooperative_center_code='BR1.1')
MediaCollection.objects.create(name='Coleção 3',
description='Coleção de teste 3',
created_by_id=3, cooperative_center_code='PY3.8')
response = self.client.get('/multimedia/collections')
        # the plain listing is not restricted by user: all three collections are returned
self.assertContains(response, "Coleção 1")
self.assertEquals(response.context['object_list'].count(), 3)
        # check that only collections from cooperative center BR1.1 are returned
response = self.client.get('/multimedia/collections/?filter_created_by_cc=BR1.1')
self.assertEquals(response.context['object_list'].count(), 2)
def test_add_media_collection(self):
"""
Tests add media collection
"""
self.login_editor()
form_data = {
'name': 'Coleção nova',
'description': 'Coleção de teste',
'language': 'pt-br',
'mediacollectionlocal_set-TOTAL_FORMS': '0',
'mediacollectionlocal_set-INITIAL_FORMS': '0',
}
response = self.client.post('/multimedia/collection/new', form_data, follow=True )
self.assertRedirects(response, '/multimedia/collections')
self.assertContains(response, "Coleção nova")
|
normal
|
{
"blob_id": "a253ab5ef80a61c3784862625cde81de4c4ef984",
"index": 2094,
"step-1": "<mask token>\n\n\nclass MultimediaTest(BaseTestCase):\n <mask token>\n <mask token>\n <mask token>\n\n def test_add_media(self):\n \"\"\"\n Tests create media\n \"\"\"\n self.login_editor()\n form_data = minimal_form_data()\n response = self.client.post('/multimedia/new', form_data)\n self.assertContains(response,\n 'Por favor verifique os campos obrigatórios')\n self.assertContains(response,\n 'Você precisa inserir pelo menos um descritor de assunto')\n self.assertContains(response,\n 'Você precisa selecionar pelo menos uma área temática')\n form_data = complete_form_data()\n response = self.client.post('/multimedia/new', form_data, follow=True)\n self.assertRedirects(response, '/multimedia/')\n self.assertContains(response, 'Foto 1')\n self.assertEquals(Media.objects.all()[0].cooperative_center_code,\n 'BR1.1')\n\n def test_edit_media(self):\n \"\"\"\n Tests edit media\n \"\"\"\n self.login_editor()\n create_media_object()\n media_test = Media.objects.all()[0]\n url = '/multimedia/edit/{0}'.format(media_test.id)\n response = self.client.get(url)\n self.assertContains(response, media_test.title)\n form_data = complete_form_data()\n form_data['status'] = '1'\n response = self.client.post(url, form_data)\n self.assertContains(response,\n 'é necessário ter pelo menos um descritor')\n form_data['status'] = '0'\n response = self.client.post(url, form_data, follow=True)\n self.assertRedirects(response, '/multimedia/')\n self.assertContains(response, 'Foto 1')\n\n def test_delete_media(self):\n \"\"\"\n Tests delete media\n \"\"\"\n self.login_editor()\n create_media_object()\n response = self.client.get('/multimedia/delete/1')\n self.assertContains(response, 'Você tem certeza que deseja apagar?')\n response = self.client.post('/multimedia/delete/1')\n self.assertTrue(Media.objects.filter(id=1).count() == 0)\n self.assertTrue(Descriptor.objects.filter(object_id=1).count() == 0)\n self.assertTrue(ResourceThematic.objects.filter(object_id=1).count(\n ) == 0)\n self.assertRedirects(response, '/multimedia/')\n\n def test_list_media_type(self):\n \"\"\"\n Tests list media type\n \"\"\"\n self.login_documentalist()\n response = self.client.get('/multimedia/media-types/')\n self.assertEqual(response.status_code, 403)\n self.client.logout()\n self.login_admin()\n response = self.client.get('/multimedia/media-types/')\n self.assertContains(response, 'Video')\n\n def test_add_media_type(self):\n \"\"\"\n Tests create media type\n \"\"\"\n self.login_documentalist()\n response = self.client.get('/multimedia/media-type/new')\n self.assertEqual(response.status_code, 403)\n self.client.logout()\n self.login_admin()\n form_data = {'status': '0', 'acronym': 'foto', 'name': 'Foto',\n 'language': 'pt-br', 'mediatypelocal_set-TOTAL_FORMS': '0',\n 'mediatypelocal_set-INITIAL_FORMS': '0'}\n response = self.client.post('/multimedia/media-type/new', form_data,\n follow=True)\n self.assertRedirects(response, '/multimedia/media-types')\n self.assertContains(response, 'Foto')\n\n def test_list_media_collection(self):\n \"\"\"\n Tests list of media collection\n \"\"\"\n self.login_editor()\n MediaCollection.objects.create(name='Coleção 1', description=\n 'Coleção de teste 1', created_by_id=1, cooperative_center_code=\n 'BR1.1')\n MediaCollection.objects.create(name='Coleção 2', description=\n 'Coleção de teste 2', created_by_id=2, cooperative_center_code=\n 'BR1.1')\n MediaCollection.objects.create(name='Coleção 3', description=\n 'Coleção de teste 3', created_by_id=3, cooperative_center_code=\n 'PY3.8')\n 
response = self.client.get('/multimedia/collections')\n self.assertContains(response, 'Coleção 1')\n self.assertEquals(response.context['object_list'].count(), 3)\n response = self.client.get(\n '/multimedia/collections/?filter_created_by_cc=BR1.1')\n self.assertEquals(response.context['object_list'].count(), 2)\n\n def test_add_media_collection(self):\n \"\"\"\n Tests add media collection\n \"\"\"\n self.login_editor()\n form_data = {'name': 'Coleção nova', 'description':\n 'Coleção de teste', 'language': 'pt-br',\n 'mediacollectionlocal_set-TOTAL_FORMS': '0',\n 'mediacollectionlocal_set-INITIAL_FORMS': '0'}\n response = self.client.post('/multimedia/collection/new', form_data,\n follow=True)\n self.assertRedirects(response, '/multimedia/collections')\n self.assertContains(response, 'Coleção nova')\n",
"step-2": "<mask token>\n\n\nclass MultimediaTest(BaseTestCase):\n <mask token>\n\n def setUp(self):\n super(MultimediaTest, self).setUp()\n media_type = MediaType.objects.create(acronym='video', name='Video')\n thematic_area = ThematicArea.objects.create(acronym='LISBR1.1',\n name='Teste')\n <mask token>\n\n def test_add_media(self):\n \"\"\"\n Tests create media\n \"\"\"\n self.login_editor()\n form_data = minimal_form_data()\n response = self.client.post('/multimedia/new', form_data)\n self.assertContains(response,\n 'Por favor verifique os campos obrigatórios')\n self.assertContains(response,\n 'Você precisa inserir pelo menos um descritor de assunto')\n self.assertContains(response,\n 'Você precisa selecionar pelo menos uma área temática')\n form_data = complete_form_data()\n response = self.client.post('/multimedia/new', form_data, follow=True)\n self.assertRedirects(response, '/multimedia/')\n self.assertContains(response, 'Foto 1')\n self.assertEquals(Media.objects.all()[0].cooperative_center_code,\n 'BR1.1')\n\n def test_edit_media(self):\n \"\"\"\n Tests edit media\n \"\"\"\n self.login_editor()\n create_media_object()\n media_test = Media.objects.all()[0]\n url = '/multimedia/edit/{0}'.format(media_test.id)\n response = self.client.get(url)\n self.assertContains(response, media_test.title)\n form_data = complete_form_data()\n form_data['status'] = '1'\n response = self.client.post(url, form_data)\n self.assertContains(response,\n 'é necessário ter pelo menos um descritor')\n form_data['status'] = '0'\n response = self.client.post(url, form_data, follow=True)\n self.assertRedirects(response, '/multimedia/')\n self.assertContains(response, 'Foto 1')\n\n def test_delete_media(self):\n \"\"\"\n Tests delete media\n \"\"\"\n self.login_editor()\n create_media_object()\n response = self.client.get('/multimedia/delete/1')\n self.assertContains(response, 'Você tem certeza que deseja apagar?')\n response = self.client.post('/multimedia/delete/1')\n self.assertTrue(Media.objects.filter(id=1).count() == 0)\n self.assertTrue(Descriptor.objects.filter(object_id=1).count() == 0)\n self.assertTrue(ResourceThematic.objects.filter(object_id=1).count(\n ) == 0)\n self.assertRedirects(response, '/multimedia/')\n\n def test_list_media_type(self):\n \"\"\"\n Tests list media type\n \"\"\"\n self.login_documentalist()\n response = self.client.get('/multimedia/media-types/')\n self.assertEqual(response.status_code, 403)\n self.client.logout()\n self.login_admin()\n response = self.client.get('/multimedia/media-types/')\n self.assertContains(response, 'Video')\n\n def test_add_media_type(self):\n \"\"\"\n Tests create media type\n \"\"\"\n self.login_documentalist()\n response = self.client.get('/multimedia/media-type/new')\n self.assertEqual(response.status_code, 403)\n self.client.logout()\n self.login_admin()\n form_data = {'status': '0', 'acronym': 'foto', 'name': 'Foto',\n 'language': 'pt-br', 'mediatypelocal_set-TOTAL_FORMS': '0',\n 'mediatypelocal_set-INITIAL_FORMS': '0'}\n response = self.client.post('/multimedia/media-type/new', form_data,\n follow=True)\n self.assertRedirects(response, '/multimedia/media-types')\n self.assertContains(response, 'Foto')\n\n def test_list_media_collection(self):\n \"\"\"\n Tests list of media collection\n \"\"\"\n self.login_editor()\n MediaCollection.objects.create(name='Coleção 1', description=\n 'Coleção de teste 1', created_by_id=1, cooperative_center_code=\n 'BR1.1')\n MediaCollection.objects.create(name='Coleção 2', description=\n 'Coleção de teste 2', 
created_by_id=2, cooperative_center_code=\n 'BR1.1')\n MediaCollection.objects.create(name='Coleção 3', description=\n 'Coleção de teste 3', created_by_id=3, cooperative_center_code=\n 'PY3.8')\n response = self.client.get('/multimedia/collections')\n self.assertContains(response, 'Coleção 1')\n self.assertEquals(response.context['object_list'].count(), 3)\n response = self.client.get(\n '/multimedia/collections/?filter_created_by_cc=BR1.1')\n self.assertEquals(response.context['object_list'].count(), 2)\n\n def test_add_media_collection(self):\n \"\"\"\n Tests add media collection\n \"\"\"\n self.login_editor()\n form_data = {'name': 'Coleção nova', 'description':\n 'Coleção de teste', 'language': 'pt-br',\n 'mediacollectionlocal_set-TOTAL_FORMS': '0',\n 'mediacollectionlocal_set-INITIAL_FORMS': '0'}\n response = self.client.post('/multimedia/collection/new', form_data,\n follow=True)\n self.assertRedirects(response, '/multimedia/collections')\n self.assertContains(response, 'Coleção nova')\n",
"step-3": "<mask token>\n\n\ndef minimal_form_data():\n \"\"\"\n Define a minimal fields for submit a media form\n \"\"\"\n form_data = {'status': '0', 'title': 'Foto 1', 'description': 'Foto 1',\n 'media_type': '1',\n 'main-descriptor-content_type-object_id-TOTAL_FORMS': '0',\n 'main-descriptor-content_type-object_id-INITIAL_FORMS': '0',\n 'main-keyword-content_type-object_id-TOTAL_FORMS': '0',\n 'main-keyword-content_type-object_id-INITIAL_FORMS': '0',\n 'main-resourcethematic-content_type-object_id-TOTAL_FORMS': '0',\n 'main-resourcethematic-content_type-object_id-INITIAL_FORMS': '0'}\n return form_data\n\n\n<mask token>\n\n\nclass MultimediaTest(BaseTestCase):\n \"\"\"\n Tests for multimedia app\n \"\"\"\n\n def setUp(self):\n super(MultimediaTest, self).setUp()\n media_type = MediaType.objects.create(acronym='video', name='Video')\n thematic_area = ThematicArea.objects.create(acronym='LISBR1.1',\n name='Teste')\n\n def test_list_media(self):\n \"\"\"\n Test list media\n \"\"\"\n self.login_editor()\n create_media_object()\n response = self.client.get('/multimedia/')\n self.assertContains(response, 'Midia de teste (BR1.1')\n self.assertNotContains(response, 'Media de prueba (PY3.1)')\n\n def test_add_media(self):\n \"\"\"\n Tests create media\n \"\"\"\n self.login_editor()\n form_data = minimal_form_data()\n response = self.client.post('/multimedia/new', form_data)\n self.assertContains(response,\n 'Por favor verifique os campos obrigatórios')\n self.assertContains(response,\n 'Você precisa inserir pelo menos um descritor de assunto')\n self.assertContains(response,\n 'Você precisa selecionar pelo menos uma área temática')\n form_data = complete_form_data()\n response = self.client.post('/multimedia/new', form_data, follow=True)\n self.assertRedirects(response, '/multimedia/')\n self.assertContains(response, 'Foto 1')\n self.assertEquals(Media.objects.all()[0].cooperative_center_code,\n 'BR1.1')\n\n def test_edit_media(self):\n \"\"\"\n Tests edit media\n \"\"\"\n self.login_editor()\n create_media_object()\n media_test = Media.objects.all()[0]\n url = '/multimedia/edit/{0}'.format(media_test.id)\n response = self.client.get(url)\n self.assertContains(response, media_test.title)\n form_data = complete_form_data()\n form_data['status'] = '1'\n response = self.client.post(url, form_data)\n self.assertContains(response,\n 'é necessário ter pelo menos um descritor')\n form_data['status'] = '0'\n response = self.client.post(url, form_data, follow=True)\n self.assertRedirects(response, '/multimedia/')\n self.assertContains(response, 'Foto 1')\n\n def test_delete_media(self):\n \"\"\"\n Tests delete media\n \"\"\"\n self.login_editor()\n create_media_object()\n response = self.client.get('/multimedia/delete/1')\n self.assertContains(response, 'Você tem certeza que deseja apagar?')\n response = self.client.post('/multimedia/delete/1')\n self.assertTrue(Media.objects.filter(id=1).count() == 0)\n self.assertTrue(Descriptor.objects.filter(object_id=1).count() == 0)\n self.assertTrue(ResourceThematic.objects.filter(object_id=1).count(\n ) == 0)\n self.assertRedirects(response, '/multimedia/')\n\n def test_list_media_type(self):\n \"\"\"\n Tests list media type\n \"\"\"\n self.login_documentalist()\n response = self.client.get('/multimedia/media-types/')\n self.assertEqual(response.status_code, 403)\n self.client.logout()\n self.login_admin()\n response = self.client.get('/multimedia/media-types/')\n self.assertContains(response, 'Video')\n\n def test_add_media_type(self):\n \"\"\"\n Tests create 
media type\n \"\"\"\n self.login_documentalist()\n response = self.client.get('/multimedia/media-type/new')\n self.assertEqual(response.status_code, 403)\n self.client.logout()\n self.login_admin()\n form_data = {'status': '0', 'acronym': 'foto', 'name': 'Foto',\n 'language': 'pt-br', 'mediatypelocal_set-TOTAL_FORMS': '0',\n 'mediatypelocal_set-INITIAL_FORMS': '0'}\n response = self.client.post('/multimedia/media-type/new', form_data,\n follow=True)\n self.assertRedirects(response, '/multimedia/media-types')\n self.assertContains(response, 'Foto')\n\n def test_list_media_collection(self):\n \"\"\"\n Tests list of media collection\n \"\"\"\n self.login_editor()\n MediaCollection.objects.create(name='Coleção 1', description=\n 'Coleção de teste 1', created_by_id=1, cooperative_center_code=\n 'BR1.1')\n MediaCollection.objects.create(name='Coleção 2', description=\n 'Coleção de teste 2', created_by_id=2, cooperative_center_code=\n 'BR1.1')\n MediaCollection.objects.create(name='Coleção 3', description=\n 'Coleção de teste 3', created_by_id=3, cooperative_center_code=\n 'PY3.8')\n response = self.client.get('/multimedia/collections')\n self.assertContains(response, 'Coleção 1')\n self.assertEquals(response.context['object_list'].count(), 3)\n response = self.client.get(\n '/multimedia/collections/?filter_created_by_cc=BR1.1')\n self.assertEquals(response.context['object_list'].count(), 2)\n\n def test_add_media_collection(self):\n \"\"\"\n Tests add media collection\n \"\"\"\n self.login_editor()\n form_data = {'name': 'Coleção nova', 'description':\n 'Coleção de teste', 'language': 'pt-br',\n 'mediacollectionlocal_set-TOTAL_FORMS': '0',\n 'mediacollectionlocal_set-INITIAL_FORMS': '0'}\n response = self.client.post('/multimedia/collection/new', form_data,\n follow=True)\n self.assertRedirects(response, '/multimedia/collections')\n self.assertContains(response, 'Coleção nova')\n",
"step-4": "<mask token>\n\n\ndef minimal_form_data():\n \"\"\"\n Define a minimal fields for submit a media form\n \"\"\"\n form_data = {'status': '0', 'title': 'Foto 1', 'description': 'Foto 1',\n 'media_type': '1',\n 'main-descriptor-content_type-object_id-TOTAL_FORMS': '0',\n 'main-descriptor-content_type-object_id-INITIAL_FORMS': '0',\n 'main-keyword-content_type-object_id-TOTAL_FORMS': '0',\n 'main-keyword-content_type-object_id-INITIAL_FORMS': '0',\n 'main-resourcethematic-content_type-object_id-TOTAL_FORMS': '0',\n 'main-resourcethematic-content_type-object_id-INITIAL_FORMS': '0'}\n return form_data\n\n\ndef complete_form_data():\n \"\"\"\n Define missing fields for a valid submission of media object\n \"\"\"\n missing_fields = {'link': 'http://www.youtube.com', 'publication_date':\n '01/12/2015', 'main-descriptor-content_type-object_id-TOTAL_FORMS':\n '1', 'main-descriptor-content_type-object_id-0-id': '',\n 'main-descriptor-content_type-object_id-0-text': 'malaria',\n 'main-descriptor-content_type-object_id-0-code': '^d8462',\n 'main-descriptor-content_type-object_id-0-status': '0',\n 'main-resourcethematic-content_type-object_id-TOTAL_FORMS': '1',\n 'main-resourcethematic-content_type-object_id-0-thematic_area': '1',\n 'main-resourcethematic-content_type-object_id-0-status': '0'}\n complete_form_data = minimal_form_data()\n complete_form_data.update(missing_fields)\n return complete_form_data\n\n\ndef create_media_object():\n \"\"\"\n Create media object for tests\n \"\"\"\n media1 = Media.objects.create(status=0, title='Midia de teste (BR1.1)',\n media_type_id=1, link='http://bvsalud.org', created_by_id=1,\n cooperative_center_code='BR1.1')\n media_ct = ContentType.objects.get_for_model(media1)\n descriptor = Descriptor.objects.create(object_id=1, content_type=\n media_ct, text='malaria')\n thematic = ResourceThematic.objects.create(object_id=1, content_type=\n media_ct, thematic_area_id=1)\n media2 = Media.objects.create(status=0, title='Media de prueba (PY3.1)',\n media_type_id=1, link='http://bvsalud.org', created_by_id=2,\n cooperative_center_code='PY3.1')\n\n\nclass MultimediaTest(BaseTestCase):\n \"\"\"\n Tests for multimedia app\n \"\"\"\n\n def setUp(self):\n super(MultimediaTest, self).setUp()\n media_type = MediaType.objects.create(acronym='video', name='Video')\n thematic_area = ThematicArea.objects.create(acronym='LISBR1.1',\n name='Teste')\n\n def test_list_media(self):\n \"\"\"\n Test list media\n \"\"\"\n self.login_editor()\n create_media_object()\n response = self.client.get('/multimedia/')\n self.assertContains(response, 'Midia de teste (BR1.1')\n self.assertNotContains(response, 'Media de prueba (PY3.1)')\n\n def test_add_media(self):\n \"\"\"\n Tests create media\n \"\"\"\n self.login_editor()\n form_data = minimal_form_data()\n response = self.client.post('/multimedia/new', form_data)\n self.assertContains(response,\n 'Por favor verifique os campos obrigatórios')\n self.assertContains(response,\n 'Você precisa inserir pelo menos um descritor de assunto')\n self.assertContains(response,\n 'Você precisa selecionar pelo menos uma área temática')\n form_data = complete_form_data()\n response = self.client.post('/multimedia/new', form_data, follow=True)\n self.assertRedirects(response, '/multimedia/')\n self.assertContains(response, 'Foto 1')\n self.assertEquals(Media.objects.all()[0].cooperative_center_code,\n 'BR1.1')\n\n def test_edit_media(self):\n \"\"\"\n Tests edit media\n \"\"\"\n self.login_editor()\n create_media_object()\n media_test = 
Media.objects.all()[0]\n url = '/multimedia/edit/{0}'.format(media_test.id)\n response = self.client.get(url)\n self.assertContains(response, media_test.title)\n form_data = complete_form_data()\n form_data['status'] = '1'\n response = self.client.post(url, form_data)\n self.assertContains(response,\n 'é necessário ter pelo menos um descritor')\n form_data['status'] = '0'\n response = self.client.post(url, form_data, follow=True)\n self.assertRedirects(response, '/multimedia/')\n self.assertContains(response, 'Foto 1')\n\n def test_delete_media(self):\n \"\"\"\n Tests delete media\n \"\"\"\n self.login_editor()\n create_media_object()\n response = self.client.get('/multimedia/delete/1')\n self.assertContains(response, 'Você tem certeza que deseja apagar?')\n response = self.client.post('/multimedia/delete/1')\n self.assertTrue(Media.objects.filter(id=1).count() == 0)\n self.assertTrue(Descriptor.objects.filter(object_id=1).count() == 0)\n self.assertTrue(ResourceThematic.objects.filter(object_id=1).count(\n ) == 0)\n self.assertRedirects(response, '/multimedia/')\n\n def test_list_media_type(self):\n \"\"\"\n Tests list media type\n \"\"\"\n self.login_documentalist()\n response = self.client.get('/multimedia/media-types/')\n self.assertEqual(response.status_code, 403)\n self.client.logout()\n self.login_admin()\n response = self.client.get('/multimedia/media-types/')\n self.assertContains(response, 'Video')\n\n def test_add_media_type(self):\n \"\"\"\n Tests create media type\n \"\"\"\n self.login_documentalist()\n response = self.client.get('/multimedia/media-type/new')\n self.assertEqual(response.status_code, 403)\n self.client.logout()\n self.login_admin()\n form_data = {'status': '0', 'acronym': 'foto', 'name': 'Foto',\n 'language': 'pt-br', 'mediatypelocal_set-TOTAL_FORMS': '0',\n 'mediatypelocal_set-INITIAL_FORMS': '0'}\n response = self.client.post('/multimedia/media-type/new', form_data,\n follow=True)\n self.assertRedirects(response, '/multimedia/media-types')\n self.assertContains(response, 'Foto')\n\n def test_list_media_collection(self):\n \"\"\"\n Tests list of media collection\n \"\"\"\n self.login_editor()\n MediaCollection.objects.create(name='Coleção 1', description=\n 'Coleção de teste 1', created_by_id=1, cooperative_center_code=\n 'BR1.1')\n MediaCollection.objects.create(name='Coleção 2', description=\n 'Coleção de teste 2', created_by_id=2, cooperative_center_code=\n 'BR1.1')\n MediaCollection.objects.create(name='Coleção 3', description=\n 'Coleção de teste 3', created_by_id=3, cooperative_center_code=\n 'PY3.8')\n response = self.client.get('/multimedia/collections')\n self.assertContains(response, 'Coleção 1')\n self.assertEquals(response.context['object_list'].count(), 3)\n response = self.client.get(\n '/multimedia/collections/?filter_created_by_cc=BR1.1')\n self.assertEquals(response.context['object_list'].count(), 2)\n\n def test_add_media_collection(self):\n \"\"\"\n Tests add media collection\n \"\"\"\n self.login_editor()\n form_data = {'name': 'Coleção nova', 'description':\n 'Coleção de teste', 'language': 'pt-br',\n 'mediacollectionlocal_set-TOTAL_FORMS': '0',\n 'mediacollectionlocal_set-INITIAL_FORMS': '0'}\n response = self.client.post('/multimedia/collection/new', form_data,\n follow=True)\n self.assertRedirects(response, '/multimedia/collections')\n self.assertContains(response, 'Coleção nova')\n",
"step-5": "# coding: utf-8\n\nfrom django.test.client import Client\nfrom django.contrib.contenttypes.models import ContentType\n\nfrom main.models import Descriptor, ResourceThematic, ThematicArea\n\nfrom utils.tests import BaseTestCase\nfrom models import *\n\ndef minimal_form_data():\n '''\n Define a minimal fields for submit a media form\n '''\n\n form_data = {\n 'status': '0',\n 'title': 'Foto 1',\n 'description': 'Foto 1',\n 'media_type' : '1',\n\n 'main-descriptor-content_type-object_id-TOTAL_FORMS': '0',\n 'main-descriptor-content_type-object_id-INITIAL_FORMS': '0',\n\n 'main-keyword-content_type-object_id-TOTAL_FORMS': '0',\n 'main-keyword-content_type-object_id-INITIAL_FORMS': '0',\n\n 'main-resourcethematic-content_type-object_id-TOTAL_FORMS': '0',\n 'main-resourcethematic-content_type-object_id-INITIAL_FORMS': '0',\n }\n\n return form_data\n\ndef complete_form_data():\n '''\n Define missing fields for a valid submission of media object\n '''\n\n missing_fields = {\n 'link' : 'http://www.youtube.com',\n 'publication_date' : '01/12/2015',\n\n 'main-descriptor-content_type-object_id-TOTAL_FORMS' : '1',\n\n 'main-descriptor-content_type-object_id-0-id' : '',\n 'main-descriptor-content_type-object_id-0-text' : 'malaria',\n 'main-descriptor-content_type-object_id-0-code' : '^d8462',\n 'main-descriptor-content_type-object_id-0-status' : '0',\n\n 'main-resourcethematic-content_type-object_id-TOTAL_FORMS' : '1',\n 'main-resourcethematic-content_type-object_id-0-thematic_area' : '1',\n 'main-resourcethematic-content_type-object_id-0-status' : '0',\n }\n\n complete_form_data = minimal_form_data()\n complete_form_data.update(missing_fields)\n\n return complete_form_data\n\n\ndef create_media_object():\n '''\n Create media object for tests\n '''\n\n # Create a Media object and test that is present on list\n media1 = Media.objects.create(status=0,title='Midia de teste (BR1.1)',\n media_type_id=1, link='http://bvsalud.org', created_by_id=1,\n cooperative_center_code='BR1.1')\n\n media_ct = ContentType.objects.get_for_model(media1)\n descriptor = Descriptor.objects.create(object_id=1, content_type=media_ct, text='malaria')\n thematic = ResourceThematic.objects.create(object_id=1, content_type=media_ct, thematic_area_id=1)\n\n media2 = Media.objects.create(status=0,title='Media de prueba (PY3.1)',\n media_type_id=1, link='http://bvsalud.org', created_by_id=2,\n cooperative_center_code='PY3.1')\n\n\nclass MultimediaTest(BaseTestCase):\n \"\"\"\n Tests for multimedia app\n \"\"\"\n\n def setUp(self):\n super(MultimediaTest, self).setUp()\n\n # create auxiliary models used on tests\n media_type = MediaType.objects.create(acronym='video', name='Video')\n thematic_area = ThematicArea.objects.create(acronym='LISBR1.1', name='Teste')\n\n\n def test_list_media(self):\n \"\"\"\n Test list media\n \"\"\"\n self.login_editor()\n create_media_object()\n\n response = self.client.get('/multimedia/')\n self.assertContains(response, \"Midia de teste (BR1.1\")\n\n # list only medias from user cooperative center (BR1.1)\n self.assertNotContains(response, \"Media de prueba (PY3.1)\")\n\n\n def test_add_media(self):\n \"\"\"\n Tests create media\n \"\"\"\n self.login_editor()\n\n # invalid submission with missing required fields\n form_data = minimal_form_data()\n response = self.client.post('/multimedia/new', form_data )\n\n self.assertContains(response,'Por favor verifique os campos obrigatórios')\n self.assertContains(response,'Você precisa inserir pelo menos um descritor de assunto')\n 
self.assertContains(response,'Você precisa selecionar pelo menos uma área temática')\n\n # complete form_data with required fields and re-submit form\n form_data = complete_form_data()\n\n # test valid submission\n # after submit a valid content the view will redirect to /multimedia and list the objects\n # follow=True will allow check if the new data is on the list\n response = self.client.post('/multimedia/new', form_data, follow=True)\n self.assertRedirects(response, '/multimedia/')\n self.assertContains(response, \"Foto 1\")\n\n # check if is set cooperative center code of user (editor = BR1.1)\n self.assertEquals(Media.objects.all()[0].cooperative_center_code, \"BR1.1\")\n\n def test_edit_media(self):\n \"\"\"\n Tests edit media\n \"\"\"\n self.login_editor()\n create_media_object()\n\n media_test = Media.objects.all()[0]\n url = '/multimedia/edit/{0}'.format(media_test.id)\n response = self.client.get(url)\n\n # Test if return form with fields\n self.assertContains(response, media_test.title)\n\n # Test changes values and submit\n form_data = complete_form_data()\n form_data['status'] = '1'\n\n response = self.client.post(url, form_data)\n # check for validation of descriptor and thematic area for status = Admitted\n self.assertContains(response, \"é necessário ter pelo menos um descritor\")\n\n # check for normal edition\n form_data['status'] = '0'\n response = self.client.post(url, form_data, follow=True)\n self.assertRedirects(response, '/multimedia/')\n self.assertContains(response, \"Foto 1\")\n\n\n def test_delete_media(self):\n \"\"\"\n Tests delete media\n \"\"\"\n self.login_editor()\n create_media_object()\n\n response = self.client.get('/multimedia/delete/1')\n self.assertContains(response, \"Você tem certeza que deseja apagar?\")\n\n response = self.client.post('/multimedia/delete/1')\n\n self.assertTrue(Media.objects.filter(id=1).count() == 0)\n self.assertTrue(Descriptor.objects.filter(object_id=1).count() == 0)\n self.assertTrue(ResourceThematic.objects.filter(object_id=1).count() == 0)\n\n self.assertRedirects(response, '/multimedia/')\n\n\n def test_list_media_type(self):\n \"\"\"\n Tests list media type\n \"\"\"\n\n # check if documentalist has access to list media-types\n self.login_documentalist()\n response = self.client.get('/multimedia/media-types/' )\n\n # 403 = unauthorized\n self.assertEqual(response.status_code, 403)\n\n self.client.logout()\n self.login_admin()\n\n response = self.client.get('/multimedia/media-types/')\n self.assertContains(response, \"Video\")\n\n\n def test_add_media_type(self):\n \"\"\"\n Tests create media type\n \"\"\"\n\n # check if documentalist has access to create new media-types\n self.login_documentalist()\n response = self.client.get('/multimedia/media-type/new' )\n\n # 403 = unauthorized\n self.assertEqual(response.status_code, 403)\n\n self.client.logout()\n self.login_admin()\n\n form_data = {\n 'status': '0',\n 'acronym': 'foto',\n 'name': 'Foto',\n 'language' : 'pt-br',\n 'mediatypelocal_set-TOTAL_FORMS': '0',\n 'mediatypelocal_set-INITIAL_FORMS': '0',\n }\n\n response = self.client.post('/multimedia/media-type/new', form_data, follow=True )\n\n self.assertRedirects(response, '/multimedia/media-types')\n self.assertContains(response, \"Foto\")\n\n\n def test_list_media_collection(self):\n \"\"\"\n Tests list of media collection\n \"\"\"\n self.login_editor()\n\n # Create a media collection object and test that is present on list\n MediaCollection.objects.create(name='Coleção 1',\n description='Coleção de teste 1',\n 
created_by_id=1, cooperative_center_code='BR1.1')\n\n MediaCollection.objects.create(name='Coleção 2',\n description='Coleção de teste 2',\n created_by_id=2, cooperative_center_code='BR1.1')\n\n MediaCollection.objects.create(name='Coleção 3',\n description='Coleção de teste 3',\n created_by_id=3, cooperative_center_code='PY3.8')\n\n\n response = self.client.get('/multimedia/collections')\n # check if only one collection is returned (restrict by user)\n self.assertContains(response, \"Coleção 1\")\n self.assertEquals(response.context['object_list'].count(), 3)\n\n # check if return only colections from cooperative center BR1.1\n response = self.client.get('/multimedia/collections/?filter_created_by_cc=BR1.1')\n self.assertEquals(response.context['object_list'].count(), 2)\n\n\n def test_add_media_collection(self):\n \"\"\"\n Tests add media collection\n \"\"\"\n self.login_editor()\n\n form_data = {\n 'name': 'Coleção nova',\n 'description': 'Coleção de teste',\n 'language': 'pt-br',\n 'mediacollectionlocal_set-TOTAL_FORMS': '0',\n 'mediacollectionlocal_set-INITIAL_FORMS': '0',\n }\n\n response = self.client.post('/multimedia/collection/new', form_data, follow=True )\n\n self.assertRedirects(response, '/multimedia/collections')\n self.assertContains(response, \"Coleção nova\")\n",
"step-ids": [
8,
9,
12,
14,
16
]
}
|
[
8,
9,
12,
14,
16
] |
from flask import render_template, url_for, escape, redirect, abort
from app import core
from database import db
@core.route('/post')
@core.route('/categorie')
@core.route('/tag')
def returnToHome():
return redirect(url_for('home'))
|
normal
|
{
"blob_id": "c27d6279d1ea84bab3c0abd4ca9a08de202219da",
"index": 1748,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\[email protected]('/post')\[email protected]('/categorie')\[email protected]('/tag')\ndef returnToHome():\n return redirect(url_for('home'))\n",
"step-3": "from flask import render_template, url_for, escape, redirect, abort\nfrom app import core\nfrom database import db\n\n\[email protected]('/post')\[email protected]('/categorie')\[email protected]('/tag')\ndef returnToHome():\n return redirect(url_for('home'))\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
import logging
from sleekxmpp import ClientXMPP
from sleekxmpp.exceptions import IqError, IqTimeout
class EchoBot(ClientXMPP):
def __init__(self, jid, password):
ClientXMPP.__init__(self, jid, password)
self.add_event_handler("session_start", self.session_start)
self.register_plugin('xep_0045') # Multi-User Chat
def session_start(self, event):
self.send_presence()
self.get_roster()
self['xep_0045'].joinMUC('[email protected]', 'your_name', wait=True)
if __name__ == '__main__':
logging.basicConfig(level=logging.DEBUG, format='%(levelname)-8s %(message)s')
xmpp = EchoBot('[email protected]', 'your_password')
xmpp.connect()
xmpp.process(block=True)
|
normal
|
{
"blob_id": "3b531c5935f0be89536c95ff471f96b4249d951c",
"index": 2521,
"step-1": "<mask token>\n\n\nclass EchoBot(ClientXMPP):\n\n def __init__(self, jid, password):\n ClientXMPP.__init__(self, jid, password)\n self.add_event_handler('session_start', self.session_start)\n self.register_plugin('xep_0045')\n <mask token>\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass EchoBot(ClientXMPP):\n\n def __init__(self, jid, password):\n ClientXMPP.__init__(self, jid, password)\n self.add_event_handler('session_start', self.session_start)\n self.register_plugin('xep_0045')\n\n def session_start(self, event):\n self.send_presence()\n self.get_roster()\n self['xep_0045'].joinMUC('[email protected]', 'your_name',\n wait=True)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass EchoBot(ClientXMPP):\n\n def __init__(self, jid, password):\n ClientXMPP.__init__(self, jid, password)\n self.add_event_handler('session_start', self.session_start)\n self.register_plugin('xep_0045')\n\n def session_start(self, event):\n self.send_presence()\n self.get_roster()\n self['xep_0045'].joinMUC('[email protected]', 'your_name',\n wait=True)\n\n\nif __name__ == '__main__':\n logging.basicConfig(level=logging.DEBUG, format=\n '%(levelname)-8s %(message)s')\n xmpp = EchoBot('[email protected]', 'your_password')\n xmpp.connect()\n xmpp.process(block=True)\n",
"step-4": "import logging\nfrom sleekxmpp import ClientXMPP\nfrom sleekxmpp.exceptions import IqError, IqTimeout\n\n\nclass EchoBot(ClientXMPP):\n\n def __init__(self, jid, password):\n ClientXMPP.__init__(self, jid, password)\n self.add_event_handler('session_start', self.session_start)\n self.register_plugin('xep_0045')\n\n def session_start(self, event):\n self.send_presence()\n self.get_roster()\n self['xep_0045'].joinMUC('[email protected]', 'your_name',\n wait=True)\n\n\nif __name__ == '__main__':\n logging.basicConfig(level=logging.DEBUG, format=\n '%(levelname)-8s %(message)s')\n xmpp = EchoBot('[email protected]', 'your_password')\n xmpp.connect()\n xmpp.process(block=True)\n",
"step-5": "import logging\nfrom sleekxmpp import ClientXMPP\nfrom sleekxmpp.exceptions import IqError, IqTimeout\n\nclass EchoBot(ClientXMPP):\n\n def __init__(self, jid, password):\n ClientXMPP.__init__(self, jid, password)\n self.add_event_handler(\"session_start\", self.session_start)\n self.register_plugin('xep_0045') # Multi-User Chat\n\n def session_start(self, event):\n self.send_presence()\n self.get_roster()\n self['xep_0045'].joinMUC('[email protected]', 'your_name', wait=True)\n\n\nif __name__ == '__main__':\n logging.basicConfig(level=logging.DEBUG, format='%(levelname)-8s %(message)s')\n\n xmpp = EchoBot('[email protected]', 'your_password')\n xmpp.connect()\n xmpp.process(block=True)\n",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
# Hongjun has recently become obsessed with the stock market. He has a remarkable eye for the future: he predicts the stock price for each day, and his predictions always come true. Every day he does one of the following three things.
# 1. Buy one share.
# 2. Sell as many of the shares he owns as he wants.
# 3. Do nothing.
# Although Hongjun has great foresight, he does not know how to act on it to maximize his profit. Given the stock price on each day, he asks you to compute the maximum profit he can make.
# For example, if there are 3 days with prices 10, 7, 6, the price only falls, so the maximum profit is 0. But if the prices are 3, 5, 9, he can buy one share on each of the first two days and sell both on the last day for a profit of 10.
# The first line of input contains a natural number T, the number of test cases. For each test case, the first line contains a natural number N (2 ≤ N ≤ 1,000,000), the number of days, and the second line contains N natural numbers, the daily prices, separated by spaces. Each daily price is at most 10,000.
# Sample input
# 3
# 3
# 10 7 6
# 3
# 3 5 9
# 5
# 1 1 3 1 2
# Sample output
# 0
# 10
# 5
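# A sketch of one standard solution (an addition, not part of the original
# statement): scan the prices from the last day backwards, remember the
# highest price seen so far, and for every earlier day add
# (highest price so far - that day's price) to the profit.
import sys


def max_profit(prices):
    best = 0
    profit = 0
    for price in reversed(prices):
        best = max(best, price)
        profit += best - price
    return profit


if __name__ == "__main__":
    data = sys.stdin.read().split()
    t = int(data[0])
    pos = 1
    for _ in range(t):
        n = int(data[pos])
        pos += 1
        prices = [int(x) for x in data[pos:pos + n]]
        pos += n
        print(max_profit(prices))  # prints 0, 10, 5 for the sample input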
|
normal
|
{
"blob_id": "d3f6fb612e314ee2b86f6218719ecac2cc642c59",
"index": 2992,
"step-1": "# 홍준이는 요즘 주식에 빠져있다. 그는 미래를 내다보는 눈이 뛰어나, 날 별로 주가를 예상하고 언제나 그게 맞아떨어진다. 매일 그는 아래 세 가지 중 한 행동을 한다.\n\n# 1. 주식 하나를 산다.\n# 2. 원하는 만큼 가지고 있는 주식을 판다.\n# 3. 아무것도 안한다.\n\n# 홍준이는 미래를 예상하는 뛰어난 안목을 가졌지만, 어떻게 해야 자신이 최대 이익을 얻을 수 있는지 모른다. 따라서 당신에게 날 별로 주식의 가격을 알려주었을 때, 최대 이익이 얼마나 되는지 계산을 해달라고 부탁했다.\n\n# 예를 들어 날 수가 3일이고 날 별로 주가가 10, 7, 6일 때, 주가가 계속 감소하므로 최대 이익은 0이 된다. 그러나 만약 날 별로 주가가 3, 5, 9일 때는 처음 두 날에 주식을 하나씩 사고, 마지막날 다 팔아 버리면 이익이 10이 된다.\n# 입력의 첫 줄에는 테스트케이스 수를 나타내는 자연수 T가 주어진다. 각 테스트케이스 별로 첫 줄에는 날의 수를 나타내는 자연수 N(2 ≤ N ≤ 1,000,000)이 주어지고, 둘째 줄에는 날 별 주가를 나타내는 N개의 자연수들이 공백으로 구분되어 순서대로 주어진다. 날 별 주가는 10,000이하다.\n\n# 예제 입력\n# 3\n# 3\n# 10 7 6\n# 3\n# 3 5 9\n# 5\n# 1 1 3 1 2\n\n# 예제 출력\n# 0\n# 10\n# 5\n\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
1
]
}
|
[
1
] |
import gdalnumeric
#Input File
src = "../dati/islands/islands.tif"
#Output
tgt = "../dati/islands/islands_classified.jpg"
srcArr = gdalnumeric.LoadFile(src)
classes = gdalnumeric.numpy.histogram(srcArr,bins=2)[1]
print(classes)
#Color look-up table (LUT) - one entry per element of `classes`
#(the histogram bin edges, i.e. number of bins + 1 = 3 entries here).
#Specified as R,G,B tuples
lut = [[255,0,0],[0,0,0],[255,255,255]]
start = 1
rgb = gdalnumeric.numpy.zeros((3, srcArr.shape[0], srcArr.shape[1],),gdalnumeric.numpy.float32)
# Process all classes and assign colors
for i in range(len(classes)):
mask = gdalnumeric.numpy.logical_and(start <= srcArr, srcArr <= classes[i])
for j in range(len(lut[i])):
rgb[j] = gdalnumeric.numpy.choose(mask, (rgb[j], lut[i][j]))
start = classes[i]+1
# Save the image
gdalnumeric.SaveArray(rgb.astype(gdalnumeric.numpy.uint8), tgt, format="GTIFF",prototype=src)
|
normal
|
{
"blob_id": "f29d377e8a8fd6d2e156da665478d7a4c167f7d5",
"index": 3601,
"step-1": "import gdalnumeric\n\n#Input File\nsrc = \"../dati/islands/islands.tif\"\n\n#Output\ntgt = \"../dati/islands/islands_classified.jpg\"\n\nsrcArr = gdalnumeric.LoadFile(src)\n\nclasses = gdalnumeric.numpy.histogram(srcArr,bins=2)[1]\nprint classes\n\n#Color look-up table (LUT) - must be len(classes)+1.\n#Specified as R,G,B tuples\nlut = [[255,0,0],[0,0,0],[255,255,255]]\n\nstart = 1\n\nrgb = gdalnumeric.numpy.zeros((3, srcArr.shape[0], srcArr.shape[1],),gdalnumeric.numpy.float32)\n\n# Process all classes and assign colors\nfor i in range(len(classes)):\n mask = gdalnumeric.numpy.logical_and(start <= srcArr, srcArr <= classes[i])\n for j in range(len(lut[i])):\n rgb[j] = gdalnumeric.numpy.choose(mask, (rgb[j], lut[i][j]))\n start = classes[i]+1\n\n# Save the image\ngdalnumeric.SaveArray(rgb.astype(gdalnumeric.numpy.uint8), tgt, format=\"GTIFF\",prototype=src)",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
# Copyright (c) 2008-2016 MetPy Developers.
# Distributed under the terms of the BSD 3-Clause License.
# SPDX-License-Identifier: BSD-3-Clause
"""Test the `interpolation` module."""
from __future__ import division
import logging
import numpy as np
from numpy.testing import assert_almost_equal, assert_array_almost_equal
import pytest
from scipy.spatial import cKDTree, Delaunay
from scipy.spatial.distance import cdist
from metpy.cbook import get_test_data
from metpy.gridding.gridding_functions import calc_kappa
from metpy.gridding.interpolation import (barnes_point, barnes_weights, cressman_point,
cressman_weights, inverse_distance,
natural_neighbor, nn_point)
from metpy.gridding.triangles import dist_2, find_natural_neighbors
logging.getLogger('metpy.gridding.interpolation').setLevel(logging.ERROR)
@pytest.fixture()
def test_data():
r"""Return data used for tests in this file."""
x = np.array([8, 67, 79, 10, 52, 53, 98, 34, 15, 58], dtype=float)
y = np.array([24, 87, 48, 94, 98, 66, 14, 24, 60, 16], dtype=float)
z = np.array([0.064, 4.489, 6.241, 0.1, 2.704, 2.809, 9.604, 1.156,
0.225, 3.364], dtype=float)
return x, y, z
@pytest.fixture()
def test_grid():
r"""Return grid locations used for tests in this file."""
with get_test_data('interpolation_test_grid.npz') as fobj:
data = np.load(fobj)
return data['xg'], data['yg']
def test_natural_neighbor(test_data, test_grid):
r"""Test natural neighbor interpolation function."""
xp, yp, z = test_data
xg, yg = test_grid
img = natural_neighbor(xp, yp, z, xg, yg)
with get_test_data('nn_bbox0to100.npz') as fobj:
truth = np.load(fobj)['img']
assert_array_almost_equal(truth, img)
interp_methods = ['cressman', 'barnes']
@pytest.mark.parametrize('method', interp_methods)
def test_inverse_distance(method, test_data, test_grid):
r"""Test inverse distance interpolation function."""
xp, yp, z = test_data
xg, yg = test_grid
extra_kw = {}
if method == 'cressman':
extra_kw['r'] = 20
extra_kw['min_neighbors'] = 1
test_file = 'cressman_r20_mn1.npz'
elif method == 'barnes':
extra_kw['r'] = 40
extra_kw['kappa'] = 100
test_file = 'barnes_r40_k100.npz'
img = inverse_distance(xp, yp, z, xg, yg, kind=method, **extra_kw)
with get_test_data(test_file) as fobj:
truth = np.load(fobj)['img']
assert_array_almost_equal(truth, img)
def test_nn_point(test_data):
r"""Test find natural neighbors for a point interpolation function."""
xp, yp, z = test_data
tri = Delaunay(list(zip(xp, yp)))
sim_gridx = [30]
sim_gridy = [30]
members, tri_info = find_natural_neighbors(tri,
list(zip(sim_gridx, sim_gridy)))
val = nn_point(xp, yp, z, [sim_gridx[0], sim_gridy[0]],
tri, members[0], tri_info)
truth = 1.009
assert_almost_equal(truth, val, 3)
def test_barnes_weights():
r"""Test Barnes weights function."""
kappa = 1000000
gamma = 0.5
dist = np.array([1000, 2000, 3000, 4000])**2
weights = barnes_weights(dist, kappa, gamma) * 10000000
truth = [1353352.832366126918939,
3354.626279025118388,
.152299797447126,
.000000126641655]
assert_array_almost_equal(truth, weights)
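    # Consistency check (added here, not part of the original test): these
    # truth values are consistent with w = exp(-d**2 / (kappa * gamma));
    # e.g. for d = 1000 the weight is exp(-1e6 / (1e6 * 0.5)) = exp(-2)
    # ~= 0.13533528, i.e. the first entry 1353352.83... after the 1e7
    # scaling applied above.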
def test_cressman_weights():
r"""Test Cressman weights function."""
r = 5000
dist = np.array([1000, 2000, 3000, 4000])**2
weights = cressman_weights(dist, r)
truth = [0.923076923076923,
0.724137931034482,
0.470588235294117,
0.219512195121951]
assert_array_almost_equal(truth, weights)
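    # Consistency check (added here, not part of the original test): these
    # truth values are consistent with w = (r**2 - d**2) / (r**2 + d**2);
    # for d = 1000 and r = 5000 that is (25e6 - 1e6) / (25e6 + 1e6)
    # = 24/26 ~= 0.92307692, the first entry above.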
def test_cressman_point(test_data):
r"""Test Cressman interpolation for a point function."""
xp, yp, z = test_data
r = 40
obs_tree = cKDTree(list(zip(xp, yp)))
indices = obs_tree.query_ball_point([30, 30], r=r)
dists = dist_2(30, 30, xp[indices], yp[indices])
values = z[indices]
truth = 1.05499444404
value = cressman_point(dists, values, r)
assert_almost_equal(truth, value)
def test_barnes_point(test_data):
r"""Test Barnes interpolation for a point function."""
xp, yp, z = test_data
r = 40
obs_tree = cKDTree(list(zip(xp, yp)))
indices = obs_tree.query_ball_point([60, 60], r=r)
dists = dist_2(60, 60, xp[indices], yp[indices])
values = z[indices]
truth = 4.08718241061
ave_spacing = np.mean((cdist(list(zip(xp, yp)), list(zip(xp, yp)))))
kappa = calc_kappa(ave_spacing)
value = barnes_point(dists, values, kappa)
assert_almost_equal(truth, value)
|
normal
|
{
"blob_id": "9e987e057ee5322765415b84e84ef3c4d2827742",
"index": 5466,
"step-1": "<mask token>\n\n\[email protected]()\ndef test_data():\n \"\"\"Return data used for tests in this file.\"\"\"\n x = np.array([8, 67, 79, 10, 52, 53, 98, 34, 15, 58], dtype=float)\n y = np.array([24, 87, 48, 94, 98, 66, 14, 24, 60, 16], dtype=float)\n z = np.array([0.064, 4.489, 6.241, 0.1, 2.704, 2.809, 9.604, 1.156, \n 0.225, 3.364], dtype=float)\n return x, y, z\n\n\[email protected]()\ndef test_grid():\n \"\"\"Return grid locations used for tests in this file.\"\"\"\n with get_test_data('interpolation_test_grid.npz') as fobj:\n data = np.load(fobj)\n return data['xg'], data['yg']\n\n\ndef test_natural_neighbor(test_data, test_grid):\n \"\"\"Test natural neighbor interpolation function.\"\"\"\n xp, yp, z = test_data\n xg, yg = test_grid\n img = natural_neighbor(xp, yp, z, xg, yg)\n with get_test_data('nn_bbox0to100.npz') as fobj:\n truth = np.load(fobj)['img']\n assert_array_almost_equal(truth, img)\n\n\n<mask token>\n\n\[email protected]('method', interp_methods)\ndef test_inverse_distance(method, test_data, test_grid):\n \"\"\"Test inverse distance interpolation function.\"\"\"\n xp, yp, z = test_data\n xg, yg = test_grid\n extra_kw = {}\n if method == 'cressman':\n extra_kw['r'] = 20\n extra_kw['min_neighbors'] = 1\n test_file = 'cressman_r20_mn1.npz'\n elif method == 'barnes':\n extra_kw['r'] = 40\n extra_kw['kappa'] = 100\n test_file = 'barnes_r40_k100.npz'\n img = inverse_distance(xp, yp, z, xg, yg, kind=method, **extra_kw)\n with get_test_data(test_file) as fobj:\n truth = np.load(fobj)['img']\n assert_array_almost_equal(truth, img)\n\n\n<mask token>\n\n\ndef test_barnes_weights():\n \"\"\"Test Barnes weights function.\"\"\"\n kappa = 1000000\n gamma = 0.5\n dist = np.array([1000, 2000, 3000, 4000]) ** 2\n weights = barnes_weights(dist, kappa, gamma) * 10000000\n truth = [1353352.8323661268, 3354.6262790251185, 0.152299797447126, \n 1.26641655e-07]\n assert_array_almost_equal(truth, weights)\n\n\n<mask token>\n\n\ndef test_cressman_point(test_data):\n \"\"\"Test Cressman interpolation for a point function.\"\"\"\n xp, yp, z = test_data\n r = 40\n obs_tree = cKDTree(list(zip(xp, yp)))\n indices = obs_tree.query_ball_point([30, 30], r=r)\n dists = dist_2(30, 30, xp[indices], yp[indices])\n values = z[indices]\n truth = 1.05499444404\n value = cressman_point(dists, values, r)\n assert_almost_equal(truth, value)\n\n\ndef test_barnes_point(test_data):\n \"\"\"Test Barnes interpolation for a point function.\"\"\"\n xp, yp, z = test_data\n r = 40\n obs_tree = cKDTree(list(zip(xp, yp)))\n indices = obs_tree.query_ball_point([60, 60], r=r)\n dists = dist_2(60, 60, xp[indices], yp[indices])\n values = z[indices]\n truth = 4.08718241061\n ave_spacing = np.mean(cdist(list(zip(xp, yp)), list(zip(xp, yp))))\n kappa = calc_kappa(ave_spacing)\n value = barnes_point(dists, values, kappa)\n assert_almost_equal(truth, value)\n",
"step-2": "<mask token>\n\n\[email protected]()\ndef test_data():\n \"\"\"Return data used for tests in this file.\"\"\"\n x = np.array([8, 67, 79, 10, 52, 53, 98, 34, 15, 58], dtype=float)\n y = np.array([24, 87, 48, 94, 98, 66, 14, 24, 60, 16], dtype=float)\n z = np.array([0.064, 4.489, 6.241, 0.1, 2.704, 2.809, 9.604, 1.156, \n 0.225, 3.364], dtype=float)\n return x, y, z\n\n\[email protected]()\ndef test_grid():\n \"\"\"Return grid locations used for tests in this file.\"\"\"\n with get_test_data('interpolation_test_grid.npz') as fobj:\n data = np.load(fobj)\n return data['xg'], data['yg']\n\n\ndef test_natural_neighbor(test_data, test_grid):\n \"\"\"Test natural neighbor interpolation function.\"\"\"\n xp, yp, z = test_data\n xg, yg = test_grid\n img = natural_neighbor(xp, yp, z, xg, yg)\n with get_test_data('nn_bbox0to100.npz') as fobj:\n truth = np.load(fobj)['img']\n assert_array_almost_equal(truth, img)\n\n\n<mask token>\n\n\[email protected]('method', interp_methods)\ndef test_inverse_distance(method, test_data, test_grid):\n \"\"\"Test inverse distance interpolation function.\"\"\"\n xp, yp, z = test_data\n xg, yg = test_grid\n extra_kw = {}\n if method == 'cressman':\n extra_kw['r'] = 20\n extra_kw['min_neighbors'] = 1\n test_file = 'cressman_r20_mn1.npz'\n elif method == 'barnes':\n extra_kw['r'] = 40\n extra_kw['kappa'] = 100\n test_file = 'barnes_r40_k100.npz'\n img = inverse_distance(xp, yp, z, xg, yg, kind=method, **extra_kw)\n with get_test_data(test_file) as fobj:\n truth = np.load(fobj)['img']\n assert_array_almost_equal(truth, img)\n\n\ndef test_nn_point(test_data):\n \"\"\"Test find natural neighbors for a point interpolation function.\"\"\"\n xp, yp, z = test_data\n tri = Delaunay(list(zip(xp, yp)))\n sim_gridx = [30]\n sim_gridy = [30]\n members, tri_info = find_natural_neighbors(tri, list(zip(sim_gridx,\n sim_gridy)))\n val = nn_point(xp, yp, z, [sim_gridx[0], sim_gridy[0]], tri, members[0],\n tri_info)\n truth = 1.009\n assert_almost_equal(truth, val, 3)\n\n\ndef test_barnes_weights():\n \"\"\"Test Barnes weights function.\"\"\"\n kappa = 1000000\n gamma = 0.5\n dist = np.array([1000, 2000, 3000, 4000]) ** 2\n weights = barnes_weights(dist, kappa, gamma) * 10000000\n truth = [1353352.8323661268, 3354.6262790251185, 0.152299797447126, \n 1.26641655e-07]\n assert_array_almost_equal(truth, weights)\n\n\ndef test_cressman_weights():\n \"\"\"Test Cressman weights function.\"\"\"\n r = 5000\n dist = np.array([1000, 2000, 3000, 4000]) ** 2\n weights = cressman_weights(dist, r)\n truth = [0.923076923076923, 0.724137931034482, 0.470588235294117, \n 0.219512195121951]\n assert_array_almost_equal(truth, weights)\n\n\ndef test_cressman_point(test_data):\n \"\"\"Test Cressman interpolation for a point function.\"\"\"\n xp, yp, z = test_data\n r = 40\n obs_tree = cKDTree(list(zip(xp, yp)))\n indices = obs_tree.query_ball_point([30, 30], r=r)\n dists = dist_2(30, 30, xp[indices], yp[indices])\n values = z[indices]\n truth = 1.05499444404\n value = cressman_point(dists, values, r)\n assert_almost_equal(truth, value)\n\n\ndef test_barnes_point(test_data):\n \"\"\"Test Barnes interpolation for a point function.\"\"\"\n xp, yp, z = test_data\n r = 40\n obs_tree = cKDTree(list(zip(xp, yp)))\n indices = obs_tree.query_ball_point([60, 60], r=r)\n dists = dist_2(60, 60, xp[indices], yp[indices])\n values = z[indices]\n truth = 4.08718241061\n ave_spacing = np.mean(cdist(list(zip(xp, yp)), list(zip(xp, yp))))\n kappa = calc_kappa(ave_spacing)\n value = barnes_point(dists, values, 
kappa)\n assert_almost_equal(truth, value)\n",
"step-3": "<mask token>\nlogging.getLogger('metpy.gridding.interpolation').setLevel(logging.ERROR)\n\n\[email protected]()\ndef test_data():\n \"\"\"Return data used for tests in this file.\"\"\"\n x = np.array([8, 67, 79, 10, 52, 53, 98, 34, 15, 58], dtype=float)\n y = np.array([24, 87, 48, 94, 98, 66, 14, 24, 60, 16], dtype=float)\n z = np.array([0.064, 4.489, 6.241, 0.1, 2.704, 2.809, 9.604, 1.156, \n 0.225, 3.364], dtype=float)\n return x, y, z\n\n\[email protected]()\ndef test_grid():\n \"\"\"Return grid locations used for tests in this file.\"\"\"\n with get_test_data('interpolation_test_grid.npz') as fobj:\n data = np.load(fobj)\n return data['xg'], data['yg']\n\n\ndef test_natural_neighbor(test_data, test_grid):\n \"\"\"Test natural neighbor interpolation function.\"\"\"\n xp, yp, z = test_data\n xg, yg = test_grid\n img = natural_neighbor(xp, yp, z, xg, yg)\n with get_test_data('nn_bbox0to100.npz') as fobj:\n truth = np.load(fobj)['img']\n assert_array_almost_equal(truth, img)\n\n\n<mask token>\n\n\[email protected]('method', interp_methods)\ndef test_inverse_distance(method, test_data, test_grid):\n \"\"\"Test inverse distance interpolation function.\"\"\"\n xp, yp, z = test_data\n xg, yg = test_grid\n extra_kw = {}\n if method == 'cressman':\n extra_kw['r'] = 20\n extra_kw['min_neighbors'] = 1\n test_file = 'cressman_r20_mn1.npz'\n elif method == 'barnes':\n extra_kw['r'] = 40\n extra_kw['kappa'] = 100\n test_file = 'barnes_r40_k100.npz'\n img = inverse_distance(xp, yp, z, xg, yg, kind=method, **extra_kw)\n with get_test_data(test_file) as fobj:\n truth = np.load(fobj)['img']\n assert_array_almost_equal(truth, img)\n\n\ndef test_nn_point(test_data):\n \"\"\"Test find natural neighbors for a point interpolation function.\"\"\"\n xp, yp, z = test_data\n tri = Delaunay(list(zip(xp, yp)))\n sim_gridx = [30]\n sim_gridy = [30]\n members, tri_info = find_natural_neighbors(tri, list(zip(sim_gridx,\n sim_gridy)))\n val = nn_point(xp, yp, z, [sim_gridx[0], sim_gridy[0]], tri, members[0],\n tri_info)\n truth = 1.009\n assert_almost_equal(truth, val, 3)\n\n\ndef test_barnes_weights():\n \"\"\"Test Barnes weights function.\"\"\"\n kappa = 1000000\n gamma = 0.5\n dist = np.array([1000, 2000, 3000, 4000]) ** 2\n weights = barnes_weights(dist, kappa, gamma) * 10000000\n truth = [1353352.8323661268, 3354.6262790251185, 0.152299797447126, \n 1.26641655e-07]\n assert_array_almost_equal(truth, weights)\n\n\ndef test_cressman_weights():\n \"\"\"Test Cressman weights function.\"\"\"\n r = 5000\n dist = np.array([1000, 2000, 3000, 4000]) ** 2\n weights = cressman_weights(dist, r)\n truth = [0.923076923076923, 0.724137931034482, 0.470588235294117, \n 0.219512195121951]\n assert_array_almost_equal(truth, weights)\n\n\ndef test_cressman_point(test_data):\n \"\"\"Test Cressman interpolation for a point function.\"\"\"\n xp, yp, z = test_data\n r = 40\n obs_tree = cKDTree(list(zip(xp, yp)))\n indices = obs_tree.query_ball_point([30, 30], r=r)\n dists = dist_2(30, 30, xp[indices], yp[indices])\n values = z[indices]\n truth = 1.05499444404\n value = cressman_point(dists, values, r)\n assert_almost_equal(truth, value)\n\n\ndef test_barnes_point(test_data):\n \"\"\"Test Barnes interpolation for a point function.\"\"\"\n xp, yp, z = test_data\n r = 40\n obs_tree = cKDTree(list(zip(xp, yp)))\n indices = obs_tree.query_ball_point([60, 60], r=r)\n dists = dist_2(60, 60, xp[indices], yp[indices])\n values = z[indices]\n truth = 4.08718241061\n ave_spacing = np.mean(cdist(list(zip(xp, yp)), list(zip(xp, yp))))\n 
kappa = calc_kappa(ave_spacing)\n value = barnes_point(dists, values, kappa)\n assert_almost_equal(truth, value)\n",
"step-4": "<mask token>\nlogging.getLogger('metpy.gridding.interpolation').setLevel(logging.ERROR)\n\n\[email protected]()\ndef test_data():\n \"\"\"Return data used for tests in this file.\"\"\"\n x = np.array([8, 67, 79, 10, 52, 53, 98, 34, 15, 58], dtype=float)\n y = np.array([24, 87, 48, 94, 98, 66, 14, 24, 60, 16], dtype=float)\n z = np.array([0.064, 4.489, 6.241, 0.1, 2.704, 2.809, 9.604, 1.156, \n 0.225, 3.364], dtype=float)\n return x, y, z\n\n\[email protected]()\ndef test_grid():\n \"\"\"Return grid locations used for tests in this file.\"\"\"\n with get_test_data('interpolation_test_grid.npz') as fobj:\n data = np.load(fobj)\n return data['xg'], data['yg']\n\n\ndef test_natural_neighbor(test_data, test_grid):\n \"\"\"Test natural neighbor interpolation function.\"\"\"\n xp, yp, z = test_data\n xg, yg = test_grid\n img = natural_neighbor(xp, yp, z, xg, yg)\n with get_test_data('nn_bbox0to100.npz') as fobj:\n truth = np.load(fobj)['img']\n assert_array_almost_equal(truth, img)\n\n\ninterp_methods = ['cressman', 'barnes']\n\n\[email protected]('method', interp_methods)\ndef test_inverse_distance(method, test_data, test_grid):\n \"\"\"Test inverse distance interpolation function.\"\"\"\n xp, yp, z = test_data\n xg, yg = test_grid\n extra_kw = {}\n if method == 'cressman':\n extra_kw['r'] = 20\n extra_kw['min_neighbors'] = 1\n test_file = 'cressman_r20_mn1.npz'\n elif method == 'barnes':\n extra_kw['r'] = 40\n extra_kw['kappa'] = 100\n test_file = 'barnes_r40_k100.npz'\n img = inverse_distance(xp, yp, z, xg, yg, kind=method, **extra_kw)\n with get_test_data(test_file) as fobj:\n truth = np.load(fobj)['img']\n assert_array_almost_equal(truth, img)\n\n\ndef test_nn_point(test_data):\n \"\"\"Test find natural neighbors for a point interpolation function.\"\"\"\n xp, yp, z = test_data\n tri = Delaunay(list(zip(xp, yp)))\n sim_gridx = [30]\n sim_gridy = [30]\n members, tri_info = find_natural_neighbors(tri, list(zip(sim_gridx,\n sim_gridy)))\n val = nn_point(xp, yp, z, [sim_gridx[0], sim_gridy[0]], tri, members[0],\n tri_info)\n truth = 1.009\n assert_almost_equal(truth, val, 3)\n\n\ndef test_barnes_weights():\n \"\"\"Test Barnes weights function.\"\"\"\n kappa = 1000000\n gamma = 0.5\n dist = np.array([1000, 2000, 3000, 4000]) ** 2\n weights = barnes_weights(dist, kappa, gamma) * 10000000\n truth = [1353352.8323661268, 3354.6262790251185, 0.152299797447126, \n 1.26641655e-07]\n assert_array_almost_equal(truth, weights)\n\n\ndef test_cressman_weights():\n \"\"\"Test Cressman weights function.\"\"\"\n r = 5000\n dist = np.array([1000, 2000, 3000, 4000]) ** 2\n weights = cressman_weights(dist, r)\n truth = [0.923076923076923, 0.724137931034482, 0.470588235294117, \n 0.219512195121951]\n assert_array_almost_equal(truth, weights)\n\n\ndef test_cressman_point(test_data):\n \"\"\"Test Cressman interpolation for a point function.\"\"\"\n xp, yp, z = test_data\n r = 40\n obs_tree = cKDTree(list(zip(xp, yp)))\n indices = obs_tree.query_ball_point([30, 30], r=r)\n dists = dist_2(30, 30, xp[indices], yp[indices])\n values = z[indices]\n truth = 1.05499444404\n value = cressman_point(dists, values, r)\n assert_almost_equal(truth, value)\n\n\ndef test_barnes_point(test_data):\n \"\"\"Test Barnes interpolation for a point function.\"\"\"\n xp, yp, z = test_data\n r = 40\n obs_tree = cKDTree(list(zip(xp, yp)))\n indices = obs_tree.query_ball_point([60, 60], r=r)\n dists = dist_2(60, 60, xp[indices], yp[indices])\n values = z[indices]\n truth = 4.08718241061\n ave_spacing = np.mean(cdist(list(zip(xp, 
yp)), list(zip(xp, yp))))\n kappa = calc_kappa(ave_spacing)\n value = barnes_point(dists, values, kappa)\n assert_almost_equal(truth, value)\n",
"step-5": "# Copyright (c) 2008-2016 MetPy Developers.\n# Distributed under the terms of the BSD 3-Clause License.\n# SPDX-License-Identifier: BSD-3-Clause\n\"\"\"Test the `interpolation` module.\"\"\"\n\nfrom __future__ import division\n\nimport logging\n\nimport numpy as np\nfrom numpy.testing import assert_almost_equal, assert_array_almost_equal\nimport pytest\nfrom scipy.spatial import cKDTree, Delaunay\nfrom scipy.spatial.distance import cdist\n\nfrom metpy.cbook import get_test_data\nfrom metpy.gridding.gridding_functions import calc_kappa\nfrom metpy.gridding.interpolation import (barnes_point, barnes_weights, cressman_point,\n cressman_weights, inverse_distance,\n natural_neighbor, nn_point)\nfrom metpy.gridding.triangles import dist_2, find_natural_neighbors\n\nlogging.getLogger('metpy.gridding.interpolation').setLevel(logging.ERROR)\n\n\[email protected]()\ndef test_data():\n r\"\"\"Return data used for tests in this file.\"\"\"\n x = np.array([8, 67, 79, 10, 52, 53, 98, 34, 15, 58], dtype=float)\n y = np.array([24, 87, 48, 94, 98, 66, 14, 24, 60, 16], dtype=float)\n z = np.array([0.064, 4.489, 6.241, 0.1, 2.704, 2.809, 9.604, 1.156,\n 0.225, 3.364], dtype=float)\n\n return x, y, z\n\n\[email protected]()\ndef test_grid():\n r\"\"\"Return grid locations used for tests in this file.\"\"\"\n with get_test_data('interpolation_test_grid.npz') as fobj:\n data = np.load(fobj)\n return data['xg'], data['yg']\n\n\ndef test_natural_neighbor(test_data, test_grid):\n r\"\"\"Test natural neighbor interpolation function.\"\"\"\n xp, yp, z = test_data\n xg, yg = test_grid\n\n img = natural_neighbor(xp, yp, z, xg, yg)\n\n with get_test_data('nn_bbox0to100.npz') as fobj:\n truth = np.load(fobj)['img']\n\n assert_array_almost_equal(truth, img)\n\n\ninterp_methods = ['cressman', 'barnes']\n\n\[email protected]('method', interp_methods)\ndef test_inverse_distance(method, test_data, test_grid):\n r\"\"\"Test inverse distance interpolation function.\"\"\"\n xp, yp, z = test_data\n xg, yg = test_grid\n\n extra_kw = {}\n if method == 'cressman':\n extra_kw['r'] = 20\n extra_kw['min_neighbors'] = 1\n test_file = 'cressman_r20_mn1.npz'\n elif method == 'barnes':\n extra_kw['r'] = 40\n extra_kw['kappa'] = 100\n test_file = 'barnes_r40_k100.npz'\n\n img = inverse_distance(xp, yp, z, xg, yg, kind=method, **extra_kw)\n\n with get_test_data(test_file) as fobj:\n truth = np.load(fobj)['img']\n\n assert_array_almost_equal(truth, img)\n\n\ndef test_nn_point(test_data):\n r\"\"\"Test find natural neighbors for a point interpolation function.\"\"\"\n xp, yp, z = test_data\n\n tri = Delaunay(list(zip(xp, yp)))\n\n sim_gridx = [30]\n sim_gridy = [30]\n\n members, tri_info = find_natural_neighbors(tri,\n list(zip(sim_gridx, sim_gridy)))\n\n val = nn_point(xp, yp, z, [sim_gridx[0], sim_gridy[0]],\n tri, members[0], tri_info)\n\n truth = 1.009\n\n assert_almost_equal(truth, val, 3)\n\n\ndef test_barnes_weights():\n r\"\"\"Test Barnes weights function.\"\"\"\n kappa = 1000000\n\n gamma = 0.5\n\n dist = np.array([1000, 2000, 3000, 4000])**2\n\n weights = barnes_weights(dist, kappa, gamma) * 10000000\n\n truth = [1353352.832366126918939,\n 3354.626279025118388,\n .152299797447126,\n .000000126641655]\n\n assert_array_almost_equal(truth, weights)\n\n\ndef test_cressman_weights():\n r\"\"\"Test Cressman weights function.\"\"\"\n r = 5000\n\n dist = np.array([1000, 2000, 3000, 4000])**2\n\n weights = cressman_weights(dist, r)\n\n truth = [0.923076923076923,\n 0.724137931034482,\n 0.470588235294117,\n 0.219512195121951]\n\n 
assert_array_almost_equal(truth, weights)\n\n\ndef test_cressman_point(test_data):\n r\"\"\"Test Cressman interpolation for a point function.\"\"\"\n xp, yp, z = test_data\n\n r = 40\n\n obs_tree = cKDTree(list(zip(xp, yp)))\n\n indices = obs_tree.query_ball_point([30, 30], r=r)\n\n dists = dist_2(30, 30, xp[indices], yp[indices])\n values = z[indices]\n\n truth = 1.05499444404\n\n value = cressman_point(dists, values, r)\n\n assert_almost_equal(truth, value)\n\n\ndef test_barnes_point(test_data):\n r\"\"\"Test Barnes interpolation for a point function.\"\"\"\n xp, yp, z = test_data\n\n r = 40\n\n obs_tree = cKDTree(list(zip(xp, yp)))\n\n indices = obs_tree.query_ball_point([60, 60], r=r)\n\n dists = dist_2(60, 60, xp[indices], yp[indices])\n values = z[indices]\n\n truth = 4.08718241061\n\n ave_spacing = np.mean((cdist(list(zip(xp, yp)), list(zip(xp, yp)))))\n\n kappa = calc_kappa(ave_spacing)\n\n value = barnes_point(dists, values, kappa)\n\n assert_almost_equal(truth, value)\n",
"step-ids": [
7,
9,
10,
11,
13
]
}
|
[
7,
9,
10,
11,
13
] |
class cal4:
    def setdata(self, n1):
        self.n1 = n1

    def display(self):
        # square of the value stored via setdata
        return self.n1 * self.n1


n1 = int(input("Enter number: "))
c = cal4()
c.setdata(n1)
print(c.display())
|
normal
|
{
"blob_id": "65b90fccd0ee74b369475aa9fe33f159881c8b82",
"index": 6645,
"step-1": "class cal4:\n\n def setdata(self, n1):\n self.n1 = n1\n <mask token>\n\n\n<mask token>\n",
"step-2": "class cal4:\n\n def setdata(self, n1):\n self.n1 = n1\n\n def display(self):\n return n1 * n1\n\n\n<mask token>\n",
"step-3": "class cal4:\n\n def setdata(self, n1):\n self.n1 = n1\n\n def display(self):\n return n1 * n1\n\n\n<mask token>\nprint(c.display())\n",
"step-4": "class cal4:\n\n def setdata(self, n1):\n self.n1 = n1\n\n def display(self):\n return n1 * n1\n\n\nn1 = int(input('Enter number: '))\nc = cal4()\nprint(c.display())\n",
"step-5": "class cal4:\r\n def setdata(self,n1):\r\n self.n1 = n1\r\n def display(self):\r\n return n1*n1\r\nn1 = int(input(\"Enter number: \"))\r\nc = cal4()\r\n\r\nprint(c.display())",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
import pytest
from ansiblediscover.graph.node import Node
def test_build_identifier():
assert 'role:server_base' == Node.build_identifier('server_base', 'role')
def test_identifier():
node = Node('server_base', 'role', 'irrelevant')
assert 'role:server_base' == node.identifier()
def test_add_successor():
parent = Node('appserver', 'playbook', 'appserver.yml')
child = Node('server_base', 'role', 'roles/server_base')
parent.add_successor(child)
assert child in parent.successors
assert parent in child.predecessors
def test_add_predecessor():
parent = Node('appserver', 'playbook', 'appserver.yml')
child = Node('server_base', 'role', 'roles/server_base')
child.add_predecessor(parent)
assert child in parent.successors
assert parent in child.predecessors
def test_str():
name = 'myname'
typestring = 'mytype'
path = 'mypath'
node = Node(name, typestring, path)
assert str((typestring, name, path)) == str(node)
@pytest.mark.parametrize('this, other, equal', [
(('myname', 'mytype', 'mypath'), ('myname', 'mytype', 'mypath'), True),
(('myname', 'mytype', 'mypath'), ('othername', 'mytype', 'mypath'), False),
(('myname', 'mytype', 'mypath'), ('myname', 'othertype', 'mypath'), False),
(('myname', 'mytype', 'mypath'), ('myname', 'othertype', 'otherpath'), False),
])
def test_eq(this, other, equal):
this_node = Node(*this)
other_node = Node(*other)
assert (equal and (this_node == other_node)) or (not equal and (this_node != other_node))
@pytest.mark.parametrize('other', [
None,
[],
('myname', 'mytype', 'mypath'),
])
def test_eq_unequal_types(other):
this = Node('myname', 'mytype', 'mypath')
assert this != other
|
normal
|
{
"blob_id": "8e22db940124f92d3048055cf72dcaa79564cdc6",
"index": 1953,
"step-1": "<mask token>\n\n\ndef test_build_identifier():\n assert 'role:server_base' == Node.build_identifier('server_base', 'role')\n\n\ndef test_identifier():\n node = Node('server_base', 'role', 'irrelevant')\n assert 'role:server_base' == node.identifier()\n\n\ndef test_add_successor():\n parent = Node('appserver', 'playbook', 'appserver.yml')\n child = Node('server_base', 'role', 'roles/server_base')\n parent.add_successor(child)\n assert child in parent.successors\n assert parent in child.predecessors\n\n\n<mask token>\n\n\[email protected]('this, other, equal', [(('myname', 'mytype',\n 'mypath'), ('myname', 'mytype', 'mypath'), True), (('myname', 'mytype',\n 'mypath'), ('othername', 'mytype', 'mypath'), False), (('myname',\n 'mytype', 'mypath'), ('myname', 'othertype', 'mypath'), False), ((\n 'myname', 'mytype', 'mypath'), ('myname', 'othertype', 'otherpath'), \n False)])\ndef test_eq(this, other, equal):\n this_node = Node(*this)\n other_node = Node(*other)\n assert equal and this_node == other_node or not equal and this_node != other_node\n\n\[email protected]('other', [None, [], ('myname', 'mytype', 'mypath')])\ndef test_eq_unequal_types(other):\n this = Node('myname', 'mytype', 'mypath')\n assert this != other\n",
"step-2": "<mask token>\n\n\ndef test_build_identifier():\n assert 'role:server_base' == Node.build_identifier('server_base', 'role')\n\n\ndef test_identifier():\n node = Node('server_base', 'role', 'irrelevant')\n assert 'role:server_base' == node.identifier()\n\n\ndef test_add_successor():\n parent = Node('appserver', 'playbook', 'appserver.yml')\n child = Node('server_base', 'role', 'roles/server_base')\n parent.add_successor(child)\n assert child in parent.successors\n assert parent in child.predecessors\n\n\n<mask token>\n\n\ndef test_str():\n name = 'myname'\n typestring = 'mytype'\n path = 'mypath'\n node = Node(name, typestring, path)\n assert str((typestring, name, path)) == str(node)\n\n\[email protected]('this, other, equal', [(('myname', 'mytype',\n 'mypath'), ('myname', 'mytype', 'mypath'), True), (('myname', 'mytype',\n 'mypath'), ('othername', 'mytype', 'mypath'), False), (('myname',\n 'mytype', 'mypath'), ('myname', 'othertype', 'mypath'), False), ((\n 'myname', 'mytype', 'mypath'), ('myname', 'othertype', 'otherpath'), \n False)])\ndef test_eq(this, other, equal):\n this_node = Node(*this)\n other_node = Node(*other)\n assert equal and this_node == other_node or not equal and this_node != other_node\n\n\[email protected]('other', [None, [], ('myname', 'mytype', 'mypath')])\ndef test_eq_unequal_types(other):\n this = Node('myname', 'mytype', 'mypath')\n assert this != other\n",
"step-3": "<mask token>\n\n\ndef test_build_identifier():\n assert 'role:server_base' == Node.build_identifier('server_base', 'role')\n\n\ndef test_identifier():\n node = Node('server_base', 'role', 'irrelevant')\n assert 'role:server_base' == node.identifier()\n\n\ndef test_add_successor():\n parent = Node('appserver', 'playbook', 'appserver.yml')\n child = Node('server_base', 'role', 'roles/server_base')\n parent.add_successor(child)\n assert child in parent.successors\n assert parent in child.predecessors\n\n\ndef test_add_predecessor():\n parent = Node('appserver', 'playbook', 'appserver.yml')\n child = Node('server_base', 'role', 'roles/server_base')\n child.add_predecessor(parent)\n assert child in parent.successors\n assert parent in child.predecessors\n\n\ndef test_str():\n name = 'myname'\n typestring = 'mytype'\n path = 'mypath'\n node = Node(name, typestring, path)\n assert str((typestring, name, path)) == str(node)\n\n\[email protected]('this, other, equal', [(('myname', 'mytype',\n 'mypath'), ('myname', 'mytype', 'mypath'), True), (('myname', 'mytype',\n 'mypath'), ('othername', 'mytype', 'mypath'), False), (('myname',\n 'mytype', 'mypath'), ('myname', 'othertype', 'mypath'), False), ((\n 'myname', 'mytype', 'mypath'), ('myname', 'othertype', 'otherpath'), \n False)])\ndef test_eq(this, other, equal):\n this_node = Node(*this)\n other_node = Node(*other)\n assert equal and this_node == other_node or not equal and this_node != other_node\n\n\[email protected]('other', [None, [], ('myname', 'mytype', 'mypath')])\ndef test_eq_unequal_types(other):\n this = Node('myname', 'mytype', 'mypath')\n assert this != other\n",
"step-4": "import pytest\nfrom ansiblediscover.graph.node import Node\n\n\ndef test_build_identifier():\n assert 'role:server_base' == Node.build_identifier('server_base', 'role')\n\n\ndef test_identifier():\n node = Node('server_base', 'role', 'irrelevant')\n assert 'role:server_base' == node.identifier()\n\n\ndef test_add_successor():\n parent = Node('appserver', 'playbook', 'appserver.yml')\n child = Node('server_base', 'role', 'roles/server_base')\n parent.add_successor(child)\n assert child in parent.successors\n assert parent in child.predecessors\n\n\ndef test_add_predecessor():\n parent = Node('appserver', 'playbook', 'appserver.yml')\n child = Node('server_base', 'role', 'roles/server_base')\n child.add_predecessor(parent)\n assert child in parent.successors\n assert parent in child.predecessors\n\n\ndef test_str():\n name = 'myname'\n typestring = 'mytype'\n path = 'mypath'\n node = Node(name, typestring, path)\n assert str((typestring, name, path)) == str(node)\n\n\[email protected]('this, other, equal', [(('myname', 'mytype',\n 'mypath'), ('myname', 'mytype', 'mypath'), True), (('myname', 'mytype',\n 'mypath'), ('othername', 'mytype', 'mypath'), False), (('myname',\n 'mytype', 'mypath'), ('myname', 'othertype', 'mypath'), False), ((\n 'myname', 'mytype', 'mypath'), ('myname', 'othertype', 'otherpath'), \n False)])\ndef test_eq(this, other, equal):\n this_node = Node(*this)\n other_node = Node(*other)\n assert equal and this_node == other_node or not equal and this_node != other_node\n\n\[email protected]('other', [None, [], ('myname', 'mytype', 'mypath')])\ndef test_eq_unequal_types(other):\n this = Node('myname', 'mytype', 'mypath')\n assert this != other\n",
"step-5": "import pytest\n\nfrom ansiblediscover.graph.node import Node\n\n\ndef test_build_identifier():\n assert 'role:server_base' == Node.build_identifier('server_base', 'role')\n\n\ndef test_identifier():\n node = Node('server_base', 'role', 'irrelevant')\n assert 'role:server_base' == node.identifier()\n\n\ndef test_add_successor():\n parent = Node('appserver', 'playbook', 'appserver.yml')\n child = Node('server_base', 'role', 'roles/server_base')\n\n parent.add_successor(child)\n\n assert child in parent.successors\n assert parent in child.predecessors\n\n\ndef test_add_predecessor():\n parent = Node('appserver', 'playbook', 'appserver.yml')\n child = Node('server_base', 'role', 'roles/server_base')\n\n child.add_predecessor(parent)\n\n assert child in parent.successors\n assert parent in child.predecessors\n\n\ndef test_str():\n name = 'myname'\n typestring = 'mytype'\n path = 'mypath'\n node = Node(name, typestring, path)\n\n assert str((typestring, name, path)) == str(node)\n\n\[email protected]('this, other, equal', [\n (('myname', 'mytype', 'mypath'), ('myname', 'mytype', 'mypath'), True),\n (('myname', 'mytype', 'mypath'), ('othername', 'mytype', 'mypath'), False),\n (('myname', 'mytype', 'mypath'), ('myname', 'othertype', 'mypath'), False),\n (('myname', 'mytype', 'mypath'), ('myname', 'othertype', 'otherpath'), False),\n])\ndef test_eq(this, other, equal):\n this_node = Node(*this)\n other_node = Node(*other)\n\n assert (equal and (this_node == other_node)) or (not equal and (this_node != other_node))\n\n\[email protected]('other', [\n None,\n [],\n ('myname', 'mytype', 'mypath'),\n])\ndef test_eq_unequal_types(other):\n this = Node('myname', 'mytype', 'mypath')\n assert this != other\n",
"step-ids": [
5,
6,
7,
8,
9
]
}
|
[
5,
6,
7,
8,
9
] |
# Kitsela Karolina, IVT (Informatics and Computer Engineering), 3rd year
# Variant 6
# Find the sum of all floating-point numbers
b = ("name", " DeLorean DMC-12", "motor_pos", "rear", "n_of_wheels", 4,
"n_of_passengers", 2, "weight", 1.230, "height", 1.140, "length", 4.216,
"width", 1.857, "max_speed", 177)
print(sum(b[9:16:2]))
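# Sanity check (added, not in the original): b[9:16:2] picks indices 9, 11,
# 13 and 15, i.e. 1.230 + 1.140 + 4.216 + 1.857 = 8.443.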
|
normal
|
{
"blob_id": "b5160a2574dd2c4eec542d7aca8288da0feadaba",
"index": 5702,
"step-1": "# Кицела Каролина ИВТ 3 курс \n# Вариант 6 \n# Найти сумму всех чисел с плавающей точкой\n\nb = (\"name\",\t\"\tDeLorean\tDMC-12\",\t\"motor_pos\",\t\"rear\",\t\"n_of_wheels\",\t4,\n\"n_of_passengers\",\t2,\t\"weight\",\t1.230,\t\"height\",\t1.140,\t\"length\",\t4.216,\n \"width\", 1.857, \"max_speed\", 177)\n\nprint sum(b[9:16:2])\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
import random
import json
import os
from pico2d import *
import game_framework
import game_world
import menu_world
import game_state
from Start_menu import Menu
name = "MenuState"
boy = None
Start_menu = None
menu_time =None
def enter():
global Start_menu
Start_menu = Menu()
menu_world.add_object(Start_menu, 0)
def exit():
menu_world.clear()
def pause():
pass
def resume():
pass
def handle_events():
global Start_menu,menu_time
events = get_events()
for event in events:
if event.type == SDL_QUIT:
game_framework.quit()
elif event.type == SDL_KEYDOWN and event.key == SDLK_ESCAPE:
game_framework.quit()
elif Start_menu.start ==1:
menu_time =get_time()
game_framework.change_state(game_state)
#game_framework.quit()
else:
Start_menu.handle_event(event)
def update():
for game_object in menu_world.all_objects():
game_object.update()
def draw():
clear_canvas()
for game_object in menu_world.all_objects():
game_object.draw()
update_canvas()
|
normal
|
{
"blob_id": "fee2ddca5888c9db00d2d7a4fe11ba20c4e31685",
"index": 1909,
"step-1": "<mask token>\n\n\ndef enter():\n global Start_menu\n Start_menu = Menu()\n menu_world.add_object(Start_menu, 0)\n\n\n<mask token>\n\n\ndef handle_events():\n global Start_menu, menu_time\n events = get_events()\n for event in events:\n if event.type == SDL_QUIT:\n game_framework.quit()\n elif event.type == SDL_KEYDOWN and event.key == SDLK_ESCAPE:\n game_framework.quit()\n elif Start_menu.start == 1:\n menu_time = get_time()\n game_framework.change_state(game_state)\n else:\n Start_menu.handle_event(event)\n\n\ndef update():\n for game_object in menu_world.all_objects():\n game_object.update()\n\n\ndef draw():\n clear_canvas()\n for game_object in menu_world.all_objects():\n game_object.draw()\n update_canvas()\n",
"step-2": "<mask token>\n\n\ndef enter():\n global Start_menu\n Start_menu = Menu()\n menu_world.add_object(Start_menu, 0)\n\n\n<mask token>\n\n\ndef pause():\n pass\n\n\ndef resume():\n pass\n\n\ndef handle_events():\n global Start_menu, menu_time\n events = get_events()\n for event in events:\n if event.type == SDL_QUIT:\n game_framework.quit()\n elif event.type == SDL_KEYDOWN and event.key == SDLK_ESCAPE:\n game_framework.quit()\n elif Start_menu.start == 1:\n menu_time = get_time()\n game_framework.change_state(game_state)\n else:\n Start_menu.handle_event(event)\n\n\ndef update():\n for game_object in menu_world.all_objects():\n game_object.update()\n\n\ndef draw():\n clear_canvas()\n for game_object in menu_world.all_objects():\n game_object.draw()\n update_canvas()\n",
"step-3": "<mask token>\n\n\ndef enter():\n global Start_menu\n Start_menu = Menu()\n menu_world.add_object(Start_menu, 0)\n\n\ndef exit():\n menu_world.clear()\n\n\ndef pause():\n pass\n\n\ndef resume():\n pass\n\n\ndef handle_events():\n global Start_menu, menu_time\n events = get_events()\n for event in events:\n if event.type == SDL_QUIT:\n game_framework.quit()\n elif event.type == SDL_KEYDOWN and event.key == SDLK_ESCAPE:\n game_framework.quit()\n elif Start_menu.start == 1:\n menu_time = get_time()\n game_framework.change_state(game_state)\n else:\n Start_menu.handle_event(event)\n\n\ndef update():\n for game_object in menu_world.all_objects():\n game_object.update()\n\n\ndef draw():\n clear_canvas()\n for game_object in menu_world.all_objects():\n game_object.draw()\n update_canvas()\n",
"step-4": "<mask token>\nname = 'MenuState'\nboy = None\nStart_menu = None\nmenu_time = None\n\n\ndef enter():\n global Start_menu\n Start_menu = Menu()\n menu_world.add_object(Start_menu, 0)\n\n\ndef exit():\n menu_world.clear()\n\n\ndef pause():\n pass\n\n\ndef resume():\n pass\n\n\ndef handle_events():\n global Start_menu, menu_time\n events = get_events()\n for event in events:\n if event.type == SDL_QUIT:\n game_framework.quit()\n elif event.type == SDL_KEYDOWN and event.key == SDLK_ESCAPE:\n game_framework.quit()\n elif Start_menu.start == 1:\n menu_time = get_time()\n game_framework.change_state(game_state)\n else:\n Start_menu.handle_event(event)\n\n\ndef update():\n for game_object in menu_world.all_objects():\n game_object.update()\n\n\ndef draw():\n clear_canvas()\n for game_object in menu_world.all_objects():\n game_object.draw()\n update_canvas()\n",
"step-5": "import random\nimport json\nimport os\n\nfrom pico2d import *\nimport game_framework\nimport game_world\nimport menu_world\nimport game_state\n\n\nfrom Start_menu import Menu\n\nname = \"MenuState\"\n\nboy = None\nStart_menu = None\nmenu_time =None\ndef enter():\n global Start_menu\n Start_menu = Menu()\n menu_world.add_object(Start_menu, 0)\n\ndef exit():\n menu_world.clear()\n\ndef pause():\n pass\n\n\ndef resume():\n pass\n\n\ndef handle_events():\n global Start_menu,menu_time\n events = get_events()\n for event in events:\n if event.type == SDL_QUIT:\n game_framework.quit()\n elif event.type == SDL_KEYDOWN and event.key == SDLK_ESCAPE:\n game_framework.quit()\n elif Start_menu.start ==1:\n menu_time =get_time()\n game_framework.change_state(game_state)\n\n #game_framework.quit()\n else:\n Start_menu.handle_event(event)\n\n\ndef update():\n for game_object in menu_world.all_objects():\n game_object.update()\n\n\n\ndef draw():\n clear_canvas()\n for game_object in menu_world.all_objects():\n game_object.draw()\n update_canvas()\n\n\n\n\n\n\n",
"step-ids": [
4,
6,
7,
8,
10
]
}
|
[
4,
6,
7,
8,
10
] |
from unittest import TestCase
from attendance import Member
__author__ = 'colin'
class TestMember(TestCase):
def test_here(self):
member = Member("John", "Doe")
self.assertFalse(member.attended)
member.here()
self.assertTrue(member.attended)
|
normal
|
{
"blob_id": "a6713a4edece14a88bd9c8ddd483ff8e16acdbcc",
"index": 9695,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass TestMember(TestCase):\n\n def test_here(self):\n member = Member('John', 'Doe')\n self.assertFalse(member.attended)\n member.here()\n self.assertTrue(member.attended)\n",
"step-3": "<mask token>\n__author__ = 'colin'\n\n\nclass TestMember(TestCase):\n\n def test_here(self):\n member = Member('John', 'Doe')\n self.assertFalse(member.attended)\n member.here()\n self.assertTrue(member.attended)\n",
"step-4": "from unittest import TestCase\nfrom attendance import Member\n__author__ = 'colin'\n\n\nclass TestMember(TestCase):\n\n def test_here(self):\n member = Member('John', 'Doe')\n self.assertFalse(member.attended)\n member.here()\n self.assertTrue(member.attended)\n",
"step-5": "from unittest import TestCase\nfrom attendance import Member\n\n__author__ = 'colin'\n\n\nclass TestMember(TestCase):\n def test_here(self):\n member = Member(\"John\", \"Doe\")\n self.assertFalse(member.attended)\n member.here()\n self.assertTrue(member.attended)",
"step-ids": [
0,
2,
3,
4,
5
]
}
|
[
0,
2,
3,
4,
5
] |
#!/usr/bin/env python3
import torch
import torch.nn as nn
import torch.nn.functional as F
import pytorch_lightning as pl
import torchmetrics
class BaselineModule(pl.LightningModule):
def __init__(self, input_size, num_classes=4, lr=3e-4):
super().__init__()
self.backbone = nn.Sequential( # CBR-Tiny arXiv:1902.07208
nn.Conv2d(3, 64, 5),
nn.BatchNorm2d(64),
nn.ReLU(),
nn.MaxPool2d(3, 2),
nn.Conv2d(64, 256, 5),
nn.BatchNorm2d(256),
nn.ReLU(),
nn.MaxPool2d(3, 2),
nn.Conv2d(256, 512, 5),
nn.BatchNorm2d(512),
nn.ReLU(),
nn.MaxPool2d(3, 2),
nn.AdaptiveAvgPool2d((1, 1)),
)
hidden_size = self._get_hidden_size(input_size)
self.classifier = nn.Linear(hidden_size, num_classes)
self.lr = lr
self.train_acc = torchmetrics.Accuracy()
self.val_acc = torchmetrics.Accuracy()
    def _get_hidden_size(self, input_size):
        # Infer the flattened feature size by running a dummy input through
        # the backbone (output is (1, C, 1, 1) after the adaptive pooling).
        with torch.no_grad():
            features = self.backbone(torch.randn(1, 3, input_size, input_size))
        return features.numel()
def forward(self, input_tensor):
hidden = self.backbone(input_tensor)
return self.classifier(hidden.squeeze())
def training_step(self, batch, batch_idx):
input_tensor, target = batch
logits = self(input_tensor)
loss = F.cross_entropy(logits, target)
self.train_acc(F.softmax(logits, 1), target)
self.log('train_acc', self.train_acc, on_epoch=True)
self.log('train_loss', loss, on_epoch=True)
return loss
def validation_step(self, batch, batch_idx):
input_tensor, target = batch
logits = self(input_tensor)
loss = F.cross_entropy(logits, target)
self.val_acc(F.softmax(logits, 1), target)
self.log('val_acc', self.val_acc, on_epoch=True)
self.log('val_loss', loss, on_epoch=True)
def configure_optimizers(self):
optimizer = torch.optim.Adam(self.parameters(), lr=self.lr)
return optimizer
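

# Minimal smoke-test sketch (an assumption, not part of the original module):
# random tensors stand in for a real image dataset, and input_size=96 is an
# arbitrary choice. Exact Trainer/torchmetrics arguments may vary by version.
if __name__ == '__main__':
    from torch.utils.data import DataLoader, TensorDataset

    images = torch.randn(32, 3, 96, 96)
    labels = torch.randint(0, 4, (32,))
    loader = DataLoader(TensorDataset(images, labels), batch_size=8)

    model = BaselineModule(input_size=96, num_classes=4)
    trainer = pl.Trainer(max_epochs=1)
    trainer.fit(model, loader, loader)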
|
normal
|
{
"blob_id": "7d43b20ebee2f4cd509bbd896c9e6ae8b2c4b354",
"index": 7128,
"step-1": "<mask token>\n\n\nclass BaselineModule(pl.LightningModule):\n <mask token>\n\n def _get_hidden_size(self, input_size):\n self.backbone(torch.randn(1, 3, input_size, input_size))\n\n def forward(self, input_tensor):\n hidden = self.backbone(input_tensor)\n return self.classifier(hidden.squeeze())\n <mask token>\n <mask token>\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass BaselineModule(pl.LightningModule):\n <mask token>\n\n def _get_hidden_size(self, input_size):\n self.backbone(torch.randn(1, 3, input_size, input_size))\n\n def forward(self, input_tensor):\n hidden = self.backbone(input_tensor)\n return self.classifier(hidden.squeeze())\n\n def training_step(self, batch, batch_idx):\n input_tensor, target = batch\n logits = self(input_tensor)\n loss = F.cross_entropy(logits, target)\n self.train_acc(F.softmax(logits, 1), target)\n self.log('train_acc', self.train_acc, on_epoch=True)\n self.log('train_loss', loss, on_epoch=True)\n return loss\n\n def validation_step(self, batch, batch_idx):\n input_tensor, target = batch\n logits = self(input_tensor)\n loss = F.cross_entropy(logits, target)\n self.val_acc(F.softmax(logits, 1), target)\n self.log('val_acc', self.val_acc, on_epoch=True)\n self.log('val_loss', loss, on_epoch=True)\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass BaselineModule(pl.LightningModule):\n <mask token>\n\n def _get_hidden_size(self, input_size):\n self.backbone(torch.randn(1, 3, input_size, input_size))\n\n def forward(self, input_tensor):\n hidden = self.backbone(input_tensor)\n return self.classifier(hidden.squeeze())\n\n def training_step(self, batch, batch_idx):\n input_tensor, target = batch\n logits = self(input_tensor)\n loss = F.cross_entropy(logits, target)\n self.train_acc(F.softmax(logits, 1), target)\n self.log('train_acc', self.train_acc, on_epoch=True)\n self.log('train_loss', loss, on_epoch=True)\n return loss\n\n def validation_step(self, batch, batch_idx):\n input_tensor, target = batch\n logits = self(input_tensor)\n loss = F.cross_entropy(logits, target)\n self.val_acc(F.softmax(logits, 1), target)\n self.log('val_acc', self.val_acc, on_epoch=True)\n self.log('val_loss', loss, on_epoch=True)\n\n def configure_optimizers(self):\n optimizer = torch.optim.Adam(self.parameters(), lr=self.lr)\n return optimizer\n",
"step-4": "<mask token>\n\n\nclass BaselineModule(pl.LightningModule):\n\n def __init__(self, input_size, num_classes=4, lr=0.0003):\n super().__init__()\n self.backbone = nn.Sequential(nn.Conv2d(3, 64, 5), nn.BatchNorm2d(\n 64), nn.ReLU(), nn.MaxPool2d(3, 2), nn.Conv2d(64, 256, 5), nn.\n BatchNorm2d(256), nn.ReLU(), nn.MaxPool2d(3, 2), nn.Conv2d(256,\n 512, 5), nn.BatchNorm2d(512), nn.ReLU(), nn.MaxPool2d(3, 2), nn\n .AdaptiveAvgPool2d((1, 1)))\n hidden_size = self._get_hidden_size(input_size)\n self.classifier = nn.Linear(hidden_size, num_classes)\n self.lr = lr\n self.train_acc = torchmetrics.Accuracy()\n self.val_acc = torchmetrics.Accuracy()\n\n def _get_hidden_size(self, input_size):\n self.backbone(torch.randn(1, 3, input_size, input_size))\n\n def forward(self, input_tensor):\n hidden = self.backbone(input_tensor)\n return self.classifier(hidden.squeeze())\n\n def training_step(self, batch, batch_idx):\n input_tensor, target = batch\n logits = self(input_tensor)\n loss = F.cross_entropy(logits, target)\n self.train_acc(F.softmax(logits, 1), target)\n self.log('train_acc', self.train_acc, on_epoch=True)\n self.log('train_loss', loss, on_epoch=True)\n return loss\n\n def validation_step(self, batch, batch_idx):\n input_tensor, target = batch\n logits = self(input_tensor)\n loss = F.cross_entropy(logits, target)\n self.val_acc(F.softmax(logits, 1), target)\n self.log('val_acc', self.val_acc, on_epoch=True)\n self.log('val_loss', loss, on_epoch=True)\n\n def configure_optimizers(self):\n optimizer = torch.optim.Adam(self.parameters(), lr=self.lr)\n return optimizer\n",
"step-5": "#!/usr/bin/env python3\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport pytorch_lightning as pl\nimport torchmetrics\n\nclass BaselineModule(pl.LightningModule):\n def __init__(self, input_size, num_classes=4, lr=3e-4):\n super().__init__()\n\n self.backbone = nn.Sequential( # CBR-Tiny arXiv:1902.07208\n nn.Conv2d(3, 64, 5),\n nn.BatchNorm2d(64),\n nn.ReLU(),\n nn.MaxPool2d(3, 2),\n nn.Conv2d(64, 256, 5),\n nn.BatchNorm2d(256),\n nn.ReLU(),\n nn.MaxPool2d(3, 2),\n nn.Conv2d(256, 512, 5),\n nn.BatchNorm2d(512),\n nn.ReLU(),\n nn.MaxPool2d(3, 2),\n nn.AdaptiveAvgPool2d((1, 1)),\n )\n\n hidden_size = self._get_hidden_size(input_size)\n\n self.classifier = nn.Linear(hidden_size, num_classes)\n self.lr = lr\n\n self.train_acc = torchmetrics.Accuracy()\n self.val_acc = torchmetrics.Accuracy()\n\n def _get_hidden_size(self, input_size):\n self.backbone(torch.randn(1, 3, input_size, input_size))\n\n def forward(self, input_tensor):\n hidden = self.backbone(input_tensor)\n return self.classifier(hidden.squeeze())\n\n def training_step(self, batch, batch_idx):\n input_tensor, target = batch\n\n logits = self(input_tensor)\n loss = F.cross_entropy(logits, target)\n\n self.train_acc(F.softmax(logits, 1), target)\n self.log('train_acc', self.train_acc, on_epoch=True)\n self.log('train_loss', loss, on_epoch=True)\n\n return loss\n\n def validation_step(self, batch, batch_idx):\n input_tensor, target = batch\n\n logits = self(input_tensor)\n loss = F.cross_entropy(logits, target)\n\n self.val_acc(F.softmax(logits, 1), target)\n self.log('val_acc', self.val_acc, on_epoch=True)\n self.log('val_loss', loss, on_epoch=True)\n\n\n def configure_optimizers(self):\n optimizer = torch.optim.Adam(self.parameters(), lr=self.lr)\n return optimizer\n",
"step-ids": [
3,
5,
6,
7,
9
]
}
|
[
3,
5,
6,
7,
9
] |
## Import modules
import matplotlib, sys, datetime, time
matplotlib.use('TkAgg')
from math import *
from numpy import *
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg, NavigationToolbar2TkAgg
from matplotlib.figure import Figure
from matplotlib import dates
import matplotlib.pyplot as plt
from Tkinter import *
## Load the data
data = loadtxt("data/data011c.txt", unpack = True, skiprows=1, comments = '#')
temperature = data[7]
humidity = data[6]
light = data[8]
timer = data[9]
year, month, day, hour, minute, second = data[0], data[1], data[2], data[3], data[4], data[5]
## Make empty are to append the formatted dates
date_times = []
## Format the dates to dd.mm.yyyy hh:mm:ss
for i in range(len(year)): # can be the length of any arbitrary data set
# this makes a nice long string of the "day.month.year hour:min:sec"
date_times.append(str(int(day[i])).zfill(2) + "." + str(int(month[i])).zfill(2) + "." + str(int(year[i])) +
" " + str(int(hour[i])).zfill(2) + ":" + str(int(minute[i])).zfill(2) + ":" + str(int(second[i])).zfill(2) )
## String format of the date
pattern = '%d.%m.%Y %H:%M:%S'
## Convert the list of date_times to epoch time in seconds
epoch = []
for datetimes in date_times:
epoch.append(int(time.mktime(time.strptime(datetimes, pattern))))
## Convert epoch time to list of dateformatter objects
dts = map(datetime.datetime.fromtimestamp, epoch)
fds = dates.date2num(dts)
hfmt = dates.DateFormatter('%m/%d %H:%M')
## Create interface object
master = Tk()
## Set the title and size
master.title("Room Sensor")
master.geometry("1200x600")
## Create figure to add onto interface window
f = Figure(figsize=(9,5), dpi=100,)# facecolor='black')
## Not sure what zorder does
f.zorder
## within the figure create subplot called a
a = f.add_subplot(111)
## Add figure onto interface window
dataPlot = FigureCanvasTkAgg(f, master)
dataPlot.draw()
## Turn figure into a widget
dataPlot.get_tk_widget().place(x = 240, y = 40)
## Add plot toolbar widget
toolbar = NavigationToolbar2TkAgg(dataPlot, master)
toolbar.update()
toolbar.place(x = 240, y = 560)
## Functions to switch between plots
def show_temp():
## Clear the figure
a.clear()
## Plot the temperature
## a.plot(timer,temperature, "r.--")
a.plot(fds,temperature, "r.--")
a.set_ylabel("Temperature (Degrees Celsius)", color = "r")
a.xaxis.set_major_formatter(hfmt)
a.grid(color = "r")
## a.set_ylim([20.0,30.0])
for tick in a.xaxis.get_major_ticks():
tick.label.set_fontsize(7)
tick.label.set_rotation(15)
tick.label.set_color("r")
for tick in a.yaxis.get_major_ticks():
tick.label.set_color("r")
## Reset the toolbar
toolbar.update()
f.canvas.draw()
def show_humidity():
a.clear()
a.plot(fds,humidity, "b.--")
a.set_ylabel("Humidity %", color = "b")
a.xaxis.set_major_formatter(hfmt)
a.grid(color = "blue")
for tick in a.xaxis.get_major_ticks():
tick.label.set_fontsize(7)
tick.label.set_rotation(15)
tick.label.set_color("b")
for tick in a.yaxis.get_major_ticks():
tick.label.set_color("b")
toolbar.update()
f.canvas.draw()
def show_light():
a.clear()
a.plot(fds,light, "g.--")
a.set_ylabel("Ambient Light", color = "g")
a.xaxis.set_major_formatter(hfmt)
a.grid(color = "g")
for tick in a.xaxis.get_major_ticks():
tick.label.set_fontsize(7)
tick.label.set_rotation(15)
tick.label.set_color("g")
for tick in a.yaxis.get_major_ticks():
tick.label.set_color("g")
toolbar.update()
f.canvas.draw()
## Load icon and button images
tempButton = PhotoImage(file="images/temp_button.gif")
hmdButton = PhotoImage(file="images/hmd_button.gif")
lightButton = PhotoImage(file="images/light_button.gif")
tempIcon = PhotoImage(file="images/temp_icon.gif")
hmdIcon = PhotoImage(file="images/hmd_icon.gif")
lightIcon = PhotoImage(file="images/light_icon.gif")
## Create button widgets
Button1 = Button(master, image = tempButton, command = show_temp, height = 50, width = 109)
Button2 = Button(master, image = hmdButton, command = show_humidity, height = 50, width = 109)
Button3 = Button(master, image = lightButton, command = show_light, height = 50, width = 109)
## Create labels
Label1 = Label(master, image = tempIcon, height = 50, width = 50)
Label2 = Label(master, image = hmdIcon, height = 50, width = 50)
Label3 = Label(master, image = lightIcon, height = 50, width = 50)
## Place the buttons and labels to specific location
Button1.place(x=60,y=110)
Button2.place(x=60,y=260)
Button3.place(x=60,y=410)
Label1.place(x=180, y=111)
Label2.place(x=180, y=261)
Label3.place(x=180, y=411)
## Start with the temperature graph showing
show_temp()
## Run the main interface loop
master.mainloop()
|
normal
|
{
"blob_id": "2de12085ddc73fed85dda8ce3d6908b42fdc4bcc",
"index": 3046,
"step-1": "<mask token>\n\n\ndef show_humidity():\n a.clear()\n a.plot(fds, humidity, 'b.--')\n a.set_ylabel('Humidity %', color='b')\n a.xaxis.set_major_formatter(hfmt)\n a.grid(color='blue')\n for tick in a.xaxis.get_major_ticks():\n tick.label.set_fontsize(7)\n tick.label.set_rotation(15)\n tick.label.set_color('b')\n for tick in a.yaxis.get_major_ticks():\n tick.label.set_color('b')\n toolbar.update()\n f.canvas.draw()\n\n\ndef show_light():\n a.clear()\n a.plot(fds, light, 'g.--')\n a.set_ylabel('Ambient Light', color='g')\n a.xaxis.set_major_formatter(hfmt)\n a.grid(color='g')\n for tick in a.xaxis.get_major_ticks():\n tick.label.set_fontsize(7)\n tick.label.set_rotation(15)\n tick.label.set_color('g')\n for tick in a.yaxis.get_major_ticks():\n tick.label.set_color('g')\n toolbar.update()\n f.canvas.draw()\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef show_temp():\n a.clear()\n a.plot(fds, temperature, 'r.--')\n a.set_ylabel('Temperature (Degrees Celsius)', color='r')\n a.xaxis.set_major_formatter(hfmt)\n a.grid(color='r')\n for tick in a.xaxis.get_major_ticks():\n tick.label.set_fontsize(7)\n tick.label.set_rotation(15)\n tick.label.set_color('r')\n for tick in a.yaxis.get_major_ticks():\n tick.label.set_color('r')\n toolbar.update()\n f.canvas.draw()\n\n\ndef show_humidity():\n a.clear()\n a.plot(fds, humidity, 'b.--')\n a.set_ylabel('Humidity %', color='b')\n a.xaxis.set_major_formatter(hfmt)\n a.grid(color='blue')\n for tick in a.xaxis.get_major_ticks():\n tick.label.set_fontsize(7)\n tick.label.set_rotation(15)\n tick.label.set_color('b')\n for tick in a.yaxis.get_major_ticks():\n tick.label.set_color('b')\n toolbar.update()\n f.canvas.draw()\n\n\ndef show_light():\n a.clear()\n a.plot(fds, light, 'g.--')\n a.set_ylabel('Ambient Light', color='g')\n a.xaxis.set_major_formatter(hfmt)\n a.grid(color='g')\n for tick in a.xaxis.get_major_ticks():\n tick.label.set_fontsize(7)\n tick.label.set_rotation(15)\n tick.label.set_color('g')\n for tick in a.yaxis.get_major_ticks():\n tick.label.set_color('g')\n toolbar.update()\n f.canvas.draw()\n\n\n<mask token>\n",
"step-3": "<mask token>\nmatplotlib.use('TkAgg')\n<mask token>\ndata = loadtxt('data/data011c.txt', unpack=True, skiprows=1, comments='#')\ntemperature = data[7]\nhumidity = data[6]\nlight = data[8]\ntimer = data[9]\nyear, month, day, hour, minute, second = data[0], data[1], data[2], data[3\n ], data[4], data[5]\ndate_times = []\nfor i in range(len(year)):\n date_times.append(str(int(day[i])).zfill(2) + '.' + str(int(month[i])).\n zfill(2) + '.' + str(int(year[i])) + ' ' + str(int(hour[i])).zfill(\n 2) + ':' + str(int(minute[i])).zfill(2) + ':' + str(int(second[i]))\n .zfill(2))\npattern = '%d.%m.%Y %H:%M:%S'\nepoch = []\nfor datetimes in date_times:\n epoch.append(int(time.mktime(time.strptime(datetimes, pattern))))\ndts = map(datetime.datetime.fromtimestamp, epoch)\nfds = dates.date2num(dts)\nhfmt = dates.DateFormatter('%m/%d %H:%M')\nmaster = Tk()\nmaster.title('Room Sensor')\nmaster.geometry('1200x600')\nf = Figure(figsize=(9, 5), dpi=100)\nf.zorder\na = f.add_subplot(111)\ndataPlot = FigureCanvasTkAgg(f, master)\ndataPlot.draw()\ndataPlot.get_tk_widget().place(x=240, y=40)\ntoolbar = NavigationToolbar2TkAgg(dataPlot, master)\ntoolbar.update()\ntoolbar.place(x=240, y=560)\n\n\ndef show_temp():\n a.clear()\n a.plot(fds, temperature, 'r.--')\n a.set_ylabel('Temperature (Degrees Celsius)', color='r')\n a.xaxis.set_major_formatter(hfmt)\n a.grid(color='r')\n for tick in a.xaxis.get_major_ticks():\n tick.label.set_fontsize(7)\n tick.label.set_rotation(15)\n tick.label.set_color('r')\n for tick in a.yaxis.get_major_ticks():\n tick.label.set_color('r')\n toolbar.update()\n f.canvas.draw()\n\n\ndef show_humidity():\n a.clear()\n a.plot(fds, humidity, 'b.--')\n a.set_ylabel('Humidity %', color='b')\n a.xaxis.set_major_formatter(hfmt)\n a.grid(color='blue')\n for tick in a.xaxis.get_major_ticks():\n tick.label.set_fontsize(7)\n tick.label.set_rotation(15)\n tick.label.set_color('b')\n for tick in a.yaxis.get_major_ticks():\n tick.label.set_color('b')\n toolbar.update()\n f.canvas.draw()\n\n\ndef show_light():\n a.clear()\n a.plot(fds, light, 'g.--')\n a.set_ylabel('Ambient Light', color='g')\n a.xaxis.set_major_formatter(hfmt)\n a.grid(color='g')\n for tick in a.xaxis.get_major_ticks():\n tick.label.set_fontsize(7)\n tick.label.set_rotation(15)\n tick.label.set_color('g')\n for tick in a.yaxis.get_major_ticks():\n tick.label.set_color('g')\n toolbar.update()\n f.canvas.draw()\n\n\ntempButton = PhotoImage(file='images/temp_button.gif')\nhmdButton = PhotoImage(file='images/hmd_button.gif')\nlightButton = PhotoImage(file='images/light_button.gif')\ntempIcon = PhotoImage(file='images/temp_icon.gif')\nhmdIcon = PhotoImage(file='images/hmd_icon.gif')\nlightIcon = PhotoImage(file='images/light_icon.gif')\nButton1 = Button(master, image=tempButton, command=show_temp, height=50,\n width=109)\nButton2 = Button(master, image=hmdButton, command=show_humidity, height=50,\n width=109)\nButton3 = Button(master, image=lightButton, command=show_light, height=50,\n width=109)\nLabel1 = Label(master, image=tempIcon, height=50, width=50)\nLabel2 = Label(master, image=hmdIcon, height=50, width=50)\nLabel3 = Label(master, image=lightIcon, height=50, width=50)\nButton1.place(x=60, y=110)\nButton2.place(x=60, y=260)\nButton3.place(x=60, y=410)\nLabel1.place(x=180, y=111)\nLabel2.place(x=180, y=261)\nLabel3.place(x=180, y=411)\nshow_temp()\nmaster.mainloop()\n",
"step-4": "import matplotlib, sys, datetime, time\nmatplotlib.use('TkAgg')\nfrom math import *\nfrom numpy import *\nfrom matplotlib.backends.backend_tkagg import FigureCanvasTkAgg, NavigationToolbar2TkAgg\nfrom matplotlib.figure import Figure\nfrom matplotlib import dates\nimport matplotlib.pyplot as plt\nfrom Tkinter import *\ndata = loadtxt('data/data011c.txt', unpack=True, skiprows=1, comments='#')\ntemperature = data[7]\nhumidity = data[6]\nlight = data[8]\ntimer = data[9]\nyear, month, day, hour, minute, second = data[0], data[1], data[2], data[3\n ], data[4], data[5]\ndate_times = []\nfor i in range(len(year)):\n date_times.append(str(int(day[i])).zfill(2) + '.' + str(int(month[i])).\n zfill(2) + '.' + str(int(year[i])) + ' ' + str(int(hour[i])).zfill(\n 2) + ':' + str(int(minute[i])).zfill(2) + ':' + str(int(second[i]))\n .zfill(2))\npattern = '%d.%m.%Y %H:%M:%S'\nepoch = []\nfor datetimes in date_times:\n epoch.append(int(time.mktime(time.strptime(datetimes, pattern))))\ndts = map(datetime.datetime.fromtimestamp, epoch)\nfds = dates.date2num(dts)\nhfmt = dates.DateFormatter('%m/%d %H:%M')\nmaster = Tk()\nmaster.title('Room Sensor')\nmaster.geometry('1200x600')\nf = Figure(figsize=(9, 5), dpi=100)\nf.zorder\na = f.add_subplot(111)\ndataPlot = FigureCanvasTkAgg(f, master)\ndataPlot.draw()\ndataPlot.get_tk_widget().place(x=240, y=40)\ntoolbar = NavigationToolbar2TkAgg(dataPlot, master)\ntoolbar.update()\ntoolbar.place(x=240, y=560)\n\n\ndef show_temp():\n a.clear()\n a.plot(fds, temperature, 'r.--')\n a.set_ylabel('Temperature (Degrees Celsius)', color='r')\n a.xaxis.set_major_formatter(hfmt)\n a.grid(color='r')\n for tick in a.xaxis.get_major_ticks():\n tick.label.set_fontsize(7)\n tick.label.set_rotation(15)\n tick.label.set_color('r')\n for tick in a.yaxis.get_major_ticks():\n tick.label.set_color('r')\n toolbar.update()\n f.canvas.draw()\n\n\ndef show_humidity():\n a.clear()\n a.plot(fds, humidity, 'b.--')\n a.set_ylabel('Humidity %', color='b')\n a.xaxis.set_major_formatter(hfmt)\n a.grid(color='blue')\n for tick in a.xaxis.get_major_ticks():\n tick.label.set_fontsize(7)\n tick.label.set_rotation(15)\n tick.label.set_color('b')\n for tick in a.yaxis.get_major_ticks():\n tick.label.set_color('b')\n toolbar.update()\n f.canvas.draw()\n\n\ndef show_light():\n a.clear()\n a.plot(fds, light, 'g.--')\n a.set_ylabel('Ambient Light', color='g')\n a.xaxis.set_major_formatter(hfmt)\n a.grid(color='g')\n for tick in a.xaxis.get_major_ticks():\n tick.label.set_fontsize(7)\n tick.label.set_rotation(15)\n tick.label.set_color('g')\n for tick in a.yaxis.get_major_ticks():\n tick.label.set_color('g')\n toolbar.update()\n f.canvas.draw()\n\n\ntempButton = PhotoImage(file='images/temp_button.gif')\nhmdButton = PhotoImage(file='images/hmd_button.gif')\nlightButton = PhotoImage(file='images/light_button.gif')\ntempIcon = PhotoImage(file='images/temp_icon.gif')\nhmdIcon = PhotoImage(file='images/hmd_icon.gif')\nlightIcon = PhotoImage(file='images/light_icon.gif')\nButton1 = Button(master, image=tempButton, command=show_temp, height=50,\n width=109)\nButton2 = Button(master, image=hmdButton, command=show_humidity, height=50,\n width=109)\nButton3 = Button(master, image=lightButton, command=show_light, height=50,\n width=109)\nLabel1 = Label(master, image=tempIcon, height=50, width=50)\nLabel2 = Label(master, image=hmdIcon, height=50, width=50)\nLabel3 = Label(master, image=lightIcon, height=50, width=50)\nButton1.place(x=60, y=110)\nButton2.place(x=60, y=260)\nButton3.place(x=60, 
y=410)\nLabel1.place(x=180, y=111)\nLabel2.place(x=180, y=261)\nLabel3.place(x=180, y=411)\nshow_temp()\nmaster.mainloop()\n",
"step-5": "## Import modules\nimport matplotlib, sys, datetime, time\nmatplotlib.use('TkAgg')\nfrom math import *\nfrom numpy import *\nfrom matplotlib.backends.backend_tkagg import FigureCanvasTkAgg, NavigationToolbar2TkAgg\nfrom matplotlib.figure import Figure\nfrom matplotlib import dates\nimport matplotlib.pyplot as plt\nfrom Tkinter import *\n\n## Load the data\ndata = loadtxt(\"data/data011c.txt\", unpack = True, skiprows=1, comments = '#')\ntemperature = data[7]\nhumidity = data[6]\nlight = data[8]\ntimer = data[9]\nyear, month, day, hour, minute, second = data[0], data[1], data[2], data[3], data[4], data[5]\n\n## Make empty are to append the formatted dates\ndate_times = [] \n\n## Format the dates to dd.mm.yyyy hh:mm:ss\nfor i in range(len(year)): # can be the length of any arbitrary data set\n # this makes a nice long string of the \"day.month.year hour:min:sec\"\n date_times.append(str(int(day[i])).zfill(2) + \".\" + str(int(month[i])).zfill(2) + \".\" + str(int(year[i])) +\n \" \" + str(int(hour[i])).zfill(2) + \":\" + str(int(minute[i])).zfill(2) + \":\" + str(int(second[i])).zfill(2) )\n\n## String format of the date\npattern = '%d.%m.%Y %H:%M:%S'\n\n## Convert the list of date_times to epoch time in seconds\nepoch = []\nfor datetimes in date_times:\n epoch.append(int(time.mktime(time.strptime(datetimes, pattern))))\n\n## Convert epoch time to list of dateformatter objects\ndts = map(datetime.datetime.fromtimestamp, epoch)\nfds = dates.date2num(dts)\nhfmt = dates.DateFormatter('%m/%d %H:%M')\n\n## Create interface object\nmaster = Tk()\n## Set the title and size\nmaster.title(\"Room Sensor\")\nmaster.geometry(\"1200x600\")\n\n## Create figure to add onto interface window\nf = Figure(figsize=(9,5), dpi=100,)# facecolor='black')\n## Not sure what zorder does\nf.zorder\n## within the figure create subplot called a\na = f.add_subplot(111)\n\n## Add figure onto interface window\ndataPlot = FigureCanvasTkAgg(f, master)\ndataPlot.draw()\n## Turn figure into a widget\ndataPlot.get_tk_widget().place(x = 240, y = 40)\n## Add plot toolbar widget\ntoolbar = NavigationToolbar2TkAgg(dataPlot, master)\ntoolbar.update()\ntoolbar.place(x = 240, y = 560)\n\n## Functions to switch between plots \n\ndef show_temp():\n ## Clear the figure\n a.clear()\n ## Plot the temperature\n## a.plot(timer,temperature, \"r.--\")\n a.plot(fds,temperature, \"r.--\")\n a.set_ylabel(\"Temperature (Degrees Celsius)\", color = \"r\")\n a.xaxis.set_major_formatter(hfmt)\n a.grid(color = \"r\")\n## a.set_ylim([20.0,30.0])\n for tick in a.xaxis.get_major_ticks():\n tick.label.set_fontsize(7) \n tick.label.set_rotation(15)\n tick.label.set_color(\"r\")\n for tick in a.yaxis.get_major_ticks():\n tick.label.set_color(\"r\")\n ## Reset the toolbar\n toolbar.update()\n f.canvas.draw()\n \ndef show_humidity():\n a.clear()\n a.plot(fds,humidity, \"b.--\")\n a.set_ylabel(\"Humidity %\", color = \"b\")\n a.xaxis.set_major_formatter(hfmt)\n a.grid(color = \"blue\")\n for tick in a.xaxis.get_major_ticks():\n tick.label.set_fontsize(7) \n tick.label.set_rotation(15)\n tick.label.set_color(\"b\")\n for tick in a.yaxis.get_major_ticks():\n tick.label.set_color(\"b\")\n toolbar.update()\n f.canvas.draw()\n \ndef show_light():\n a.clear()\n a.plot(fds,light, \"g.--\")\n a.set_ylabel(\"Ambient Light\", color = \"g\")\n a.xaxis.set_major_formatter(hfmt)\n a.grid(color = \"g\")\n for tick in a.xaxis.get_major_ticks():\n tick.label.set_fontsize(7) \n tick.label.set_rotation(15)\n tick.label.set_color(\"g\")\n for tick in 
a.yaxis.get_major_ticks():\n tick.label.set_color(\"g\")\n toolbar.update()\n f.canvas.draw()\n\n## Load icon and button images\ntempButton = PhotoImage(file=\"images/temp_button.gif\")\nhmdButton = PhotoImage(file=\"images/hmd_button.gif\")\nlightButton = PhotoImage(file=\"images/light_button.gif\")\ntempIcon = PhotoImage(file=\"images/temp_icon.gif\")\nhmdIcon = PhotoImage(file=\"images/hmd_icon.gif\")\nlightIcon = PhotoImage(file=\"images/light_icon.gif\")\n\n## Create button widgets\nButton1 = Button(master, image = tempButton, command = show_temp, height = 50, width = 109)\nButton2 = Button(master, image = hmdButton, command = show_humidity, height = 50, width = 109)\nButton3 = Button(master, image = lightButton, command = show_light, height = 50, width = 109)\n## Create labels\nLabel1 = Label(master, image = tempIcon, height = 50, width = 50)\nLabel2 = Label(master, image = hmdIcon, height = 50, width = 50)\nLabel3 = Label(master, image = lightIcon, height = 50, width = 50)\n## Place the buttons and labels to specific location\nButton1.place(x=60,y=110)\nButton2.place(x=60,y=260)\nButton3.place(x=60,y=410)\nLabel1.place(x=180, y=111)\nLabel2.place(x=180, y=261)\nLabel3.place(x=180, y=411)\n## Start with the temperature graph showing\nshow_temp()\n## Run the main interface loop\nmaster.mainloop()\n",
"step-ids": [
2,
3,
5,
6,
7
]
}
|
[
2,
3,
5,
6,
7
] |
import numpy as np
import matplotlib.pyplot as plt
import sys
import os
from azavg_util import plot_azav
from binormalized_cbar import MidpointNormalize
from diagnostic_reading import ReferenceState
dirname = sys.argv[1]
datadir = dirname + '/data/'
plotdir = dirname + '/plots/'
if (not os.path.isdir(plotdir)):
os.makedirs(plotdir)
ref = ReferenceState(dirname + '/reference', '')
H_rho = -1./ref.dlnrho
# Get grid info
rr,tt,cost,sint,rr_depth,ri,ro,d = np.load(datadir + 'grid_info.npy')
nr, nt = len(rr), len(tt)
H_rho_2d = H_rho.reshape((1, nr))
vr2_p,vt2_p,vp2_p,vrvp_p,vrvt_p,vtvp_p,\
vr2_m,vt2_m,vp2_m, vrvp_m, vrvt_m, vtvp_m, fplus, fminus\
= np.load(datadir + 'rs_raw.npy')
vrvp_t = vrvp_m + vrvp_p
vrvt_t = vrvt_m + vrvt_p
vtvp_t = vtvp_m + vtvp_p
vr2_t = vr2_m + vr2_p
vt2_t = vt2_m + vt2_p
vp2_t = vp2_m + vp2_p
# Total velocity
v2_p = vr2_p + vt2_p + vp2_p
v2_m = vr2_m + vt2_m + vp2_m
v2_t = vr2_t + vt2_t + vp2_t
Om = 7.8e-6
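# Convective Rossby number Ro = |v| / (2 * Om * H_rho) for the plus, minus and total flows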
ro_p = np.sqrt(v2_p)/(2*Om*H_rho_2d)
ro_m = np.sqrt(v2_m)/(2*Om*H_rho_2d)
ro_t = np.sqrt(v2_t)/(2*Om*H_rho_2d)
# Plot radial angular momentum transport
fig, ax = plt.subplots()
plot_azav(fig, ax, ro_m, rr, cost, sint,
contours=False, notfloat=False, units='')
plt.title(r'$({\rm{Ro}}_{\rm{c}})_+$',fontsize=16)
plt.tight_layout()
plt.savefig(plotdir + 'rossby_mer_p.png')
plt.show()
plt.close()
|
normal
|
{
"blob_id": "e5c30488c8c1682171c57a11a8ecedc5ccd4d851",
"index": 5607,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nif not os.path.isdir(plotdir):\n os.makedirs(plotdir)\n<mask token>\nplot_azav(fig, ax, ro_m, rr, cost, sint, contours=False, notfloat=False,\n units='')\nplt.title('$({\\\\rm{Ro}}_{\\\\rm{c}})_+$', fontsize=16)\nplt.tight_layout()\nplt.show()\nplt.savefig(plotdir + 'rossby_mer_p.png')\nplt.close()\n",
"step-3": "<mask token>\ndirname = sys.argv[1]\ndatadir = dirname + '/data/'\nplotdir = dirname + '/plots/'\nif not os.path.isdir(plotdir):\n os.makedirs(plotdir)\nref = ReferenceState(dirname + '/reference', '')\nH_rho = -1.0 / ref.dlnrho\nrr, tt, cost, sint, rr_depth, ri, ro, d = np.load(datadir + 'grid_info.npy')\nnr, nt = len(rr), len(tt)\nH_rho_2d = H_rho.reshape((1, nr))\n(vr2_p, vt2_p, vp2_p, vrvp_p, vrvt_p, vtvp_p, vr2_m, vt2_m, vp2_m, vrvp_m,\n vrvt_m, vtvp_m, fplus, fminus) = np.load(datadir + 'rs_raw.npy')\nvrvp_t = vrvp_m + vrvp_p\nvrvt_t = vrvt_m + vrvt_p\nvtvp_t = vtvp_m + vtvp_p\nvr2_t = vr2_m + vr2_p\nvt2_t = vt2_m + vt2_p\nvp2_t = vp2_m + vp2_p\nv2_p = vr2_p + vt2_p + vp2_p\nv2_m = vr2_m + vt2_p + vp2_m\nv2_t = vr2_t + vt2_p + vp2_t\nOm = 7.8e-06\nro_p = np.sqrt(v2_p) / (2 * Om * H_rho_2d)\nro_m = np.sqrt(v2_m) / (2 * Om * H_rho_2d)\nro_t = np.sqrt(v2_t) / (2 * Om * H_rho_2d)\nfig, ax = plt.subplots()\nplot_azav(fig, ax, ro_m, rr, cost, sint, contours=False, notfloat=False,\n units='')\nplt.title('$({\\\\rm{Ro}}_{\\\\rm{c}})_+$', fontsize=16)\nplt.tight_layout()\nplt.show()\nplt.savefig(plotdir + 'rossby_mer_p.png')\nplt.close()\n",
"step-4": "import numpy as np\nimport matplotlib.pyplot as plt\nimport sys\nimport os\nfrom azavg_util import plot_azav\nfrom binormalized_cbar import MidpointNormalize\nfrom diagnostic_reading import ReferenceState\ndirname = sys.argv[1]\ndatadir = dirname + '/data/'\nplotdir = dirname + '/plots/'\nif not os.path.isdir(plotdir):\n os.makedirs(plotdir)\nref = ReferenceState(dirname + '/reference', '')\nH_rho = -1.0 / ref.dlnrho\nrr, tt, cost, sint, rr_depth, ri, ro, d = np.load(datadir + 'grid_info.npy')\nnr, nt = len(rr), len(tt)\nH_rho_2d = H_rho.reshape((1, nr))\n(vr2_p, vt2_p, vp2_p, vrvp_p, vrvt_p, vtvp_p, vr2_m, vt2_m, vp2_m, vrvp_m,\n vrvt_m, vtvp_m, fplus, fminus) = np.load(datadir + 'rs_raw.npy')\nvrvp_t = vrvp_m + vrvp_p\nvrvt_t = vrvt_m + vrvt_p\nvtvp_t = vtvp_m + vtvp_p\nvr2_t = vr2_m + vr2_p\nvt2_t = vt2_m + vt2_p\nvp2_t = vp2_m + vp2_p\nv2_p = vr2_p + vt2_p + vp2_p\nv2_m = vr2_m + vt2_p + vp2_m\nv2_t = vr2_t + vt2_p + vp2_t\nOm = 7.8e-06\nro_p = np.sqrt(v2_p) / (2 * Om * H_rho_2d)\nro_m = np.sqrt(v2_m) / (2 * Om * H_rho_2d)\nro_t = np.sqrt(v2_t) / (2 * Om * H_rho_2d)\nfig, ax = plt.subplots()\nplot_azav(fig, ax, ro_m, rr, cost, sint, contours=False, notfloat=False,\n units='')\nplt.title('$({\\\\rm{Ro}}_{\\\\rm{c}})_+$', fontsize=16)\nplt.tight_layout()\nplt.show()\nplt.savefig(plotdir + 'rossby_mer_p.png')\nplt.close()\n",
"step-5": "import numpy as np\nimport matplotlib.pyplot as plt\nimport sys\nimport os\nfrom azavg_util import plot_azav\nfrom binormalized_cbar import MidpointNormalize\nfrom diagnostic_reading import ReferenceState\n\ndirname = sys.argv[1]\n\ndatadir = dirname + '/data/'\nplotdir = dirname + '/plots/'\n\nif (not os.path.isdir(plotdir)):\n os.makedirs(plotdir)\n\nref = ReferenceState(dirname + '/reference', '')\nH_rho = -1./ref.dlnrho\n\n# Get grid info\nrr,tt,cost,sint,rr_depth,ri,ro,d = np.load(datadir + 'grid_info.npy')\nnr, nt = len(rr), len(tt)\n\nH_rho_2d = H_rho.reshape((1, nr)) \n\nvr2_p,vt2_p,vp2_p,vrvp_p,vrvt_p,vtvp_p,\\\n vr2_m,vt2_m,vp2_m, vrvp_m, vrvt_m, vtvp_m, fplus, fminus\\\n = np.load(datadir + 'rs_raw.npy') \n\nvrvp_t = vrvp_m + vrvp_p\nvrvt_t = vrvt_m + vrvt_p\nvtvp_t = vtvp_m + vtvp_p\n\nvr2_t = vr2_m + vr2_p\nvt2_t = vt2_m + vt2_p\nvp2_t = vp2_m + vp2_p\n\n# Total velocity\nv2_p = vr2_p + vt2_p + vp2_p\nv2_m = vr2_m + vt2_p + vp2_m\nv2_t = vr2_t + vt2_p + vp2_t\n\nOm = 7.8e-6\nro_p = np.sqrt(v2_p)/(2*Om*H_rho_2d)\nro_m = np.sqrt(v2_m)/(2*Om*H_rho_2d)\nro_t = np.sqrt(v2_t)/(2*Om*H_rho_2d)\n\n# Plot radial angular momentum transport\n\nfig, ax = plt.subplots()\nplot_azav(fig, ax, ro_m, rr, cost, sint,\n contours=False, notfloat=False, units='')\nplt.title(r'$({\\rm{Ro}}_{\\rm{c}})_+$',fontsize=16)\nplt.tight_layout()\nplt.show()\nplt.savefig(plotdir + 'rossby_mer_p.png')\nplt.close()\n\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
# i change it for change1
# change 1.py in master
i = 1
# fix bug for boss
|
normal
|
{
"blob_id": "92f4f1c8a4e04b07ed7c05d5bb733c0b9c28bd05",
"index": 5325,
"step-1": "<mask token>\n",
"step-2": "i = 1\n",
"step-3": "# i change it for change1\n# change 1.py in master\ni = 1\n# fix bug for boss\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import division, print_function, absolute_import, unicode_literals
import os
from qtpy.QtCore import *
# from qtpy.QtGui import *
from qtpy.QtWidgets import *
from six import string_types
from ..widgets import PathParamWidget, RelPathParamWidget, FilePathWidget
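# The Type* classes below adapt property values to Qt editor widgets
# (combo boxes, check boxes, path pickers) and convert between widget state
# and the stored value.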
class TypeBase(object):
@classmethod
def create(cls, _):
"""
Create instance or return class
"""
return cls
@classmethod
def control(cls, delegate, property_item, parent):
return None
@staticmethod
def data(value):
"""
return item's data() value
"""
return value
@classmethod
def value(cls, control):
return None
@staticmethod
def icon(_):
return None
@classmethod
def height(cls):
return -1
@classmethod
def default(cls, value):
return value
@classmethod
def filter(cls, value):
return value
@classmethod
def set_link(cls, value):
pass
@classmethod
def link_value(cls, default_value, link_value):
return link_value or default_value
@classmethod
def sizeHint(cls):
return QSize(-1, -1)
@classmethod
def setup(cls, item):
pass
@classmethod
def set_value(cls, control, value):
control.setText(value)
is_persistent_editor = False
class TypeBool(TypeBase):
@classmethod
def control(cls, delegate, property_item, parent):
combo = QComboBox(parent)
combo.addItem("Yes")
combo.addItem("No")
return combo
@classmethod
def set_value(cls, control, value):
control.setCurrentIndex(0 if value else 1)
@classmethod
def value(cls, control):
return control.currentIndex() == 0
@staticmethod
def data(value):
return "Yes" if value else "No"
class CheckBox(QCheckBox):
def __init__(self, item, parent):
super(CheckBox, self).__init__(parent)
self.item = item
# noinspection PyUnresolvedReferences
self.stateChanged.connect(self.on_state_changed)
def on_state_changed(self, state):
self.item.set_value(state == Qt.Checked, force_update=True)
class TypeCheck(TypeBase):
is_persistent_editor = True
@classmethod
def control(cls, delegate, property_item, parent):
check = CheckBox(property_item, parent)
return check
@classmethod
def set_value(cls, control, value):
# type: (QCheckBox, bool) -> None
control.setCheckState(Qt.Checked if value else Qt.Unchecked)
@classmethod
def value(cls, control):
# type: (QCheckBox) -> bool
return control.isChecked()
class TypeFilePath(TypeBase):
@classmethod
def control(cls, delegate, property_item, parent):
return FilePathWidget(delegate, property_item.params, parent=parent)
@classmethod
def set_value(cls, control, value):
control.setText(value)
@classmethod
def value(cls, control):
return control.text()
@classmethod
def filter(cls, value):
return os.path.normpath(value) if value else value
@classmethod
def link_value(cls, default_value, link_value):
if default_value is None and link_value is None:
return ""
if link_value is None:
return default_value
if default_value is None:
return link_value
return os.path.join(default_value, link_value)
@classmethod
def sizeHint(cls):
return QSize(-1, 28)
class TypeDirPath(TypeBase):
@classmethod
def control(cls, delegate, property_item, parent):
return PathParamWidget(delegate, parent=parent)
@classmethod
def set_value(cls, control, value):
control.setText(value)
@classmethod
def value(cls, control):
return control.text()
@classmethod
def filter(cls, value):
return os.path.normpath(value) if value else value
@classmethod
def link_value(cls, default_value, link_value):
if default_value is None and link_value is None:
return ""
if link_value is None:
return default_value
if default_value is None:
return link_value
return os.path.join(default_value, link_value)
@classmethod
def sizeHint(cls):
return QSize(-1, 28)
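# Directory path stored relative to a configurable base directory ("relpath").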
class TypeRelDirPath(TypeDirPath):
@classmethod
def create(cls, params):
return cls(params)
def __init__(self, params):
self.relpath = params.get("relpath", ".")
def control(self, delegate, property_item, parent):
return RelPathParamWidget(delegate, relpath=self.relpath, parent=parent)
def default(self, path):
self.relpath = path or "."
return "."
def set_link(self, value):
self.relpath = value or "."
def filter(self, value):
if not value:
return "."
try:
if os.path.isabs(value):
return os.path.relpath(value, self.relpath)
else:
return value
except ValueError:
return "."
# noinspection PyArgumentList
class TypeChoice(TypeBase):
@classmethod
def create(cls, params):
return cls(params.get("choices", []))
def __init__(self, choices):
self.selects = []
self._data_dict = {}
self.setup_choices(choices)
def setup_choices(self, choices):
self.selects = []
for item in choices:
if isinstance(item, string_types):
item = {
"text": item,
"value": item,
}
self.selects.append(item)
self._data_dict = {item["value"]: item for item in self.selects}
def control(self, delegate, property_item, parent):
combo = QComboBox(parent)
self.setup_combo_box(combo)
return combo
def setup_combo_box(self, combo):
for i, item in enumerate(self.selects):
combo.addItem(item["text"])
combo.setItemData(i, item["value"])
if "icon" in item:
combo.setItemIcon(i, item["icon"])
# noinspection PyMethodOverriding
@staticmethod
def set_value(combo, value):
# type: (QComboBox, str) -> None
index = combo.findData(value)
combo.setCurrentIndex(index)
@classmethod
def value(cls, combo):
# type: (QComboBox, str) -> None
return combo.itemData(combo.currentIndex())
# noinspection PyMethodOverriding
def data(self, value):
return self._data_dict[value]["text"] if value in self._data_dict else None
# noinspection PyMethodOverriding
def icon(self, value):
try:
return self._data_dict[value]["icon"] if value in self._data_dict else None
except KeyError:
return None
|
normal
|
{
"blob_id": "ee91e8c9dcb940882733b2d23b74a76d0392f4fe",
"index": 2126,
"step-1": "<mask token>\n\n\nclass TypeDirPath(TypeBase):\n\n @classmethod\n def control(cls, delegate, property_item, parent):\n return PathParamWidget(delegate, parent=parent)\n <mask token>\n\n @classmethod\n def value(cls, control):\n return control.text()\n <mask token>\n\n @classmethod\n def link_value(cls, default_value, link_value):\n if default_value is None and link_value is None:\n return ''\n if link_value is None:\n return default_value\n if default_value is None:\n return link_value\n return os.path.join(default_value, link_value)\n <mask token>\n\n\nclass TypeRelDirPath(TypeDirPath):\n\n @classmethod\n def create(cls, params):\n return cls(params)\n\n def __init__(self, params):\n self.relpath = params.get('relpath', '.')\n\n def control(self, delegate, property_item, parent):\n return RelPathParamWidget(delegate, relpath=self.relpath, parent=parent\n )\n\n def default(self, path):\n self.relpath = path or '.'\n return '.'\n\n def set_link(self, value):\n self.relpath = value or '.'\n\n def filter(self, value):\n if not value:\n return '.'\n try:\n if os.path.isabs(value):\n return os.path.relpath(value, self.relpath)\n else:\n return value\n except ValueError:\n return '.'\n\n\nclass TypeChoice(TypeBase):\n\n @classmethod\n def create(cls, params):\n return cls(params.get('choices', []))\n\n def __init__(self, choices):\n self.selects = []\n self._data_dict = {}\n self.setup_choices(choices)\n\n def setup_choices(self, choices):\n self.selects = []\n for item in choices:\n if isinstance(item, string_types):\n item = {'text': item, 'value': item}\n self.selects.append(item)\n self._data_dict = {item['value']: item for item in self.selects}\n\n def control(self, delegate, property_item, parent):\n combo = QComboBox(parent)\n self.setup_combo_box(combo)\n return combo\n\n def setup_combo_box(self, combo):\n for i, item in enumerate(self.selects):\n combo.addItem(item['text'])\n combo.setItemData(i, item['value'])\n if 'icon' in item:\n combo.setItemIcon(i, item['icon'])\n\n @staticmethod\n def set_value(combo, value):\n index = combo.findData(value)\n combo.setCurrentIndex(index)\n\n @classmethod\n def value(cls, combo):\n return combo.itemData(combo.currentIndex())\n\n def data(self, value):\n return self._data_dict[value]['text'\n ] if value in self._data_dict else None\n\n def icon(self, value):\n try:\n return self._data_dict[value]['icon'\n ] if value in self._data_dict else None\n except KeyError:\n return None\n",
"step-2": "<mask token>\n\n\nclass TypeBool(TypeBase):\n\n @classmethod\n def control(cls, delegate, property_item, parent):\n combo = QComboBox(parent)\n combo.addItem('Yes')\n combo.addItem('No')\n return combo\n <mask token>\n <mask token>\n <mask token>\n\n\nclass CheckBox(QCheckBox):\n\n def __init__(self, item, parent):\n super(CheckBox, self).__init__(parent)\n self.item = item\n self.stateChanged.connect(self.on_state_changed)\n\n def on_state_changed(self, state):\n self.item.set_value(state == Qt.Checked, force_update=True)\n\n\nclass TypeCheck(TypeBase):\n is_persistent_editor = True\n\n @classmethod\n def control(cls, delegate, property_item, parent):\n check = CheckBox(property_item, parent)\n return check\n\n @classmethod\n def set_value(cls, control, value):\n control.setCheckState(Qt.Checked if value else Qt.Unchecked)\n\n @classmethod\n def value(cls, control):\n return control.isChecked()\n\n\nclass TypeFilePath(TypeBase):\n\n @classmethod\n def control(cls, delegate, property_item, parent):\n return FilePathWidget(delegate, property_item.params, parent=parent)\n\n @classmethod\n def set_value(cls, control, value):\n control.setText(value)\n\n @classmethod\n def value(cls, control):\n return control.text()\n\n @classmethod\n def filter(cls, value):\n return os.path.normpath(value) if value else value\n\n @classmethod\n def link_value(cls, default_value, link_value):\n if default_value is None and link_value is None:\n return ''\n if link_value is None:\n return default_value\n if default_value is None:\n return link_value\n return os.path.join(default_value, link_value)\n\n @classmethod\n def sizeHint(cls):\n return QSize(-1, 28)\n\n\nclass TypeDirPath(TypeBase):\n\n @classmethod\n def control(cls, delegate, property_item, parent):\n return PathParamWidget(delegate, parent=parent)\n\n @classmethod\n def set_value(cls, control, value):\n control.setText(value)\n\n @classmethod\n def value(cls, control):\n return control.text()\n\n @classmethod\n def filter(cls, value):\n return os.path.normpath(value) if value else value\n\n @classmethod\n def link_value(cls, default_value, link_value):\n if default_value is None and link_value is None:\n return ''\n if link_value is None:\n return default_value\n if default_value is None:\n return link_value\n return os.path.join(default_value, link_value)\n\n @classmethod\n def sizeHint(cls):\n return QSize(-1, 28)\n\n\nclass TypeRelDirPath(TypeDirPath):\n\n @classmethod\n def create(cls, params):\n return cls(params)\n\n def __init__(self, params):\n self.relpath = params.get('relpath', '.')\n\n def control(self, delegate, property_item, parent):\n return RelPathParamWidget(delegate, relpath=self.relpath, parent=parent\n )\n\n def default(self, path):\n self.relpath = path or '.'\n return '.'\n\n def set_link(self, value):\n self.relpath = value or '.'\n\n def filter(self, value):\n if not value:\n return '.'\n try:\n if os.path.isabs(value):\n return os.path.relpath(value, self.relpath)\n else:\n return value\n except ValueError:\n return '.'\n\n\nclass TypeChoice(TypeBase):\n\n @classmethod\n def create(cls, params):\n return cls(params.get('choices', []))\n\n def __init__(self, choices):\n self.selects = []\n self._data_dict = {}\n self.setup_choices(choices)\n\n def setup_choices(self, choices):\n self.selects = []\n for item in choices:\n if isinstance(item, string_types):\n item = {'text': item, 'value': item}\n self.selects.append(item)\n self._data_dict = {item['value']: item for item in self.selects}\n\n def control(self, 
delegate, property_item, parent):\n combo = QComboBox(parent)\n self.setup_combo_box(combo)\n return combo\n\n def setup_combo_box(self, combo):\n for i, item in enumerate(self.selects):\n combo.addItem(item['text'])\n combo.setItemData(i, item['value'])\n if 'icon' in item:\n combo.setItemIcon(i, item['icon'])\n\n @staticmethod\n def set_value(combo, value):\n index = combo.findData(value)\n combo.setCurrentIndex(index)\n\n @classmethod\n def value(cls, combo):\n return combo.itemData(combo.currentIndex())\n\n def data(self, value):\n return self._data_dict[value]['text'\n ] if value in self._data_dict else None\n\n def icon(self, value):\n try:\n return self._data_dict[value]['icon'\n ] if value in self._data_dict else None\n except KeyError:\n return None\n",
"step-3": "<mask token>\n\n\nclass TypeBool(TypeBase):\n\n @classmethod\n def control(cls, delegate, property_item, parent):\n combo = QComboBox(parent)\n combo.addItem('Yes')\n combo.addItem('No')\n return combo\n <mask token>\n\n @classmethod\n def value(cls, control):\n return control.currentIndex() == 0\n\n @staticmethod\n def data(value):\n return 'Yes' if value else 'No'\n\n\nclass CheckBox(QCheckBox):\n\n def __init__(self, item, parent):\n super(CheckBox, self).__init__(parent)\n self.item = item\n self.stateChanged.connect(self.on_state_changed)\n\n def on_state_changed(self, state):\n self.item.set_value(state == Qt.Checked, force_update=True)\n\n\nclass TypeCheck(TypeBase):\n is_persistent_editor = True\n\n @classmethod\n def control(cls, delegate, property_item, parent):\n check = CheckBox(property_item, parent)\n return check\n\n @classmethod\n def set_value(cls, control, value):\n control.setCheckState(Qt.Checked if value else Qt.Unchecked)\n\n @classmethod\n def value(cls, control):\n return control.isChecked()\n\n\nclass TypeFilePath(TypeBase):\n\n @classmethod\n def control(cls, delegate, property_item, parent):\n return FilePathWidget(delegate, property_item.params, parent=parent)\n\n @classmethod\n def set_value(cls, control, value):\n control.setText(value)\n\n @classmethod\n def value(cls, control):\n return control.text()\n\n @classmethod\n def filter(cls, value):\n return os.path.normpath(value) if value else value\n\n @classmethod\n def link_value(cls, default_value, link_value):\n if default_value is None and link_value is None:\n return ''\n if link_value is None:\n return default_value\n if default_value is None:\n return link_value\n return os.path.join(default_value, link_value)\n\n @classmethod\n def sizeHint(cls):\n return QSize(-1, 28)\n\n\nclass TypeDirPath(TypeBase):\n\n @classmethod\n def control(cls, delegate, property_item, parent):\n return PathParamWidget(delegate, parent=parent)\n\n @classmethod\n def set_value(cls, control, value):\n control.setText(value)\n\n @classmethod\n def value(cls, control):\n return control.text()\n\n @classmethod\n def filter(cls, value):\n return os.path.normpath(value) if value else value\n\n @classmethod\n def link_value(cls, default_value, link_value):\n if default_value is None and link_value is None:\n return ''\n if link_value is None:\n return default_value\n if default_value is None:\n return link_value\n return os.path.join(default_value, link_value)\n\n @classmethod\n def sizeHint(cls):\n return QSize(-1, 28)\n\n\nclass TypeRelDirPath(TypeDirPath):\n\n @classmethod\n def create(cls, params):\n return cls(params)\n\n def __init__(self, params):\n self.relpath = params.get('relpath', '.')\n\n def control(self, delegate, property_item, parent):\n return RelPathParamWidget(delegate, relpath=self.relpath, parent=parent\n )\n\n def default(self, path):\n self.relpath = path or '.'\n return '.'\n\n def set_link(self, value):\n self.relpath = value or '.'\n\n def filter(self, value):\n if not value:\n return '.'\n try:\n if os.path.isabs(value):\n return os.path.relpath(value, self.relpath)\n else:\n return value\n except ValueError:\n return '.'\n\n\nclass TypeChoice(TypeBase):\n\n @classmethod\n def create(cls, params):\n return cls(params.get('choices', []))\n\n def __init__(self, choices):\n self.selects = []\n self._data_dict = {}\n self.setup_choices(choices)\n\n def setup_choices(self, choices):\n self.selects = []\n for item in choices:\n if isinstance(item, string_types):\n item = {'text': item, 'value': 
item}\n self.selects.append(item)\n self._data_dict = {item['value']: item for item in self.selects}\n\n def control(self, delegate, property_item, parent):\n combo = QComboBox(parent)\n self.setup_combo_box(combo)\n return combo\n\n def setup_combo_box(self, combo):\n for i, item in enumerate(self.selects):\n combo.addItem(item['text'])\n combo.setItemData(i, item['value'])\n if 'icon' in item:\n combo.setItemIcon(i, item['icon'])\n\n @staticmethod\n def set_value(combo, value):\n index = combo.findData(value)\n combo.setCurrentIndex(index)\n\n @classmethod\n def value(cls, combo):\n return combo.itemData(combo.currentIndex())\n\n def data(self, value):\n return self._data_dict[value]['text'\n ] if value in self._data_dict else None\n\n def icon(self, value):\n try:\n return self._data_dict[value]['icon'\n ] if value in self._data_dict else None\n except KeyError:\n return None\n",
"step-4": "<mask token>\n\n\nclass TypeBase(object):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n\nclass TypeBool(TypeBase):\n\n @classmethod\n def control(cls, delegate, property_item, parent):\n combo = QComboBox(parent)\n combo.addItem('Yes')\n combo.addItem('No')\n return combo\n\n @classmethod\n def set_value(cls, control, value):\n control.setCurrentIndex(0 if value else 1)\n\n @classmethod\n def value(cls, control):\n return control.currentIndex() == 0\n\n @staticmethod\n def data(value):\n return 'Yes' if value else 'No'\n\n\nclass CheckBox(QCheckBox):\n\n def __init__(self, item, parent):\n super(CheckBox, self).__init__(parent)\n self.item = item\n self.stateChanged.connect(self.on_state_changed)\n\n def on_state_changed(self, state):\n self.item.set_value(state == Qt.Checked, force_update=True)\n\n\nclass TypeCheck(TypeBase):\n is_persistent_editor = True\n\n @classmethod\n def control(cls, delegate, property_item, parent):\n check = CheckBox(property_item, parent)\n return check\n\n @classmethod\n def set_value(cls, control, value):\n control.setCheckState(Qt.Checked if value else Qt.Unchecked)\n\n @classmethod\n def value(cls, control):\n return control.isChecked()\n\n\nclass TypeFilePath(TypeBase):\n\n @classmethod\n def control(cls, delegate, property_item, parent):\n return FilePathWidget(delegate, property_item.params, parent=parent)\n\n @classmethod\n def set_value(cls, control, value):\n control.setText(value)\n\n @classmethod\n def value(cls, control):\n return control.text()\n\n @classmethod\n def filter(cls, value):\n return os.path.normpath(value) if value else value\n\n @classmethod\n def link_value(cls, default_value, link_value):\n if default_value is None and link_value is None:\n return ''\n if link_value is None:\n return default_value\n if default_value is None:\n return link_value\n return os.path.join(default_value, link_value)\n\n @classmethod\n def sizeHint(cls):\n return QSize(-1, 28)\n\n\nclass TypeDirPath(TypeBase):\n\n @classmethod\n def control(cls, delegate, property_item, parent):\n return PathParamWidget(delegate, parent=parent)\n\n @classmethod\n def set_value(cls, control, value):\n control.setText(value)\n\n @classmethod\n def value(cls, control):\n return control.text()\n\n @classmethod\n def filter(cls, value):\n return os.path.normpath(value) if value else value\n\n @classmethod\n def link_value(cls, default_value, link_value):\n if default_value is None and link_value is None:\n return ''\n if link_value is None:\n return default_value\n if default_value is None:\n return link_value\n return os.path.join(default_value, link_value)\n\n @classmethod\n def sizeHint(cls):\n return QSize(-1, 28)\n\n\nclass TypeRelDirPath(TypeDirPath):\n\n @classmethod\n def create(cls, params):\n return cls(params)\n\n def __init__(self, params):\n self.relpath = params.get('relpath', '.')\n\n def control(self, delegate, property_item, parent):\n return RelPathParamWidget(delegate, relpath=self.relpath, parent=parent\n )\n\n def default(self, path):\n self.relpath = path or '.'\n return '.'\n\n def set_link(self, value):\n self.relpath = value or '.'\n\n def filter(self, value):\n if not value:\n return '.'\n try:\n if os.path.isabs(value):\n return os.path.relpath(value, self.relpath)\n else:\n return value\n except ValueError:\n return '.'\n\n\nclass TypeChoice(TypeBase):\n\n @classmethod\n def 
create(cls, params):\n return cls(params.get('choices', []))\n\n def __init__(self, choices):\n self.selects = []\n self._data_dict = {}\n self.setup_choices(choices)\n\n def setup_choices(self, choices):\n self.selects = []\n for item in choices:\n if isinstance(item, string_types):\n item = {'text': item, 'value': item}\n self.selects.append(item)\n self._data_dict = {item['value']: item for item in self.selects}\n\n def control(self, delegate, property_item, parent):\n combo = QComboBox(parent)\n self.setup_combo_box(combo)\n return combo\n\n def setup_combo_box(self, combo):\n for i, item in enumerate(self.selects):\n combo.addItem(item['text'])\n combo.setItemData(i, item['value'])\n if 'icon' in item:\n combo.setItemIcon(i, item['icon'])\n\n @staticmethod\n def set_value(combo, value):\n index = combo.findData(value)\n combo.setCurrentIndex(index)\n\n @classmethod\n def value(cls, combo):\n return combo.itemData(combo.currentIndex())\n\n def data(self, value):\n return self._data_dict[value]['text'\n ] if value in self._data_dict else None\n\n def icon(self, value):\n try:\n return self._data_dict[value]['icon'\n ] if value in self._data_dict else None\n except KeyError:\n return None\n",
"step-5": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nfrom __future__ import division, print_function, absolute_import, unicode_literals\n\nimport os\n\nfrom qtpy.QtCore import *\n# from qtpy.QtGui import *\nfrom qtpy.QtWidgets import *\nfrom six import string_types\n\nfrom ..widgets import PathParamWidget, RelPathParamWidget, FilePathWidget\n\n\nclass TypeBase(object):\n @classmethod\n def create(cls, _):\n \"\"\"\n Create instance or return class\n \"\"\"\n return cls\n\n @classmethod\n def control(cls, delegate, property_item, parent):\n return None\n\n @staticmethod\n def data(value):\n \"\"\"\n return item's data() value\n \"\"\"\n return value\n\n @classmethod\n def value(cls, control):\n return None\n\n @staticmethod\n def icon(_):\n return None\n\n @classmethod\n def height(cls):\n return -1\n\n @classmethod\n def default(cls, value):\n return value\n\n @classmethod\n def filter(cls, value):\n return value\n\n @classmethod\n def set_link(cls, value):\n pass\n\n @classmethod\n def link_value(cls, default_value, link_value):\n return link_value or default_value\n\n @classmethod\n def sizeHint(cls):\n return QSize(-1, -1)\n\n @classmethod\n def setup(cls, item):\n pass\n\n @classmethod\n def set_value(cls, control, value):\n control.setText(value)\n\n is_persistent_editor = False\n\n\nclass TypeBool(TypeBase):\n @classmethod\n def control(cls, delegate, property_item, parent):\n combo = QComboBox(parent)\n combo.addItem(\"Yes\")\n combo.addItem(\"No\")\n return combo\n\n @classmethod\n def set_value(cls, control, value):\n control.setCurrentIndex(0 if value else 1)\n\n @classmethod\n def value(cls, control):\n return control.currentIndex() == 0\n\n @staticmethod\n def data(value):\n return \"Yes\" if value else \"No\"\n\n\nclass CheckBox(QCheckBox):\n def __init__(self, item, parent):\n super(CheckBox, self).__init__(parent)\n self.item = item\n # noinspection PyUnresolvedReferences\n self.stateChanged.connect(self.on_state_changed)\n\n def on_state_changed(self, state):\n self.item.set_value(state == Qt.Checked, force_update=True)\n\n\nclass TypeCheck(TypeBase):\n is_persistent_editor = True\n\n @classmethod\n def control(cls, delegate, property_item, parent):\n check = CheckBox(property_item, parent)\n return check\n\n @classmethod\n def set_value(cls, control, value):\n # type: (QCheckBox, bool) -> None\n control.setCheckState(Qt.Checked if value else Qt.Unchecked)\n\n @classmethod\n def value(cls, control):\n # type: (QCheckBox) -> bool\n return control.isChecked()\n\n\nclass TypeFilePath(TypeBase):\n @classmethod\n def control(cls, delegate, property_item, parent):\n return FilePathWidget(delegate, property_item.params, parent=parent)\n\n @classmethod\n def set_value(cls, control, value):\n control.setText(value)\n\n @classmethod\n def value(cls, control):\n return control.text()\n\n @classmethod\n def filter(cls, value):\n return os.path.normpath(value) if value else value\n\n @classmethod\n def link_value(cls, default_value, link_value):\n if default_value is None and link_value is None:\n return \"\"\n if link_value is None:\n return default_value\n if default_value is None:\n return link_value\n return os.path.join(default_value, link_value)\n\n @classmethod\n def sizeHint(cls):\n return QSize(-1, 28)\n\n\nclass TypeDirPath(TypeBase):\n @classmethod\n def control(cls, delegate, property_item, parent):\n return PathParamWidget(delegate, parent=parent)\n\n @classmethod\n def set_value(cls, control, value):\n control.setText(value)\n\n @classmethod\n def value(cls, 
control):\n return control.text()\n\n @classmethod\n def filter(cls, value):\n return os.path.normpath(value) if value else value\n\n @classmethod\n def link_value(cls, default_value, link_value):\n if default_value is None and link_value is None:\n return \"\"\n if link_value is None:\n return default_value\n if default_value is None:\n return link_value\n return os.path.join(default_value, link_value)\n\n @classmethod\n def sizeHint(cls):\n return QSize(-1, 28)\n\n\nclass TypeRelDirPath(TypeDirPath):\n @classmethod\n def create(cls, params):\n return cls(params)\n\n def __init__(self, params):\n self.relpath = params.get(\"relpath\", \".\")\n\n def control(self, delegate, property_item, parent):\n return RelPathParamWidget(delegate, relpath=self.relpath, parent=parent)\n\n def default(self, path):\n self.relpath = path or \".\"\n return \".\"\n\n def set_link(self, value):\n self.relpath = value or \".\"\n\n def filter(self, value):\n if not value:\n return \".\"\n try:\n if os.path.isabs(value):\n return os.path.relpath(value, self.relpath)\n else:\n return value\n except ValueError:\n return \".\"\n\n\n# noinspection PyArgumentList\nclass TypeChoice(TypeBase):\n @classmethod\n def create(cls, params):\n return cls(params.get(\"choices\", []))\n\n def __init__(self, choices):\n self.selects = []\n self._data_dict = {}\n self.setup_choices(choices)\n\n def setup_choices(self, choices):\n self.selects = []\n\n for item in choices:\n if isinstance(item, string_types):\n item = {\n \"text\": item,\n \"value\": item,\n }\n self.selects.append(item)\n self._data_dict = {item[\"value\"]: item for item in self.selects}\n\n def control(self, delegate, property_item, parent):\n combo = QComboBox(parent)\n self.setup_combo_box(combo)\n return combo\n\n def setup_combo_box(self, combo):\n for i, item in enumerate(self.selects):\n combo.addItem(item[\"text\"])\n combo.setItemData(i, item[\"value\"])\n if \"icon\" in item:\n combo.setItemIcon(i, item[\"icon\"])\n\n # noinspection PyMethodOverriding\n @staticmethod\n def set_value(combo, value):\n # type: (QComboBox, str) -> None\n index = combo.findData(value)\n combo.setCurrentIndex(index)\n\n @classmethod\n def value(cls, combo):\n # type: (QComboBox, str) -> None\n return combo.itemData(combo.currentIndex())\n\n # noinspection PyMethodOverriding\n def data(self, value):\n return self._data_dict[value][\"text\"] if value in self._data_dict else None\n\n # noinspection PyMethodOverriding\n def icon(self, value):\n try:\n return self._data_dict[value][\"icon\"] if value in self._data_dict else None\n except KeyError:\n return None\n",
"step-ids": [
21,
41,
43,
45,
61
]
}
|
[
21,
41,
43,
45,
61
] |
import time
import optparse
from scapy.all import *
from IPy import IP as IPTEST
ttlValues = {}
THRESH = 5
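# Compare the TTL of each sniffed packet against the TTL obtained by actively
# probing the claimed source IP; a difference of more than THRESH hops
# suggests the source address may be spoofed.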
def checkTTL(ipsrc,ttl):
if IPTEST(ipsrc).iptype() == 'PRIVATE':
return
    if ipsrc not in ttlValues:
        pkt = sr1(IP(dst=ipsrc) / ICMP(), retry=0, timeout=1, verbose=0)
        ttlValues[ipsrc] = pkt.ttl
    if abs(int(ttl) - int(ttlValues[ipsrc])) > THRESH:
        print '\n[!] Detected Possible Spoofed Packet From: ' + ipsrc
        print '[!] TTL: ' + ttl + ', Actual TTL: ' + str(ttlValues[ipsrc])
def testTTL(pkt):
try:
if pkt.haslayer(IP):
ipsrc = pkt.getlayer(IP).src
ttl = str(pkt.ttl)
checkTTL(ipsrc,ttl)
except:
pass
def main():
    global THRESH
    parser = optparse.OptionParser('usage: %prog -i <interface> -t <thresh>')
parser.add_option('-i',dest='iface',type='string',help='specify network interface')
parser.add_option('-t',dest='thresh',type='int',help='specify threshold count')
(options,args) = parser.parse_args()
if options.iface == None:
conf.iface = 'eth0'
else:
conf.iface = options.iface
if options.thresh != None:
THRESH = options.thresh
else:
THRESH = 5
sniff(prn=testTTL,store=0)
if __name__ == '__main__':
main()
|
normal
|
{
"blob_id": "7081211336793bfde60b5c922f6ab9461a475949",
"index": 1616,
"step-1": "import time\r\nimport optparse\r\nfrom IPy import IP as IPTEST\r\nttlValues = {}\r\nTHRESH = 5\r\ndef checkTTL(ipsrc,ttl):\r\n if IPTEST(ipsrc).iptype() == 'PRIVATE':\r\n return\r\n if not ttlValues.has_key(ipsrc):\r\n pkt = srl(IP(dst=ipsrc) / TCMP(),retry=0,timeout=0,verbose=0)\r\n ttlValues[ipsrc] = pkt.ttl\r\n if abs(int(ttl) - int(ttlValues[ipsrc])) > THRESH:\r\n print '\\n[!] Detected Possible Spoofed Packer From:'+ipsrc\r\n print '[!] TTL:'+ttl+',Actual TTL:'+str(ttlVaules[ipsrc])\r\ndef testTTL(pkt):\r\n try:\r\n if pkt.haslayer(IP):\r\n ipsrc = pkt.getlayer(IP).src\r\n ttl = str(pkt.ttl)\r\n checkTTL(ipsrc,ttl)\r\n except:\r\n pass\r\ndef main():\r\n parser = optparse.OptionParser(\"usage%prog\"+\"-i<interface> -t<thresh>\")\r\n parser.add_option('-i',dest='iface',type='string',help='specify network interface')\r\n parser.add_option('-t',dest='thresh',type='int',help='specify threshold count')\r\n (options,args) = parser.parse_args()\r\n if options.iface == None:\r\n conf.iface = 'eth0'\r\n else:\r\n conf.iface = options.iface\r\n if options.thresh != None:\r\n THRESH = options.thresh\r\n else:\r\n THRESH = 5\r\n sniff(prn=testTTL,store=0)\r\nif __name__ == '__main__':\r\n main()",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
import pygame
from Actor import Actor
import PlayerInput
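# Actor that shifts its position by one unit per frame for each arrow key held down.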
class TestActor(Actor):
def __init__(self):
super(TestActor, self).__init__()
def act(self):
self.key_commands()
def key_commands(self):
if PlayerInput.is_key_down(pygame.K_LEFT):
self.set_location(self.x - 1, self.y)
if PlayerInput.is_key_down(pygame.K_RIGHT):
self.set_location(self.x + 1, self.y)
if PlayerInput.is_key_down(pygame.K_UP):
self.set_location(self.x, self.y - 1)
if PlayerInput.is_key_down(pygame.K_DOWN):
self.set_location(self.x, self.y + 1)
|
normal
|
{
"blob_id": "9cb11c2bf032aa16abd3463ecdb8997addedc912",
"index": 1570,
"step-1": "<mask token>\n\n\nclass TestActor(Actor):\n <mask token>\n <mask token>\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass TestActor(Actor):\n <mask token>\n\n def act(self):\n self.key_commands()\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass TestActor(Actor):\n\n def __init__(self):\n super(TestActor, self).__init__()\n\n def act(self):\n self.key_commands()\n <mask token>\n",
"step-4": "<mask token>\n\n\nclass TestActor(Actor):\n\n def __init__(self):\n super(TestActor, self).__init__()\n\n def act(self):\n self.key_commands()\n\n def key_commands(self):\n if PlayerInput.is_key_down(pygame.K_LEFT):\n self.set_location(self.x - 1, self.y)\n if PlayerInput.is_key_down(pygame.K_RIGHT):\n self.set_location(self.x + 1, self.y)\n if PlayerInput.is_key_down(pygame.K_UP):\n self.set_location(self.x, self.y - 1)\n if PlayerInput.is_key_down(pygame.K_DOWN):\n self.set_location(self.x, self.y + 1)\n",
"step-5": "import pygame\nfrom Actor import Actor\nimport PlayerInput\n\n\nclass TestActor(Actor):\n\n def __init__(self):\n super(TestActor, self).__init__()\n\n def act(self):\n self.key_commands()\n\n def key_commands(self):\n if PlayerInput.is_key_down(pygame.K_LEFT):\n self.set_location(self.x - 1, self.y)\n if PlayerInput.is_key_down(pygame.K_RIGHT):\n self.set_location(self.x + 1, self.y)\n if PlayerInput.is_key_down(pygame.K_UP):\n self.set_location(self.x, self.y - 1)\n if PlayerInput.is_key_down(pygame.K_DOWN):\n self.set_location(self.x, self.y + 1)\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
'''
Created on Jul 10, 2018
@author: daniel
'''
#from multiprocessing import Process, Manager
#from keras.utils import np_utils
import sys
import os
from keras.utils import np_utils
sys.path.append(os.path.join(os.path.dirname(__file__), '..'))
from DataHandlers.SegNetDataHandler import SegNetDataHandler
import numpy as np
import matplotlib.pyplot as plt
from keras.models import load_model
from Mylayers import MaxPoolingWithArgmax2D, MaxUnpooling2D
import math
from CustomLosses import dice_coef, dice_coef_multilabel, dice_coef_loss, combinedDiceAndChamfer, dice_coef_multilabel_loss, combinedHausdorffAndDice
from dipy.segment.mask import clean_cc_mask
DATA_DIR = os.path.abspath("../")
sys.path.append(DATA_DIR)
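# Dice similarity coefficient between two binary masks: 2*|A∩B| / (|A|+|B|)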
def computeDice(im1, im2):
im1 = np.asarray(im1).astype(np.bool)
im2 = np.asarray(im2).astype(np.bool)
if im1.shape != im2.shape:
raise ValueError("Shape mismatch: im1 and im2 must have the same shape.")
intersection = np.logical_and(im1, im2)
dice = 2. * intersection.sum() / (im1.sum() + im2.sum())
if math.isnan(dice):
return 0
return dice
def main():
num_testing_patients = 4
n_labels = 1
normalize = True
modes = ["flair"]
dataHandler = SegNetDataHandler("Data/BRATS_2018/HGG_Testing",
num_patients = num_testing_patients,
modes = modes)
dataHandler.loadData()
dataHandler.preprocessForNetwork()
x_test = np.array(dataHandler.X)
x_seg_test = dataHandler.labels
dataHandler.clear()
segnet = load_model("Models/segnet_2018-10-28-14:37/model.h5", custom_objects={'MaxPoolingWithArgmax2D': MaxPoolingWithArgmax2D,
'MaxUnpooling2D':MaxUnpooling2D,
'combinedDiceAndChamfer':combinedDiceAndChamfer,
'combinedHausdorffAndDice': combinedHausdorffAndDice,
'dice_coef':dice_coef,
'dice_coef_loss':dice_coef_loss,
'dice_coef_multilabel': dice_coef_multilabel,
'dice_coef_multilabel_loss' : dice_coef_multilabel_loss})
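    # Standardize the test volumes (z-score); note the statistics come from the test set itself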
if normalize:
mu = np.mean(x_test)
sigma = np.std(x_test)
x_test -= mu
x_test /= sigma
decoded_imgs = segnet.predict(x_test)
if n_labels > 1:
#x_seg_test = np_utils.to_categorical(x_seg_test)
#x_seg_test = np.argmax(x_seg_test, axis=3)
decoded_imgs = [np.argmax(x, axis = 1) for x in decoded_imgs]
else:
for x in x_seg_test:
x[x > 0.5] = 1
x[x < 0.5] = 0
for x in decoded_imgs:
x[x > 0.5] = 1
x[x < 0.5] = 0
decoded_imgs = [x.reshape(dataHandler.W, dataHandler.W) for x in decoded_imgs]
N = len(decoded_imgs)
avg_dice = 0
for i in range(N):
foo = decoded_imgs[i].reshape(dataHandler.W, dataHandler.W)
dice = computeDice(x_seg_test[i], foo)
avg_dice = avg_dice + dice
print(str(avg_dice/N))
for i in range(N):
fig = plt.figure()
plt.gray();
fig.add_subplot(1,3,1)
plt.imshow(x_test[i,:,:,0])
plt.axis('off')
plt.title('Original')
fig.add_subplot(1,3,2)
plt.imshow(x_seg_test[i])
plt.axis('off')
plt.title('GT Segment')
fig.add_subplot(1,3,3)
plt.imshow(decoded_imgs[i])
plt.axis('off')
plt.title('Predicted Segment')
plt.show()
if __name__ == "__main__":
main()
|
normal
|
{
"blob_id": "cb03fcf9c9cb61b3546865fe40cc411745e1fc94",
"index": 6872,
"step-1": "<mask token>\n\n\ndef computeDice(im1, im2):\n im1 = np.asarray(im1).astype(np.bool)\n im2 = np.asarray(im2).astype(np.bool)\n if im1.shape != im2.shape:\n raise ValueError(\n 'Shape mismatch: im1 and im2 must have the same shape.')\n intersection = np.logical_and(im1, im2)\n dice = 2.0 * intersection.sum() / (im1.sum() + im2.sum())\n if math.isnan(dice):\n return 0\n return dice\n\n\ndef main():\n num_testing_patients = 4\n n_labels = 1\n normalize = True\n modes = ['flair']\n dataHandler = SegNetDataHandler('Data/BRATS_2018/HGG_Testing',\n num_patients=num_testing_patients, modes=modes)\n dataHandler.loadData()\n dataHandler.preprocessForNetwork()\n x_test = np.array(dataHandler.X)\n x_seg_test = dataHandler.labels\n dataHandler.clear()\n segnet = load_model('Models/segnet_2018-10-28-14:37/model.h5',\n custom_objects={'MaxPoolingWithArgmax2D': MaxPoolingWithArgmax2D,\n 'MaxUnpooling2D': MaxUnpooling2D, 'combinedDiceAndChamfer':\n combinedDiceAndChamfer, 'combinedHausdorffAndDice':\n combinedHausdorffAndDice, 'dice_coef': dice_coef, 'dice_coef_loss':\n dice_coef_loss, 'dice_coef_multilabel': dice_coef_multilabel,\n 'dice_coef_multilabel_loss': dice_coef_multilabel_loss})\n if normalize:\n mu = np.mean(x_test)\n sigma = np.std(x_test)\n x_test -= mu\n x_test /= sigma\n decoded_imgs = segnet.predict(x_test)\n if n_labels > 1:\n decoded_imgs = [np.argmax(x, axis=1) for x in decoded_imgs]\n else:\n for x in x_seg_test:\n x[x > 0.5] = 1\n x[x < 0.5] = 0\n for x in decoded_imgs:\n x[x > 0.5] = 1\n x[x < 0.5] = 0\n decoded_imgs = [x.reshape(dataHandler.W, dataHandler.W) for x in\n decoded_imgs]\n N = len(decoded_imgs)\n avg_dice = 0\n for i in range(N):\n foo = decoded_imgs[i].reshape(dataHandler.W, dataHandler.W)\n dice = computeDice(x_seg_test[i], foo)\n avg_dice = avg_dice + dice\n print(str(avg_dice / N))\n for i in range(N):\n fig = plt.figure()\n plt.gray()\n fig.add_subplot(1, 3, 1)\n plt.imshow(x_test[i, :, :, 0])\n plt.axis('off')\n plt.title('Original')\n fig.add_subplot(1, 3, 2)\n plt.imshow(x_seg_test[i])\n plt.axis('off')\n plt.title('GT Segment')\n fig.add_subplot(1, 3, 3)\n plt.imshow(decoded_imgs[i])\n plt.axis('off')\n plt.title('Predicted Segment')\n plt.show()\n\n\n<mask token>\n",
"step-2": "<mask token>\nsys.path.append(os.path.join(os.path.dirname(__file__), '..'))\n<mask token>\nsys.path.append(DATA_DIR)\n\n\ndef computeDice(im1, im2):\n im1 = np.asarray(im1).astype(np.bool)\n im2 = np.asarray(im2).astype(np.bool)\n if im1.shape != im2.shape:\n raise ValueError(\n 'Shape mismatch: im1 and im2 must have the same shape.')\n intersection = np.logical_and(im1, im2)\n dice = 2.0 * intersection.sum() / (im1.sum() + im2.sum())\n if math.isnan(dice):\n return 0\n return dice\n\n\ndef main():\n num_testing_patients = 4\n n_labels = 1\n normalize = True\n modes = ['flair']\n dataHandler = SegNetDataHandler('Data/BRATS_2018/HGG_Testing',\n num_patients=num_testing_patients, modes=modes)\n dataHandler.loadData()\n dataHandler.preprocessForNetwork()\n x_test = np.array(dataHandler.X)\n x_seg_test = dataHandler.labels\n dataHandler.clear()\n segnet = load_model('Models/segnet_2018-10-28-14:37/model.h5',\n custom_objects={'MaxPoolingWithArgmax2D': MaxPoolingWithArgmax2D,\n 'MaxUnpooling2D': MaxUnpooling2D, 'combinedDiceAndChamfer':\n combinedDiceAndChamfer, 'combinedHausdorffAndDice':\n combinedHausdorffAndDice, 'dice_coef': dice_coef, 'dice_coef_loss':\n dice_coef_loss, 'dice_coef_multilabel': dice_coef_multilabel,\n 'dice_coef_multilabel_loss': dice_coef_multilabel_loss})\n if normalize:\n mu = np.mean(x_test)\n sigma = np.std(x_test)\n x_test -= mu\n x_test /= sigma\n decoded_imgs = segnet.predict(x_test)\n if n_labels > 1:\n decoded_imgs = [np.argmax(x, axis=1) for x in decoded_imgs]\n else:\n for x in x_seg_test:\n x[x > 0.5] = 1\n x[x < 0.5] = 0\n for x in decoded_imgs:\n x[x > 0.5] = 1\n x[x < 0.5] = 0\n decoded_imgs = [x.reshape(dataHandler.W, dataHandler.W) for x in\n decoded_imgs]\n N = len(decoded_imgs)\n avg_dice = 0\n for i in range(N):\n foo = decoded_imgs[i].reshape(dataHandler.W, dataHandler.W)\n dice = computeDice(x_seg_test[i], foo)\n avg_dice = avg_dice + dice\n print(str(avg_dice / N))\n for i in range(N):\n fig = plt.figure()\n plt.gray()\n fig.add_subplot(1, 3, 1)\n plt.imshow(x_test[i, :, :, 0])\n plt.axis('off')\n plt.title('Original')\n fig.add_subplot(1, 3, 2)\n plt.imshow(x_seg_test[i])\n plt.axis('off')\n plt.title('GT Segment')\n fig.add_subplot(1, 3, 3)\n plt.imshow(decoded_imgs[i])\n plt.axis('off')\n plt.title('Predicted Segment')\n plt.show()\n\n\nif __name__ == '__main__':\n main()\n",
"step-3": "<mask token>\nsys.path.append(os.path.join(os.path.dirname(__file__), '..'))\n<mask token>\nDATA_DIR = os.path.abspath('../')\nsys.path.append(DATA_DIR)\n\n\ndef computeDice(im1, im2):\n im1 = np.asarray(im1).astype(np.bool)\n im2 = np.asarray(im2).astype(np.bool)\n if im1.shape != im2.shape:\n raise ValueError(\n 'Shape mismatch: im1 and im2 must have the same shape.')\n intersection = np.logical_and(im1, im2)\n dice = 2.0 * intersection.sum() / (im1.sum() + im2.sum())\n if math.isnan(dice):\n return 0\n return dice\n\n\ndef main():\n num_testing_patients = 4\n n_labels = 1\n normalize = True\n modes = ['flair']\n dataHandler = SegNetDataHandler('Data/BRATS_2018/HGG_Testing',\n num_patients=num_testing_patients, modes=modes)\n dataHandler.loadData()\n dataHandler.preprocessForNetwork()\n x_test = np.array(dataHandler.X)\n x_seg_test = dataHandler.labels\n dataHandler.clear()\n segnet = load_model('Models/segnet_2018-10-28-14:37/model.h5',\n custom_objects={'MaxPoolingWithArgmax2D': MaxPoolingWithArgmax2D,\n 'MaxUnpooling2D': MaxUnpooling2D, 'combinedDiceAndChamfer':\n combinedDiceAndChamfer, 'combinedHausdorffAndDice':\n combinedHausdorffAndDice, 'dice_coef': dice_coef, 'dice_coef_loss':\n dice_coef_loss, 'dice_coef_multilabel': dice_coef_multilabel,\n 'dice_coef_multilabel_loss': dice_coef_multilabel_loss})\n if normalize:\n mu = np.mean(x_test)\n sigma = np.std(x_test)\n x_test -= mu\n x_test /= sigma\n decoded_imgs = segnet.predict(x_test)\n if n_labels > 1:\n decoded_imgs = [np.argmax(x, axis=1) for x in decoded_imgs]\n else:\n for x in x_seg_test:\n x[x > 0.5] = 1\n x[x < 0.5] = 0\n for x in decoded_imgs:\n x[x > 0.5] = 1\n x[x < 0.5] = 0\n decoded_imgs = [x.reshape(dataHandler.W, dataHandler.W) for x in\n decoded_imgs]\n N = len(decoded_imgs)\n avg_dice = 0\n for i in range(N):\n foo = decoded_imgs[i].reshape(dataHandler.W, dataHandler.W)\n dice = computeDice(x_seg_test[i], foo)\n avg_dice = avg_dice + dice\n print(str(avg_dice / N))\n for i in range(N):\n fig = plt.figure()\n plt.gray()\n fig.add_subplot(1, 3, 1)\n plt.imshow(x_test[i, :, :, 0])\n plt.axis('off')\n plt.title('Original')\n fig.add_subplot(1, 3, 2)\n plt.imshow(x_seg_test[i])\n plt.axis('off')\n plt.title('GT Segment')\n fig.add_subplot(1, 3, 3)\n plt.imshow(decoded_imgs[i])\n plt.axis('off')\n plt.title('Predicted Segment')\n plt.show()\n\n\nif __name__ == '__main__':\n main()\n",
"step-4": "<mask token>\nimport sys\nimport os\nfrom keras.utils import np_utils\nfrom _codecs import decode\nsys.path.append(os.path.join(os.path.dirname(__file__), '..'))\nfrom DataHandlers.SegNetDataHandler import SegNetDataHandler\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom keras.models import load_model\nfrom Mylayers import MaxPoolingWithArgmax2D, MaxUnpooling2D\nimport math\nfrom CustomLosses import dice_coef, dice_coef_multilabel, dice_coef_loss, combinedDiceAndChamfer, dice_coef_multilabel_loss, combinedHausdorffAndDice\nfrom dipy.segment.mask import clean_cc_mask\nDATA_DIR = os.path.abspath('../')\nsys.path.append(DATA_DIR)\n\n\ndef computeDice(im1, im2):\n im1 = np.asarray(im1).astype(np.bool)\n im2 = np.asarray(im2).astype(np.bool)\n if im1.shape != im2.shape:\n raise ValueError(\n 'Shape mismatch: im1 and im2 must have the same shape.')\n intersection = np.logical_and(im1, im2)\n dice = 2.0 * intersection.sum() / (im1.sum() + im2.sum())\n if math.isnan(dice):\n return 0\n return dice\n\n\ndef main():\n num_testing_patients = 4\n n_labels = 1\n normalize = True\n modes = ['flair']\n dataHandler = SegNetDataHandler('Data/BRATS_2018/HGG_Testing',\n num_patients=num_testing_patients, modes=modes)\n dataHandler.loadData()\n dataHandler.preprocessForNetwork()\n x_test = np.array(dataHandler.X)\n x_seg_test = dataHandler.labels\n dataHandler.clear()\n segnet = load_model('Models/segnet_2018-10-28-14:37/model.h5',\n custom_objects={'MaxPoolingWithArgmax2D': MaxPoolingWithArgmax2D,\n 'MaxUnpooling2D': MaxUnpooling2D, 'combinedDiceAndChamfer':\n combinedDiceAndChamfer, 'combinedHausdorffAndDice':\n combinedHausdorffAndDice, 'dice_coef': dice_coef, 'dice_coef_loss':\n dice_coef_loss, 'dice_coef_multilabel': dice_coef_multilabel,\n 'dice_coef_multilabel_loss': dice_coef_multilabel_loss})\n if normalize:\n mu = np.mean(x_test)\n sigma = np.std(x_test)\n x_test -= mu\n x_test /= sigma\n decoded_imgs = segnet.predict(x_test)\n if n_labels > 1:\n decoded_imgs = [np.argmax(x, axis=1) for x in decoded_imgs]\n else:\n for x in x_seg_test:\n x[x > 0.5] = 1\n x[x < 0.5] = 0\n for x in decoded_imgs:\n x[x > 0.5] = 1\n x[x < 0.5] = 0\n decoded_imgs = [x.reshape(dataHandler.W, dataHandler.W) for x in\n decoded_imgs]\n N = len(decoded_imgs)\n avg_dice = 0\n for i in range(N):\n foo = decoded_imgs[i].reshape(dataHandler.W, dataHandler.W)\n dice = computeDice(x_seg_test[i], foo)\n avg_dice = avg_dice + dice\n print(str(avg_dice / N))\n for i in range(N):\n fig = plt.figure()\n plt.gray()\n fig.add_subplot(1, 3, 1)\n plt.imshow(x_test[i, :, :, 0])\n plt.axis('off')\n plt.title('Original')\n fig.add_subplot(1, 3, 2)\n plt.imshow(x_seg_test[i])\n plt.axis('off')\n plt.title('GT Segment')\n fig.add_subplot(1, 3, 3)\n plt.imshow(decoded_imgs[i])\n plt.axis('off')\n plt.title('Predicted Segment')\n plt.show()\n\n\nif __name__ == '__main__':\n main()\n",
"step-5": "'''\nCreated on Jul 10, 2018\n\n@author: daniel\n'''\n\n#from multiprocessing import Process, Manager\n#from keras.utils import np_utils\nimport sys\nimport os\nfrom keras.utils import np_utils\nfrom _codecs import decode\nsys.path.append(os.path.join(os.path.dirname(__file__), '..'))\nfrom DataHandlers.SegNetDataHandler import SegNetDataHandler\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom keras.models import load_model\nfrom Mylayers import MaxPoolingWithArgmax2D, MaxUnpooling2D\nimport math\nfrom CustomLosses import dice_coef, dice_coef_multilabel, dice_coef_loss, combinedDiceAndChamfer, dice_coef_multilabel_loss, combinedHausdorffAndDice\nfrom dipy.segment.mask import clean_cc_mask\n\nDATA_DIR = os.path.abspath(\"../\")\nsys.path.append(DATA_DIR)\n\ndef computeDice(im1, im2):\n im1 = np.asarray(im1).astype(np.bool)\n im2 = np.asarray(im2).astype(np.bool)\n \n if im1.shape != im2.shape:\n raise ValueError(\"Shape mismatch: im1 and im2 must have the same shape.\")\n\n intersection = np.logical_and(im1, im2)\n dice = 2. * intersection.sum() / (im1.sum() + im2.sum())\n if math.isnan(dice):\n return 0\n return dice\ndef main():\n\n \n num_testing_patients = 4\n n_labels = 1\n normalize = True\n modes = [\"flair\"]\n dataHandler = SegNetDataHandler(\"Data/BRATS_2018/HGG_Testing\", \n num_patients = num_testing_patients, \n modes = modes)\n dataHandler.loadData()\n dataHandler.preprocessForNetwork()\n x_test = np.array(dataHandler.X)\n x_seg_test = dataHandler.labels\n dataHandler.clear()\n\n segnet = load_model(\"Models/segnet_2018-10-28-14:37/model.h5\", custom_objects={'MaxPoolingWithArgmax2D': MaxPoolingWithArgmax2D, \n 'MaxUnpooling2D':MaxUnpooling2D, \n 'combinedDiceAndChamfer':combinedDiceAndChamfer,\n 'combinedHausdorffAndDice': combinedHausdorffAndDice,\n 'dice_coef':dice_coef, \n 'dice_coef_loss':dice_coef_loss,\n 'dice_coef_multilabel': dice_coef_multilabel,\n 'dice_coef_multilabel_loss' : dice_coef_multilabel_loss})\n \n \n \n if normalize:\n mu = np.mean(x_test)\n sigma = np.std(x_test)\n x_test -= mu\n x_test /= sigma\n decoded_imgs = segnet.predict(x_test)\n\n if n_labels > 1:\n #x_seg_test = np_utils.to_categorical(x_seg_test)\n #x_seg_test = np.argmax(x_seg_test, axis=3)\n decoded_imgs = [np.argmax(x, axis = 1) for x in decoded_imgs]\n else:\n for x in x_seg_test:\n x[x > 0.5] = 1\n x[x < 0.5] = 0\n for x in decoded_imgs:\n x[x > 0.5] = 1\n x[x < 0.5] = 0\n \n\n decoded_imgs = [x.reshape(dataHandler.W, dataHandler.W) for x in decoded_imgs]\n\n\n N = len(decoded_imgs)\n\n \n \n avg_dice = 0\n \n for i in range(N):\n foo = decoded_imgs[i].reshape(dataHandler.W, dataHandler.W)\n dice = computeDice(x_seg_test[i], foo)\n avg_dice = avg_dice + dice\n print(str(avg_dice/N))\n \n \n for i in range(N):\n fig = plt.figure()\n plt.gray(); \n fig.add_subplot(1,3,1)\n plt.imshow(x_test[i,:,:,0])\n plt.axis('off')\n plt.title('Original')\n \n fig.add_subplot(1,3,2)\n plt.imshow(x_seg_test[i])\n plt.axis('off')\n plt.title('GT Segment')\n \n fig.add_subplot(1,3,3)\n\n plt.imshow(decoded_imgs[i])\n plt.axis('off')\n plt.title('Predicted Segment')\n\n plt.show()\n \n\n\nif __name__ == \"__main__\":\n main() \n",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
"""script for subpixel experiment (not tested)
"""
import numpy as np
from tqdm import tqdm
import logging
from pathlib import Path
import paddle
import paddle.optimizer
import paddle.io
from utils.loader import dataLoader
from utils.loader import modelLoader
from utils.loader import pretrainedLoader
from utils.tools import dict_update
from utils.utils import labels2Dto3D
from utils.utils import flattenDetection
from utils.utils import labels2Dto3D_flattened
from utils.utils import pltImshow
from utils.utils import saveImg
from utils.utils import precisionRecall_torch
from utils.utils import save_checkpoint
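# Wrapper around a pretrained subpixel network: loads weights, extracts patches around keypoints, and runs inference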
@paddle.no_grad()
class Val_model_subpixel(object):
def __init__(self, config, device='gpu', verbose=False):
self.config = config
self.model = self.config['name']
self.params = self.config['params']
self.weights_path = self.config['pretrained']
self.device = device
pass
def loadModel(self):
from utils.loader import modelLoader
self.net = modelLoader(model=self.model, **self.params)
checkpoint = paddle.load(self.weights_path)
self.net.load_dict(checkpoint['model_state_dict'])
self.net = self.net.to(self.device)
        logging.info('Successfully loaded pretrained model from: %s',
                     self.weights_path)
pass
def extract_patches(self, label_idx, img):
from utils.losses import extract_patches
patch_size = self.config['params']['patch_size']
patches = extract_patches(label_idx.to(self.device),
img.to(self.device),
patch_size=patch_size)
return patches
pass
def run(self, patches):
with paddle.no_grad():
pred_res = self.net(patches)
return pred_res
pass
if __name__ == '__main__':
filename = 'configs/magicpoint_repeatability.yaml'
import yaml
device = 'cuda' if paddle.is_compiled_with_cuda() else 'cpu'
device = device.replace('cuda', 'gpu')
device = paddle.set_device(device)
paddle.set_default_dtype('float32')
with open(filename, 'r') as f:
config = yaml.load(f, Loader=yaml.FullLoader)
task = config['data']['dataset']
from utils.loader import dataLoader_test as dataLoader
data = dataLoader(config, dataset='hpatches')
test_set, test_loader = data['test_set'], data['test_loader']
for i, sample in tqdm(enumerate(test_loader)):
if i > 1:
break
val_agent = Val_model_subpixel(config['subpixel'], device=device)
val_agent.loadModel()
img = sample['image']
print('image: ', img.shape)
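        # Dummy keypoint coordinates used only to exercise the patch-extraction path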
points = paddle.to_tensor([[1, 2], [3, 4]])
        def points_to_4d(points):
            # Prepend two zero columns so each 2-D point becomes a 4-D index
            num_of_points = points.shape[0]
            cols = paddle.zeros([num_of_points, 1], dtype=paddle.float32)
            points = paddle.concat((cols, cols, paddle.to_tensor(points, dtype=paddle.float32)), axis=1)
            return points
label_idx = points_to_4d(points)
patches = val_agent.extract_patches(label_idx, img)
points_res = val_agent.run(patches)
|
normal
|
{
"blob_id": "fc89fdf17f887ea398be5b36d4d6f0444d64b3e0",
"index": 8026,
"step-1": "<mask token>\n\n\[email protected]_grad()\nclass Val_model_subpixel(object):\n <mask token>\n\n def loadModel(self):\n from utils.loader import modelLoader\n self.net = modelLoader(model=self.model, **self.params)\n checkpoint = paddle.load(self.weights_path)\n self.net.load_dict(checkpoint['model_state_dict'])\n self.net = self.net.to(self.device)\n logging.info('successfully load pretrained model from: %s', self.\n weights_path)\n pass\n\n def extract_patches(self, label_idx, img):\n from utils.losses import extract_patches\n patch_size = self.config['params']['patch_size']\n patches = extract_patches(label_idx.to(self.device), img.to(self.\n device), patch_size=patch_size)\n return patches\n pass\n <mask token>\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\[email protected]_grad()\nclass Val_model_subpixel(object):\n\n def __init__(self, config, device='gpu', verbose=False):\n self.config = config\n self.model = self.config['name']\n self.params = self.config['params']\n self.weights_path = self.config['pretrained']\n self.device = device\n pass\n\n def loadModel(self):\n from utils.loader import modelLoader\n self.net = modelLoader(model=self.model, **self.params)\n checkpoint = paddle.load(self.weights_path)\n self.net.load_dict(checkpoint['model_state_dict'])\n self.net = self.net.to(self.device)\n logging.info('successfully load pretrained model from: %s', self.\n weights_path)\n pass\n\n def extract_patches(self, label_idx, img):\n from utils.losses import extract_patches\n patch_size = self.config['params']['patch_size']\n patches = extract_patches(label_idx.to(self.device), img.to(self.\n device), patch_size=patch_size)\n return patches\n pass\n\n def run(self, patches):\n with paddle.no_grad():\n pred_res = self.net(patches)\n return pred_res\n pass\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\[email protected]_grad()\nclass Val_model_subpixel(object):\n\n def __init__(self, config, device='gpu', verbose=False):\n self.config = config\n self.model = self.config['name']\n self.params = self.config['params']\n self.weights_path = self.config['pretrained']\n self.device = device\n pass\n\n def loadModel(self):\n from utils.loader import modelLoader\n self.net = modelLoader(model=self.model, **self.params)\n checkpoint = paddle.load(self.weights_path)\n self.net.load_dict(checkpoint['model_state_dict'])\n self.net = self.net.to(self.device)\n logging.info('successfully load pretrained model from: %s', self.\n weights_path)\n pass\n\n def extract_patches(self, label_idx, img):\n from utils.losses import extract_patches\n patch_size = self.config['params']['patch_size']\n patches = extract_patches(label_idx.to(self.device), img.to(self.\n device), patch_size=patch_size)\n return patches\n pass\n\n def run(self, patches):\n with paddle.no_grad():\n pred_res = self.net(patches)\n return pred_res\n pass\n\n\nif __name__ == '__main__':\n filename = 'configs/magicpoint_repeatability.yaml'\n import yaml\n device = 'cuda' if paddle.is_compiled_with_cuda() else 'cpu'\n device = device.replace('cuda', 'gpu')\n device = paddle.set_device(device)\n paddle.set_default_dtype('float32')\n with open(filename, 'r') as f:\n config = yaml.load(f, Loader=yaml.FullLoader)\n task = config['data']['dataset']\n from utils.loader import dataLoader_test as dataLoader\n data = dataLoader(config, dataset='hpatches')\n test_set, test_loader = data['test_set'], data['test_loader']\n for i, sample in tqdm(enumerate(test_loader)):\n if i > 1:\n break\n val_agent = Val_model_subpixel(config['subpixel'], device=device)\n val_agent.loadModel()\n img = sample['image']\n print('image: ', img.shape)\n points = paddle.to_tensor([[1, 2], [3, 4]])\n\n def points_to_4d(points):\n num_of_points = points.shape[0]\n cols = paddle.to_tensor(paddle.zeros([num_of_points, 1]).\n requires_grad_(False), dtype=paddle.float32)\n points = paddle.concat((cols, cols, paddle.to_tensor(points,\n dtype=paddle.float32)), axis=1)\n return points\n label_idx = points_to_4d(points)\n patches = val_agent.extract_patches(label_idx, img)\n points_res = val_agent.run(patches)\n",
"step-4": "<mask token>\nimport numpy as np\nfrom tqdm import tqdm\nimport logging\nfrom pathlib import Path\nimport paddle\nimport paddle.optimizer\nimport paddle.io\nfrom utils.loader import dataLoader\nfrom utils.loader import modelLoader\nfrom utils.loader import pretrainedLoader\nfrom utils.tools import dict_update\nfrom utils.utils import labels2Dto3D\nfrom utils.utils import flattenDetection\nfrom utils.utils import labels2Dto3D_flattened\nfrom utils.utils import pltImshow\nfrom utils.utils import saveImg\nfrom utils.utils import precisionRecall_torch\nfrom utils.utils import save_checkpoint\n\n\[email protected]_grad()\nclass Val_model_subpixel(object):\n\n def __init__(self, config, device='gpu', verbose=False):\n self.config = config\n self.model = self.config['name']\n self.params = self.config['params']\n self.weights_path = self.config['pretrained']\n self.device = device\n pass\n\n def loadModel(self):\n from utils.loader import modelLoader\n self.net = modelLoader(model=self.model, **self.params)\n checkpoint = paddle.load(self.weights_path)\n self.net.load_dict(checkpoint['model_state_dict'])\n self.net = self.net.to(self.device)\n logging.info('successfully load pretrained model from: %s', self.\n weights_path)\n pass\n\n def extract_patches(self, label_idx, img):\n from utils.losses import extract_patches\n patch_size = self.config['params']['patch_size']\n patches = extract_patches(label_idx.to(self.device), img.to(self.\n device), patch_size=patch_size)\n return patches\n pass\n\n def run(self, patches):\n with paddle.no_grad():\n pred_res = self.net(patches)\n return pred_res\n pass\n\n\nif __name__ == '__main__':\n filename = 'configs/magicpoint_repeatability.yaml'\n import yaml\n device = 'cuda' if paddle.is_compiled_with_cuda() else 'cpu'\n device = device.replace('cuda', 'gpu')\n device = paddle.set_device(device)\n paddle.set_default_dtype('float32')\n with open(filename, 'r') as f:\n config = yaml.load(f, Loader=yaml.FullLoader)\n task = config['data']['dataset']\n from utils.loader import dataLoader_test as dataLoader\n data = dataLoader(config, dataset='hpatches')\n test_set, test_loader = data['test_set'], data['test_loader']\n for i, sample in tqdm(enumerate(test_loader)):\n if i > 1:\n break\n val_agent = Val_model_subpixel(config['subpixel'], device=device)\n val_agent.loadModel()\n img = sample['image']\n print('image: ', img.shape)\n points = paddle.to_tensor([[1, 2], [3, 4]])\n\n def points_to_4d(points):\n num_of_points = points.shape[0]\n cols = paddle.to_tensor(paddle.zeros([num_of_points, 1]).\n requires_grad_(False), dtype=paddle.float32)\n points = paddle.concat((cols, cols, paddle.to_tensor(points,\n dtype=paddle.float32)), axis=1)\n return points\n label_idx = points_to_4d(points)\n patches = val_agent.extract_patches(label_idx, img)\n points_res = val_agent.run(patches)\n",
"step-5": "\"\"\"script for subpixel experiment (not tested)\n\"\"\"\nimport numpy as np\nfrom tqdm import tqdm\nimport logging\nfrom pathlib import Path\n\nimport paddle\nimport paddle.optimizer\nimport paddle.io\n\nfrom utils.loader import dataLoader\nfrom utils.loader import modelLoader\nfrom utils.loader import pretrainedLoader\nfrom utils.tools import dict_update\nfrom utils.utils import labels2Dto3D\nfrom utils.utils import flattenDetection\nfrom utils.utils import labels2Dto3D_flattened\nfrom utils.utils import pltImshow\nfrom utils.utils import saveImg\nfrom utils.utils import precisionRecall_torch\nfrom utils.utils import save_checkpoint\n\n\[email protected]_grad()\nclass Val_model_subpixel(object):\n\n def __init__(self, config, device='gpu', verbose=False):\n self.config = config\n self.model = self.config['name']\n self.params = self.config['params']\n self.weights_path = self.config['pretrained']\n self.device = device\n pass\n\n def loadModel(self):\n from utils.loader import modelLoader\n self.net = modelLoader(model=self.model, **self.params)\n\n checkpoint = paddle.load(self.weights_path)\n self.net.load_dict(checkpoint['model_state_dict'])\n\n self.net = self.net.to(self.device)\n logging.info('successfully load pretrained model from: %s',\n self.weights_path)\n pass\n\n def extract_patches(self, label_idx, img):\n from utils.losses import extract_patches\n patch_size = self.config['params']['patch_size']\n patches = extract_patches(label_idx.to(self.device),\n img.to(self.device),\n patch_size=patch_size)\n return patches\n pass\n\n def run(self, patches):\n with paddle.no_grad():\n pred_res = self.net(patches)\n return pred_res\n pass\n\n\nif __name__ == '__main__':\n filename = 'configs/magicpoint_repeatability.yaml'\n import yaml\n\n device = 'cuda' if paddle.is_compiled_with_cuda() else 'cpu'\n device = device.replace('cuda', 'gpu')\n device = paddle.set_device(device)\n\n paddle.set_default_dtype('float32')\n\n with open(filename, 'r') as f:\n config = yaml.load(f, Loader=yaml.FullLoader)\n\n task = config['data']['dataset']\n\n from utils.loader import dataLoader_test as dataLoader\n\n data = dataLoader(config, dataset='hpatches')\n test_set, test_loader = data['test_set'], data['test_loader']\n for i, sample in tqdm(enumerate(test_loader)):\n if i > 1:\n break\n\n val_agent = Val_model_subpixel(config['subpixel'], device=device)\n val_agent.loadModel()\n\n img = sample['image']\n print('image: ', img.shape)\n points = paddle.to_tensor([[1, 2], [3, 4]])\n\n def points_to_4d(points):\n num_of_points = points.shape[0]\n cols = paddle.to_tensor(paddle.zeros([num_of_points, 1]).requires_grad_(False), dtype=paddle.float32)\n points = paddle.concat((cols, cols, paddle.to_tensor(points, dtype=paddle.float32)), axis=1)\n return points\n label_idx = points_to_4d(points)\n\n patches = val_agent.extract_patches(label_idx, img)\n points_res = val_agent.run(patches)\n",
"step-ids": [
3,
5,
6,
7,
8
]
}
|
[
3,
5,
6,
7,
8
] |
from faker import Faker
from generators.uniform_distribution_gen import UniformDistributionGen
from generators.random_relation_gen import RandomRelationGen
from base.field_base import FieldBase
from generators.normal_distribution_gen import NormalDistributionGen
from generators.first_name_generator import FirstNameGenerator
from generators.last_name_generator import LastNameGenerator
from generators.universal_function_generator import UniversalFunctionGenerator
from generators.print_relations_generator import PrintRelationsGenerator
from base.model_base import ModelBase
from base.class_base import ClassBase
class A:
def __init__(self) -> None:
self.alpha: str = ""
self.C: C = None
class B:
def __init__(self) -> None:
self.alpha: str = ""
self.C: C = None
class C:
def __init__(self) -> None:
self.alpha: str = ""
self.beta: str = ""
self.gamma: str = ""
self.delta: str = ""
if __name__ == "__main__":
model = ModelBase()
    # Register classes A, B and C with the model (10 instances each)
cb_a = ClassBase(model, A, 10)
cb_b = ClassBase(model, B, 10)
cb_c = ClassBase(model, C, 10)
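    # Attach generators to fields; related_fields list the fields each generator references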
FieldBase(cb_a, PrintRelationsGenerator(),
"alpha", related_fields=["C.alpha", "C.beta", "C.gamma"])
FieldBase(cb_a, RandomRelationGen(cb_c), "C")
FieldBase(cb_b, PrintRelationsGenerator(),
"alpha", related_fields=["C.alpha", "C.beta", "C.gamma"])
FieldBase(cb_b, RandomRelationGen(cb_c), "C")
FieldBase(cb_c, PrintRelationsGenerator(),
"alpha", related_fields=["beta"])
FieldBase(cb_c, PrintRelationsGenerator(),
"beta", related_fields=["gamma"])
FieldBase(cb_c, PrintRelationsGenerator(),
"gamma", related_fields=["delta"])
FieldBase(cb_c, UniversalFunctionGenerator(
f=Faker().paragraph, nb_sentences=1),
"delta")
model.create_instances()
model.map_field_graph_full()
model.print_generation_order()
model.draw_field_graph()
model.fill_in_instances()
print("")
|
normal
|
{
"blob_id": "0926606a222e1277935a48ba7f0ea886fb4e298a",
"index": 5234,
"step-1": "<mask token>\n\n\nclass A:\n <mask token>\n\n\nclass B:\n\n def __init__(self) ->None:\n self.alpha: str = ''\n self.C: C = None\n\n\nclass C:\n\n def __init__(self) ->None:\n self.alpha: str = ''\n self.beta: str = ''\n self.gamma: str = ''\n self.delta: str = ''\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass A:\n\n def __init__(self) ->None:\n self.alpha: str = ''\n self.C: C = None\n\n\nclass B:\n\n def __init__(self) ->None:\n self.alpha: str = ''\n self.C: C = None\n\n\nclass C:\n\n def __init__(self) ->None:\n self.alpha: str = ''\n self.beta: str = ''\n self.gamma: str = ''\n self.delta: str = ''\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass A:\n\n def __init__(self) ->None:\n self.alpha: str = ''\n self.C: C = None\n\n\nclass B:\n\n def __init__(self) ->None:\n self.alpha: str = ''\n self.C: C = None\n\n\nclass C:\n\n def __init__(self) ->None:\n self.alpha: str = ''\n self.beta: str = ''\n self.gamma: str = ''\n self.delta: str = ''\n\n\nif __name__ == '__main__':\n model = ModelBase()\n cb_a = ClassBase(model, A, 10)\n cb_b = ClassBase(model, B, 10)\n cb_c = ClassBase(model, C, 10)\n FieldBase(cb_a, PrintRelationsGenerator(), 'alpha', related_fields=[\n 'C.alpha', 'C.beta', 'C.gamma'])\n FieldBase(cb_a, RandomRelationGen(cb_c), 'C')\n FieldBase(cb_b, PrintRelationsGenerator(), 'alpha', related_fields=[\n 'C.alpha', 'C.beta', 'C.gamma'])\n FieldBase(cb_b, RandomRelationGen(cb_c), 'C')\n FieldBase(cb_c, PrintRelationsGenerator(), 'alpha', related_fields=['beta']\n )\n FieldBase(cb_c, PrintRelationsGenerator(), 'beta', related_fields=['gamma']\n )\n FieldBase(cb_c, PrintRelationsGenerator(), 'gamma', related_fields=[\n 'delta'])\n FieldBase(cb_c, UniversalFunctionGenerator(f=Faker().paragraph,\n nb_sentences=1), 'delta')\n model.create_instances()\n model.map_field_graph_full()\n model.print_generation_order()\n model.draw_field_graph()\n model.fill_in_instances()\n print('')\n",
"step-4": "from faker import Faker\nfrom generators.uniform_distribution_gen import UniformDistributionGen\nfrom generators.random_relation_gen import RandomRelationGen\nfrom base.field_base import FieldBase\nfrom generators.normal_distribution_gen import NormalDistributionGen\nfrom generators.first_name_generator import FirstNameGenerator\nfrom generators.last_name_generator import LastNameGenerator\nfrom generators.universal_function_generator import UniversalFunctionGenerator\nfrom generators.print_relations_generator import PrintRelationsGenerator\nfrom base.model_base import ModelBase\nfrom base.class_base import ClassBase\n\n\nclass A:\n\n def __init__(self) ->None:\n self.alpha: str = ''\n self.C: C = None\n\n\nclass B:\n\n def __init__(self) ->None:\n self.alpha: str = ''\n self.C: C = None\n\n\nclass C:\n\n def __init__(self) ->None:\n self.alpha: str = ''\n self.beta: str = ''\n self.gamma: str = ''\n self.delta: str = ''\n\n\nif __name__ == '__main__':\n model = ModelBase()\n cb_a = ClassBase(model, A, 10)\n cb_b = ClassBase(model, B, 10)\n cb_c = ClassBase(model, C, 10)\n FieldBase(cb_a, PrintRelationsGenerator(), 'alpha', related_fields=[\n 'C.alpha', 'C.beta', 'C.gamma'])\n FieldBase(cb_a, RandomRelationGen(cb_c), 'C')\n FieldBase(cb_b, PrintRelationsGenerator(), 'alpha', related_fields=[\n 'C.alpha', 'C.beta', 'C.gamma'])\n FieldBase(cb_b, RandomRelationGen(cb_c), 'C')\n FieldBase(cb_c, PrintRelationsGenerator(), 'alpha', related_fields=['beta']\n )\n FieldBase(cb_c, PrintRelationsGenerator(), 'beta', related_fields=['gamma']\n )\n FieldBase(cb_c, PrintRelationsGenerator(), 'gamma', related_fields=[\n 'delta'])\n FieldBase(cb_c, UniversalFunctionGenerator(f=Faker().paragraph,\n nb_sentences=1), 'delta')\n model.create_instances()\n model.map_field_graph_full()\n model.print_generation_order()\n model.draw_field_graph()\n model.fill_in_instances()\n print('')\n",
"step-5": "from faker import Faker\nfrom generators.uniform_distribution_gen import UniformDistributionGen\nfrom generators.random_relation_gen import RandomRelationGen\nfrom base.field_base import FieldBase\nfrom generators.normal_distribution_gen import NormalDistributionGen\nfrom generators.first_name_generator import FirstNameGenerator\nfrom generators.last_name_generator import LastNameGenerator\nfrom generators.universal_function_generator import UniversalFunctionGenerator\nfrom generators.print_relations_generator import PrintRelationsGenerator\nfrom base.model_base import ModelBase\nfrom base.class_base import ClassBase\n\n\nclass A:\n def __init__(self) -> None:\n self.alpha: str = \"\"\n self.C: C = None\n\n\nclass B:\n def __init__(self) -> None:\n self.alpha: str = \"\"\n self.C: C = None\n\n\nclass C:\n def __init__(self) -> None:\n self.alpha: str = \"\"\n self.beta: str = \"\"\n self.gamma: str = \"\"\n self.delta: str = \"\"\n\n\nif __name__ == \"__main__\":\n model = ModelBase()\n\n # Person\n cb_a = ClassBase(model, A, 10)\n cb_b = ClassBase(model, B, 10)\n cb_c = ClassBase(model, C, 10)\n\n FieldBase(cb_a, PrintRelationsGenerator(),\n \"alpha\", related_fields=[\"C.alpha\", \"C.beta\", \"C.gamma\"])\n FieldBase(cb_a, RandomRelationGen(cb_c), \"C\")\n\n FieldBase(cb_b, PrintRelationsGenerator(),\n \"alpha\", related_fields=[\"C.alpha\", \"C.beta\", \"C.gamma\"])\n FieldBase(cb_b, RandomRelationGen(cb_c), \"C\")\n\n FieldBase(cb_c, PrintRelationsGenerator(),\n \"alpha\", related_fields=[\"beta\"])\n FieldBase(cb_c, PrintRelationsGenerator(),\n \"beta\", related_fields=[\"gamma\"])\n FieldBase(cb_c, PrintRelationsGenerator(),\n \"gamma\", related_fields=[\"delta\"])\n FieldBase(cb_c, UniversalFunctionGenerator(\n f=Faker().paragraph, nb_sentences=1),\n \"delta\")\n\n model.create_instances()\n model.map_field_graph_full()\n model.print_generation_order()\n model.draw_field_graph()\n model.fill_in_instances()\n print(\"\")\n",
"step-ids": [
5,
6,
7,
8,
9
]
}
|
[
5,
6,
7,
8,
9
] |
import utils
from problems_2019 import intcode
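# Run the Intcode text adventure: feed scripted commands, echo its ASCII output, and fall back to interactive input when it asks for more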
def run(commands=None):
memory = utils.get_input()[0]
initial_inputs = intcode.commands_to_input(commands or [])
program = intcode.Program(memory, initial_inputs=initial_inputs, output_mode=intcode.OutputMode.BUFFER)
while True:
_, return_signal = program.run()
for output in program.yield_outputs():
try:
print(chr(output), end='')
except ValueError:
print(output)
if return_signal == intcode.ReturnSignal.AWAITING_INPUT:
# Run in interactive mode if more commands needed
program.add_inputs(*intcode.commands_to_input([input()]))
elif return_signal == intcode.ReturnSignal.RETURN_AND_HALT:
return
else:
raise Exception(f'Unexpected return signal {return_signal}')
@utils.part
def part_1():
commands = [
'south',
'take food ration',
'west',
'north',
'north',
'east',
'take astrolabe',
'west',
'south',
'south',
'east',
'north',
'east',
'south',
'take weather machine',
'west',
'take ornament',
'east',
'north',
'east',
'east',
'east',
'south',
]
run(commands=commands)
|
normal
|
{
"blob_id": "e3aa38b5d01823ed27bca65331e9c7315238750a",
"index": 8974,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\[email protected]\ndef part_1():\n commands = ['south', 'take food ration', 'west', 'north', 'north',\n 'east', 'take astrolabe', 'west', 'south', 'south', 'east', 'north',\n 'east', 'south', 'take weather machine', 'west', 'take ornament',\n 'east', 'north', 'east', 'east', 'east', 'south']\n run(commands=commands)\n",
"step-3": "<mask token>\n\n\ndef run(commands=None):\n memory = utils.get_input()[0]\n initial_inputs = intcode.commands_to_input(commands or [])\n program = intcode.Program(memory, initial_inputs=initial_inputs,\n output_mode=intcode.OutputMode.BUFFER)\n while True:\n _, return_signal = program.run()\n for output in program.yield_outputs():\n try:\n print(chr(output), end='')\n except ValueError:\n print(output)\n if return_signal == intcode.ReturnSignal.AWAITING_INPUT:\n program.add_inputs(*intcode.commands_to_input([input()]))\n elif return_signal == intcode.ReturnSignal.RETURN_AND_HALT:\n return\n else:\n raise Exception(f'Unexpected return signal {return_signal}')\n\n\[email protected]\ndef part_1():\n commands = ['south', 'take food ration', 'west', 'north', 'north',\n 'east', 'take astrolabe', 'west', 'south', 'south', 'east', 'north',\n 'east', 'south', 'take weather machine', 'west', 'take ornament',\n 'east', 'north', 'east', 'east', 'east', 'south']\n run(commands=commands)\n",
"step-4": "import utils\nfrom problems_2019 import intcode\n\n\ndef run(commands=None):\n memory = utils.get_input()[0]\n initial_inputs = intcode.commands_to_input(commands or [])\n program = intcode.Program(memory, initial_inputs=initial_inputs,\n output_mode=intcode.OutputMode.BUFFER)\n while True:\n _, return_signal = program.run()\n for output in program.yield_outputs():\n try:\n print(chr(output), end='')\n except ValueError:\n print(output)\n if return_signal == intcode.ReturnSignal.AWAITING_INPUT:\n program.add_inputs(*intcode.commands_to_input([input()]))\n elif return_signal == intcode.ReturnSignal.RETURN_AND_HALT:\n return\n else:\n raise Exception(f'Unexpected return signal {return_signal}')\n\n\[email protected]\ndef part_1():\n commands = ['south', 'take food ration', 'west', 'north', 'north',\n 'east', 'take astrolabe', 'west', 'south', 'south', 'east', 'north',\n 'east', 'south', 'take weather machine', 'west', 'take ornament',\n 'east', 'north', 'east', 'east', 'east', 'south']\n run(commands=commands)\n",
"step-5": "import utils\n\nfrom problems_2019 import intcode\n\n\ndef run(commands=None):\n memory = utils.get_input()[0]\n initial_inputs = intcode.commands_to_input(commands or [])\n program = intcode.Program(memory, initial_inputs=initial_inputs, output_mode=intcode.OutputMode.BUFFER)\n\n while True:\n _, return_signal = program.run()\n for output in program.yield_outputs():\n try:\n print(chr(output), end='')\n except ValueError:\n print(output)\n\n if return_signal == intcode.ReturnSignal.AWAITING_INPUT:\n # Run in interactive mode if more commands needed\n program.add_inputs(*intcode.commands_to_input([input()]))\n elif return_signal == intcode.ReturnSignal.RETURN_AND_HALT:\n return\n else:\n raise Exception(f'Unexpected return signal {return_signal}')\n\n\[email protected]\ndef part_1():\n commands = [\n 'south',\n 'take food ration',\n 'west',\n 'north',\n 'north',\n 'east',\n 'take astrolabe',\n 'west',\n 'south',\n 'south',\n 'east',\n 'north',\n 'east',\n 'south',\n 'take weather machine',\n 'west',\n 'take ornament',\n 'east',\n 'north',\n 'east',\n 'east',\n 'east',\n 'south',\n ]\n run(commands=commands)\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
#!/usr/bin/env python3
import argparse
import boutvecma
import easyvvuq as uq
import chaospy
import os
import numpy as np
import time
from dask.distributed import Client
from dask_jobqueue import SLURMCluster
import matplotlib.pyplot as plt
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="EasyVVUQ applied to BOUT++")
parser.add_argument(
"--batch",
"-b",
help="Run on a batch (SLURM) system",
action="store_true",
default=False,
)
args = parser.parse_args()
campaign = uq.CampaignDask(name="Conduction.")
print(f"Running in {campaign.campaign_dir}")
encoder = boutvecma.BOUTEncoder(template_input="models/conduction/data/BOUT.inp")
decoder = boutvecma.BOUTDecoder(variables=["T"])
params = {
"conduction:chi": {"type": "float", "min": 0.0, "max": 1e3, "default": 1.0},
"T:scale": {"type": "float", "min": 0.0, "max": 1e3, "default": 1.0},
"T:gauss_width": {"type": "float", "min": 0.0, "max": 1e3, "default": 0.2},
"T:gauss_centre": {
"type": "float",
"min": 0.0,
"max": 2 * np.pi,
"default": np.pi,
},
}
campaign.add_app("1D_conduction", params=params, encoder=encoder, decoder=decoder)
vary = {
"conduction:chi": chaospy.Uniform(0.2, 4.0),
"T:scale": chaospy.Uniform(0.5, 1.5),
"T:gauss_width": chaospy.Uniform(0.01, 0.4),
"T:gauss_centre": chaospy.Uniform(0.0, 2 * np.pi),
}
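    # Polynomial chaos expansion of order 3 over the four uncertain parameters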
sampler = uq.sampling.PCESampler(vary=vary, polynomial_order=3)
campaign.set_sampler(sampler)
campaign.draw_samples()
run_dirs = campaign.populate_runs_dir()
print(f"Created run directories: {run_dirs}")
if args.batch:
# Example of use on Viking
cluster = SLURMCluster(
job_extra=[
"--job-name=VVUQ",
"--account=PHYS-YPIRSE-2019",
],
cores=1,
memory="1 GB",
processes=1,
walltime="00:10:00",
interface="ib0",
)
cluster.scale(16)
print(f"Job script:\n{cluster.job_script()}")
client = Client(cluster)
else:
client = Client(processes=True, threads_per_worker=1)
print(client)
time_start = time.time()
campaign.apply_for_each_run_dir(
uq.actions.ExecuteLocal(
os.path.abspath("build/models/conduction/conduction -q -q -q -d .")
),
client,
)
client.close()
time_end = time.time()
print(f"Finished, took {time_end - time_start}")
campaign.collate()
campaign.apply_analysis(uq.analysis.PCEAnalysis(sampler=sampler, qoi_cols=["T"]))
results = campaign.get_last_analysis()
state_filename = os.path.join(campaign.campaign_dir, "campaign_state.json")
campaign.save_state(state_filename)
plt.figure()
results.plot_moments(
"T", xlabel=r"$\rho$", filename=f"{campaign.campaign_dir}/moments.png"
)
plt.figure()
results.plot_sobols_first(
"T", xlabel=r"$\rho$", filename=f"{campaign.campaign_dir}/sobols_first.png"
)
|
normal
|
{
"blob_id": "416f4c6bbd2f2b9562ab2d1477df4ebc45070d8d",
"index": 5060,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(description='EasyVVUQ applied to BOUT++')\n parser.add_argument('--batch', '-b', help=\n 'Run on a batch (SLURM) system', action='store_true', default=False)\n args = parser.parse_args()\n campaign = uq.CampaignDask(name='Conduction.')\n print(f'Running in {campaign.campaign_dir}')\n encoder = boutvecma.BOUTEncoder(template_input=\n 'models/conduction/data/BOUT.inp')\n decoder = boutvecma.BOUTDecoder(variables=['T'])\n params = {'conduction:chi': {'type': 'float', 'min': 0.0, 'max': 1000.0,\n 'default': 1.0}, 'T:scale': {'type': 'float', 'min': 0.0, 'max': \n 1000.0, 'default': 1.0}, 'T:gauss_width': {'type': 'float', 'min': \n 0.0, 'max': 1000.0, 'default': 0.2}, 'T:gauss_centre': {'type':\n 'float', 'min': 0.0, 'max': 2 * np.pi, 'default': np.pi}}\n campaign.add_app('1D_conduction', params=params, encoder=encoder,\n decoder=decoder)\n vary = {'conduction:chi': chaospy.Uniform(0.2, 4.0), 'T:scale': chaospy\n .Uniform(0.5, 1.5), 'T:gauss_width': chaospy.Uniform(0.01, 0.4),\n 'T:gauss_centre': chaospy.Uniform(0.0, 2 * np.pi)}\n sampler = uq.sampling.PCESampler(vary=vary, polynomial_order=3)\n campaign.set_sampler(sampler)\n campaign.draw_samples()\n run_dirs = campaign.populate_runs_dir()\n print(f'Created run directories: {run_dirs}')\n if args.batch:\n cluster = SLURMCluster(job_extra=['--job-name=VVUQ',\n '--account=PHYS-YPIRSE-2019'], cores=1, memory='1 GB',\n processes=1, walltime='00:10:00', interface='ib0')\n cluster.scale(16)\n print(f'Job script:\\n{cluster.job_script()}')\n client = Client(cluster)\n else:\n client = Client(processes=True, threads_per_worker=1)\n print(client)\n time_start = time.time()\n campaign.apply_for_each_run_dir(uq.actions.ExecuteLocal(os.path.abspath\n ('build/models/conduction/conduction -q -q -q -d .')), client)\n client.close()\n time_end = time.time()\n print(f'Finished, took {time_end - time_start}')\n campaign.collate()\n campaign.apply_analysis(uq.analysis.PCEAnalysis(sampler=sampler,\n qoi_cols=['T']))\n results = campaign.get_last_analysis()\n state_filename = os.path.join(campaign.campaign_dir, 'campaign_state.json')\n campaign.save_state(state_filename)\n plt.figure()\n results.plot_moments('T', xlabel='$\\\\rho$', filename=\n f'{campaign.campaign_dir}/moments.png')\n plt.figure()\n results.plot_sobols_first('T', xlabel='$\\\\rho$', filename=\n f'{campaign.campaign_dir}/sobols_first.png')\n",
"step-3": "import argparse\nimport boutvecma\nimport easyvvuq as uq\nimport chaospy\nimport os\nimport numpy as np\nimport time\nfrom dask.distributed import Client\nfrom dask_jobqueue import SLURMCluster\nimport matplotlib.pyplot as plt\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(description='EasyVVUQ applied to BOUT++')\n parser.add_argument('--batch', '-b', help=\n 'Run on a batch (SLURM) system', action='store_true', default=False)\n args = parser.parse_args()\n campaign = uq.CampaignDask(name='Conduction.')\n print(f'Running in {campaign.campaign_dir}')\n encoder = boutvecma.BOUTEncoder(template_input=\n 'models/conduction/data/BOUT.inp')\n decoder = boutvecma.BOUTDecoder(variables=['T'])\n params = {'conduction:chi': {'type': 'float', 'min': 0.0, 'max': 1000.0,\n 'default': 1.0}, 'T:scale': {'type': 'float', 'min': 0.0, 'max': \n 1000.0, 'default': 1.0}, 'T:gauss_width': {'type': 'float', 'min': \n 0.0, 'max': 1000.0, 'default': 0.2}, 'T:gauss_centre': {'type':\n 'float', 'min': 0.0, 'max': 2 * np.pi, 'default': np.pi}}\n campaign.add_app('1D_conduction', params=params, encoder=encoder,\n decoder=decoder)\n vary = {'conduction:chi': chaospy.Uniform(0.2, 4.0), 'T:scale': chaospy\n .Uniform(0.5, 1.5), 'T:gauss_width': chaospy.Uniform(0.01, 0.4),\n 'T:gauss_centre': chaospy.Uniform(0.0, 2 * np.pi)}\n sampler = uq.sampling.PCESampler(vary=vary, polynomial_order=3)\n campaign.set_sampler(sampler)\n campaign.draw_samples()\n run_dirs = campaign.populate_runs_dir()\n print(f'Created run directories: {run_dirs}')\n if args.batch:\n cluster = SLURMCluster(job_extra=['--job-name=VVUQ',\n '--account=PHYS-YPIRSE-2019'], cores=1, memory='1 GB',\n processes=1, walltime='00:10:00', interface='ib0')\n cluster.scale(16)\n print(f'Job script:\\n{cluster.job_script()}')\n client = Client(cluster)\n else:\n client = Client(processes=True, threads_per_worker=1)\n print(client)\n time_start = time.time()\n campaign.apply_for_each_run_dir(uq.actions.ExecuteLocal(os.path.abspath\n ('build/models/conduction/conduction -q -q -q -d .')), client)\n client.close()\n time_end = time.time()\n print(f'Finished, took {time_end - time_start}')\n campaign.collate()\n campaign.apply_analysis(uq.analysis.PCEAnalysis(sampler=sampler,\n qoi_cols=['T']))\n results = campaign.get_last_analysis()\n state_filename = os.path.join(campaign.campaign_dir, 'campaign_state.json')\n campaign.save_state(state_filename)\n plt.figure()\n results.plot_moments('T', xlabel='$\\\\rho$', filename=\n f'{campaign.campaign_dir}/moments.png')\n plt.figure()\n results.plot_sobols_first('T', xlabel='$\\\\rho$', filename=\n f'{campaign.campaign_dir}/sobols_first.png')\n",
"step-4": "#!/usr/bin/env python3\n\nimport argparse\nimport boutvecma\nimport easyvvuq as uq\nimport chaospy\nimport os\nimport numpy as np\nimport time\nfrom dask.distributed import Client\nfrom dask_jobqueue import SLURMCluster\nimport matplotlib.pyplot as plt\n\n\nif __name__ == \"__main__\":\n\n parser = argparse.ArgumentParser(description=\"EasyVVUQ applied to BOUT++\")\n parser.add_argument(\n \"--batch\",\n \"-b\",\n help=\"Run on a batch (SLURM) system\",\n action=\"store_true\",\n default=False,\n )\n args = parser.parse_args()\n\n campaign = uq.CampaignDask(name=\"Conduction.\")\n print(f\"Running in {campaign.campaign_dir}\")\n encoder = boutvecma.BOUTEncoder(template_input=\"models/conduction/data/BOUT.inp\")\n decoder = boutvecma.BOUTDecoder(variables=[\"T\"])\n params = {\n \"conduction:chi\": {\"type\": \"float\", \"min\": 0.0, \"max\": 1e3, \"default\": 1.0},\n \"T:scale\": {\"type\": \"float\", \"min\": 0.0, \"max\": 1e3, \"default\": 1.0},\n \"T:gauss_width\": {\"type\": \"float\", \"min\": 0.0, \"max\": 1e3, \"default\": 0.2},\n \"T:gauss_centre\": {\n \"type\": \"float\",\n \"min\": 0.0,\n \"max\": 2 * np.pi,\n \"default\": np.pi,\n },\n }\n\n campaign.add_app(\"1D_conduction\", params=params, encoder=encoder, decoder=decoder)\n\n vary = {\n \"conduction:chi\": chaospy.Uniform(0.2, 4.0),\n \"T:scale\": chaospy.Uniform(0.5, 1.5),\n \"T:gauss_width\": chaospy.Uniform(0.01, 0.4),\n \"T:gauss_centre\": chaospy.Uniform(0.0, 2 * np.pi),\n }\n\n sampler = uq.sampling.PCESampler(vary=vary, polynomial_order=3)\n campaign.set_sampler(sampler)\n\n campaign.draw_samples()\n\n run_dirs = campaign.populate_runs_dir()\n\n print(f\"Created run directories: {run_dirs}\")\n\n if args.batch:\n # Example of use on Viking\n cluster = SLURMCluster(\n job_extra=[\n \"--job-name=VVUQ\",\n \"--account=PHYS-YPIRSE-2019\",\n ],\n cores=1,\n memory=\"1 GB\",\n processes=1,\n walltime=\"00:10:00\",\n interface=\"ib0\",\n )\n cluster.scale(16)\n print(f\"Job script:\\n{cluster.job_script()}\")\n client = Client(cluster)\n else:\n client = Client(processes=True, threads_per_worker=1)\n\n print(client)\n\n time_start = time.time()\n campaign.apply_for_each_run_dir(\n uq.actions.ExecuteLocal(\n os.path.abspath(\"build/models/conduction/conduction -q -q -q -d .\")\n ),\n client,\n )\n client.close()\n\n time_end = time.time()\n\n print(f\"Finished, took {time_end - time_start}\")\n\n campaign.collate()\n\n campaign.apply_analysis(uq.analysis.PCEAnalysis(sampler=sampler, qoi_cols=[\"T\"]))\n\n results = campaign.get_last_analysis()\n\n state_filename = os.path.join(campaign.campaign_dir, \"campaign_state.json\")\n campaign.save_state(state_filename)\n\n plt.figure()\n results.plot_moments(\n \"T\", xlabel=r\"$\\rho$\", filename=f\"{campaign.campaign_dir}/moments.png\"\n )\n plt.figure()\n results.plot_sobols_first(\n \"T\", xlabel=r\"$\\rho$\", filename=f\"{campaign.campaign_dir}/sobols_first.png\"\n )\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
import math
import numpy as np
import h5py
import matplotlib.pyplot as plt
import scipy
from PIL import Image
from scipy import ndimage
import tensorflow as tf
from tensorflow.python.framework import ops
from pathlib import Path
from scipy.io import loadmat
from skimage.transform import resize
from sklearn.model_selection import train_test_split
import cv2
import keras
from keras.models import Model
from keras.optimizers import RMSprop
from keras.layers import Input, Dense, Activation, ZeroPadding2D, BatchNormalization, Flatten, Conv2D
from keras.layers import AveragePooling2D, MaxPooling2D, Dropout, GlobalMaxPooling2D, GlobalAveragePooling2D
from keras.layers.normalization import BatchNormalization
from keras.models import Model,Sequential
from keras.callbacks import ModelCheckpoint
from keras.optimizers import Adadelta, RMSprop,SGD,Adam
from keras import regularizers
from keras import backend as K
from keras.utils import to_categorical
#Load data
X_train = np.load('X_train.npy')
Y_train = np.load('Y_train.npy')
X_test = np.load('X_test.npy')
Y_test = np.load('Y_test.npy')
y_test_orig = np.load('y_test_orig.npy')
y_train_orig = np.load('y_train_orig.npy')
print('X_train shape:' +str(X_train.shape))
print('Y_train shape:' +str(Y_train.shape))
print('X_test shape:' + str(X_test.shape))
print('Y_test shape:' +str(Y_test.shape))
print('y_train_orig shape:' + str(y_train_orig.shape))
print('y_test_orig shape:' + str(y_test_orig.shape))
batch_size = 32
epochs = 200
inChannel = 1
x, y = 128, 128
input_shape = (x, y, inChannel)
num_classes = 7
def model(input_shape):
# Define the input placeholder as a tensor with shape input_shape. Think of this as your input image!
# kernel_regularizer=regularizers.l2(0.01)
X_input = Input(input_shape)
# CONV1 -> BN -> RELU Block applied to X
X = Conv2D(8, (4, 4), strides = (1, 1), name = 'conv0',kernel_regularizer=regularizers.l2(0.001),padding="same")(X_input)
X = BatchNormalization(axis = 3, name = 'bn0')(X)
X = Activation('relu')(X)
#X = Dropout(0.5)(X)
# MAXPOOL1
X = MaxPooling2D((2, 2), name='max_pool')(X)
# CONV2 -> BN -> RELU Block applied to X
X = Conv2D(16, (2, 2), strides=(1, 1), name='conv1',kernel_regularizer=regularizers.l2(0.001),padding="same")(X)
X = BatchNormalization(axis=3, name='bn1')(X)
X = Activation('relu')(X)
#X = Dropout(0.5)(X)
# MAXPOOL2
X = MaxPooling2D((2, 2), name='max_pool2')(X)
# CONV3 -> BN -> RELU Block applied to X
X = Conv2D(32, (1, 1), strides=(1, 1), name='conv2',kernel_regularizer=regularizers.l2(0.001),padding="same")(X)
X = BatchNormalization(axis=3, name='bn2')(X)
X = Activation('relu')(X)
#X = Dropout(0.5)(X)
# MAXPOOL3
X = MaxPooling2D((2, 2), name='max_pool3')(X)
# FLATTEN X (means convert it to a vector) + FULLYCONNECTED
X = Flatten()(X)
X = Dense(7, activation='softmax', name='fc')(X)
# Create model. This creates your Keras model instance, you'll use this instance to train/test the model.
model = Model(inputs = X_input, outputs = X, name='CNN')
return model
CNN_model = model(input_shape)
CNN_model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])
CNN_model.summary()
Train = CNN_model.fit(X_train,Y_train,epochs=epochs,batch_size=batch_size,validation_data=(X_test, Y_test))
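# Plot training vs. validation accuracy over the epochs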
plt.plot(Train.history['accuracy'])
plt.plot(Train.history['val_accuracy'])
plt.title('model accuracy')
plt.ylabel('accuracy')
plt.xlabel('epoch')
plt.legend(['train', 'test'], loc='upper left')
plt.show()
test_eval = CNN_model.evaluate(X_test,Y_test)
print('Test loss:', test_eval[0])
print('Test accuracy:', test_eval[1])
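# Convert softmax outputs to class indices and separate correctly from wrongly classified test samples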
predicted_classes = CNN_model.predict(X_test)
predicted_classes = np.argmax(np.round(predicted_classes),axis=1)
correct = [i for i,item in enumerate(predicted_classes) if item == y_test_orig[i]]
wrong = [i for i,item in enumerate(predicted_classes) if item != y_test_orig[i]]
print(predicted_classes)
print(y_test_orig)
print(correct)
print(wrong)
# Per-class accuracy on the training set, using predictions on X_train
train_predicted_classes = np.argmax(np.round(CNN_model.predict(X_train)), axis=1)
accuracy = {}
for i in range(7):
    total = np.sum(y_train_orig == i)
    correct = np.array([train_predicted_classes == y_train_orig]) & np.array([train_predicted_classes == i])
    correct_count = np.sum(correct)
    accuracy[i] = correct_count / total
    print(total)
    print(correct_count)
# Per-class accuracy on the test set
accuracy = {}
for i in range(7):
    total = np.sum(y_test_orig == i)
    correct = np.array([predicted_classes == y_test_orig]) & np.array([predicted_classes == i])
    correct_count = np.sum(correct)
    accuracy[i] = correct_count / total
    print(total)
    print(correct_count)
print('C0 accuracy = '+ str(accuracy[0]))
print('C1 accuracy = '+ str(accuracy[1]))
print('C2 accuracy = '+ str(accuracy[2]))
print('C3 accuracy = '+ str(accuracy[3]))
print('C4 accuracy = '+ str(accuracy[4]))
print('C5 accuracy = '+ str(accuracy[5]))
print('C6 accuracy = '+ str(accuracy[6]))
#img = correct[1]
#plt.imshow(X_test[img][:,:,0])
#plt.show()
for i in range(len(wrong)):
print(Y_test[wrong[i]], 'ground truth:' +str(y_test_orig[wrong[i]]), 'predict:' +str(predicted_classes[wrong[i]]))
plt.imshow(X_test[wrong[i]][:,:,0])
plt.colorbar()
plt.show()
|
normal
|
{
"blob_id": "5c315a49ead80e8d8ce057bd774f97bce098de59",
"index": 5443,
"step-1": "<mask token>\n\n\ndef model(input_shape):\n X_input = Input(input_shape)\n X = Conv2D(8, (4, 4), strides=(1, 1), name='conv0', kernel_regularizer=\n regularizers.l2(0.001), padding='same')(X_input)\n X = BatchNormalization(axis=3, name='bn0')(X)\n X = Activation('relu')(X)\n X = MaxPooling2D((2, 2), name='max_pool')(X)\n X = Conv2D(16, (2, 2), strides=(1, 1), name='conv1', kernel_regularizer\n =regularizers.l2(0.001), padding='same')(X)\n X = BatchNormalization(axis=3, name='bn1')(X)\n X = Activation('relu')(X)\n X = MaxPooling2D((2, 2), name='max_pool2')(X)\n X = Conv2D(32, (1, 1), strides=(1, 1), name='conv2', kernel_regularizer\n =regularizers.l2(0.001), padding='same')(X)\n X = BatchNormalization(axis=3, name='bn2')(X)\n X = Activation('relu')(X)\n X = MaxPooling2D((2, 2), name='max_pool3')(X)\n X = Flatten()(X)\n X = Dense(7, activation='softmax', name='fc')(X)\n model = Model(inputs=X_input, outputs=X, name='CNN')\n return model\n\n\n<mask token>\n",
"step-2": "<mask token>\nprint('X_train shape:' + str(X_train.shape))\nprint('Y_train shape:' + str(Y_train.shape))\nprint('X_test shape:' + str(X_test.shape))\nprint('Y_test shape:' + str(Y_test.shape))\nprint('y_train_orig shape:' + str(y_train_orig.shape))\nprint('y_test_orig shape:' + str(y_test_orig.shape))\n<mask token>\n\n\ndef model(input_shape):\n X_input = Input(input_shape)\n X = Conv2D(8, (4, 4), strides=(1, 1), name='conv0', kernel_regularizer=\n regularizers.l2(0.001), padding='same')(X_input)\n X = BatchNormalization(axis=3, name='bn0')(X)\n X = Activation('relu')(X)\n X = MaxPooling2D((2, 2), name='max_pool')(X)\n X = Conv2D(16, (2, 2), strides=(1, 1), name='conv1', kernel_regularizer\n =regularizers.l2(0.001), padding='same')(X)\n X = BatchNormalization(axis=3, name='bn1')(X)\n X = Activation('relu')(X)\n X = MaxPooling2D((2, 2), name='max_pool2')(X)\n X = Conv2D(32, (1, 1), strides=(1, 1), name='conv2', kernel_regularizer\n =regularizers.l2(0.001), padding='same')(X)\n X = BatchNormalization(axis=3, name='bn2')(X)\n X = Activation('relu')(X)\n X = MaxPooling2D((2, 2), name='max_pool3')(X)\n X = Flatten()(X)\n X = Dense(7, activation='softmax', name='fc')(X)\n model = Model(inputs=X_input, outputs=X, name='CNN')\n return model\n\n\n<mask token>\nCNN_model.compile(optimizer='adam', loss='categorical_crossentropy',\n metrics=['accuracy'])\nCNN_model.summary()\n<mask token>\nplt.plot(Train.history['accuracy'])\nplt.plot(Train.history['val_accuracy'])\nplt.title('model accuracy')\nplt.ylabel('accuracy')\nplt.xlabel('epoch')\nplt.legend(['train', 'test'], loc='upper left')\nplt.show()\n<mask token>\nprint('Test loss:', test_eval[0])\nprint('Test accuracy:', test_eval[1])\n<mask token>\nprint(predicted_classes)\nprint(y_test_orig)\nprint(correct)\nprint(wrong)\n<mask token>\nfor i in range(7):\n all = np.sum(y_train_orig == i)\n correct = np.array([predicted_classes == y_train_orig]) & np.array([\n predicted_classes == i])\n correct_count = np.sum(correct)\n accuracy[i] = correct_count / all\n print(all)\n print(correct_count)\n<mask token>\nfor i in range(7):\n all = np.sum(y_test_orig == i)\n correct = np.array([predicted_classes == y_test_orig]) & np.array([\n predicted_classes == i])\n correct_count = np.sum(correct)\n accuracy[i] = correct_count / all\n print(all)\n print(correct_count)\nprint('C0 accuracy = ' + str(accuracy[0]))\nprint('C1 accuracy = ' + str(accuracy[1]))\nprint('C2 accuracy = ' + str(accuracy[2]))\nprint('C3 accuracy = ' + str(accuracy[3]))\nprint('C4 accuracy = ' + str(accuracy[4]))\nprint('C5 accuracy = ' + str(accuracy[5]))\nprint('C6 accuracy = ' + str(accuracy[6]))\nfor i in range(len(wrong)):\n print(Y_test[wrong[i]], 'ground truth:' + str(y_test_orig[wrong[i]]), \n 'predict:' + str(predicted_classes[wrong[i]]))\n plt.imshow(X_test[wrong[i]][:, :, 0])\n plt.colorbar()\n plt.show()\n",
"step-3": "<mask token>\nX_train = np.load('X_train.npy')\nY_train = np.load('Y_train.npy')\nX_test = np.load('X_test.npy')\nY_test = np.load('Y_test.npy')\ny_test_orig = np.load('y_test_orig.npy')\ny_train_orig = np.load('y_train_orig.npy')\nprint('X_train shape:' + str(X_train.shape))\nprint('Y_train shape:' + str(Y_train.shape))\nprint('X_test shape:' + str(X_test.shape))\nprint('Y_test shape:' + str(Y_test.shape))\nprint('y_train_orig shape:' + str(y_train_orig.shape))\nprint('y_test_orig shape:' + str(y_test_orig.shape))\nbatch_size = 32\nepochs = 200\ninChannel = 1\nx, y = 128, 128\ninput_shape = x, y, inChannel\nnum_classes = 7\n\n\ndef model(input_shape):\n X_input = Input(input_shape)\n X = Conv2D(8, (4, 4), strides=(1, 1), name='conv0', kernel_regularizer=\n regularizers.l2(0.001), padding='same')(X_input)\n X = BatchNormalization(axis=3, name='bn0')(X)\n X = Activation('relu')(X)\n X = MaxPooling2D((2, 2), name='max_pool')(X)\n X = Conv2D(16, (2, 2), strides=(1, 1), name='conv1', kernel_regularizer\n =regularizers.l2(0.001), padding='same')(X)\n X = BatchNormalization(axis=3, name='bn1')(X)\n X = Activation('relu')(X)\n X = MaxPooling2D((2, 2), name='max_pool2')(X)\n X = Conv2D(32, (1, 1), strides=(1, 1), name='conv2', kernel_regularizer\n =regularizers.l2(0.001), padding='same')(X)\n X = BatchNormalization(axis=3, name='bn2')(X)\n X = Activation('relu')(X)\n X = MaxPooling2D((2, 2), name='max_pool3')(X)\n X = Flatten()(X)\n X = Dense(7, activation='softmax', name='fc')(X)\n model = Model(inputs=X_input, outputs=X, name='CNN')\n return model\n\n\nCNN_model = model(input_shape)\nCNN_model.compile(optimizer='adam', loss='categorical_crossentropy',\n metrics=['accuracy'])\nCNN_model.summary()\nTrain = CNN_model.fit(X_train, Y_train, epochs=epochs, batch_size=\n batch_size, validation_data=(X_test, Y_test))\nplt.plot(Train.history['accuracy'])\nplt.plot(Train.history['val_accuracy'])\nplt.title('model accuracy')\nplt.ylabel('accuracy')\nplt.xlabel('epoch')\nplt.legend(['train', 'test'], loc='upper left')\nplt.show()\ntest_eval = CNN_model.evaluate(X_test, Y_test)\nprint('Test loss:', test_eval[0])\nprint('Test accuracy:', test_eval[1])\npredicted_classes = CNN_model.predict(X_test)\npredicted_classes = np.argmax(np.round(predicted_classes), axis=1)\ncorrect = [i for i, item in enumerate(predicted_classes) if item ==\n y_test_orig[i]]\nwrong = [i for i, item in enumerate(predicted_classes) if item !=\n y_test_orig[i]]\nprint(predicted_classes)\nprint(y_test_orig)\nprint(correct)\nprint(wrong)\naccuracy = {}\nfor i in range(7):\n all = np.sum(y_train_orig == i)\n correct = np.array([predicted_classes == y_train_orig]) & np.array([\n predicted_classes == i])\n correct_count = np.sum(correct)\n accuracy[i] = correct_count / all\n print(all)\n print(correct_count)\naccuracy = {}\nfor i in range(7):\n all = np.sum(y_test_orig == i)\n correct = np.array([predicted_classes == y_test_orig]) & np.array([\n predicted_classes == i])\n correct_count = np.sum(correct)\n accuracy[i] = correct_count / all\n print(all)\n print(correct_count)\nprint('C0 accuracy = ' + str(accuracy[0]))\nprint('C1 accuracy = ' + str(accuracy[1]))\nprint('C2 accuracy = ' + str(accuracy[2]))\nprint('C3 accuracy = ' + str(accuracy[3]))\nprint('C4 accuracy = ' + str(accuracy[4]))\nprint('C5 accuracy = ' + str(accuracy[5]))\nprint('C6 accuracy = ' + str(accuracy[6]))\nfor i in range(len(wrong)):\n print(Y_test[wrong[i]], 'ground truth:' + str(y_test_orig[wrong[i]]), \n 'predict:' + str(predicted_classes[wrong[i]]))\n 
plt.imshow(X_test[wrong[i]][:, :, 0])\n plt.colorbar()\n plt.show()\n",
"step-4": "import math\nimport numpy as np\nimport h5py\nimport matplotlib.pyplot as plt\nimport scipy\nfrom PIL import Image\nfrom scipy import ndimage\nimport tensorflow as tf\nfrom tensorflow.python.framework import ops\nfrom pathlib import Path\nfrom scipy.io import loadmat\nfrom skimage.transform import resize\nfrom sklearn.model_selection import train_test_split\nimport cv2\nimport keras\nfrom keras.models import Model\nfrom keras.optimizers import RMSprop\nfrom keras.layers import Input, Dense, Activation, ZeroPadding2D, BatchNormalization, Flatten, Conv2D\nfrom keras.layers import AveragePooling2D, MaxPooling2D, Dropout, GlobalMaxPooling2D, GlobalAveragePooling2D\nfrom keras.layers.normalization import BatchNormalization\nfrom keras.models import Model, Sequential\nfrom keras.callbacks import ModelCheckpoint\nfrom keras.optimizers import Adadelta, RMSprop, SGD, Adam\nfrom keras import regularizers\nfrom keras import backend as K\nfrom keras.utils import to_categorical\nX_train = np.load('X_train.npy')\nY_train = np.load('Y_train.npy')\nX_test = np.load('X_test.npy')\nY_test = np.load('Y_test.npy')\ny_test_orig = np.load('y_test_orig.npy')\ny_train_orig = np.load('y_train_orig.npy')\nprint('X_train shape:' + str(X_train.shape))\nprint('Y_train shape:' + str(Y_train.shape))\nprint('X_test shape:' + str(X_test.shape))\nprint('Y_test shape:' + str(Y_test.shape))\nprint('y_train_orig shape:' + str(y_train_orig.shape))\nprint('y_test_orig shape:' + str(y_test_orig.shape))\nbatch_size = 32\nepochs = 200\ninChannel = 1\nx, y = 128, 128\ninput_shape = x, y, inChannel\nnum_classes = 7\n\n\ndef model(input_shape):\n X_input = Input(input_shape)\n X = Conv2D(8, (4, 4), strides=(1, 1), name='conv0', kernel_regularizer=\n regularizers.l2(0.001), padding='same')(X_input)\n X = BatchNormalization(axis=3, name='bn0')(X)\n X = Activation('relu')(X)\n X = MaxPooling2D((2, 2), name='max_pool')(X)\n X = Conv2D(16, (2, 2), strides=(1, 1), name='conv1', kernel_regularizer\n =regularizers.l2(0.001), padding='same')(X)\n X = BatchNormalization(axis=3, name='bn1')(X)\n X = Activation('relu')(X)\n X = MaxPooling2D((2, 2), name='max_pool2')(X)\n X = Conv2D(32, (1, 1), strides=(1, 1), name='conv2', kernel_regularizer\n =regularizers.l2(0.001), padding='same')(X)\n X = BatchNormalization(axis=3, name='bn2')(X)\n X = Activation('relu')(X)\n X = MaxPooling2D((2, 2), name='max_pool3')(X)\n X = Flatten()(X)\n X = Dense(7, activation='softmax', name='fc')(X)\n model = Model(inputs=X_input, outputs=X, name='CNN')\n return model\n\n\nCNN_model = model(input_shape)\nCNN_model.compile(optimizer='adam', loss='categorical_crossentropy',\n metrics=['accuracy'])\nCNN_model.summary()\nTrain = CNN_model.fit(X_train, Y_train, epochs=epochs, batch_size=\n batch_size, validation_data=(X_test, Y_test))\nplt.plot(Train.history['accuracy'])\nplt.plot(Train.history['val_accuracy'])\nplt.title('model accuracy')\nplt.ylabel('accuracy')\nplt.xlabel('epoch')\nplt.legend(['train', 'test'], loc='upper left')\nplt.show()\ntest_eval = CNN_model.evaluate(X_test, Y_test)\nprint('Test loss:', test_eval[0])\nprint('Test accuracy:', test_eval[1])\npredicted_classes = CNN_model.predict(X_test)\npredicted_classes = np.argmax(np.round(predicted_classes), axis=1)\ncorrect = [i for i, item in enumerate(predicted_classes) if item ==\n y_test_orig[i]]\nwrong = [i for i, item in enumerate(predicted_classes) if item !=\n y_test_orig[i]]\nprint(predicted_classes)\nprint(y_test_orig)\nprint(correct)\nprint(wrong)\naccuracy = {}\nfor i in range(7):\n all = 
np.sum(y_train_orig == i)\n correct = np.array([predicted_classes == y_train_orig]) & np.array([\n predicted_classes == i])\n correct_count = np.sum(correct)\n accuracy[i] = correct_count / all\n print(all)\n print(correct_count)\naccuracy = {}\nfor i in range(7):\n all = np.sum(y_test_orig == i)\n correct = np.array([predicted_classes == y_test_orig]) & np.array([\n predicted_classes == i])\n correct_count = np.sum(correct)\n accuracy[i] = correct_count / all\n print(all)\n print(correct_count)\nprint('C0 accuracy = ' + str(accuracy[0]))\nprint('C1 accuracy = ' + str(accuracy[1]))\nprint('C2 accuracy = ' + str(accuracy[2]))\nprint('C3 accuracy = ' + str(accuracy[3]))\nprint('C4 accuracy = ' + str(accuracy[4]))\nprint('C5 accuracy = ' + str(accuracy[5]))\nprint('C6 accuracy = ' + str(accuracy[6]))\nfor i in range(len(wrong)):\n print(Y_test[wrong[i]], 'ground truth:' + str(y_test_orig[wrong[i]]), \n 'predict:' + str(predicted_classes[wrong[i]]))\n plt.imshow(X_test[wrong[i]][:, :, 0])\n plt.colorbar()\n plt.show()\n",
"step-5": "import math\r\nimport numpy as np\r\nimport h5py\r\nimport matplotlib.pyplot as plt\r\nimport scipy\r\nfrom PIL import Image\r\nfrom scipy import ndimage\r\nimport tensorflow as tf\r\nfrom tensorflow.python.framework import ops\r\nfrom pathlib import Path\r\nfrom scipy.io import loadmat\r\nfrom skimage.transform import resize\r\nfrom sklearn.model_selection import train_test_split\r\nimport cv2\r\nimport keras\r\nfrom keras.models import Model\r\nfrom keras.optimizers import RMSprop\r\nfrom keras.layers import Input, Dense, Activation, ZeroPadding2D, BatchNormalization, Flatten, Conv2D\r\nfrom keras.layers import AveragePooling2D, MaxPooling2D, Dropout, GlobalMaxPooling2D, GlobalAveragePooling2D\r\nfrom keras.layers.normalization import BatchNormalization\r\nfrom keras.models import Model,Sequential\r\nfrom keras.callbacks import ModelCheckpoint\r\nfrom keras.optimizers import Adadelta, RMSprop,SGD,Adam\r\nfrom keras import regularizers\r\nfrom keras import backend as K\r\nfrom keras.utils import to_categorical\r\n\r\n\r\n#Load data\r\n\r\nX_train = np.load('X_train.npy')\r\nY_train = np.load('Y_train.npy')\r\nX_test = np.load('X_test.npy')\r\nY_test = np.load('Y_test.npy')\r\n\r\ny_test_orig = np.load('y_test_orig.npy')\r\ny_train_orig = np.load('y_train_orig.npy')\r\n\r\nprint('X_train shape:' +str(X_train.shape))\r\nprint('Y_train shape:' +str(Y_train.shape))\r\nprint('X_test shape:' + str(X_test.shape))\r\nprint('Y_test shape:' +str(Y_test.shape))\r\nprint('y_train_orig shape:' + str(y_train_orig.shape))\r\nprint('y_test_orig shape:' + str(y_test_orig.shape))\r\n\r\n\r\nbatch_size = 32\r\nepochs = 200\r\ninChannel = 1\r\nx, y = 128, 128\r\ninput_shape = (x, y, inChannel)\r\nnum_classes = 7\r\n\r\ndef model(input_shape):\r\n # Define the input placeholder as a tensor with shape input_shape. Think of this as your input image!\r\n # kernel_regularizer=regularizers.l2(0.01)\r\n X_input = Input(input_shape)\r\n\r\n # CONV1 -> BN -> RELU Block applied to X\r\n X = Conv2D(8, (4, 4), strides = (1, 1), name = 'conv0',kernel_regularizer=regularizers.l2(0.001),padding=\"same\")(X_input)\r\n X = BatchNormalization(axis = 3, name = 'bn0')(X)\r\n X = Activation('relu')(X)\r\n #X = Dropout(0.5)(X)\r\n\r\n # MAXPOOL1\r\n X = MaxPooling2D((2, 2), name='max_pool')(X)\r\n\r\n # CONV2 -> BN -> RELU Block applied to X\r\n X = Conv2D(16, (2, 2), strides=(1, 1), name='conv1',kernel_regularizer=regularizers.l2(0.001),padding=\"same\")(X)\r\n X = BatchNormalization(axis=3, name='bn1')(X)\r\n X = Activation('relu')(X)\r\n #X = Dropout(0.5)(X)\r\n\r\n # MAXPOOL2\r\n X = MaxPooling2D((2, 2), name='max_pool2')(X)\r\n\r\n # CONV3 -> BN -> RELU Block applied to X\r\n X = Conv2D(32, (1, 1), strides=(1, 1), name='conv2',kernel_regularizer=regularizers.l2(0.001),padding=\"same\")(X)\r\n X = BatchNormalization(axis=3, name='bn2')(X)\r\n X = Activation('relu')(X)\r\n #X = Dropout(0.5)(X)\r\n\r\n # MAXPOOL3\r\n X = MaxPooling2D((2, 2), name='max_pool3')(X)\r\n\r\n # FLATTEN X (means convert it to a vector) + FULLYCONNECTED\r\n X = Flatten()(X)\r\n X = Dense(7, activation='softmax', name='fc')(X)\r\n\r\n # Create model. 
This creates your Keras model instance, you'll use this instance to train/test the model.\r\n model = Model(inputs = X_input, outputs = X, name='CNN')\r\n\r\n return model\r\n\r\nCNN_model = model(input_shape)\r\nCNN_model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])\r\nCNN_model.summary()\r\nTrain = CNN_model.fit(X_train,Y_train,epochs=epochs,batch_size=batch_size,validation_data=(X_test, Y_test))\r\n\r\nplt.plot(Train.history['accuracy'])\r\nplt.plot(Train.history['val_accuracy'])\r\nplt.title('model accuracy')\r\nplt.ylabel('accuracy')\r\nplt.xlabel('epoch')\r\nplt.legend(['train', 'test'], loc='upper left')\r\nplt.show()\r\n\r\ntest_eval = CNN_model.evaluate(X_test,Y_test)\r\nprint('Test loss:', test_eval[0])\r\nprint('Test accuracy:', test_eval[1])\r\n\r\n\r\npredicted_classes = CNN_model.predict(X_test)\r\npredicted_classes = np.argmax(np.round(predicted_classes),axis=1)\r\n\r\n\r\n\r\n\r\n\r\ncorrect = [i for i,item in enumerate(predicted_classes) if item == y_test_orig[i]]\r\nwrong = [i for i,item in enumerate(predicted_classes) if item != y_test_orig[i]]\r\n\r\nprint(predicted_classes)\r\nprint(y_test_orig)\r\nprint(correct)\r\nprint(wrong)\r\n\r\n\r\n\r\naccuracy={}\r\nfor i in range(7):\r\n all = np.sum(y_train_orig == i)\r\n correct = np.array([predicted_classes == y_train_orig]) & np.array([predicted_classes == i])\r\n correct_count = np.sum(correct)\r\n accuracy[i] = correct_count/all\r\n print(all)\r\n print(correct_count)\r\n\r\n\r\naccuracy={}\r\nfor i in range(7):\r\n all = np.sum(y_test_orig == i)\r\n correct = np.array([predicted_classes == y_test_orig]) & np.array([predicted_classes == i])\r\n correct_count = np.sum(correct)\r\n accuracy[i] = correct_count/all\r\n print(all)\r\n print(correct_count)\r\n\r\nprint('C0 accuracy = '+ str(accuracy[0]))\r\nprint('C1 accuracy = '+ str(accuracy[1]))\r\nprint('C2 accuracy = '+ str(accuracy[2]))\r\nprint('C3 accuracy = '+ str(accuracy[3]))\r\nprint('C4 accuracy = '+ str(accuracy[4]))\r\nprint('C5 accuracy = '+ str(accuracy[5]))\r\nprint('C6 accuracy = '+ str(accuracy[6]))\r\n\r\n#img = correct[1]\r\n#plt.imshow(X_test[img][:,:,0])\r\n#plt.show()\r\n\r\nfor i in range(len(wrong)):\r\n print(Y_test[wrong[i]], 'ground truth:' +str(y_test_orig[wrong[i]]), 'predict:' +str(predicted_classes[wrong[i]]))\r\n plt.imshow(X_test[wrong[i]][:,:,0])\r\n plt.colorbar()\r\n plt.show()\r\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
import sys
import os.path
root_dir = os.path.dirname(os.path.dirname(__file__))
jsondb_dir = os.path.join(root_dir, 'jsondb')
sys.path.append(jsondb_dir)
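# With the jsondb directory on sys.path, modules that live inside it can be
# imported by bare name, e.g. `import db` (a hypothetical module name used
# purely for illustration).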
|
normal
|
{
"blob_id": "eeb588a162fa222c0f70eb832a0026d0d8adbe9b",
"index": 6769,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nsys.path.append(jsondb_dir)\n",
"step-3": "<mask token>\nroot_dir = os.path.dirname(os.path.dirname(__file__))\njsondb_dir = os.path.join(root_dir, 'jsondb')\nsys.path.append(jsondb_dir)\n",
"step-4": "import sys\nimport os.path\nroot_dir = os.path.dirname(os.path.dirname(__file__))\njsondb_dir = os.path.join(root_dir, 'jsondb')\nsys.path.append(jsondb_dir)\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
from .ros_publisher import *
|
normal
|
{
"blob_id": "6e7cca4f766ca89d2e2f82a73f22742b0e8f92a8",
"index": 5870,
"step-1": "<mask token>\n",
"step-2": "from .ros_publisher import *\n",
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0,
1
]
}
|
[
0,
1
] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
One cycle policy based on Leslie Smith's paper (https://arxiv.org/pdf/1803.09820.pdf)
Created on Wed Mar 31 13:53:39 2021
"""
import logging
import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt
logging.getLogger('tensorflow').setLevel(logging.ERROR)
from tensorflow.keras.callbacks import Callback
class CosineAnnealer:
def __init__(self, start, end, steps):
self.start = start
self.end = end
self.steps = steps
self.n = 0
def step(self):
self.n += 1
cos = np.cos(np.pi * (self.n / self.steps)) + 1
return self.end + (self.start - self.end) / 2. * cos
class OneCycleScheduler(Callback):
""" `Callback` that schedules the learning rate on a 1cycle policy as per Leslie Smith's paper(https://arxiv.org/pdf/1803.09820.pdf).
If the model supports a momentum parameter, it will also be adapted by the schedule.
The implementation adopts additional improvements as per the fastai library: https://docs.fast.ai/callbacks.one_cycle.html, where
only two phases are used and the adaptation is done using cosine annealing.
In phase 1 the LR increases from `lr_max / div_factor` to `lr_max` and momentum decreases from `mom_max` to `mom_min`.
In the second phase the LR decreases from `lr_max` to `lr_max / (div_factor * 1e4)` and momemtum from `mom_max` to `mom_min`.
By default the phases are not of equal length, with the phase 1 percentage controlled by the parameter `phase_1_pct`.
"""
def __init__(self, lr_max, steps, mom_min=0.85, mom_max=0.95, phase_1_pct=0.3, div_factor=25.):
super(OneCycleScheduler, self).__init__()
lr_min = lr_max / div_factor
final_lr = lr_max / (div_factor * 1e4)
phase_1_steps = steps * phase_1_pct
phase_2_steps = steps - phase_1_steps
self.phase_1_steps = phase_1_steps
self.phase_2_steps = phase_2_steps
self.phase = 0
self.step = 0
self.phases = [[CosineAnnealer(lr_min, lr_max, phase_1_steps), CosineAnnealer(mom_max, mom_min, phase_1_steps)],
[CosineAnnealer(lr_max, final_lr, phase_2_steps), CosineAnnealer(mom_min, mom_max, phase_2_steps)]]
self.lrs = []
self.moms = []
def on_train_begin(self, logs=None):
self.phase = 0
self.step = 0
self.set_lr(self.lr_schedule().start)
self.set_momentum(self.mom_schedule().start)
def on_train_batch_begin(self, batch, logs=None):
self.lrs.append(self.get_lr())
self.moms.append(self.get_momentum())
def on_train_batch_end(self, batch, logs=None):
self.step += 1
if self.step >= self.phase_1_steps:
self.phase = 1
self.set_lr(self.lr_schedule().step())
self.set_momentum(self.mom_schedule().step())
def get_lr(self):
try:
return tf.keras.backend.get_value(self.model.optimizer.lr)
except AttributeError:
return None
def get_momentum(self):
try:
return tf.keras.backend.get_value(self.model.optimizer.momentum)
except AttributeError:
return None
def set_lr(self, lr):
try:
tf.keras.backend.set_value(self.model.optimizer.lr, lr)
except AttributeError:
pass # ignore
def set_momentum(self, mom):
try:
tf.keras.backend.set_value(self.model.optimizer.momentum, mom)
except AttributeError:
pass # ignore
def lr_schedule(self):
return self.phases[self.phase][0]
def mom_schedule(self):
return self.phases[self.phase][1]
def plot(self):
ax = plt.subplot(1, 2, 1)
ax.plot(self.lrs)
ax.set_title('Learning Rate')
ax = plt.subplot(1, 2, 2)
ax.plot(self.moms)
ax.set_title('Momentum')
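

# ------------------------------------------------------------------------------
# Usage sketch (not part of the original callback code). A minimal, hedged
# example of wiring the scheduler into `model.fit`; the tiny model, the random
# data and every hyper-parameter value below are assumptions made purely for
# illustration. `steps` should equal the total number of training batches
# (epochs * steps_per_epoch) so the schedule spans the whole run, and an
# optimizer with a `momentum` attribute (e.g. SGD) lets the callback adapt
# momentum as well as the learning rate.
if __name__ == "__main__":
    x_train = np.random.randn(256, 10).astype("float32")
    y_train = np.random.randint(0, 2, size=(256, 1)).astype("float32")

    demo_model = tf.keras.Sequential([
        tf.keras.layers.Dense(16, activation="relu", input_shape=(10,)),
        tf.keras.layers.Dense(1, activation="sigmoid"),
    ])
    demo_model.compile(optimizer=tf.keras.optimizers.SGD(momentum=0.9),
                       loss="binary_crossentropy")

    epochs, batch_size = 3, 32
    steps = int(np.ceil(len(x_train) / batch_size)) * epochs
    one_cycle = OneCycleScheduler(lr_max=1e-2, steps=steps)

    demo_model.fit(x_train, y_train, epochs=epochs, batch_size=batch_size,
                   callbacks=[one_cycle])
    one_cycle.plot()  # left subplot: LR schedule, right subplot: momentum schedule
    plt.show()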
|
normal
|
{
"blob_id": "056235f8f65a3d6a310ee8a8742c1369b5398f28",
"index": 7749,
"step-1": "<mask token>\n\n\nclass OneCycleScheduler(Callback):\n <mask token>\n\n def __init__(self, lr_max, steps, mom_min=0.85, mom_max=0.95,\n phase_1_pct=0.3, div_factor=25.0):\n super(OneCycleScheduler, self).__init__()\n lr_min = lr_max / div_factor\n final_lr = lr_max / (div_factor * 10000.0)\n phase_1_steps = steps * phase_1_pct\n phase_2_steps = steps - phase_1_steps\n self.phase_1_steps = phase_1_steps\n self.phase_2_steps = phase_2_steps\n self.phase = 0\n self.step = 0\n self.phases = [[CosineAnnealer(lr_min, lr_max, phase_1_steps),\n CosineAnnealer(mom_max, mom_min, phase_1_steps)], [\n CosineAnnealer(lr_max, final_lr, phase_2_steps), CosineAnnealer\n (mom_min, mom_max, phase_2_steps)]]\n self.lrs = []\n self.moms = []\n <mask token>\n <mask token>\n <mask token>\n\n def get_lr(self):\n try:\n return tf.keras.backend.get_value(self.model.optimizer.lr)\n except AttributeError:\n return None\n\n def get_momentum(self):\n try:\n return tf.keras.backend.get_value(self.model.optimizer.momentum)\n except AttributeError:\n return None\n <mask token>\n <mask token>\n\n def lr_schedule(self):\n return self.phases[self.phase][0]\n\n def mom_schedule(self):\n return self.phases[self.phase][1]\n\n def plot(self):\n ax = plt.subplot(1, 2, 1)\n ax.plot(self.lrs)\n ax.set_title('Learning Rate')\n ax = plt.subplot(1, 2, 2)\n ax.plot(self.moms)\n ax.set_title('Momentum')\n",
"step-2": "<mask token>\n\n\nclass OneCycleScheduler(Callback):\n <mask token>\n\n def __init__(self, lr_max, steps, mom_min=0.85, mom_max=0.95,\n phase_1_pct=0.3, div_factor=25.0):\n super(OneCycleScheduler, self).__init__()\n lr_min = lr_max / div_factor\n final_lr = lr_max / (div_factor * 10000.0)\n phase_1_steps = steps * phase_1_pct\n phase_2_steps = steps - phase_1_steps\n self.phase_1_steps = phase_1_steps\n self.phase_2_steps = phase_2_steps\n self.phase = 0\n self.step = 0\n self.phases = [[CosineAnnealer(lr_min, lr_max, phase_1_steps),\n CosineAnnealer(mom_max, mom_min, phase_1_steps)], [\n CosineAnnealer(lr_max, final_lr, phase_2_steps), CosineAnnealer\n (mom_min, mom_max, phase_2_steps)]]\n self.lrs = []\n self.moms = []\n\n def on_train_begin(self, logs=None):\n self.phase = 0\n self.step = 0\n self.set_lr(self.lr_schedule().start)\n self.set_momentum(self.mom_schedule().start)\n\n def on_train_batch_begin(self, batch, logs=None):\n self.lrs.append(self.get_lr())\n self.moms.append(self.get_momentum())\n <mask token>\n\n def get_lr(self):\n try:\n return tf.keras.backend.get_value(self.model.optimizer.lr)\n except AttributeError:\n return None\n\n def get_momentum(self):\n try:\n return tf.keras.backend.get_value(self.model.optimizer.momentum)\n except AttributeError:\n return None\n\n def set_lr(self, lr):\n try:\n tf.keras.backend.set_value(self.model.optimizer.lr, lr)\n except AttributeError:\n pass\n\n def set_momentum(self, mom):\n try:\n tf.keras.backend.set_value(self.model.optimizer.momentum, mom)\n except AttributeError:\n pass\n\n def lr_schedule(self):\n return self.phases[self.phase][0]\n\n def mom_schedule(self):\n return self.phases[self.phase][1]\n\n def plot(self):\n ax = plt.subplot(1, 2, 1)\n ax.plot(self.lrs)\n ax.set_title('Learning Rate')\n ax = plt.subplot(1, 2, 2)\n ax.plot(self.moms)\n ax.set_title('Momentum')\n",
"step-3": "<mask token>\n\n\nclass CosineAnnealer:\n\n def __init__(self, start, end, steps):\n self.start = start\n self.end = end\n self.steps = steps\n self.n = 0\n <mask token>\n\n\nclass OneCycleScheduler(Callback):\n \"\"\" `Callback` that schedules the learning rate on a 1cycle policy as per Leslie Smith's paper(https://arxiv.org/pdf/1803.09820.pdf).\n If the model supports a momentum parameter, it will also be adapted by the schedule.\n The implementation adopts additional improvements as per the fastai library: https://docs.fast.ai/callbacks.one_cycle.html, where\n only two phases are used and the adaptation is done using cosine annealing.\n In phase 1 the LR increases from `lr_max / div_factor` to `lr_max` and momentum decreases from `mom_max` to `mom_min`.\n In the second phase the LR decreases from `lr_max` to `lr_max / (div_factor * 1e4)` and momemtum from `mom_max` to `mom_min`.\n By default the phases are not of equal length, with the phase 1 percentage controlled by the parameter `phase_1_pct`.\n \"\"\"\n\n def __init__(self, lr_max, steps, mom_min=0.85, mom_max=0.95,\n phase_1_pct=0.3, div_factor=25.0):\n super(OneCycleScheduler, self).__init__()\n lr_min = lr_max / div_factor\n final_lr = lr_max / (div_factor * 10000.0)\n phase_1_steps = steps * phase_1_pct\n phase_2_steps = steps - phase_1_steps\n self.phase_1_steps = phase_1_steps\n self.phase_2_steps = phase_2_steps\n self.phase = 0\n self.step = 0\n self.phases = [[CosineAnnealer(lr_min, lr_max, phase_1_steps),\n CosineAnnealer(mom_max, mom_min, phase_1_steps)], [\n CosineAnnealer(lr_max, final_lr, phase_2_steps), CosineAnnealer\n (mom_min, mom_max, phase_2_steps)]]\n self.lrs = []\n self.moms = []\n\n def on_train_begin(self, logs=None):\n self.phase = 0\n self.step = 0\n self.set_lr(self.lr_schedule().start)\n self.set_momentum(self.mom_schedule().start)\n\n def on_train_batch_begin(self, batch, logs=None):\n self.lrs.append(self.get_lr())\n self.moms.append(self.get_momentum())\n\n def on_train_batch_end(self, batch, logs=None):\n self.step += 1\n if self.step >= self.phase_1_steps:\n self.phase = 1\n self.set_lr(self.lr_schedule().step())\n self.set_momentum(self.mom_schedule().step())\n\n def get_lr(self):\n try:\n return tf.keras.backend.get_value(self.model.optimizer.lr)\n except AttributeError:\n return None\n\n def get_momentum(self):\n try:\n return tf.keras.backend.get_value(self.model.optimizer.momentum)\n except AttributeError:\n return None\n\n def set_lr(self, lr):\n try:\n tf.keras.backend.set_value(self.model.optimizer.lr, lr)\n except AttributeError:\n pass\n\n def set_momentum(self, mom):\n try:\n tf.keras.backend.set_value(self.model.optimizer.momentum, mom)\n except AttributeError:\n pass\n\n def lr_schedule(self):\n return self.phases[self.phase][0]\n\n def mom_schedule(self):\n return self.phases[self.phase][1]\n\n def plot(self):\n ax = plt.subplot(1, 2, 1)\n ax.plot(self.lrs)\n ax.set_title('Learning Rate')\n ax = plt.subplot(1, 2, 2)\n ax.plot(self.moms)\n ax.set_title('Momentum')\n",
"step-4": "<mask token>\n\n\nclass CosineAnnealer:\n\n def __init__(self, start, end, steps):\n self.start = start\n self.end = end\n self.steps = steps\n self.n = 0\n\n def step(self):\n self.n += 1\n cos = np.cos(np.pi * (self.n / self.steps)) + 1\n return self.end + (self.start - self.end) / 2.0 * cos\n\n\nclass OneCycleScheduler(Callback):\n \"\"\" `Callback` that schedules the learning rate on a 1cycle policy as per Leslie Smith's paper(https://arxiv.org/pdf/1803.09820.pdf).\n If the model supports a momentum parameter, it will also be adapted by the schedule.\n The implementation adopts additional improvements as per the fastai library: https://docs.fast.ai/callbacks.one_cycle.html, where\n only two phases are used and the adaptation is done using cosine annealing.\n In phase 1 the LR increases from `lr_max / div_factor` to `lr_max` and momentum decreases from `mom_max` to `mom_min`.\n In the second phase the LR decreases from `lr_max` to `lr_max / (div_factor * 1e4)` and momemtum from `mom_max` to `mom_min`.\n By default the phases are not of equal length, with the phase 1 percentage controlled by the parameter `phase_1_pct`.\n \"\"\"\n\n def __init__(self, lr_max, steps, mom_min=0.85, mom_max=0.95,\n phase_1_pct=0.3, div_factor=25.0):\n super(OneCycleScheduler, self).__init__()\n lr_min = lr_max / div_factor\n final_lr = lr_max / (div_factor * 10000.0)\n phase_1_steps = steps * phase_1_pct\n phase_2_steps = steps - phase_1_steps\n self.phase_1_steps = phase_1_steps\n self.phase_2_steps = phase_2_steps\n self.phase = 0\n self.step = 0\n self.phases = [[CosineAnnealer(lr_min, lr_max, phase_1_steps),\n CosineAnnealer(mom_max, mom_min, phase_1_steps)], [\n CosineAnnealer(lr_max, final_lr, phase_2_steps), CosineAnnealer\n (mom_min, mom_max, phase_2_steps)]]\n self.lrs = []\n self.moms = []\n\n def on_train_begin(self, logs=None):\n self.phase = 0\n self.step = 0\n self.set_lr(self.lr_schedule().start)\n self.set_momentum(self.mom_schedule().start)\n\n def on_train_batch_begin(self, batch, logs=None):\n self.lrs.append(self.get_lr())\n self.moms.append(self.get_momentum())\n\n def on_train_batch_end(self, batch, logs=None):\n self.step += 1\n if self.step >= self.phase_1_steps:\n self.phase = 1\n self.set_lr(self.lr_schedule().step())\n self.set_momentum(self.mom_schedule().step())\n\n def get_lr(self):\n try:\n return tf.keras.backend.get_value(self.model.optimizer.lr)\n except AttributeError:\n return None\n\n def get_momentum(self):\n try:\n return tf.keras.backend.get_value(self.model.optimizer.momentum)\n except AttributeError:\n return None\n\n def set_lr(self, lr):\n try:\n tf.keras.backend.set_value(self.model.optimizer.lr, lr)\n except AttributeError:\n pass\n\n def set_momentum(self, mom):\n try:\n tf.keras.backend.set_value(self.model.optimizer.momentum, mom)\n except AttributeError:\n pass\n\n def lr_schedule(self):\n return self.phases[self.phase][0]\n\n def mom_schedule(self):\n return self.phases[self.phase][1]\n\n def plot(self):\n ax = plt.subplot(1, 2, 1)\n ax.plot(self.lrs)\n ax.set_title('Learning Rate')\n ax = plt.subplot(1, 2, 2)\n ax.plot(self.moms)\n ax.set_title('Momentum')\n",
"step-5": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nOne cycle policy based on Leslie Smith's paper(https://arxiv.org/pdf/1803.09820.pdf)\nCreated on Wed Mar 31 13:53:39 2021\n\n\"\"\"\nimport logging\nimport numpy as np\nimport tensorflow as tf\nimport matplotlib.pyplot as plt\n\nlogging.getLogger('tensorflow').setLevel(logging.ERROR)\n\nfrom tensorflow.keras.callbacks import Callback\n\nclass CosineAnnealer:\n \n def __init__(self, start, end, steps):\n self.start = start\n self.end = end\n self.steps = steps\n self.n = 0\n \n def step(self):\n self.n += 1\n cos = np.cos(np.pi * (self.n / self.steps)) + 1\n return self.end + (self.start - self.end) / 2. * cos\n\n\nclass OneCycleScheduler(Callback):\n \"\"\" `Callback` that schedules the learning rate on a 1cycle policy as per Leslie Smith's paper(https://arxiv.org/pdf/1803.09820.pdf).\n If the model supports a momentum parameter, it will also be adapted by the schedule.\n The implementation adopts additional improvements as per the fastai library: https://docs.fast.ai/callbacks.one_cycle.html, where\n only two phases are used and the adaptation is done using cosine annealing.\n In phase 1 the LR increases from `lr_max / div_factor` to `lr_max` and momentum decreases from `mom_max` to `mom_min`.\n In the second phase the LR decreases from `lr_max` to `lr_max / (div_factor * 1e4)` and momemtum from `mom_max` to `mom_min`.\n By default the phases are not of equal length, with the phase 1 percentage controlled by the parameter `phase_1_pct`.\n \"\"\"\n\n def __init__(self, lr_max, steps, mom_min=0.85, mom_max=0.95, phase_1_pct=0.3, div_factor=25.):\n super(OneCycleScheduler, self).__init__()\n lr_min = lr_max / div_factor\n final_lr = lr_max / (div_factor * 1e4)\n phase_1_steps = steps * phase_1_pct\n phase_2_steps = steps - phase_1_steps\n \n self.phase_1_steps = phase_1_steps\n self.phase_2_steps = phase_2_steps\n self.phase = 0\n self.step = 0\n \n self.phases = [[CosineAnnealer(lr_min, lr_max, phase_1_steps), CosineAnnealer(mom_max, mom_min, phase_1_steps)], \n [CosineAnnealer(lr_max, final_lr, phase_2_steps), CosineAnnealer(mom_min, mom_max, phase_2_steps)]]\n \n self.lrs = []\n self.moms = []\n\n def on_train_begin(self, logs=None):\n self.phase = 0\n self.step = 0\n\n self.set_lr(self.lr_schedule().start)\n self.set_momentum(self.mom_schedule().start)\n \n def on_train_batch_begin(self, batch, logs=None):\n self.lrs.append(self.get_lr())\n self.moms.append(self.get_momentum())\n\n def on_train_batch_end(self, batch, logs=None):\n self.step += 1\n if self.step >= self.phase_1_steps:\n self.phase = 1\n \n self.set_lr(self.lr_schedule().step())\n self.set_momentum(self.mom_schedule().step())\n \n def get_lr(self):\n try:\n return tf.keras.backend.get_value(self.model.optimizer.lr)\n except AttributeError:\n return None\n \n def get_momentum(self):\n try:\n return tf.keras.backend.get_value(self.model.optimizer.momentum)\n except AttributeError:\n return None\n \n def set_lr(self, lr):\n try:\n tf.keras.backend.set_value(self.model.optimizer.lr, lr)\n except AttributeError:\n pass # ignore\n \n def set_momentum(self, mom):\n try:\n tf.keras.backend.set_value(self.model.optimizer.momentum, mom)\n except AttributeError:\n pass # ignore\n\n def lr_schedule(self):\n return self.phases[self.phase][0]\n \n def mom_schedule(self):\n return self.phases[self.phase][1]\n \n def plot(self):\n ax = plt.subplot(1, 2, 1)\n ax.plot(self.lrs)\n ax.set_title('Learning Rate')\n ax = plt.subplot(1, 2, 2)\n ax.plot(self.moms)\n 
ax.set_title('Momentum')\n\n",
"step-ids": [
7,
11,
15,
16,
19
]
}
|
[
7,
11,
15,
16,
19
] |
from django.conf.urls import url
from . import views
from .views import ShopView, ShopListView
urlpatterns = [
url(r'^coffeeshops/(\d+)$', ShopView.as_view()),
url(r'^coffeeshops$', ShopListView.as_view()),
]
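# Illustrative request routing (paths shown relative to wherever this urlconf
# is included; the shop id "42" is a made-up example value):
#   GET /coffeeshops     -> ShopListView
#   GET /coffeeshops/42  -> ShopView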
|
normal
|
{
"blob_id": "54a705de2597140a72e47f5afe86614b619461b7",
"index": 1109,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nurlpatterns = [url('^coffeeshops/(\\\\d+)$', ShopView.as_view()), url(\n '^coffeeshops$', ShopListView.as_view())]\n",
"step-3": "from django.conf.urls import url\nfrom . import views\nfrom .views import ShopView, ShopListView\nurlpatterns = [url('^coffeeshops/(\\\\d+)$', ShopView.as_view()), url(\n '^coffeeshops$', ShopListView.as_view())]\n",
"step-4": "from django.conf.urls import url\n\nfrom . import views\nfrom .views import ShopView, ShopListView\n\nurlpatterns = [\n url(r'^coffeeshops/(\\d+)$', ShopView.as_view()),\n url(r'^coffeeshops$', ShopListView.as_view()),\n]\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
import numpy as np
import torch
def pad_sequences_1d(sequences, dtype=torch.long, device=torch.device("cpu"), fixed_length=None):
""" Pad a single-nested list or a sequence of n-d array (torch.tensor or np.ndarray)
    into an (n+1)-d array; only the first dim is allowed to have a variable length.
Args:
sequences: list(n-d tensor or list)
dtype: np.dtype or torch.dtype
device:
fixed_length: pad all seq in sequences to fixed length. All seq should have a length <= fixed_length.
return will be of shape [len(sequences), fixed_length, ...]
Returns:
padded_seqs: ((n+1)-d tensor) padded with zeros
mask: (2d tensor) of the same shape as the first two dims of padded_seqs,
1 indicate valid, 0 otherwise
Examples:
>>> test_data_list = [[1,2,3], [1,2], [3,4,7,9]]
>>> pad_sequences_1d(test_data_list, dtype=torch.long)
>>> test_data_3d = [torch.randn(2,3,4), torch.randn(4,3,4), torch.randn(1,3,4)]
>>> pad_sequences_1d(test_data_3d, dtype=torch.float)
>>> test_data_list = [[1,2,3], [1,2], [3,4,7,9]]
>>> pad_sequences_1d(test_data_list, dtype=np.float32)
>>> test_data_3d = [np.random.randn(2,3,4), np.random.randn(4,3,4), np.random.randn(1,3,4)]
>>> pad_sequences_1d(test_data_3d, dtype=np.float32)
"""
if isinstance(sequences[0], list):
if "torch" in str(dtype):
sequences = [torch.tensor(s, dtype=dtype, device=device) for s in sequences]
else:
sequences = [np.asarray(s, dtype=dtype) for s in sequences]
extra_dims = sequences[0].shape[1:] # the extra dims should be the same for all elements
lengths = [len(seq) for seq in sequences]
if fixed_length is not None:
max_length = fixed_length
else:
max_length = max(lengths)
if isinstance(sequences[0], torch.Tensor):
assert "torch" in str(dtype), "dtype and input type does not match"
padded_seqs = torch.zeros((len(sequences), max_length) + extra_dims, dtype=dtype, device=device)
mask = torch.zeros((len(sequences), max_length), dtype=torch.float32, device=device)
else: # np
assert "numpy" in str(dtype), "dtype and input type does not match"
padded_seqs = np.zeros((len(sequences), max_length) + extra_dims, dtype=dtype)
mask = np.zeros((len(sequences), max_length), dtype=np.float32)
for idx, seq in enumerate(sequences):
end = lengths[idx]
padded_seqs[idx, :end] = seq
mask[idx, :end] = 1
return padded_seqs, mask # , lengths
def pad_sequences_2d(sequences, dtype=torch.long):
""" Pad a double-nested list or a sequence of n-d torch tensor into a (n+1)-d tensor,
    only the first two dims are allowed to have variable lengths
Args:
sequences: list(n-d tensor or list)
dtype: torch.long for word indices / torch.float (float32) for other cases
Returns:
Examples:
>>> test_data_list = [[[1, 3, 5], [3, 7, 4, 1]], [[98, 34, 11, 89, 90], [22], [34, 56]],]
>>> pad_sequences_2d(test_data_list, dtype=torch.long) # torch.Size([2, 3, 5])
>>> test_data_3d = [torch.randn(2,2,4), torch.randn(4,3,4), torch.randn(1,5,4)]
>>> pad_sequences_2d(test_data_3d, dtype=torch.float) # torch.Size([2, 3, 5])
>>> test_data_3d2 = [[torch.randn(2,4), ], [torch.randn(3,4), torch.randn(5,4)]]
>>> pad_sequences_2d(test_data_3d2, dtype=torch.float) # torch.Size([2, 3, 5])
# TODO add support for numpy array
"""
bsz = len(sequences)
para_lengths = [len(seq) for seq in sequences]
max_para_len = max(para_lengths)
sen_lengths = [[len(word_seq) for word_seq in seq] for seq in sequences]
max_sen_len = max([max(e) for e in sen_lengths])
if isinstance(sequences[0], torch.Tensor):
extra_dims = sequences[0].shape[2:]
elif isinstance(sequences[0][0], torch.Tensor):
extra_dims = sequences[0][0].shape[1:]
else:
        sequences = [[torch.tensor(word_seq, dtype=dtype) for word_seq in seq] for seq in sequences]  # torch.tensor (not torch.Tensor) accepts a dtype argument
extra_dims = ()
padded_seqs = torch.zeros((bsz, max_para_len, max_sen_len) + extra_dims, dtype=dtype)
mask = torch.zeros(bsz, max_para_len, max_sen_len).float()
for b_i in range(bsz):
for sen_i, sen_l in enumerate(sen_lengths[b_i]):
padded_seqs[b_i, sen_i, :sen_l] = sequences[b_i][sen_i]
mask[b_i, sen_i, :sen_l] = 1
return padded_seqs, mask # , sen_lengths
def find_max_triples(st_prob, ed_prob, top_n=5, prob_thd=None, tensor_type="torch"):
""" Find a list of (k1, k2) where k1 < k2 with the maximum values of st_prob[k1] * ed_prob[k2]
Args:
st_prob (torch.Tensor or np.ndarray): (N, L) batched start_idx probabilities
ed_prob (torch.Tensor or np.ndarray): (N, L) batched end_idx probabilities
top_n (int): return topN pairs with highest values
prob_thd (float):
tensor_type: str, np or torch
Returns:
batched_sorted_triple: N * [(st_idx, ed_idx, confidence), ...]
"""
if tensor_type == "torch":
st_prob, ed_prob = st_prob.data.numpy(), ed_prob.data.numpy()
product = np.einsum("bm,bn->bmn", st_prob, ed_prob)
# (N, L, L) the lower part becomes zeros, start_idx < ed_idx
upper_product = np.triu(product, k=1)
return find_max_triples_from_upper_triangle_product(upper_product, top_n=top_n, prob_thd=prob_thd)
def find_max_triples_from_upper_triangle_product(upper_product, top_n=5, prob_thd=None):
""" Find a list of (k1, k2) where k1 < k2 with the maximum values of p1[k1] * p2[k2]
Args:
upper_product (torch.Tensor or np.ndarray): (N, L, L), the lower part becomes zeros, end_idx > start_idx
top_n (int): return topN pairs with highest values
prob_thd (float or None):
Returns:
batched_sorted_triple: N * [(st_idx, ed_idx, confidence), ...]
"""
batched_sorted_triple = []
for idx, e in enumerate(upper_product):
sorted_triple = top_n_array_2d(e, top_n=top_n)
if prob_thd is not None:
            sorted_triple = sorted_triple[sorted_triple[:, 2] >= prob_thd]  # filter rows by the confidence column
batched_sorted_triple.append(sorted_triple)
return batched_sorted_triple
def top_n_array_2d(array_2d, top_n):
""" Get topN indices and values of a 2d array, return a tuple of indices and their values,
ranked by the value
"""
row_indices, column_indices = np.unravel_index(np.argsort(array_2d, axis=None), array_2d.shape)
row_indices = row_indices[::-1][:top_n]
column_indices = column_indices[::-1][:top_n]
sorted_values = array_2d[row_indices, column_indices]
return np.stack([row_indices, column_indices, sorted_values], axis=1) # (N, 3)
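

# ------------------------------------------------------------------------------
# Usage sketch (not part of the original utilities). A minimal, hedged
# demonstration of `find_max_triples` on toy start/end probabilities; every
# number below is made up purely for illustration.
if __name__ == "__main__":
    st = torch.tensor([[0.1, 0.6, 0.2, 0.1]])  # (N=1, L=4) start-index probabilities
    ed = torch.tensor([[0.1, 0.1, 0.3, 0.5]])  # (N=1, L=4) end-index probabilities
    triples = find_max_triples(st, ed, top_n=3)
    # Each row of triples[0] is (start_idx, end_idx, start_prob * end_prob),
    # ranked by the product and restricted to start_idx < end_idx.
    print(triples[0])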
|
normal
|
{
"blob_id": "788d9fa03c4311a8077d492b1a2b06d1f88826a3",
"index": 5570,
"step-1": "<mask token>\n\n\ndef pad_sequences_1d(sequences, dtype=torch.long, device=torch.device('cpu'\n ), fixed_length=None):\n \"\"\" Pad a single-nested list or a sequence of n-d array (torch.tensor or np.ndarray)\n into a (n+1)-d array, only allow the first dim has variable lengths.\n Args:\n sequences: list(n-d tensor or list)\n dtype: np.dtype or torch.dtype\n device:\n fixed_length: pad all seq in sequences to fixed length. All seq should have a length <= fixed_length.\n return will be of shape [len(sequences), fixed_length, ...]\n Returns:\n padded_seqs: ((n+1)-d tensor) padded with zeros\n mask: (2d tensor) of the same shape as the first two dims of padded_seqs,\n 1 indicate valid, 0 otherwise\n Examples:\n >>> test_data_list = [[1,2,3], [1,2], [3,4,7,9]]\n >>> pad_sequences_1d(test_data_list, dtype=torch.long)\n >>> test_data_3d = [torch.randn(2,3,4), torch.randn(4,3,4), torch.randn(1,3,4)]\n >>> pad_sequences_1d(test_data_3d, dtype=torch.float)\n >>> test_data_list = [[1,2,3], [1,2], [3,4,7,9]]\n >>> pad_sequences_1d(test_data_list, dtype=np.float32)\n >>> test_data_3d = [np.random.randn(2,3,4), np.random.randn(4,3,4), np.random.randn(1,3,4)]\n >>> pad_sequences_1d(test_data_3d, dtype=np.float32)\n \"\"\"\n if isinstance(sequences[0], list):\n if 'torch' in str(dtype):\n sequences = [torch.tensor(s, dtype=dtype, device=device) for s in\n sequences]\n else:\n sequences = [np.asarray(s, dtype=dtype) for s in sequences]\n extra_dims = sequences[0].shape[1:]\n lengths = [len(seq) for seq in sequences]\n if fixed_length is not None:\n max_length = fixed_length\n else:\n max_length = max(lengths)\n if isinstance(sequences[0], torch.Tensor):\n assert 'torch' in str(dtype), 'dtype and input type does not match'\n padded_seqs = torch.zeros((len(sequences), max_length) + extra_dims,\n dtype=dtype, device=device)\n mask = torch.zeros((len(sequences), max_length), dtype=torch.\n float32, device=device)\n else:\n assert 'numpy' in str(dtype), 'dtype and input type does not match'\n padded_seqs = np.zeros((len(sequences), max_length) + extra_dims,\n dtype=dtype)\n mask = np.zeros((len(sequences), max_length), dtype=np.float32)\n for idx, seq in enumerate(sequences):\n end = lengths[idx]\n padded_seqs[idx, :end] = seq\n mask[idx, :end] = 1\n return padded_seqs, mask\n\n\n<mask token>\n\n\ndef find_max_triples_from_upper_triangle_product(upper_product, top_n=5,\n prob_thd=None):\n \"\"\" Find a list of (k1, k2) where k1 < k2 with the maximum values of p1[k1] * p2[k2]\n Args:\n upper_product (torch.Tensor or np.ndarray): (N, L, L), the lower part becomes zeros, end_idx > start_idx\n top_n (int): return topN pairs with highest values\n prob_thd (float or None):\n Returns:\n batched_sorted_triple: N * [(st_idx, ed_idx, confidence), ...]\n \"\"\"\n batched_sorted_triple = []\n for idx, e in enumerate(upper_product):\n sorted_triple = top_n_array_2d(e, top_n=top_n)\n if prob_thd is not None:\n sorted_triple = sorted_triple[sorted_triple[2] >= prob_thd]\n batched_sorted_triple.append(sorted_triple)\n return batched_sorted_triple\n\n\ndef top_n_array_2d(array_2d, top_n):\n \"\"\" Get topN indices and values of a 2d array, return a tuple of indices and their values,\n ranked by the value\n \"\"\"\n row_indices, column_indices = np.unravel_index(np.argsort(array_2d,\n axis=None), array_2d.shape)\n row_indices = row_indices[::-1][:top_n]\n column_indices = column_indices[::-1][:top_n]\n sorted_values = array_2d[row_indices, column_indices]\n return np.stack([row_indices, column_indices, 
sorted_values], axis=1)\n",
"step-2": "<mask token>\n\n\ndef pad_sequences_1d(sequences, dtype=torch.long, device=torch.device('cpu'\n ), fixed_length=None):\n \"\"\" Pad a single-nested list or a sequence of n-d array (torch.tensor or np.ndarray)\n into a (n+1)-d array, only allow the first dim has variable lengths.\n Args:\n sequences: list(n-d tensor or list)\n dtype: np.dtype or torch.dtype\n device:\n fixed_length: pad all seq in sequences to fixed length. All seq should have a length <= fixed_length.\n return will be of shape [len(sequences), fixed_length, ...]\n Returns:\n padded_seqs: ((n+1)-d tensor) padded with zeros\n mask: (2d tensor) of the same shape as the first two dims of padded_seqs,\n 1 indicate valid, 0 otherwise\n Examples:\n >>> test_data_list = [[1,2,3], [1,2], [3,4,7,9]]\n >>> pad_sequences_1d(test_data_list, dtype=torch.long)\n >>> test_data_3d = [torch.randn(2,3,4), torch.randn(4,3,4), torch.randn(1,3,4)]\n >>> pad_sequences_1d(test_data_3d, dtype=torch.float)\n >>> test_data_list = [[1,2,3], [1,2], [3,4,7,9]]\n >>> pad_sequences_1d(test_data_list, dtype=np.float32)\n >>> test_data_3d = [np.random.randn(2,3,4), np.random.randn(4,3,4), np.random.randn(1,3,4)]\n >>> pad_sequences_1d(test_data_3d, dtype=np.float32)\n \"\"\"\n if isinstance(sequences[0], list):\n if 'torch' in str(dtype):\n sequences = [torch.tensor(s, dtype=dtype, device=device) for s in\n sequences]\n else:\n sequences = [np.asarray(s, dtype=dtype) for s in sequences]\n extra_dims = sequences[0].shape[1:]\n lengths = [len(seq) for seq in sequences]\n if fixed_length is not None:\n max_length = fixed_length\n else:\n max_length = max(lengths)\n if isinstance(sequences[0], torch.Tensor):\n assert 'torch' in str(dtype), 'dtype and input type does not match'\n padded_seqs = torch.zeros((len(sequences), max_length) + extra_dims,\n dtype=dtype, device=device)\n mask = torch.zeros((len(sequences), max_length), dtype=torch.\n float32, device=device)\n else:\n assert 'numpy' in str(dtype), 'dtype and input type does not match'\n padded_seqs = np.zeros((len(sequences), max_length) + extra_dims,\n dtype=dtype)\n mask = np.zeros((len(sequences), max_length), dtype=np.float32)\n for idx, seq in enumerate(sequences):\n end = lengths[idx]\n padded_seqs[idx, :end] = seq\n mask[idx, :end] = 1\n return padded_seqs, mask\n\n\ndef pad_sequences_2d(sequences, dtype=torch.long):\n \"\"\" Pad a double-nested list or a sequence of n-d torch tensor into a (n+1)-d tensor,\n only allow the first two dims has variable lengths\n Args:\n sequences: list(n-d tensor or list)\n dtype: torch.long for word indices / torch.float (float32) for other cases\n Returns:\n Examples:\n >>> test_data_list = [[[1, 3, 5], [3, 7, 4, 1]], [[98, 34, 11, 89, 90], [22], [34, 56]],]\n >>> pad_sequences_2d(test_data_list, dtype=torch.long) # torch.Size([2, 3, 5])\n >>> test_data_3d = [torch.randn(2,2,4), torch.randn(4,3,4), torch.randn(1,5,4)]\n >>> pad_sequences_2d(test_data_3d, dtype=torch.float) # torch.Size([2, 3, 5])\n >>> test_data_3d2 = [[torch.randn(2,4), ], [torch.randn(3,4), torch.randn(5,4)]]\n >>> pad_sequences_2d(test_data_3d2, dtype=torch.float) # torch.Size([2, 3, 5])\n # TODO add support for numpy array\n \"\"\"\n bsz = len(sequences)\n para_lengths = [len(seq) for seq in sequences]\n max_para_len = max(para_lengths)\n sen_lengths = [[len(word_seq) for word_seq in seq] for seq in sequences]\n max_sen_len = max([max(e) for e in sen_lengths])\n if isinstance(sequences[0], torch.Tensor):\n extra_dims = sequences[0].shape[2:]\n elif isinstance(sequences[0][0], 
torch.Tensor):\n extra_dims = sequences[0][0].shape[1:]\n else:\n sequences = [[torch.Tensor(word_seq, dtype=dtype) for word_seq in\n seq] for seq in sequences]\n extra_dims = ()\n padded_seqs = torch.zeros((bsz, max_para_len, max_sen_len) + extra_dims,\n dtype=dtype)\n mask = torch.zeros(bsz, max_para_len, max_sen_len).float()\n for b_i in range(bsz):\n for sen_i, sen_l in enumerate(sen_lengths[b_i]):\n padded_seqs[b_i, sen_i, :sen_l] = sequences[b_i][sen_i]\n mask[b_i, sen_i, :sen_l] = 1\n return padded_seqs, mask\n\n\n<mask token>\n\n\ndef find_max_triples_from_upper_triangle_product(upper_product, top_n=5,\n prob_thd=None):\n \"\"\" Find a list of (k1, k2) where k1 < k2 with the maximum values of p1[k1] * p2[k2]\n Args:\n upper_product (torch.Tensor or np.ndarray): (N, L, L), the lower part becomes zeros, end_idx > start_idx\n top_n (int): return topN pairs with highest values\n prob_thd (float or None):\n Returns:\n batched_sorted_triple: N * [(st_idx, ed_idx, confidence), ...]\n \"\"\"\n batched_sorted_triple = []\n for idx, e in enumerate(upper_product):\n sorted_triple = top_n_array_2d(e, top_n=top_n)\n if prob_thd is not None:\n sorted_triple = sorted_triple[sorted_triple[2] >= prob_thd]\n batched_sorted_triple.append(sorted_triple)\n return batched_sorted_triple\n\n\ndef top_n_array_2d(array_2d, top_n):\n \"\"\" Get topN indices and values of a 2d array, return a tuple of indices and their values,\n ranked by the value\n \"\"\"\n row_indices, column_indices = np.unravel_index(np.argsort(array_2d,\n axis=None), array_2d.shape)\n row_indices = row_indices[::-1][:top_n]\n column_indices = column_indices[::-1][:top_n]\n sorted_values = array_2d[row_indices, column_indices]\n return np.stack([row_indices, column_indices, sorted_values], axis=1)\n",
"step-3": "<mask token>\n\n\ndef pad_sequences_1d(sequences, dtype=torch.long, device=torch.device('cpu'\n ), fixed_length=None):\n \"\"\" Pad a single-nested list or a sequence of n-d array (torch.tensor or np.ndarray)\n into a (n+1)-d array, only allow the first dim has variable lengths.\n Args:\n sequences: list(n-d tensor or list)\n dtype: np.dtype or torch.dtype\n device:\n fixed_length: pad all seq in sequences to fixed length. All seq should have a length <= fixed_length.\n return will be of shape [len(sequences), fixed_length, ...]\n Returns:\n padded_seqs: ((n+1)-d tensor) padded with zeros\n mask: (2d tensor) of the same shape as the first two dims of padded_seqs,\n 1 indicate valid, 0 otherwise\n Examples:\n >>> test_data_list = [[1,2,3], [1,2], [3,4,7,9]]\n >>> pad_sequences_1d(test_data_list, dtype=torch.long)\n >>> test_data_3d = [torch.randn(2,3,4), torch.randn(4,3,4), torch.randn(1,3,4)]\n >>> pad_sequences_1d(test_data_3d, dtype=torch.float)\n >>> test_data_list = [[1,2,3], [1,2], [3,4,7,9]]\n >>> pad_sequences_1d(test_data_list, dtype=np.float32)\n >>> test_data_3d = [np.random.randn(2,3,4), np.random.randn(4,3,4), np.random.randn(1,3,4)]\n >>> pad_sequences_1d(test_data_3d, dtype=np.float32)\n \"\"\"\n if isinstance(sequences[0], list):\n if 'torch' in str(dtype):\n sequences = [torch.tensor(s, dtype=dtype, device=device) for s in\n sequences]\n else:\n sequences = [np.asarray(s, dtype=dtype) for s in sequences]\n extra_dims = sequences[0].shape[1:]\n lengths = [len(seq) for seq in sequences]\n if fixed_length is not None:\n max_length = fixed_length\n else:\n max_length = max(lengths)\n if isinstance(sequences[0], torch.Tensor):\n assert 'torch' in str(dtype), 'dtype and input type does not match'\n padded_seqs = torch.zeros((len(sequences), max_length) + extra_dims,\n dtype=dtype, device=device)\n mask = torch.zeros((len(sequences), max_length), dtype=torch.\n float32, device=device)\n else:\n assert 'numpy' in str(dtype), 'dtype and input type does not match'\n padded_seqs = np.zeros((len(sequences), max_length) + extra_dims,\n dtype=dtype)\n mask = np.zeros((len(sequences), max_length), dtype=np.float32)\n for idx, seq in enumerate(sequences):\n end = lengths[idx]\n padded_seqs[idx, :end] = seq\n mask[idx, :end] = 1\n return padded_seqs, mask\n\n\ndef pad_sequences_2d(sequences, dtype=torch.long):\n \"\"\" Pad a double-nested list or a sequence of n-d torch tensor into a (n+1)-d tensor,\n only allow the first two dims has variable lengths\n Args:\n sequences: list(n-d tensor or list)\n dtype: torch.long for word indices / torch.float (float32) for other cases\n Returns:\n Examples:\n >>> test_data_list = [[[1, 3, 5], [3, 7, 4, 1]], [[98, 34, 11, 89, 90], [22], [34, 56]],]\n >>> pad_sequences_2d(test_data_list, dtype=torch.long) # torch.Size([2, 3, 5])\n >>> test_data_3d = [torch.randn(2,2,4), torch.randn(4,3,4), torch.randn(1,5,4)]\n >>> pad_sequences_2d(test_data_3d, dtype=torch.float) # torch.Size([2, 3, 5])\n >>> test_data_3d2 = [[torch.randn(2,4), ], [torch.randn(3,4), torch.randn(5,4)]]\n >>> pad_sequences_2d(test_data_3d2, dtype=torch.float) # torch.Size([2, 3, 5])\n # TODO add support for numpy array\n \"\"\"\n bsz = len(sequences)\n para_lengths = [len(seq) for seq in sequences]\n max_para_len = max(para_lengths)\n sen_lengths = [[len(word_seq) for word_seq in seq] for seq in sequences]\n max_sen_len = max([max(e) for e in sen_lengths])\n if isinstance(sequences[0], torch.Tensor):\n extra_dims = sequences[0].shape[2:]\n elif isinstance(sequences[0][0], 
torch.Tensor):\n extra_dims = sequences[0][0].shape[1:]\n else:\n sequences = [[torch.Tensor(word_seq, dtype=dtype) for word_seq in\n seq] for seq in sequences]\n extra_dims = ()\n padded_seqs = torch.zeros((bsz, max_para_len, max_sen_len) + extra_dims,\n dtype=dtype)\n mask = torch.zeros(bsz, max_para_len, max_sen_len).float()\n for b_i in range(bsz):\n for sen_i, sen_l in enumerate(sen_lengths[b_i]):\n padded_seqs[b_i, sen_i, :sen_l] = sequences[b_i][sen_i]\n mask[b_i, sen_i, :sen_l] = 1\n return padded_seqs, mask\n\n\ndef find_max_triples(st_prob, ed_prob, top_n=5, prob_thd=None, tensor_type=\n 'torch'):\n \"\"\" Find a list of (k1, k2) where k1 < k2 with the maximum values of st_prob[k1] * ed_prob[k2]\n Args:\n st_prob (torch.Tensor or np.ndarray): (N, L) batched start_idx probabilities\n ed_prob (torch.Tensor or np.ndarray): (N, L) batched end_idx probabilities\n top_n (int): return topN pairs with highest values\n prob_thd (float):\n tensor_type: str, np or torch\n Returns:\n batched_sorted_triple: N * [(st_idx, ed_idx, confidence), ...]\n \"\"\"\n if tensor_type == 'torch':\n st_prob, ed_prob = st_prob.data.numpy(), ed_prob.data.numpy()\n product = np.einsum('bm,bn->bmn', st_prob, ed_prob)\n upper_product = np.triu(product, k=1)\n return find_max_triples_from_upper_triangle_product(upper_product,\n top_n=top_n, prob_thd=prob_thd)\n\n\ndef find_max_triples_from_upper_triangle_product(upper_product, top_n=5,\n prob_thd=None):\n \"\"\" Find a list of (k1, k2) where k1 < k2 with the maximum values of p1[k1] * p2[k2]\n Args:\n upper_product (torch.Tensor or np.ndarray): (N, L, L), the lower part becomes zeros, end_idx > start_idx\n top_n (int): return topN pairs with highest values\n prob_thd (float or None):\n Returns:\n batched_sorted_triple: N * [(st_idx, ed_idx, confidence), ...]\n \"\"\"\n batched_sorted_triple = []\n for idx, e in enumerate(upper_product):\n sorted_triple = top_n_array_2d(e, top_n=top_n)\n if prob_thd is not None:\n sorted_triple = sorted_triple[sorted_triple[2] >= prob_thd]\n batched_sorted_triple.append(sorted_triple)\n return batched_sorted_triple\n\n\ndef top_n_array_2d(array_2d, top_n):\n \"\"\" Get topN indices and values of a 2d array, return a tuple of indices and their values,\n ranked by the value\n \"\"\"\n row_indices, column_indices = np.unravel_index(np.argsort(array_2d,\n axis=None), array_2d.shape)\n row_indices = row_indices[::-1][:top_n]\n column_indices = column_indices[::-1][:top_n]\n sorted_values = array_2d[row_indices, column_indices]\n return np.stack([row_indices, column_indices, sorted_values], axis=1)\n",
"step-4": "import numpy as np\nimport torch\n\n\ndef pad_sequences_1d(sequences, dtype=torch.long, device=torch.device('cpu'\n ), fixed_length=None):\n \"\"\" Pad a single-nested list or a sequence of n-d array (torch.tensor or np.ndarray)\n into a (n+1)-d array, only allow the first dim has variable lengths.\n Args:\n sequences: list(n-d tensor or list)\n dtype: np.dtype or torch.dtype\n device:\n fixed_length: pad all seq in sequences to fixed length. All seq should have a length <= fixed_length.\n return will be of shape [len(sequences), fixed_length, ...]\n Returns:\n padded_seqs: ((n+1)-d tensor) padded with zeros\n mask: (2d tensor) of the same shape as the first two dims of padded_seqs,\n 1 indicate valid, 0 otherwise\n Examples:\n >>> test_data_list = [[1,2,3], [1,2], [3,4,7,9]]\n >>> pad_sequences_1d(test_data_list, dtype=torch.long)\n >>> test_data_3d = [torch.randn(2,3,4), torch.randn(4,3,4), torch.randn(1,3,4)]\n >>> pad_sequences_1d(test_data_3d, dtype=torch.float)\n >>> test_data_list = [[1,2,3], [1,2], [3,4,7,9]]\n >>> pad_sequences_1d(test_data_list, dtype=np.float32)\n >>> test_data_3d = [np.random.randn(2,3,4), np.random.randn(4,3,4), np.random.randn(1,3,4)]\n >>> pad_sequences_1d(test_data_3d, dtype=np.float32)\n \"\"\"\n if isinstance(sequences[0], list):\n if 'torch' in str(dtype):\n sequences = [torch.tensor(s, dtype=dtype, device=device) for s in\n sequences]\n else:\n sequences = [np.asarray(s, dtype=dtype) for s in sequences]\n extra_dims = sequences[0].shape[1:]\n lengths = [len(seq) for seq in sequences]\n if fixed_length is not None:\n max_length = fixed_length\n else:\n max_length = max(lengths)\n if isinstance(sequences[0], torch.Tensor):\n assert 'torch' in str(dtype), 'dtype and input type does not match'\n padded_seqs = torch.zeros((len(sequences), max_length) + extra_dims,\n dtype=dtype, device=device)\n mask = torch.zeros((len(sequences), max_length), dtype=torch.\n float32, device=device)\n else:\n assert 'numpy' in str(dtype), 'dtype and input type does not match'\n padded_seqs = np.zeros((len(sequences), max_length) + extra_dims,\n dtype=dtype)\n mask = np.zeros((len(sequences), max_length), dtype=np.float32)\n for idx, seq in enumerate(sequences):\n end = lengths[idx]\n padded_seqs[idx, :end] = seq\n mask[idx, :end] = 1\n return padded_seqs, mask\n\n\ndef pad_sequences_2d(sequences, dtype=torch.long):\n \"\"\" Pad a double-nested list or a sequence of n-d torch tensor into a (n+1)-d tensor,\n only allow the first two dims has variable lengths\n Args:\n sequences: list(n-d tensor or list)\n dtype: torch.long for word indices / torch.float (float32) for other cases\n Returns:\n Examples:\n >>> test_data_list = [[[1, 3, 5], [3, 7, 4, 1]], [[98, 34, 11, 89, 90], [22], [34, 56]],]\n >>> pad_sequences_2d(test_data_list, dtype=torch.long) # torch.Size([2, 3, 5])\n >>> test_data_3d = [torch.randn(2,2,4), torch.randn(4,3,4), torch.randn(1,5,4)]\n >>> pad_sequences_2d(test_data_3d, dtype=torch.float) # torch.Size([2, 3, 5])\n >>> test_data_3d2 = [[torch.randn(2,4), ], [torch.randn(3,4), torch.randn(5,4)]]\n >>> pad_sequences_2d(test_data_3d2, dtype=torch.float) # torch.Size([2, 3, 5])\n # TODO add support for numpy array\n \"\"\"\n bsz = len(sequences)\n para_lengths = [len(seq) for seq in sequences]\n max_para_len = max(para_lengths)\n sen_lengths = [[len(word_seq) for word_seq in seq] for seq in sequences]\n max_sen_len = max([max(e) for e in sen_lengths])\n if isinstance(sequences[0], torch.Tensor):\n extra_dims = sequences[0].shape[2:]\n elif 
isinstance(sequences[0][0], torch.Tensor):\n extra_dims = sequences[0][0].shape[1:]\n else:\n sequences = [[torch.Tensor(word_seq, dtype=dtype) for word_seq in\n seq] for seq in sequences]\n extra_dims = ()\n padded_seqs = torch.zeros((bsz, max_para_len, max_sen_len) + extra_dims,\n dtype=dtype)\n mask = torch.zeros(bsz, max_para_len, max_sen_len).float()\n for b_i in range(bsz):\n for sen_i, sen_l in enumerate(sen_lengths[b_i]):\n padded_seqs[b_i, sen_i, :sen_l] = sequences[b_i][sen_i]\n mask[b_i, sen_i, :sen_l] = 1\n return padded_seqs, mask\n\n\ndef find_max_triples(st_prob, ed_prob, top_n=5, prob_thd=None, tensor_type=\n 'torch'):\n \"\"\" Find a list of (k1, k2) where k1 < k2 with the maximum values of st_prob[k1] * ed_prob[k2]\n Args:\n st_prob (torch.Tensor or np.ndarray): (N, L) batched start_idx probabilities\n ed_prob (torch.Tensor or np.ndarray): (N, L) batched end_idx probabilities\n top_n (int): return topN pairs with highest values\n prob_thd (float):\n tensor_type: str, np or torch\n Returns:\n batched_sorted_triple: N * [(st_idx, ed_idx, confidence), ...]\n \"\"\"\n if tensor_type == 'torch':\n st_prob, ed_prob = st_prob.data.numpy(), ed_prob.data.numpy()\n product = np.einsum('bm,bn->bmn', st_prob, ed_prob)\n upper_product = np.triu(product, k=1)\n return find_max_triples_from_upper_triangle_product(upper_product,\n top_n=top_n, prob_thd=prob_thd)\n\n\ndef find_max_triples_from_upper_triangle_product(upper_product, top_n=5,\n prob_thd=None):\n \"\"\" Find a list of (k1, k2) where k1 < k2 with the maximum values of p1[k1] * p2[k2]\n Args:\n upper_product (torch.Tensor or np.ndarray): (N, L, L), the lower part becomes zeros, end_idx > start_idx\n top_n (int): return topN pairs with highest values\n prob_thd (float or None):\n Returns:\n batched_sorted_triple: N * [(st_idx, ed_idx, confidence), ...]\n \"\"\"\n batched_sorted_triple = []\n for idx, e in enumerate(upper_product):\n sorted_triple = top_n_array_2d(e, top_n=top_n)\n if prob_thd is not None:\n sorted_triple = sorted_triple[sorted_triple[2] >= prob_thd]\n batched_sorted_triple.append(sorted_triple)\n return batched_sorted_triple\n\n\ndef top_n_array_2d(array_2d, top_n):\n \"\"\" Get topN indices and values of a 2d array, return a tuple of indices and their values,\n ranked by the value\n \"\"\"\n row_indices, column_indices = np.unravel_index(np.argsort(array_2d,\n axis=None), array_2d.shape)\n row_indices = row_indices[::-1][:top_n]\n column_indices = column_indices[::-1][:top_n]\n sorted_values = array_2d[row_indices, column_indices]\n return np.stack([row_indices, column_indices, sorted_values], axis=1)\n",
"step-5": "import numpy as np\nimport torch\n\n\ndef pad_sequences_1d(sequences, dtype=torch.long, device=torch.device(\"cpu\"), fixed_length=None):\n \"\"\" Pad a single-nested list or a sequence of n-d array (torch.tensor or np.ndarray)\n into a (n+1)-d array, only allow the first dim has variable lengths.\n Args:\n sequences: list(n-d tensor or list)\n dtype: np.dtype or torch.dtype\n device:\n fixed_length: pad all seq in sequences to fixed length. All seq should have a length <= fixed_length.\n return will be of shape [len(sequences), fixed_length, ...]\n Returns:\n padded_seqs: ((n+1)-d tensor) padded with zeros\n mask: (2d tensor) of the same shape as the first two dims of padded_seqs,\n 1 indicate valid, 0 otherwise\n Examples:\n >>> test_data_list = [[1,2,3], [1,2], [3,4,7,9]]\n >>> pad_sequences_1d(test_data_list, dtype=torch.long)\n >>> test_data_3d = [torch.randn(2,3,4), torch.randn(4,3,4), torch.randn(1,3,4)]\n >>> pad_sequences_1d(test_data_3d, dtype=torch.float)\n >>> test_data_list = [[1,2,3], [1,2], [3,4,7,9]]\n >>> pad_sequences_1d(test_data_list, dtype=np.float32)\n >>> test_data_3d = [np.random.randn(2,3,4), np.random.randn(4,3,4), np.random.randn(1,3,4)]\n >>> pad_sequences_1d(test_data_3d, dtype=np.float32)\n \"\"\"\n if isinstance(sequences[0], list):\n if \"torch\" in str(dtype):\n sequences = [torch.tensor(s, dtype=dtype, device=device) for s in sequences]\n else:\n sequences = [np.asarray(s, dtype=dtype) for s in sequences]\n\n extra_dims = sequences[0].shape[1:] # the extra dims should be the same for all elements\n lengths = [len(seq) for seq in sequences]\n if fixed_length is not None:\n max_length = fixed_length\n else:\n max_length = max(lengths)\n if isinstance(sequences[0], torch.Tensor):\n assert \"torch\" in str(dtype), \"dtype and input type does not match\"\n padded_seqs = torch.zeros((len(sequences), max_length) + extra_dims, dtype=dtype, device=device)\n mask = torch.zeros((len(sequences), max_length), dtype=torch.float32, device=device)\n else: # np\n assert \"numpy\" in str(dtype), \"dtype and input type does not match\"\n padded_seqs = np.zeros((len(sequences), max_length) + extra_dims, dtype=dtype)\n mask = np.zeros((len(sequences), max_length), dtype=np.float32)\n\n for idx, seq in enumerate(sequences):\n end = lengths[idx]\n padded_seqs[idx, :end] = seq\n mask[idx, :end] = 1\n return padded_seqs, mask # , lengths\n\n\ndef pad_sequences_2d(sequences, dtype=torch.long):\n \"\"\" Pad a double-nested list or a sequence of n-d torch tensor into a (n+1)-d tensor,\n only allow the first two dims has variable lengths\n Args:\n sequences: list(n-d tensor or list)\n dtype: torch.long for word indices / torch.float (float32) for other cases\n Returns:\n Examples:\n >>> test_data_list = [[[1, 3, 5], [3, 7, 4, 1]], [[98, 34, 11, 89, 90], [22], [34, 56]],]\n >>> pad_sequences_2d(test_data_list, dtype=torch.long) # torch.Size([2, 3, 5])\n >>> test_data_3d = [torch.randn(2,2,4), torch.randn(4,3,4), torch.randn(1,5,4)]\n >>> pad_sequences_2d(test_data_3d, dtype=torch.float) # torch.Size([2, 3, 5])\n >>> test_data_3d2 = [[torch.randn(2,4), ], [torch.randn(3,4), torch.randn(5,4)]]\n >>> pad_sequences_2d(test_data_3d2, dtype=torch.float) # torch.Size([2, 3, 5])\n # TODO add support for numpy array\n \"\"\"\n bsz = len(sequences)\n para_lengths = [len(seq) for seq in sequences]\n max_para_len = max(para_lengths)\n sen_lengths = [[len(word_seq) for word_seq in seq] for seq in sequences]\n max_sen_len = max([max(e) for e in sen_lengths])\n\n if 
isinstance(sequences[0], torch.Tensor):\n extra_dims = sequences[0].shape[2:]\n elif isinstance(sequences[0][0], torch.Tensor):\n extra_dims = sequences[0][0].shape[1:]\n else:\n sequences = [[torch.Tensor(word_seq, dtype=dtype) for word_seq in seq] for seq in sequences]\n extra_dims = ()\n\n padded_seqs = torch.zeros((bsz, max_para_len, max_sen_len) + extra_dims, dtype=dtype)\n mask = torch.zeros(bsz, max_para_len, max_sen_len).float()\n\n for b_i in range(bsz):\n for sen_i, sen_l in enumerate(sen_lengths[b_i]):\n padded_seqs[b_i, sen_i, :sen_l] = sequences[b_i][sen_i]\n mask[b_i, sen_i, :sen_l] = 1\n return padded_seqs, mask # , sen_lengths\n\n\ndef find_max_triples(st_prob, ed_prob, top_n=5, prob_thd=None, tensor_type=\"torch\"):\n \"\"\" Find a list of (k1, k2) where k1 < k2 with the maximum values of st_prob[k1] * ed_prob[k2]\n Args:\n st_prob (torch.Tensor or np.ndarray): (N, L) batched start_idx probabilities\n ed_prob (torch.Tensor or np.ndarray): (N, L) batched end_idx probabilities\n top_n (int): return topN pairs with highest values\n prob_thd (float):\n tensor_type: str, np or torch\n Returns:\n batched_sorted_triple: N * [(st_idx, ed_idx, confidence), ...]\n \"\"\"\n if tensor_type == \"torch\":\n st_prob, ed_prob = st_prob.data.numpy(), ed_prob.data.numpy()\n product = np.einsum(\"bm,bn->bmn\", st_prob, ed_prob)\n # (N, L, L) the lower part becomes zeros, start_idx < ed_idx\n upper_product = np.triu(product, k=1)\n return find_max_triples_from_upper_triangle_product(upper_product, top_n=top_n, prob_thd=prob_thd)\n\n\ndef find_max_triples_from_upper_triangle_product(upper_product, top_n=5, prob_thd=None):\n \"\"\" Find a list of (k1, k2) where k1 < k2 with the maximum values of p1[k1] * p2[k2]\n Args:\n upper_product (torch.Tensor or np.ndarray): (N, L, L), the lower part becomes zeros, end_idx > start_idx\n top_n (int): return topN pairs with highest values\n prob_thd (float or None):\n Returns:\n batched_sorted_triple: N * [(st_idx, ed_idx, confidence), ...]\n \"\"\"\n batched_sorted_triple = []\n for idx, e in enumerate(upper_product):\n sorted_triple = top_n_array_2d(e, top_n=top_n)\n if prob_thd is not None:\n sorted_triple = sorted_triple[sorted_triple[2] >= prob_thd]\n batched_sorted_triple.append(sorted_triple)\n return batched_sorted_triple\n\n\ndef top_n_array_2d(array_2d, top_n):\n \"\"\" Get topN indices and values of a 2d array, return a tuple of indices and their values,\n ranked by the value\n \"\"\"\n row_indices, column_indices = np.unravel_index(np.argsort(array_2d, axis=None), array_2d.shape)\n row_indices = row_indices[::-1][:top_n]\n column_indices = column_indices[::-1][:top_n]\n sorted_values = array_2d[row_indices, column_indices]\n return np.stack([row_indices, column_indices, sorted_values], axis=1) # (N, 3)\n",
"step-ids": [
3,
4,
5,
6,
7
]
}
|
[
3,
4,
5,
6,
7
] |
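A quick numeric sketch of the span-scoring idea used by find_max_triples in the record above (numpy only; the record's own functions are assumed to live in a local module, so the core steps are repeated inline rather than imported):

import numpy as np

st_prob = np.array([[0.1, 0.6, 0.3]])   # (N=1, L=3) start probabilities
ed_prob = np.array([[0.2, 0.3, 0.5]])   # (N=1, L=3) end probabilities

# product[b, m, n] = st_prob[b, m] * ed_prob[b, n]
product = np.einsum("bm,bn->bmn", st_prob, ed_prob)
# keep only pairs with end_idx > start_idx, as np.triu(..., k=1) does in the record
upper = np.triu(product, k=1)

# top-2 (st_idx, ed_idx, confidence) triples, mirroring top_n_array_2d
rows, cols = np.unravel_index(np.argsort(upper[0], axis=None), upper[0].shape)
rows, cols = rows[::-1][:2], cols[::-1][:2]
print(np.stack([rows, cols, upper[0][rows, cols]], axis=1))
# best span is (1, 2) with confidence 0.6 * 0.5 = 0.3
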
"""
time: X * Y
space: worst case X * Y
"""
class Solution:
def numIslands(self, grid: List[List[str]]) -> int:
if not grid:
return 0
Y = len(grid)
X = len(grid[0])
def dfs(y, x):
if y < 0 or x < 0 or y > Y-1 or x > X-1:
return
if grid[y][x] == "1":
grid[y][x] = "0"
dfs(y, x-1)
dfs(y, x+1)
dfs(y-1, x)
dfs(y+1, x)
ans = 0
for y in range(Y):
for x in range(X):
if grid[y][x] == "1":
dfs(y, x)
ans += 1
return ans
def numIslands(self, grid : List[List[str]]) -> int:
R = len(grid)
C = len(grid[0])
def dfs(r, c):
if r < 0 or c < 0 or r >= R or c >= C:
return
if grid[r][c] == '1':
grid[r][c] = '0'
dfs(r-1,c)
dfs(r+1,c)
dfs(r,c-1)
dfs(r,c+1)
rtn = 0
for r in range(R):
for c in range(C):
if grid[r][c] == '1':
rtn += 1
dfs(r,c)
return rtn
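
A minimal usage sketch for the code above (editor's addition, not part of the original record). The class defines numIslands twice, so the second, rtn-based version is the one actually bound; both flood-fill the grid in place and visit each cell a bounded number of times, which matches the X * Y time and worst-case recursion depth noted in the docstring. The List[List[str]] annotations also assume `from typing import List` is already in scope, as LeetCode provides it.

grid = [["1", "1", "0", "0"],
        ["0", "0", "0", "1"],
        ["1", "0", "1", "1"]]
# pass a throwaway copy, since dfs overwrites "1" cells with "0"
print(Solution().numIslands([row[:] for row in grid]))  # expected: 3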
|
normal
|
{
"blob_id": "58bd14d240242ed58dcff35fe91cebeae4899478",
"index": 9087,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass Solution:\n <mask token>\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass Solution:\n\n def numIslands(self, grid: List[List[str]]) ->int:\n if not grid:\n return 0\n Y = len(grid)\n X = len(grid[0])\n\n def dfs(y, x):\n if y < 0 or x < 0 or y > Y - 1 or x > X - 1:\n return\n if grid[y][x] == '1':\n grid[y][x] = '0'\n dfs(y, x - 1)\n dfs(y, x + 1)\n dfs(y - 1, x)\n dfs(y + 1, x)\n ans = 0\n for y in range(Y):\n for x in range(X):\n if grid[y][x] == '1':\n dfs(y, x)\n ans += 1\n return ans\n <mask token>\n",
"step-4": "<mask token>\n\n\nclass Solution:\n\n def numIslands(self, grid: List[List[str]]) ->int:\n if not grid:\n return 0\n Y = len(grid)\n X = len(grid[0])\n\n def dfs(y, x):\n if y < 0 or x < 0 or y > Y - 1 or x > X - 1:\n return\n if grid[y][x] == '1':\n grid[y][x] = '0'\n dfs(y, x - 1)\n dfs(y, x + 1)\n dfs(y - 1, x)\n dfs(y + 1, x)\n ans = 0\n for y in range(Y):\n for x in range(X):\n if grid[y][x] == '1':\n dfs(y, x)\n ans += 1\n return ans\n\n def numIslands(self, grid: List[List[str]]) ->int:\n R = len(grid)\n C = len(grid[0])\n\n def dfs(r, c):\n if r < 0 or c < 0 or r >= R or c >= C:\n return\n if grid[r][c] == '1':\n grid[r][c] = '0'\n dfs(r - 1, c)\n dfs(r + 1, c)\n dfs(r, c - 1)\n dfs(r, c + 1)\n rtn = 0\n for r in range(R):\n for c in range(C):\n if grid[r][c] == '1':\n rtn += 1\n dfs(r, c)\n return rtn\n",
"step-5": "\"\"\"\ntime: X * Y\nspace: worst case X * Y\n\"\"\"\nclass Solution:\n def numIslands(self, grid: List[List[str]]) -> int:\n \n if not grid:\n return 0\n \n Y = len(grid)\n X = len(grid[0])\n \n def dfs(y, x):\n if y < 0 or x < 0 or y > Y-1 or x > X-1:\n return\n if grid[y][x] == \"1\":\n grid[y][x] = \"0\"\n dfs(y, x-1)\n dfs(y, x+1)\n dfs(y-1, x)\n dfs(y+1, x)\n \n ans = 0\n \n for y in range(Y):\n for x in range(X):\n if grid[y][x] == \"1\":\n dfs(y, x)\n ans += 1\n \n return ans\n\n def numIslands(self, grid : List[List[str]]) -> int:\n R = len(grid)\n C = len(grid[0])\n \n def dfs(r, c):\n if r < 0 or c < 0 or r >= R or c >= C:\n return\n \n if grid[r][c] == '1':\n grid[r][c] = '0'\n \n dfs(r-1,c)\n dfs(r+1,c)\n dfs(r,c-1)\n dfs(r,c+1)\n \n rtn = 0\n for r in range(R):\n for c in range(C):\n if grid[r][c] == '1':\n rtn += 1\n dfs(r,c)\n \n return rtn\n ",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
from job_description import JobDescription
from resume import Resume
from resume_manager import ResumeManager
|
normal
|
{
"blob_id": "a998433e45c1d5135749c5164e8ec1f2eb0e572a",
"index": 1693,
"step-1": "<mask token>\n",
"step-2": "from job_description import JobDescription\nfrom resume import Resume\nfrom resume_manager import ResumeManager\n",
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0,
1
]
}
|
[
0,
1
] |
from ctypes import *
class GF_IPMPX_Data(Structure):
_fields_=[
("tag", c_char),
("Version", c_char),
("dataID", c_char)
]
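
A small usage sketch (editor's assumption: the structure above mirrors a 3-byte GPAC GF_IPMPX_Data header; nothing here touches the native library):

d = GF_IPMPX_Data(tag=b"\x01", Version=b"\x01", dataID=b"\x07")
print(d.tag, d.Version, d.dataID)   # each field is a single ctypes c_char (bytes of length 1)
print(sizeof(GF_IPMPX_Data))        # 3, since the struct is three c_char fields
print(bytes(d))                     # b'\x01\x01\x07', the raw in-memory layout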
|
normal
|
{
"blob_id": "b3f4815495c781fe6cc15f77b4ee601680117419",
"index": 8592,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass GF_IPMPX_Data(Structure):\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass GF_IPMPX_Data(Structure):\n _fields_ = [('tag', c_char), ('Version', c_char), ('dataID', c_char)]\n",
"step-4": "from ctypes import *\n\n\nclass GF_IPMPX_Data(Structure):\n _fields_ = [('tag', c_char), ('Version', c_char), ('dataID', c_char)]\n",
"step-5": "from ctypes import *\n\n\nclass GF_IPMPX_Data(Structure):\n _fields_=[\n (\"tag\", c_char),\n (\"Version\", c_char),\n (\"dataID\", c_char)\n ]",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
list = [3, 1, 2, 5, 4, 7, 6]
def sort(list):
for i in range(len(list) - 1):
if list[i] > list[i + 1]:
a = list[i]
list[i] = list[i + 1]
list[i + 1] = a
print(list)
sort(list)
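
Worth noting (editor's sketch, not in the original record): the function above makes a single bubble-sort pass, which happens to fully order this particular list but will not sort arbitrary input. A standard bubble sort repeats the pass until no swap occurs:

def bubble_sort(items):
    swapped = True
    while swapped:
        swapped = False
        for i in range(len(items) - 1):
            if items[i] > items[i + 1]:
                items[i], items[i + 1] = items[i + 1], items[i]
                swapped = True
    return items

print(bubble_sort([5, 1, 4, 2, 8]))        # [1, 2, 4, 5, 8]
print(bubble_sort([3, 1, 2, 5, 4, 7, 6]))  # matches the output printed above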
|
normal
|
{
"blob_id": "219929d52b5f1a0690590e83b41d2b4f0b2b3a51",
"index": 336,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef sort(list):\n for i in range(len(list) - 1):\n if list[i] > list[i + 1]:\n a = list[i]\n list[i] = list[i + 1]\n list[i + 1] = a\n print(list)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef sort(list):\n for i in range(len(list) - 1):\n if list[i] > list[i + 1]:\n a = list[i]\n list[i] = list[i + 1]\n list[i + 1] = a\n print(list)\n\n\nsort(list)\n",
"step-4": "list = [3, 1, 2, 5, 4, 7, 6]\n\n\ndef sort(list):\n for i in range(len(list) - 1):\n if list[i] > list[i + 1]:\n a = list[i]\n list[i] = list[i + 1]\n list[i + 1] = a\n print(list)\n\n\nsort(list)\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
import subprocess
import datetime
def ping_address(host,n):
ping = subprocess.Popen(
["ping","-c",str(n),host],
stdout = subprocess.PIPE,
stderr = subprocess.PIPE)
out,error = ping.communicate()
return out, error
def ping_address_windows(host,n):
ping = subprocess.Popen(
["ping","-n",str(n),host], # Need -c for linux
stdout = subprocess.PIPE,
stderr = subprocess.PIPE)
out,error = ping.communicate()
return out, error
def parse_msg(msg):
line_org = msg.split('\n')
N = len(line_org)-2
line = line_org[N]
return line
def get_vals(msg):
rhs = msg.split('=')
try:
nums = rhs[1].split('/')
min_num = float(nums[0])
ave_num = float(nums[1])
max_num = float(nums[2])
std_num = nums[3].split(' ')
std_num = float(std_num[0])
except:
print("Could not Ping Website...")
min_num = float('nan')
ave_num = float('nan')
max_num = float('nan')
std_num = float('nan')
return min_num, ave_num, max_num, std_num
def get_vals_windows(msg):
rhs = msg.split('=')
try:
nums = rhs[1].split('ms')
min_num = float(nums[0])
nums = rhs[2].split('ms')
ave_num = float(nums[0])
nums = rhs[3].split('ms')
max_num = float(nums[0])
std_num = float('nan')
except:
print("Could not Ping Website...")
min_num = float('nan')
ave_num = float('nan')
max_num = float('nan')
std_num = float('nan')
return min_num, ave_num, max_num, std_num
def get_date_and_time():
return datetime.datetime.now()
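
A hedged usage sketch for the helpers above (assumes a POSIX `ping` on PATH; on Python 3 communicate() returns bytes, so the output is decoded before the string parsing in parse_msg/get_vals):

out, err = ping_address("127.0.0.1", 3)
summary = parse_msg(out.decode())   # last non-empty line, e.g. "rtt min/avg/max/mdev = ..."
print(get_vals(summary))            # (min, avg, max, stddev) in ms, NaNs if the ping failed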
|
normal
|
{
"blob_id": "3f2221f5f3a699020dd5986acb793e3083976dff",
"index": 7176,
"step-1": "<mask token>\n\n\ndef parse_msg(msg):\n line_org = msg.split('\\n')\n N = len(line_org) - 2\n line = line_org[N]\n return line\n\n\ndef get_vals(msg):\n rhs = msg.split('=')\n try:\n nums = rhs[1].split('/')\n min_num = float(nums[0])\n ave_num = float(nums[1])\n max_num = float(nums[2])\n std_num = nums[3].split(' ')\n std_num = float(std_num[0])\n except:\n print('Could not Ping Website...')\n min_num = float('nan')\n ave_num = float('nan')\n max_num = float('nan')\n std_num = float('nan')\n return min_num, ave_num, max_num, std_num\n\n\ndef get_vals_windows(msg):\n rhs = msg.split('=')\n try:\n nums = rhs[1].split('ms')\n min_num = float(nums[0])\n nums = rhs[2].split('ms')\n ave_num = float(nums[0])\n nums = rhs[3].split('ms')\n max_num = float(nums[0])\n std_num = float('nan')\n except:\n print('Could not Ping Website...')\n min_num = float('nan')\n ave_num = float('nan')\n max_num = float('nan')\n std_num = float('nan')\n return min_num, ave_num, max_num, std_num\n\n\ndef get_date_and_time():\n return datetime.datetime.now()\n",
"step-2": "<mask token>\n\n\ndef ping_address(host, n):\n ping = subprocess.Popen(['ping', '-c', str(n), host], stdout=subprocess\n .PIPE, stderr=subprocess.PIPE)\n out, error = ping.communicate()\n return out, error\n\n\n<mask token>\n\n\ndef parse_msg(msg):\n line_org = msg.split('\\n')\n N = len(line_org) - 2\n line = line_org[N]\n return line\n\n\ndef get_vals(msg):\n rhs = msg.split('=')\n try:\n nums = rhs[1].split('/')\n min_num = float(nums[0])\n ave_num = float(nums[1])\n max_num = float(nums[2])\n std_num = nums[3].split(' ')\n std_num = float(std_num[0])\n except:\n print('Could not Ping Website...')\n min_num = float('nan')\n ave_num = float('nan')\n max_num = float('nan')\n std_num = float('nan')\n return min_num, ave_num, max_num, std_num\n\n\ndef get_vals_windows(msg):\n rhs = msg.split('=')\n try:\n nums = rhs[1].split('ms')\n min_num = float(nums[0])\n nums = rhs[2].split('ms')\n ave_num = float(nums[0])\n nums = rhs[3].split('ms')\n max_num = float(nums[0])\n std_num = float('nan')\n except:\n print('Could not Ping Website...')\n min_num = float('nan')\n ave_num = float('nan')\n max_num = float('nan')\n std_num = float('nan')\n return min_num, ave_num, max_num, std_num\n\n\ndef get_date_and_time():\n return datetime.datetime.now()\n",
"step-3": "<mask token>\n\n\ndef ping_address(host, n):\n ping = subprocess.Popen(['ping', '-c', str(n), host], stdout=subprocess\n .PIPE, stderr=subprocess.PIPE)\n out, error = ping.communicate()\n return out, error\n\n\ndef ping_address_windows(host, n):\n ping = subprocess.Popen(['ping', '-n', str(n), host], stdout=subprocess\n .PIPE, stderr=subprocess.PIPE)\n out, error = ping.communicate()\n return out, error\n\n\ndef parse_msg(msg):\n line_org = msg.split('\\n')\n N = len(line_org) - 2\n line = line_org[N]\n return line\n\n\ndef get_vals(msg):\n rhs = msg.split('=')\n try:\n nums = rhs[1].split('/')\n min_num = float(nums[0])\n ave_num = float(nums[1])\n max_num = float(nums[2])\n std_num = nums[3].split(' ')\n std_num = float(std_num[0])\n except:\n print('Could not Ping Website...')\n min_num = float('nan')\n ave_num = float('nan')\n max_num = float('nan')\n std_num = float('nan')\n return min_num, ave_num, max_num, std_num\n\n\ndef get_vals_windows(msg):\n rhs = msg.split('=')\n try:\n nums = rhs[1].split('ms')\n min_num = float(nums[0])\n nums = rhs[2].split('ms')\n ave_num = float(nums[0])\n nums = rhs[3].split('ms')\n max_num = float(nums[0])\n std_num = float('nan')\n except:\n print('Could not Ping Website...')\n min_num = float('nan')\n ave_num = float('nan')\n max_num = float('nan')\n std_num = float('nan')\n return min_num, ave_num, max_num, std_num\n\n\ndef get_date_and_time():\n return datetime.datetime.now()\n",
"step-4": "import subprocess\nimport datetime\n\n\ndef ping_address(host, n):\n ping = subprocess.Popen(['ping', '-c', str(n), host], stdout=subprocess\n .PIPE, stderr=subprocess.PIPE)\n out, error = ping.communicate()\n return out, error\n\n\ndef ping_address_windows(host, n):\n ping = subprocess.Popen(['ping', '-n', str(n), host], stdout=subprocess\n .PIPE, stderr=subprocess.PIPE)\n out, error = ping.communicate()\n return out, error\n\n\ndef parse_msg(msg):\n line_org = msg.split('\\n')\n N = len(line_org) - 2\n line = line_org[N]\n return line\n\n\ndef get_vals(msg):\n rhs = msg.split('=')\n try:\n nums = rhs[1].split('/')\n min_num = float(nums[0])\n ave_num = float(nums[1])\n max_num = float(nums[2])\n std_num = nums[3].split(' ')\n std_num = float(std_num[0])\n except:\n print('Could not Ping Website...')\n min_num = float('nan')\n ave_num = float('nan')\n max_num = float('nan')\n std_num = float('nan')\n return min_num, ave_num, max_num, std_num\n\n\ndef get_vals_windows(msg):\n rhs = msg.split('=')\n try:\n nums = rhs[1].split('ms')\n min_num = float(nums[0])\n nums = rhs[2].split('ms')\n ave_num = float(nums[0])\n nums = rhs[3].split('ms')\n max_num = float(nums[0])\n std_num = float('nan')\n except:\n print('Could not Ping Website...')\n min_num = float('nan')\n ave_num = float('nan')\n max_num = float('nan')\n std_num = float('nan')\n return min_num, ave_num, max_num, std_num\n\n\ndef get_date_and_time():\n return datetime.datetime.now()\n",
"step-5": "import subprocess\nimport datetime\n\ndef ping_address(host,n):\n ping = subprocess.Popen(\n [\"ping\",\"-c\",str(n),host],\n stdout = subprocess.PIPE,\n stderr = subprocess.PIPE)\n out,error = ping.communicate()\n return out, error\n\ndef ping_address_windows(host,n):\n ping = subprocess.Popen(\n [\"ping\",\"-n\",str(n),host], # Need -c for linux\n stdout = subprocess.PIPE,\n stderr = subprocess.PIPE)\n out,error = ping.communicate()\n return out, error\n\ndef parse_msg(msg):\n line_org = msg.split('\\n')\n N = len(line_org)-2\n line = line_org[N]\n return line\n\ndef get_vals(msg):\n rhs = msg.split('=')\n try:\n nums = rhs[1].split('/')\n min_num = float(nums[0])\n ave_num = float(nums[1])\n max_num = float(nums[2])\n std_num = nums[3].split(' ')\n std_num = float(std_num[0])\n except:\n print(\"Could not Ping Website...\")\n min_num = float('nan')\n ave_num = float('nan')\n max_num = float('nan')\n std_num = float('nan')\n return min_num, ave_num, max_num, std_num\n\ndef get_vals_windows(msg):\n rhs = msg.split('=')\n try:\n nums = rhs[1].split('ms')\n min_num = float(nums[0])\n nums = rhs[2].split('ms')\n ave_num = float(nums[0])\n nums = rhs[3].split('ms')\n max_num = float(nums[0])\n std_num = float('nan')\n except:\n print(\"Could not Ping Website...\")\n min_num = float('nan')\n ave_num = float('nan')\n max_num = float('nan')\n std_num = float('nan')\n return min_num, ave_num, max_num, std_num\n\ndef get_date_and_time():\n return datetime.datetime.now()\n",
"step-ids": [
4,
5,
6,
7,
8
]
}
|
[
4,
5,
6,
7,
8
] |
from typing import List
from pydantic import BaseModel
class BinBase(BaseModel):
name: str = None
title: str = None
class BinCreate(BinBase):
owner_id: int
password: str
class Bin(BinBase):
id: int
# TODO: token?
class Config():
orm_mode = True
class UserBase(BaseModel):
username: str
class UserCreate(UserBase):
password: str
class User(UserBase):
id: int
# TODO: password?
# bins: List[Bin] = []
class Config():
orm_mode = True
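
A minimal usage sketch for the schemas above (values are made up; orm_mode = True is the pydantic v1 switch that lets Bin and User be built from ORM objects with .from_orm()):

user_in = UserCreate(username="alice", password="s3cret")
print(user_in.dict())                      # {'username': 'alice', 'password': 's3cret'}

bin_in = BinCreate(name="snippets", title="My bin", owner_id=1, password="pw")
print(bin_in.dict(exclude={"password"}))   # drop the secret before echoing it back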
|
normal
|
{
"blob_id": "1c0f194bbdc6f7e3e4feb114e521aa958f11e83e",
"index": 3263,
"step-1": "<mask token>\n\n\nclass UserCreate(UserBase):\n password: str\n\n\nclass User(UserBase):\n id: int\n\n\n class Config:\n orm_mode = True\n",
"step-2": "<mask token>\n\n\nclass BinCreate(BinBase):\n owner_id: int\n password: str\n\n\nclass Bin(BinBase):\n id: int\n\n\n class Config:\n orm_mode = True\n\n\nclass UserBase(BaseModel):\n username: str\n\n\nclass UserCreate(UserBase):\n password: str\n\n\nclass User(UserBase):\n id: int\n\n\n class Config:\n orm_mode = True\n",
"step-3": "<mask token>\n\n\nclass BinBase(BaseModel):\n name: str = None\n title: str = None\n\n\nclass BinCreate(BinBase):\n owner_id: int\n password: str\n\n\nclass Bin(BinBase):\n id: int\n\n\n class Config:\n orm_mode = True\n\n\nclass UserBase(BaseModel):\n username: str\n\n\nclass UserCreate(UserBase):\n password: str\n\n\nclass User(UserBase):\n id: int\n\n\n class Config:\n orm_mode = True\n",
"step-4": "from typing import List\nfrom pydantic import BaseModel\n\n\nclass BinBase(BaseModel):\n name: str = None\n title: str = None\n\n\nclass BinCreate(BinBase):\n owner_id: int\n password: str\n\n\nclass Bin(BinBase):\n id: int\n\n\n class Config:\n orm_mode = True\n\n\nclass UserBase(BaseModel):\n username: str\n\n\nclass UserCreate(UserBase):\n password: str\n\n\nclass User(UserBase):\n id: int\n\n\n class Config:\n orm_mode = True\n",
"step-5": "from typing import List\nfrom pydantic import BaseModel\n\nclass BinBase(BaseModel):\n name: str = None\n title: str = None\n\n\nclass BinCreate(BinBase):\n owner_id: int\n password: str\n\n\nclass Bin(BinBase):\n id: int\n # TODO: token?\n\n class Config():\n orm_mode = True\n\n\nclass UserBase(BaseModel):\n username: str\n\n\nclass UserCreate(UserBase):\n password: str\n\n\nclass User(UserBase):\n id: int\n # TODO: password?\n # bins: List[Bin] = []\n\n class Config():\n orm_mode = True\n",
"step-ids": [
2,
5,
6,
7,
8
]
}
|
[
2,
5,
6,
7,
8
] |
from django.db import models
class Category(models.Model):
name = models.CharField(max_length=50, unique=True)
created_at = models.DateTimeField(auto_now_add=True)
def __str__(self):
return self.name
class Meta:
verbose_name = 'Categoria'
class Books(models.Model):
name = models.CharField(max_length=100)
created_at = models.DateTimeField(auto_now_add=True)
category = models.ForeignKey(
Category, on_delete=models.CASCADE, related_name='category')
def __str__(self):
return self.name
class Meta:
verbose_name = 'Livro'
class Student(models.Model):
name = models.CharField(max_length=70)
cpf = models.CharField(max_length=14)
birth_date = models.DateField()
city = models.CharField(max_length=50)
registration_date = models.DateTimeField(auto_now_add=True)
email = models.EmailField(max_length=50)
tel = models.CharField(max_length=15)
book= models.ForeignKey(
Books, on_delete=models.CASCADE, related_name='book')
class Meta:
verbose_name = 'Estudante'
ordering = ['-id']
def __str__(self):
return self.name
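
A hedged usage sketch (runs only inside a configured Django project with migrations applied; the values are illustrative). Note that related_name='category' and related_name='book' name the reverse accessors, so a Category reaches its Books through .category:

from datetime import date

cat = Category.objects.create(name="Fiction")
book = Books.objects.create(name="Dom Casmurro", category=cat)
Student.objects.create(
    name="Ana", cpf="000.000.000-00", birth_date=date(2000, 1, 1),
    city="Recife", email="ana@example.com", tel="(81) 90000-0000", book=book,
)
print(cat.category.all())   # the Books linked to this Category, via related_name='category'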
|
normal
|
{
"blob_id": "0584ff5cb252fba0fe1fc350a5fb023ab5cbb02b",
"index": 6750,
"step-1": "<mask token>\n\n\nclass Student(models.Model):\n name = models.CharField(max_length=70)\n cpf = models.CharField(max_length=14)\n birth_date = models.DateField()\n city = models.CharField(max_length=50)\n registration_date = models.DateTimeField(auto_now_add=True)\n email = models.EmailField(max_length=50)\n tel = models.CharField(max_length=15)\n book = models.ForeignKey(Books, on_delete=models.CASCADE, related_name=\n 'book')\n\n\n class Meta:\n verbose_name = 'Estudante'\n ordering = ['-id']\n\n def __str__(self):\n return self.name\n",
"step-2": "<mask token>\n\n\nclass Category(models.Model):\n <mask token>\n <mask token>\n <mask token>\n\n\n class Meta:\n verbose_name = 'Categoria'\n\n\nclass Books(models.Model):\n name = models.CharField(max_length=100)\n created_at = models.DateTimeField(auto_now_add=True)\n category = models.ForeignKey(Category, on_delete=models.CASCADE,\n related_name='category')\n\n def __str__(self):\n return self.name\n\n\n class Meta:\n verbose_name = 'Livro'\n\n\nclass Student(models.Model):\n name = models.CharField(max_length=70)\n cpf = models.CharField(max_length=14)\n birth_date = models.DateField()\n city = models.CharField(max_length=50)\n registration_date = models.DateTimeField(auto_now_add=True)\n email = models.EmailField(max_length=50)\n tel = models.CharField(max_length=15)\n book = models.ForeignKey(Books, on_delete=models.CASCADE, related_name=\n 'book')\n\n\n class Meta:\n verbose_name = 'Estudante'\n ordering = ['-id']\n\n def __str__(self):\n return self.name\n",
"step-3": "<mask token>\n\n\nclass Category(models.Model):\n <mask token>\n <mask token>\n\n def __str__(self):\n return self.name\n\n\n class Meta:\n verbose_name = 'Categoria'\n\n\nclass Books(models.Model):\n name = models.CharField(max_length=100)\n created_at = models.DateTimeField(auto_now_add=True)\n category = models.ForeignKey(Category, on_delete=models.CASCADE,\n related_name='category')\n\n def __str__(self):\n return self.name\n\n\n class Meta:\n verbose_name = 'Livro'\n\n\nclass Student(models.Model):\n name = models.CharField(max_length=70)\n cpf = models.CharField(max_length=14)\n birth_date = models.DateField()\n city = models.CharField(max_length=50)\n registration_date = models.DateTimeField(auto_now_add=True)\n email = models.EmailField(max_length=50)\n tel = models.CharField(max_length=15)\n book = models.ForeignKey(Books, on_delete=models.CASCADE, related_name=\n 'book')\n\n\n class Meta:\n verbose_name = 'Estudante'\n ordering = ['-id']\n\n def __str__(self):\n return self.name\n",
"step-4": "<mask token>\n\n\nclass Category(models.Model):\n name = models.CharField(max_length=50, unique=True)\n created_at = models.DateTimeField(auto_now_add=True)\n\n def __str__(self):\n return self.name\n\n\n class Meta:\n verbose_name = 'Categoria'\n\n\nclass Books(models.Model):\n name = models.CharField(max_length=100)\n created_at = models.DateTimeField(auto_now_add=True)\n category = models.ForeignKey(Category, on_delete=models.CASCADE,\n related_name='category')\n\n def __str__(self):\n return self.name\n\n\n class Meta:\n verbose_name = 'Livro'\n\n\nclass Student(models.Model):\n name = models.CharField(max_length=70)\n cpf = models.CharField(max_length=14)\n birth_date = models.DateField()\n city = models.CharField(max_length=50)\n registration_date = models.DateTimeField(auto_now_add=True)\n email = models.EmailField(max_length=50)\n tel = models.CharField(max_length=15)\n book = models.ForeignKey(Books, on_delete=models.CASCADE, related_name=\n 'book')\n\n\n class Meta:\n verbose_name = 'Estudante'\n ordering = ['-id']\n\n def __str__(self):\n return self.name\n",
"step-5": "from django.db import models\n\n\nclass Category(models.Model):\n name = models.CharField(max_length=50, unique=True)\n created_at = models.DateTimeField(auto_now_add=True)\n\n def __str__(self):\n return self.name\n\n class Meta:\n verbose_name = 'Categoria'\n\n\nclass Books(models.Model):\n name = models.CharField(max_length=100)\n created_at = models.DateTimeField(auto_now_add=True)\n category = models.ForeignKey(\n Category, on_delete=models.CASCADE, related_name='category')\n\n def __str__(self):\n return self.name\n\n class Meta:\n verbose_name = 'Livro'\n\n\nclass Student(models.Model):\n name = models.CharField(max_length=70)\n cpf = models.CharField(max_length=14)\n birth_date = models.DateField()\n city = models.CharField(max_length=50)\n registration_date = models.DateTimeField(auto_now_add=True)\n email = models.EmailField(max_length=50)\n tel = models.CharField(max_length=15)\n book= models.ForeignKey(\n Books, on_delete=models.CASCADE, related_name='book')\n\n class Meta:\n verbose_name = 'Estudante'\n ordering = ['-id']\n\n def __str__(self):\n return self.name\n\n",
"step-ids": [
3,
7,
8,
9,
11
]
}
|
[
3,
7,
8,
9,
11
] |
import pandas as pd
import os
"""
This code relies heavily on the form of the data. Namely it will fail if
the authors of the same book are not comma separated. It will also be inaccurate
or even fail if the same author for different books is not spelt in exactly the
same way.
"""
loc = r'C:\Users\james\OneDrive\Documents\University\2017-18 Southampton\Data Mining\Group Coursework\Data'
#path = os.path.join(loc, r'Sample\new_books_data.csv')
path = os.path.join(loc, r'Processed_Data\new_books_data.csv')
books_data = pd.read_csv(path)
def split(string):
"""
Function takes input of a string and returns an array of strings
the original string should be comma separated with a space after
the comma in order for this function to be accurate.
"""
names = []
index = 0
last = 0
for letter in string:
if ((letter == ',') or (index == (len(string) - 1))):
if (index == (len(string) - 1)):
names.append(string[last:(index+1)])
else:
names.append(string[last:index])
last = index+2
index += 1
return names
unique_authors = []
count = 0
for name in books_data['authors']:
if (count%1000 == 0):
print(count)
split_names = split(name)
for author in split_names:
if (author in unique_authors):
pass
else:
unique_authors.append(author)
count += 1
authors_books = []
length = len(books_data.index)
count = 0
length_2 = len(unique_authors)
for author in unique_authors:
if (count%100 == 0):
print(str(count)+'/'+str(length_2))
books = []
for i in range(length):
split_names = split(books_data['authors'][i])
if (author in split_names):
books.append(books_data['goodreads_book_id'][i])
authors_books.append(books)
count += 1
d = {'author': unique_authors, 'book_id': authors_books}
books_by_author = pd.DataFrame(data=d)
#write_path = os.path.join(loc, r'Sample\books_by_author.csv')
write_path = os.path.join(loc, r'Processed_Data\books_by_author.csv')
books_by_author.to_csv(write_path, index=False)
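
A quick check of the hand-rolled splitter above, plus the equivalent pandas idiom (editor's sketch; the hard-coded Windows paths are machine-specific, so no file I/O here):

names = "Jane Austen, Charlotte Bronte, Emily Bronte"
print(split(names))        # ['Jane Austen', 'Charlotte Bronte', 'Emily Bronte']
print(names.split(", "))   # identical result for the comma-plus-space convention the docstring requires

# The author -> book_id grouping could also be expressed directly in pandas:
# (books_data.assign(author=books_data['authors'].str.split(', '))
#            .explode('author')
#            .groupby('author')['goodreads_book_id'].apply(list))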
|
normal
|
{
"blob_id": "f57490c8f4a5ba76824c3b41eb18905eb2213c23",
"index": 5107,
"step-1": "<mask token>\n\n\ndef split(string):\n \"\"\"\n Function takes input of a string and returns an array of strings\n the original string should be comma separated with a space after\n the comma in order for this function to be accurate.\n \"\"\"\n names = []\n index = 0\n last = 0\n for letter in string:\n if letter == ',' or index == len(string) - 1:\n if index == len(string) - 1:\n names.append(string[last:index + 1])\n else:\n names.append(string[last:index])\n last = index + 2\n index += 1\n return names\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef split(string):\n \"\"\"\n Function takes input of a string and returns an array of strings\n the original string should be comma separated with a space after\n the comma in order for this function to be accurate.\n \"\"\"\n names = []\n index = 0\n last = 0\n for letter in string:\n if letter == ',' or index == len(string) - 1:\n if index == len(string) - 1:\n names.append(string[last:index + 1])\n else:\n names.append(string[last:index])\n last = index + 2\n index += 1\n return names\n\n\n<mask token>\nfor name in books_data['authors']:\n if count % 1000 == 0:\n print(count)\n split_names = split(name)\n for author in split_names:\n if author in unique_authors:\n pass\n else:\n unique_authors.append(author)\n count += 1\n<mask token>\nfor author in unique_authors:\n if count % 100 == 0:\n print(str(count) + '/' + str(length_2))\n books = []\n for i in range(length):\n split_names = split(books_data['authors'][i])\n if author in split_names:\n books.append(books_data['goodreads_book_id'][i])\n authors_books.append(books)\n count += 1\n<mask token>\nbooks_by_author.to_csv(write_path, index=False)\n",
"step-3": "<mask token>\nloc = (\n 'C:\\\\Users\\\\james\\\\OneDrive\\\\Documents\\\\University\\\\2017-18 Southampton\\\\Data Mining\\\\Group Coursework\\\\Data'\n )\npath = os.path.join(loc, 'Processed_Data\\\\new_books_data.csv')\nbooks_data = pd.read_csv(path)\n\n\ndef split(string):\n \"\"\"\n Function takes input of a string and returns an array of strings\n the original string should be comma separated with a space after\n the comma in order for this function to be accurate.\n \"\"\"\n names = []\n index = 0\n last = 0\n for letter in string:\n if letter == ',' or index == len(string) - 1:\n if index == len(string) - 1:\n names.append(string[last:index + 1])\n else:\n names.append(string[last:index])\n last = index + 2\n index += 1\n return names\n\n\nunique_authors = []\ncount = 0\nfor name in books_data['authors']:\n if count % 1000 == 0:\n print(count)\n split_names = split(name)\n for author in split_names:\n if author in unique_authors:\n pass\n else:\n unique_authors.append(author)\n count += 1\nauthors_books = []\nlength = len(books_data.index)\ncount = 0\nlength_2 = len(unique_authors)\nfor author in unique_authors:\n if count % 100 == 0:\n print(str(count) + '/' + str(length_2))\n books = []\n for i in range(length):\n split_names = split(books_data['authors'][i])\n if author in split_names:\n books.append(books_data['goodreads_book_id'][i])\n authors_books.append(books)\n count += 1\nd = {'author': unique_authors, 'book_id': authors_books}\nbooks_by_author = pd.DataFrame(data=d)\nwrite_path = os.path.join(loc, 'Processed_Data\\\\books_by_author.csv')\nbooks_by_author.to_csv(write_path, index=False)\n",
"step-4": "import pandas as pd\nimport os\n<mask token>\nloc = (\n 'C:\\\\Users\\\\james\\\\OneDrive\\\\Documents\\\\University\\\\2017-18 Southampton\\\\Data Mining\\\\Group Coursework\\\\Data'\n )\npath = os.path.join(loc, 'Processed_Data\\\\new_books_data.csv')\nbooks_data = pd.read_csv(path)\n\n\ndef split(string):\n \"\"\"\n Function takes input of a string and returns an array of strings\n the original string should be comma separated with a space after\n the comma in order for this function to be accurate.\n \"\"\"\n names = []\n index = 0\n last = 0\n for letter in string:\n if letter == ',' or index == len(string) - 1:\n if index == len(string) - 1:\n names.append(string[last:index + 1])\n else:\n names.append(string[last:index])\n last = index + 2\n index += 1\n return names\n\n\nunique_authors = []\ncount = 0\nfor name in books_data['authors']:\n if count % 1000 == 0:\n print(count)\n split_names = split(name)\n for author in split_names:\n if author in unique_authors:\n pass\n else:\n unique_authors.append(author)\n count += 1\nauthors_books = []\nlength = len(books_data.index)\ncount = 0\nlength_2 = len(unique_authors)\nfor author in unique_authors:\n if count % 100 == 0:\n print(str(count) + '/' + str(length_2))\n books = []\n for i in range(length):\n split_names = split(books_data['authors'][i])\n if author in split_names:\n books.append(books_data['goodreads_book_id'][i])\n authors_books.append(books)\n count += 1\nd = {'author': unique_authors, 'book_id': authors_books}\nbooks_by_author = pd.DataFrame(data=d)\nwrite_path = os.path.join(loc, 'Processed_Data\\\\books_by_author.csv')\nbooks_by_author.to_csv(write_path, index=False)\n",
"step-5": "import pandas as pd\nimport os\n\n\"\"\"\nThis code relies heavily on the form of the data. Namely it will fail if \nthe authors of the same book are not comma separated. It will also be inaccurate\nor even fail if the same author for different books is not spelt in exactly the\nsame way.\n\"\"\"\n\n\nloc = r'C:\\Users\\james\\OneDrive\\Documents\\University\\2017-18 Southampton\\Data Mining\\Group Coursework\\Data'\n \n#path = os.path.join(loc, r'Sample\\new_books_data.csv')\npath = os.path.join(loc, r'Processed_Data\\new_books_data.csv')\n\nbooks_data = pd.read_csv(path)\n\n\ndef split(string):\n \"\"\"\n Function takes input of a string and returns an array of strings\n the original string should be comma separated with a space after\n the comma in order for this function to be accurate.\n \"\"\"\n names = []\n index = 0\n last = 0\n for letter in string:\n if ((letter == ',') or (index == (len(string) - 1))):\n if (index == (len(string) - 1)):\n names.append(string[last:(index+1)])\n else:\n names.append(string[last:index])\n last = index+2\n index += 1\n return names\n\n\nunique_authors = []\ncount = 0\nfor name in books_data['authors']:\n if (count%1000 == 0):\n print(count)\n split_names = split(name)\n for author in split_names:\n if (author in unique_authors):\n pass\n else:\n unique_authors.append(author)\n count += 1\n\nauthors_books = []\nlength = len(books_data.index)\n\ncount = 0\nlength_2 = len(unique_authors)\nfor author in unique_authors:\n if (count%100 == 0):\n print(str(count)+'/'+str(length_2))\n books = []\n for i in range(length):\n split_names = split(books_data['authors'][i])\n if (author in split_names):\n books.append(books_data['goodreads_book_id'][i])\n authors_books.append(books)\n count += 1\n\nd = {'author': unique_authors, 'book_id': authors_books}\nbooks_by_author = pd.DataFrame(data=d)\n\n#write_path = os.path.join(loc, r'Sample\\books_by_author.csv')\nwrite_path = os.path.join(loc, r'Processed_Data\\books_by_author.csv')\nbooks_by_author.to_csv(write_path, index=False)\n\n\n\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
import re
from mapa import graficar_lista, graficar_matriz
class nodo:
def __init__(self, x, y, n, c):
self.columna = x
self.fila = y
self.nombre = n
self.color = c
pattern_matriz = r"[M|m][A|a][T|t][R|r][I|i][Z|z]\s*\(.*,.*,.*,.*,.*\)\{"
pattern_fila = r"[F|f][I|i][L|l][A|a]\s*\(.*\)\s*.*;"
pattern_nodo = r"[N|n][O|o][D|d][O|o]\s*\(.*,.*,.*\).*;"
pattern_defecto = r"\}\s*[D|d][E|e][F|f][E|e][C|c][T|t][O|o]\s*\(.*\).*"
propiedades = {
'fila' : '',
'columna' : '',
'nombre_matriz' : '',
'forma_nodo' : '',
'matriz_doble': '',
}
nodos = []
nombre_def = ""
color_def = ""
def leer_archivo_matriz(path):
with open(path, 'r', encoding='utf-8') as f:
lineas = f.readlines()
num_fila = 0
estado = ""
for i in lineas:
if re.search(pattern_matriz, i):
separado = re.findall(r"\(.*,.*,.*,.*,.*\)",i)
separados = separado[0].replace("(","")
separados = separados.replace(")","")
separados = re.split(r",",separados)
separados[0] = separados[0].replace(" ","")
separados[1] = separados[1].replace(" ","")
separados[2] = separados[2].replace("'","")
separados[2] = separados[2].replace(" ","")
separados[3] = separados[3].replace(" ","")
separados[4] = separados[4].replace(" ","")
#Asignar Variables al diccionario
propiedades['fila'] = separados[0]
propiedades['columna'] = separados[1]
propiedades['nombre_matriz'] = separados[2]
propiedades['forma_nodo'] = separados[3]
propiedades['matriz_doble'] = separados[4]
elif re.search(pattern_fila, i):
separado2 = re.findall(r"\).*",i)
separados2 = separado2[0].replace(")"," ")
separados2 = separados2.replace(";","")
separados2 = separados2.replace(" ","")
separado = re.findall(r"\(.*\)",i)
separados = separado[0].replace("(","")
separados = separados.replace(")","")
separados = separados.replace(";","")
separados = separados.replace(" ","")
separados = re.split(r",",separados)
num = 0
for nom in separados:
nom = nom.replace("'", "")
nom = nom.replace(" ", "")
nodos.append(nodo(num, num_fila, nom, separados2))
num = num+1
num_fila = num_fila + 1
elif re.search(pattern_nodo, i):
separado = re.findall(r"\(.*,.*,.*\).*;",i)
separados = separado[0].replace("(","")
separados = separados.replace(")",",")
separados = separados.replace(";","")
separados = re.split(r",",separados)
separados[0] = separados[0].replace(" ","")
separados[1] = separados[1].replace(" ","")
separados[2] = separados[2].replace("'","")
separados[2] = separados[2].replace(" ","")
separados[3] = separados[3].replace(" ","")
nodos.append(nodo(int(separados[0])-1, int(separados[1])-1, separados[2], separados[3]))
elif re.search(pattern_defecto, i):
separado = re.findall(r"\(.*\).*",i)
separados = separado[0].replace("(","")
separados = separados.replace(")",",")
separados = separados.replace(";","")
separados = re.split(r",",separados)
separados[0] = separados[0].replace("'","")
separados[0] = separados[0].replace(" ","")
separados[1] = separados[1].replace(" ","")
for nod in nodos:
if nod.nombre == "#":
nod.nombre = separados[0]
nombre_def = separados[0]
if nod.color == "#":
nod.color = separados[1]
color_def = separados[1]
mat = []
for i in range(0,int(propiedades["columna"])):
mat.append([])
for j in range(0, int(propiedades["fila"])):
mat[i].append(nodo(str(j),str(i),nombre_def, color_def))
for i in range(0,int(propiedades["columna"])):
for j in range(0, int(propiedades["fila"])):
for k in nodos:
if mat[i][j].fila == str(int(k.fila)) and mat[i][j].columna == str(int(k.columna)):
mat[i][j] = k
# for i in range(0,int(propiedades["columna"])):
# for j in range(0, int(propiedades["fila"])):
# print(mat[i][j].fila, mat[i][j].columna,mat[i][j].nombre, mat[i][j].color)
# print(mat)
matriz = (propiedades, mat)
# for i in nodos:
# print(i.nombre, i.color, i.columna, i.fila)
graficar_matriz(matriz)
# leer_archivo_matriz("Matriz.lfp")
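
A plausible input for leer_archivo_matriz, inferred only from the regex patterns and string handling above; the real .lfp grammar may differ, and graficar_matriz comes from the mapa module that is not shown here:

ejemplo = """MATRIZ(3,3,'Mapa',circulo,falso){
    FILA('A','B','#') rojo;
    FILA('C','#','D') #;
    NODO(3,3,'Z') azul;
} DEFECTO('O') gris
"""
with open("Matriz.lfp", "w", encoding="utf-8") as f:
    f.write(ejemplo)
leer_archivo_matriz("Matriz.lfp")   # '#' names/colors fall back to the DEFECTO values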
|
normal
|
{
"blob_id": "70373c74e459efb2a310d94ae906910423e8bfd4",
"index": 6631,
"step-1": "<mask token>\n\n\nclass nodo:\n\n def __init__(self, x, y, n, c):\n self.columna = x\n self.fila = y\n self.nombre = n\n self.color = c\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass nodo:\n\n def __init__(self, x, y, n, c):\n self.columna = x\n self.fila = y\n self.nombre = n\n self.color = c\n\n\n<mask token>\n\n\ndef leer_archivo_matriz(path):\n with open(path, 'r', encoding='utf-8') as f:\n lineas = f.readlines()\n num_fila = 0\n estado = ''\n for i in lineas:\n if re.search(pattern_matriz, i):\n separado = re.findall('\\\\(.*,.*,.*,.*,.*\\\\)', i)\n separados = separado[0].replace('(', '')\n separados = separados.replace(')', '')\n separados = re.split(',', separados)\n separados[0] = separados[0].replace(' ', '')\n separados[1] = separados[1].replace(' ', '')\n separados[2] = separados[2].replace(\"'\", '')\n separados[2] = separados[2].replace(' ', '')\n separados[3] = separados[3].replace(' ', '')\n separados[4] = separados[4].replace(' ', '')\n propiedades['fila'] = separados[0]\n propiedades['columna'] = separados[1]\n propiedades['nombre_matriz'] = separados[2]\n propiedades['forma_nodo'] = separados[3]\n propiedades['matriz_doble'] = separados[4]\n elif re.search(pattern_fila, i):\n separado2 = re.findall('\\\\).*', i)\n separados2 = separado2[0].replace(')', ' ')\n separados2 = separados2.replace(';', '')\n separados2 = separados2.replace(' ', '')\n separado = re.findall('\\\\(.*\\\\)', i)\n separados = separado[0].replace('(', '')\n separados = separados.replace(')', '')\n separados = separados.replace(';', '')\n separados = separados.replace(' ', '')\n separados = re.split(',', separados)\n num = 0\n for nom in separados:\n nom = nom.replace(\"'\", '')\n nom = nom.replace(' ', '')\n nodos.append(nodo(num, num_fila, nom, separados2))\n num = num + 1\n num_fila = num_fila + 1\n elif re.search(pattern_nodo, i):\n separado = re.findall('\\\\(.*,.*,.*\\\\).*;', i)\n separados = separado[0].replace('(', '')\n separados = separados.replace(')', ',')\n separados = separados.replace(';', '')\n separados = re.split(',', separados)\n separados[0] = separados[0].replace(' ', '')\n separados[1] = separados[1].replace(' ', '')\n separados[2] = separados[2].replace(\"'\", '')\n separados[2] = separados[2].replace(' ', '')\n separados[3] = separados[3].replace(' ', '')\n nodos.append(nodo(int(separados[0]) - 1, int(separados[1]) -\n 1, separados[2], separados[3]))\n elif re.search(pattern_defecto, i):\n separado = re.findall('\\\\(.*\\\\).*', i)\n separados = separado[0].replace('(', '')\n separados = separados.replace(')', ',')\n separados = separados.replace(';', '')\n separados = re.split(',', separados)\n separados[0] = separados[0].replace(\"'\", '')\n separados[0] = separados[0].replace(' ', '')\n separados[1] = separados[1].replace(' ', '')\n for nod in nodos:\n if nod.nombre == '#':\n nod.nombre = separados[0]\n nombre_def = separados[0]\n if nod.color == '#':\n nod.color = separados[1]\n color_def = separados[1]\n mat = []\n for i in range(0, int(propiedades['columna'])):\n mat.append([])\n for j in range(0, int(propiedades['fila'])):\n mat[i].append(nodo(str(j), str(i), nombre_def, color_def))\n for i in range(0, int(propiedades['columna'])):\n for j in range(0, int(propiedades['fila'])):\n for k in nodos:\n if mat[i][j].fila == str(int(k.fila)) and mat[i][j\n ].columna == str(int(k.columna)):\n mat[i][j] = k\n matriz = propiedades, mat\n graficar_matriz(matriz)\n",
"step-3": "<mask token>\n\n\nclass nodo:\n\n def __init__(self, x, y, n, c):\n self.columna = x\n self.fila = y\n self.nombre = n\n self.color = c\n\n\npattern_matriz = '[M|m][A|a][T|t][R|r][I|i][Z|z]\\\\s*\\\\(.*,.*,.*,.*,.*\\\\)\\\\{'\npattern_fila = '[F|f][I|i][L|l][A|a]\\\\s*\\\\(.*\\\\)\\\\s*.*;'\npattern_nodo = '[N|n][O|o][D|d][O|o]\\\\s*\\\\(.*,.*,.*\\\\).*;'\npattern_defecto = '\\\\}\\\\s*[D|d][E|e][F|f][E|e][C|c][T|t][O|o]\\\\s*\\\\(.*\\\\).*'\npropiedades = {'fila': '', 'columna': '', 'nombre_matriz': '', 'forma_nodo':\n '', 'matriz_doble': ''}\nnodos = []\nnombre_def = ''\ncolor_def = ''\n\n\ndef leer_archivo_matriz(path):\n with open(path, 'r', encoding='utf-8') as f:\n lineas = f.readlines()\n num_fila = 0\n estado = ''\n for i in lineas:\n if re.search(pattern_matriz, i):\n separado = re.findall('\\\\(.*,.*,.*,.*,.*\\\\)', i)\n separados = separado[0].replace('(', '')\n separados = separados.replace(')', '')\n separados = re.split(',', separados)\n separados[0] = separados[0].replace(' ', '')\n separados[1] = separados[1].replace(' ', '')\n separados[2] = separados[2].replace(\"'\", '')\n separados[2] = separados[2].replace(' ', '')\n separados[3] = separados[3].replace(' ', '')\n separados[4] = separados[4].replace(' ', '')\n propiedades['fila'] = separados[0]\n propiedades['columna'] = separados[1]\n propiedades['nombre_matriz'] = separados[2]\n propiedades['forma_nodo'] = separados[3]\n propiedades['matriz_doble'] = separados[4]\n elif re.search(pattern_fila, i):\n separado2 = re.findall('\\\\).*', i)\n separados2 = separado2[0].replace(')', ' ')\n separados2 = separados2.replace(';', '')\n separados2 = separados2.replace(' ', '')\n separado = re.findall('\\\\(.*\\\\)', i)\n separados = separado[0].replace('(', '')\n separados = separados.replace(')', '')\n separados = separados.replace(';', '')\n separados = separados.replace(' ', '')\n separados = re.split(',', separados)\n num = 0\n for nom in separados:\n nom = nom.replace(\"'\", '')\n nom = nom.replace(' ', '')\n nodos.append(nodo(num, num_fila, nom, separados2))\n num = num + 1\n num_fila = num_fila + 1\n elif re.search(pattern_nodo, i):\n separado = re.findall('\\\\(.*,.*,.*\\\\).*;', i)\n separados = separado[0].replace('(', '')\n separados = separados.replace(')', ',')\n separados = separados.replace(';', '')\n separados = re.split(',', separados)\n separados[0] = separados[0].replace(' ', '')\n separados[1] = separados[1].replace(' ', '')\n separados[2] = separados[2].replace(\"'\", '')\n separados[2] = separados[2].replace(' ', '')\n separados[3] = separados[3].replace(' ', '')\n nodos.append(nodo(int(separados[0]) - 1, int(separados[1]) -\n 1, separados[2], separados[3]))\n elif re.search(pattern_defecto, i):\n separado = re.findall('\\\\(.*\\\\).*', i)\n separados = separado[0].replace('(', '')\n separados = separados.replace(')', ',')\n separados = separados.replace(';', '')\n separados = re.split(',', separados)\n separados[0] = separados[0].replace(\"'\", '')\n separados[0] = separados[0].replace(' ', '')\n separados[1] = separados[1].replace(' ', '')\n for nod in nodos:\n if nod.nombre == '#':\n nod.nombre = separados[0]\n nombre_def = separados[0]\n if nod.color == '#':\n nod.color = separados[1]\n color_def = separados[1]\n mat = []\n for i in range(0, int(propiedades['columna'])):\n mat.append([])\n for j in range(0, int(propiedades['fila'])):\n mat[i].append(nodo(str(j), str(i), nombre_def, color_def))\n for i in range(0, int(propiedades['columna'])):\n for j in range(0, int(propiedades['fila'])):\n 
for k in nodos:\n if mat[i][j].fila == str(int(k.fila)) and mat[i][j\n ].columna == str(int(k.columna)):\n mat[i][j] = k\n matriz = propiedades, mat\n graficar_matriz(matriz)\n",
"step-4": "import re\nfrom mapa import graficar_lista, graficar_matriz\n\n\nclass nodo:\n\n def __init__(self, x, y, n, c):\n self.columna = x\n self.fila = y\n self.nombre = n\n self.color = c\n\n\npattern_matriz = '[M|m][A|a][T|t][R|r][I|i][Z|z]\\\\s*\\\\(.*,.*,.*,.*,.*\\\\)\\\\{'\npattern_fila = '[F|f][I|i][L|l][A|a]\\\\s*\\\\(.*\\\\)\\\\s*.*;'\npattern_nodo = '[N|n][O|o][D|d][O|o]\\\\s*\\\\(.*,.*,.*\\\\).*;'\npattern_defecto = '\\\\}\\\\s*[D|d][E|e][F|f][E|e][C|c][T|t][O|o]\\\\s*\\\\(.*\\\\).*'\npropiedades = {'fila': '', 'columna': '', 'nombre_matriz': '', 'forma_nodo':\n '', 'matriz_doble': ''}\nnodos = []\nnombre_def = ''\ncolor_def = ''\n\n\ndef leer_archivo_matriz(path):\n with open(path, 'r', encoding='utf-8') as f:\n lineas = f.readlines()\n num_fila = 0\n estado = ''\n for i in lineas:\n if re.search(pattern_matriz, i):\n separado = re.findall('\\\\(.*,.*,.*,.*,.*\\\\)', i)\n separados = separado[0].replace('(', '')\n separados = separados.replace(')', '')\n separados = re.split(',', separados)\n separados[0] = separados[0].replace(' ', '')\n separados[1] = separados[1].replace(' ', '')\n separados[2] = separados[2].replace(\"'\", '')\n separados[2] = separados[2].replace(' ', '')\n separados[3] = separados[3].replace(' ', '')\n separados[4] = separados[4].replace(' ', '')\n propiedades['fila'] = separados[0]\n propiedades['columna'] = separados[1]\n propiedades['nombre_matriz'] = separados[2]\n propiedades['forma_nodo'] = separados[3]\n propiedades['matriz_doble'] = separados[4]\n elif re.search(pattern_fila, i):\n separado2 = re.findall('\\\\).*', i)\n separados2 = separado2[0].replace(')', ' ')\n separados2 = separados2.replace(';', '')\n separados2 = separados2.replace(' ', '')\n separado = re.findall('\\\\(.*\\\\)', i)\n separados = separado[0].replace('(', '')\n separados = separados.replace(')', '')\n separados = separados.replace(';', '')\n separados = separados.replace(' ', '')\n separados = re.split(',', separados)\n num = 0\n for nom in separados:\n nom = nom.replace(\"'\", '')\n nom = nom.replace(' ', '')\n nodos.append(nodo(num, num_fila, nom, separados2))\n num = num + 1\n num_fila = num_fila + 1\n elif re.search(pattern_nodo, i):\n separado = re.findall('\\\\(.*,.*,.*\\\\).*;', i)\n separados = separado[0].replace('(', '')\n separados = separados.replace(')', ',')\n separados = separados.replace(';', '')\n separados = re.split(',', separados)\n separados[0] = separados[0].replace(' ', '')\n separados[1] = separados[1].replace(' ', '')\n separados[2] = separados[2].replace(\"'\", '')\n separados[2] = separados[2].replace(' ', '')\n separados[3] = separados[3].replace(' ', '')\n nodos.append(nodo(int(separados[0]) - 1, int(separados[1]) -\n 1, separados[2], separados[3]))\n elif re.search(pattern_defecto, i):\n separado = re.findall('\\\\(.*\\\\).*', i)\n separados = separado[0].replace('(', '')\n separados = separados.replace(')', ',')\n separados = separados.replace(';', '')\n separados = re.split(',', separados)\n separados[0] = separados[0].replace(\"'\", '')\n separados[0] = separados[0].replace(' ', '')\n separados[1] = separados[1].replace(' ', '')\n for nod in nodos:\n if nod.nombre == '#':\n nod.nombre = separados[0]\n nombre_def = separados[0]\n if nod.color == '#':\n nod.color = separados[1]\n color_def = separados[1]\n mat = []\n for i in range(0, int(propiedades['columna'])):\n mat.append([])\n for j in range(0, int(propiedades['fila'])):\n mat[i].append(nodo(str(j), str(i), nombre_def, color_def))\n for i in range(0, int(propiedades['columna'])):\n 
for j in range(0, int(propiedades['fila'])):\n for k in nodos:\n if mat[i][j].fila == str(int(k.fila)) and mat[i][j\n ].columna == str(int(k.columna)):\n mat[i][j] = k\n matriz = propiedades, mat\n graficar_matriz(matriz)\n",
"step-5": "import re\nfrom mapa import graficar_lista, graficar_matriz\n\nclass nodo:\n def __init__(self, x, y, n, c):\n self.columna = x\n self.fila = y\n self.nombre = n\n self.color = c\n\n\npattern_matriz = r\"[M|m][A|a][T|t][R|r][I|i][Z|z]\\s*\\(.*,.*,.*,.*,.*\\)\\{\"\npattern_fila = r\"[F|f][I|i][L|l][A|a]\\s*\\(.*\\)\\s*.*;\"\npattern_nodo = r\"[N|n][O|o][D|d][O|o]\\s*\\(.*,.*,.*\\).*;\"\npattern_defecto = r\"\\}\\s*[D|d][E|e][F|f][E|e][C|c][T|t][O|o]\\s*\\(.*\\).*\"\n\npropiedades = {\n 'fila' : '',\n 'columna' : '',\n 'nombre_matriz' : '',\n 'forma_nodo' : '',\n 'matriz_doble': '',\n}\n\nnodos = []\nnombre_def = \"\"\ncolor_def = \"\"\ndef leer_archivo_matriz(path): \n with open(path, 'r', encoding='utf-8') as f:\n lineas = f.readlines()\n num_fila = 0\n estado = \"\"\n for i in lineas:\n if re.search(pattern_matriz, i):\n separado = re.findall(r\"\\(.*,.*,.*,.*,.*\\)\",i)\n separados = separado[0].replace(\"(\",\"\")\n separados = separados.replace(\")\",\"\")\n separados = re.split(r\",\",separados)\n separados[0] = separados[0].replace(\" \",\"\")\n separados[1] = separados[1].replace(\" \",\"\")\n separados[2] = separados[2].replace(\"'\",\"\")\n separados[2] = separados[2].replace(\" \",\"\")\n separados[3] = separados[3].replace(\" \",\"\")\n separados[4] = separados[4].replace(\" \",\"\")\n\n #Asignar Variables al diccionario\n propiedades['fila'] = separados[0]\n propiedades['columna'] = separados[1]\n propiedades['nombre_matriz'] = separados[2]\n propiedades['forma_nodo'] = separados[3]\n propiedades['matriz_doble'] = separados[4]\n\n elif re.search(pattern_fila, i):\n separado2 = re.findall(r\"\\).*\",i)\n separados2 = separado2[0].replace(\")\",\" \")\n separados2 = separados2.replace(\";\",\"\")\n separados2 = separados2.replace(\" \",\"\")\n\n separado = re.findall(r\"\\(.*\\)\",i)\n separados = separado[0].replace(\"(\",\"\")\n separados = separados.replace(\")\",\"\")\n separados = separados.replace(\";\",\"\")\n separados = separados.replace(\" \",\"\")\n\n separados = re.split(r\",\",separados)\n num = 0\n for nom in separados:\n nom = nom.replace(\"'\", \"\")\n nom = nom.replace(\" \", \"\")\n nodos.append(nodo(num, num_fila, nom, separados2))\n num = num+1 \n\n num_fila = num_fila + 1\n\n elif re.search(pattern_nodo, i):\n separado = re.findall(r\"\\(.*,.*,.*\\).*;\",i)\n separados = separado[0].replace(\"(\",\"\")\n separados = separados.replace(\")\",\",\")\n separados = separados.replace(\";\",\"\")\n\n separados = re.split(r\",\",separados)\n separados[0] = separados[0].replace(\" \",\"\")\n separados[1] = separados[1].replace(\" \",\"\")\n separados[2] = separados[2].replace(\"'\",\"\")\n separados[2] = separados[2].replace(\" \",\"\")\n separados[3] = separados[3].replace(\" \",\"\")\n\n nodos.append(nodo(int(separados[0])-1, int(separados[1])-1, separados[2], separados[3]))\n \n elif re.search(pattern_defecto, i):\n separado = re.findall(r\"\\(.*\\).*\",i)\n separados = separado[0].replace(\"(\",\"\")\n separados = separados.replace(\")\",\",\")\n separados = separados.replace(\";\",\"\")\n\n separados = re.split(r\",\",separados)\n separados[0] = separados[0].replace(\"'\",\"\")\n separados[0] = separados[0].replace(\" \",\"\")\n separados[1] = separados[1].replace(\" \",\"\")\n\n for nod in nodos:\n if nod.nombre == \"#\":\n nod.nombre = separados[0]\n nombre_def = separados[0]\n if nod.color == \"#\":\n nod.color = separados[1]\n color_def = separados[1]\n \n mat = []\n for i in range(0,int(propiedades[\"columna\"])):\n mat.append([])\n for j in 
range(0, int(propiedades[\"fila\"])):\n mat[i].append(nodo(str(j),str(i),nombre_def, color_def))\n \n for i in range(0,int(propiedades[\"columna\"])):\n for j in range(0, int(propiedades[\"fila\"])):\n for k in nodos:\n if mat[i][j].fila == str(int(k.fila)) and mat[i][j].columna == str(int(k.columna)):\n mat[i][j] = k\n \n # for i in range(0,int(propiedades[\"columna\"])):\n # for j in range(0, int(propiedades[\"fila\"])):\n # print(mat[i][j].fila, mat[i][j].columna,mat[i][j].nombre, mat[i][j].color)\n \n # print(mat)\n\n \n matriz = (propiedades, mat)\n\n # for i in nodos:\n # print(i.nombre, i.color, i.columna, i.fila)\n\n graficar_matriz(matriz)\n \n# leer_archivo_matriz(\"Matriz.lfp\")",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
#! /usr/bin/env python3
import EchooFunctions, cgi, MySQLdb, hashlib, time, requests, os
print ('Content-type: text/html\n')
form = cgi.FieldStorage()
#database connection
user = "i494f18_team34"
db_pass = "my+sql=i494f18_team34"
db_con = MySQLdb.connect(host="db.soic.indiana.edu", port = 3306, user=user, passwd=db_pass, db=user)
cursor = db_con.cursor()
receiverID = form.getfirst('user','')
userName = ""
userID = ""
if "echooUser" in str(os.environ):
userName = EchooFunctions.getUserName()
userName = userName[0]
userID = EchooFunctions.getUserID(cursor, userName)
admin = False
#change the status of veriable
if userName != "":
if EchooFunctions.checkUserType(cursor, userName) == "administrator":
admin = True
#main contents to insert
friend = ""
friendList = ""
chatroom = ""
userList = []
if userID != "" and receiverID !="":
try:
SQL = "select u.userID, u.username, u.icon, m.detail, m.time_in,m.messageID from user as u, private_message as m where u.userID = "
SQL+= "m.sender and m.receiver = "+str(userID)+" and m.sender = "+str(receiverID)
SQL+= " Union select u.userID, u.username, u.icon, m.detail, m.time_in ,m.messageID from user as u, private_message as m where u.userID = "
SQL+= "m.sender and m.receiver = "+str(receiverID)+" and m.sender = "+str(userID)
SQL+=" Order By messageID ;"
cursor.execute(SQL)
results = cursor.fetchall()
except Exception as e:
print('<p>Something went wrong with the first SQL!</p>')
print(SQL, "Error:", e)
else:
if results:
count = 5
for row in results:
word_count = 0
specialChar=row[3]
specialChar2 = ""
specialChar=EchooFunctions.returnSpecialChara(specialChar)
for x in specialChar:
if word_count<=20:
specialChar2 += x
word_count+=1
else:
specialChar2 += x +"<p>"
word_count = 0
if count >= 5:
chatroom+='<li class="chatDate">'+str(row[4])+'</li>'
count=0
if str(row[0]) ==str(userID):
count+=1
chatroom+='<li class="mainUser">'+'<a href="userProfile.cgi?user='+str(row[0])+'">'+row[1]+'</a><img src="images/user/'+row[2]+'" alt="club1">'
chatroom+='<br><div class="messageLine">'+specialChar2+'</div></li>'
else:
count+=1
chatroom+='<li class="otherUser"><img src="images/user/'+row[2]+'" alt="club1">'
chatroom+='<a href="userProfile.cgi?userid='+str(row[0])+'">'+row[1]+'</a><br><div class="messageLine">'+specialChar2+'</div></li>'
if userID == "" or receiverID =="":
content ="""<p>You don't have right access to this page</p>
<a href='index.cgi'></a>"""
print(content)
print(chatroom)
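
Editor's sketch, not part of the original script: the same UNION query expressed with MySQLdb placeholders, so the receiver id read from the form is passed as a parameter instead of being concatenated into the SQL string:

SQL = (
    "SELECT u.userID, u.username, u.icon, m.detail, m.time_in, m.messageID "
    "FROM user AS u, private_message AS m "
    "WHERE u.userID = m.sender AND m.receiver = %s AND m.sender = %s "
    "UNION "
    "SELECT u.userID, u.username, u.icon, m.detail, m.time_in, m.messageID "
    "FROM user AS u, private_message AS m "
    "WHERE u.userID = m.sender AND m.receiver = %s AND m.sender = %s "
    "ORDER BY messageID")
cursor.execute(SQL, (userID, receiverID, receiverID, userID))
results = cursor.fetchall()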
|
normal
|
{
"blob_id": "dc88686d3cbb4223b4de6847bf4fc29b93054b00",
"index": 495,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nprint('Content-type: text/html\\n')\n<mask token>\nif 'echooUser' in str(os.environ):\n userName = EchooFunctions.getUserName()\n userName = userName[0]\n userID = EchooFunctions.getUserID(cursor, userName)\n<mask token>\nif userName != '':\n if EchooFunctions.checkUserType(cursor, userName) == 'administrator':\n admin = True\n<mask token>\nif userID != '' and receiverID != '':\n try:\n SQL = (\n 'select u.userID, u.username, u.icon, m.detail, m.time_in,m.messageID from user as u, private_message as m where u.userID = '\n )\n SQL += 'm.sender and m.receiver = ' + str(userID\n ) + ' and m.sender = ' + str(receiverID)\n SQL += (\n ' Union select u.userID, u.username, u.icon, m.detail, m.time_in ,m.messageID from user as u, private_message as m where u.userID = '\n )\n SQL += 'm.sender and m.receiver = ' + str(receiverID\n ) + ' and m.sender = ' + str(userID)\n SQL += ' Order By messageID ;'\n cursor.execute(SQL)\n results = cursor.fetchall()\n except Exception as e:\n print('<p>Something went wrong with the first SQL!</p>')\n print(SQL, 'Error:', e)\n else:\n if results:\n count = 5\n for row in results:\n word_count = 0\n specialChar = row[3]\n specialChar2 = ''\n specialChar = EchooFunctions.returnSpecialChara(specialChar)\n for x in specialChar:\n if word_count <= 20:\n specialChar2 += x\n word_count += 1\n else:\n specialChar2 += x + '<p>'\n word_count = 0\n if count >= 5:\n chatroom += '<li class=\"chatDate\">' + str(row[4]) + '</li>'\n count = 0\n if str(row[0]) == str(userID):\n count += 1\n chatroom += ('<li class=\"mainUser\">' +\n '<a href=\"userProfile.cgi?user=' + str(row[0]) +\n '\">' + row[1] + '</a><img src=\"images/user/' + row[\n 2] + '\" alt=\"club1\">')\n chatroom += ('<br><div class=\"messageLine\">' +\n specialChar2 + '</div></li>')\n else:\n count += 1\n chatroom += (\n '<li class=\"otherUser\"><img src=\"images/user/' +\n row[2] + '\" alt=\"club1\">')\n chatroom += ('<a href=\"userProfile.cgi?userid=' + str(\n row[0]) + '\">' + row[1] +\n '</a><br><div class=\"messageLine\">' + specialChar2 +\n '</div></li>')\nif userID == '' or receiverID == '':\n content = (\n \"<p>You don't have right access to this page</p>\\n<a href='index.cgi'></a>\"\n )\n print(content)\nprint(chatroom)\n",
"step-3": "<mask token>\nprint('Content-type: text/html\\n')\nform = cgi.FieldStorage()\nuser = 'i494f18_team34'\ndb_pass = 'my+sql=i494f18_team34'\ndb_con = MySQLdb.connect(host='db.soic.indiana.edu', port=3306, user=user,\n passwd=db_pass, db=user)\ncursor = db_con.cursor()\nreceiverID = form.getfirst('user', '')\nuserName = ''\nuserID = ''\nif 'echooUser' in str(os.environ):\n userName = EchooFunctions.getUserName()\n userName = userName[0]\n userID = EchooFunctions.getUserID(cursor, userName)\nadmin = False\nif userName != '':\n if EchooFunctions.checkUserType(cursor, userName) == 'administrator':\n admin = True\nfriend = ''\nfriendList = ''\nchatroom = ''\nuserList = []\nif userID != '' and receiverID != '':\n try:\n SQL = (\n 'select u.userID, u.username, u.icon, m.detail, m.time_in,m.messageID from user as u, private_message as m where u.userID = '\n )\n SQL += 'm.sender and m.receiver = ' + str(userID\n ) + ' and m.sender = ' + str(receiverID)\n SQL += (\n ' Union select u.userID, u.username, u.icon, m.detail, m.time_in ,m.messageID from user as u, private_message as m where u.userID = '\n )\n SQL += 'm.sender and m.receiver = ' + str(receiverID\n ) + ' and m.sender = ' + str(userID)\n SQL += ' Order By messageID ;'\n cursor.execute(SQL)\n results = cursor.fetchall()\n except Exception as e:\n print('<p>Something went wrong with the first SQL!</p>')\n print(SQL, 'Error:', e)\n else:\n if results:\n count = 5\n for row in results:\n word_count = 0\n specialChar = row[3]\n specialChar2 = ''\n specialChar = EchooFunctions.returnSpecialChara(specialChar)\n for x in specialChar:\n if word_count <= 20:\n specialChar2 += x\n word_count += 1\n else:\n specialChar2 += x + '<p>'\n word_count = 0\n if count >= 5:\n chatroom += '<li class=\"chatDate\">' + str(row[4]) + '</li>'\n count = 0\n if str(row[0]) == str(userID):\n count += 1\n chatroom += ('<li class=\"mainUser\">' +\n '<a href=\"userProfile.cgi?user=' + str(row[0]) +\n '\">' + row[1] + '</a><img src=\"images/user/' + row[\n 2] + '\" alt=\"club1\">')\n chatroom += ('<br><div class=\"messageLine\">' +\n specialChar2 + '</div></li>')\n else:\n count += 1\n chatroom += (\n '<li class=\"otherUser\"><img src=\"images/user/' +\n row[2] + '\" alt=\"club1\">')\n chatroom += ('<a href=\"userProfile.cgi?userid=' + str(\n row[0]) + '\">' + row[1] +\n '</a><br><div class=\"messageLine\">' + specialChar2 +\n '</div></li>')\nif userID == '' or receiverID == '':\n content = (\n \"<p>You don't have right access to this page</p>\\n<a href='index.cgi'></a>\"\n )\n print(content)\nprint(chatroom)\n",
"step-4": "import EchooFunctions, cgi, MySQLdb, hashlib, time, requests, os\nprint('Content-type: text/html\\n')\nform = cgi.FieldStorage()\nuser = 'i494f18_team34'\ndb_pass = 'my+sql=i494f18_team34'\ndb_con = MySQLdb.connect(host='db.soic.indiana.edu', port=3306, user=user,\n passwd=db_pass, db=user)\ncursor = db_con.cursor()\nreceiverID = form.getfirst('user', '')\nuserName = ''\nuserID = ''\nif 'echooUser' in str(os.environ):\n userName = EchooFunctions.getUserName()\n userName = userName[0]\n userID = EchooFunctions.getUserID(cursor, userName)\nadmin = False\nif userName != '':\n if EchooFunctions.checkUserType(cursor, userName) == 'administrator':\n admin = True\nfriend = ''\nfriendList = ''\nchatroom = ''\nuserList = []\nif userID != '' and receiverID != '':\n try:\n SQL = (\n 'select u.userID, u.username, u.icon, m.detail, m.time_in,m.messageID from user as u, private_message as m where u.userID = '\n )\n SQL += 'm.sender and m.receiver = ' + str(userID\n ) + ' and m.sender = ' + str(receiverID)\n SQL += (\n ' Union select u.userID, u.username, u.icon, m.detail, m.time_in ,m.messageID from user as u, private_message as m where u.userID = '\n )\n SQL += 'm.sender and m.receiver = ' + str(receiverID\n ) + ' and m.sender = ' + str(userID)\n SQL += ' Order By messageID ;'\n cursor.execute(SQL)\n results = cursor.fetchall()\n except Exception as e:\n print('<p>Something went wrong with the first SQL!</p>')\n print(SQL, 'Error:', e)\n else:\n if results:\n count = 5\n for row in results:\n word_count = 0\n specialChar = row[3]\n specialChar2 = ''\n specialChar = EchooFunctions.returnSpecialChara(specialChar)\n for x in specialChar:\n if word_count <= 20:\n specialChar2 += x\n word_count += 1\n else:\n specialChar2 += x + '<p>'\n word_count = 0\n if count >= 5:\n chatroom += '<li class=\"chatDate\">' + str(row[4]) + '</li>'\n count = 0\n if str(row[0]) == str(userID):\n count += 1\n chatroom += ('<li class=\"mainUser\">' +\n '<a href=\"userProfile.cgi?user=' + str(row[0]) +\n '\">' + row[1] + '</a><img src=\"images/user/' + row[\n 2] + '\" alt=\"club1\">')\n chatroom += ('<br><div class=\"messageLine\">' +\n specialChar2 + '</div></li>')\n else:\n count += 1\n chatroom += (\n '<li class=\"otherUser\"><img src=\"images/user/' +\n row[2] + '\" alt=\"club1\">')\n chatroom += ('<a href=\"userProfile.cgi?userid=' + str(\n row[0]) + '\">' + row[1] +\n '</a><br><div class=\"messageLine\">' + specialChar2 +\n '</div></li>')\nif userID == '' or receiverID == '':\n content = (\n \"<p>You don't have right access to this page</p>\\n<a href='index.cgi'></a>\"\n )\n print(content)\nprint(chatroom)\n",
"step-5": "#! /usr/bin/env python3\n\nimport EchooFunctions, cgi, MySQLdb, hashlib, time, requests, os\nprint ('Content-type: text/html\\n')\n\nform = cgi.FieldStorage()\n\n#database connection\nuser = \"i494f18_team34\"\ndb_pass = \"my+sql=i494f18_team34\"\ndb_con = MySQLdb.connect(host=\"db.soic.indiana.edu\", port = 3306, user=user, passwd=db_pass, db=user)\ncursor = db_con.cursor()\nreceiverID = form.getfirst('user','')\nuserName = \"\"\nuserID = \"\"\nif \"echooUser\" in str(os.environ):\n userName = EchooFunctions.getUserName()\n userName = userName[0]\n userID = EchooFunctions.getUserID(cursor, userName)\n\nadmin = False\n#change the status of veriable\nif userName != \"\":\n if EchooFunctions.checkUserType(cursor, userName) == \"administrator\":\n admin = True\n#main contents to insert\nfriend = \"\"\nfriendList = \"\"\nchatroom = \"\"\nuserList = []\nif userID != \"\" and receiverID !=\"\":\n try:\n SQL = \"select u.userID, u.username, u.icon, m.detail, m.time_in,m.messageID from user as u, private_message as m where u.userID = \"\n SQL+= \"m.sender and m.receiver = \"+str(userID)+\" and m.sender = \"+str(receiverID)\n SQL+= \" Union select u.userID, u.username, u.icon, m.detail, m.time_in ,m.messageID from user as u, private_message as m where u.userID = \"\n SQL+= \"m.sender and m.receiver = \"+str(receiverID)+\" and m.sender = \"+str(userID)\n SQL+=\" Order By messageID ;\"\n cursor.execute(SQL)\n results = cursor.fetchall()\n except Exception as e:\n print('<p>Something went wrong with the first SQL!</p>')\n print(SQL, \"Error:\", e)\n else:\n if results:\n count = 5\n for row in results:\n word_count = 0\n specialChar=row[3]\n specialChar2 = \"\"\n specialChar=EchooFunctions.returnSpecialChara(specialChar)\n for x in specialChar:\n if word_count<=20:\n specialChar2 += x\n word_count+=1\n else:\n specialChar2 += x +\"<p>\"\n word_count = 0\n if count >= 5:\n chatroom+='<li class=\"chatDate\">'+str(row[4])+'</li>'\n count=0\n if str(row[0]) ==str(userID):\n count+=1\n chatroom+='<li class=\"mainUser\">'+'<a href=\"userProfile.cgi?user='+str(row[0])+'\">'+row[1]+'</a><img src=\"images/user/'+row[2]+'\" alt=\"club1\">'\n chatroom+='<br><div class=\"messageLine\">'+specialChar2+'</div></li>'\n else:\n count+=1\n chatroom+='<li class=\"otherUser\"><img src=\"images/user/'+row[2]+'\" alt=\"club1\">'\n chatroom+='<a href=\"userProfile.cgi?userid='+str(row[0])+'\">'+row[1]+'</a><br><div class=\"messageLine\">'+specialChar2+'</div></li>'\n\nif userID == \"\" or receiverID ==\"\":\n content =\"\"\"<p>You don't have right access to this page</p>\n<a href='index.cgi'></a>\"\"\"\n print(content)\nprint(chatroom)\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
from .models import RecommendedArtifact
from .serializers import RecommendedArtifactSerialize
from rest_framework.decorators import api_view
from rest_framework.response import Response
from datetime import datetime
import requests, bs4
# constant value
service_key = "{jo's museum key}"
@api_view(['GET'])
def artifact_save_recommend(request,pageNo):
    # 1. Pick a page and fetch every artifact listed on that page
artifact_url = f"http://www.emuseum.go.kr/openapi/relic/list?serviceKey={service_key}&numOfRows=100&pageNo={pageNo}"
#http://www.emuseum.go.kr/openapi/relic/list?serviceKey=DLuSbLjmCJIDKmhoSB7ELx3eVXXxg9ZBqh9oC8/eFWTcq2gDMqfQA7jrooSkvzWgYv/pd9a6fUJKG40K3VQXHg==&numOfRows=100&pageNo=1
response = requests.get(artifact_url)
response_dict = bs4.BeautifulSoup(response.content, 'html.parser')
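    # parse the listing response and collect the id of every artifact on this page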
search_list = []
for data in response_dict.findAll('data'):
for item in data.findAll('item'):
if item['key'] == 'id':
id_num = item['value']
search_list.append(id_num)
    # 2-1. Set up working variables
detail_list = []
dataDict = {
'id_num': '',
'name': '',
'desc': '',
'museum_name': '',
'nationality_name': '',
'image_uri': '',
}
    # 2-2. Check whether each artifact has a description (desc)
for i in range(len(search_list)):
artifact_num = search_list[i]
artifact_url = f"http://www.emuseum.go.kr/openapi/relic/detail?serviceKey={service_key}&id={artifact_num}"
# http://www.emuseum.go.kr/openapi/relic/detail?serviceKey=DLuSbLjmCJIDKmhoSB7ELx3eVXXxg9ZBqh9oC8/eFWTcq2gDMqfQA7jrooSkvzWgYv/pd9a6fUJKG40K3VQXHg==&id=PS0100100100100021500000
response = requests.get(artifact_url)
response_dict = bs4.BeautifulSoup(response.content, 'html.parser')
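        # copy the fields we need from the detail response into dataDict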
for data in response_dict.findAll('list'):
for item in data.findAll('item'):
if item['key'] == 'id':
dataDict['id_num'] = item['value']
elif item['key'] == 'desc':
dataDict['desc'] = item['value']
elif item['key'] == 'nameKr':
dataDict['name'] = item['value']
elif item['key'] == 'nationalityName2':
dataDict['nationality_name'] = item['value']
elif item['key'] == 'museumName2':
dataDict['museum_name'] = item['value']
elif item['key'] == 'imgThumUriM':
dataDict['image_uri'] = item['value']
        # 2-3. Save artifacts that have a description to the database
if dataDict['desc'] != '':
serializer = RecommendedArtifactSerialize(data=dataDict)
if serializer.is_valid(raise_exception=True):
serializer.save()
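            # reset the working dict so the next artifact starts from a clean slate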
dataDict = {
'id_num': '',
'name': '',
'desc': '',
'museum_name': '',
'nationality_name': '',
'image_uri': '',
}
return Response(serializer.data)
@api_view(['GET'])
def artifact_recommend(request):
    ## How many days into the year is today?
now = datetime.now()
nowYear = now.year
nowMonth = now.month
nowDay = now.day
daySum = 0
    if (nowYear % 4 == 0 and nowYear % 100 != 0) or nowYear % 400 == 0:
        month = [31, 29, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]
    else:
        month = [31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]
for i in range(nowMonth-1):
daySum += month[i]
daySum += nowDay
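    # daySum is now the day of the year; use it as an index to rotate the daily recommendation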
Recommended_list = RecommendedArtifact.objects.all()
Recommended_artifact = Recommended_list[daySum]
dataDict = {
'id_num': Recommended_artifact.id_num,
'name': Recommended_artifact.name,
'desc': Recommended_artifact.desc,
'museum_name': Recommended_artifact.museum_name,
'nationality_name': Recommended_artifact.nationality_name,
'image_uri': Recommended_artifact.image_uri,
}
# print(Recommended_artifact.name)
return Response(dataDict)
|
normal
|
{
"blob_id": "707e3e60d6d9a3db5b9bc733e912b34e2cec5974",
"index": 8585,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\n@api_view(['GET'])\ndef artifact_save_recommend(request, pageNo):\n artifact_url = (\n f'http://www.emuseum.go.kr/openapi/relic/list?serviceKey={service_key}&numOfRows=100&pageNo={pageNo}'\n )\n response = requests.get(artifact_url)\n response_dict = bs4.BeautifulSoup(response.content, 'html.parser')\n search_list = []\n for data in response_dict.findAll('data'):\n for item in data.findAll('item'):\n if item['key'] == 'id':\n id_num = item['value']\n search_list.append(id_num)\n detail_list = []\n dataDict = {'id_num': '', 'name': '', 'desc': '', 'museum_name': '',\n 'nationality_name': '', 'image_uri': ''}\n for i in range(len(search_list)):\n artifact_num = search_list[i]\n artifact_url = (\n f'http://www.emuseum.go.kr/openapi/relic/detail?serviceKey={service_key}&id={artifact_num}'\n )\n response = requests.get(artifact_url)\n response_dict = bs4.BeautifulSoup(response.content, 'html.parser')\n for data in response_dict.findAll('list'):\n for item in data.findAll('item'):\n if item['key'] == 'id':\n dataDict['id_num'] = item['value']\n elif item['key'] == 'desc':\n dataDict['desc'] = item['value']\n elif item['key'] == 'nameKr':\n dataDict['name'] = item['value']\n elif item['key'] == 'nationalityName2':\n dataDict['nationality_name'] = item['value']\n elif item['key'] == 'museumName2':\n dataDict['museum_name'] = item['value']\n elif item['key'] == 'imgThumUriM':\n dataDict['image_uri'] = item['value']\n if dataDict['desc'] != '':\n serializer = RecommendedArtifactSerialize(data=dataDict)\n if serializer.is_valid(raise_exception=True):\n serializer.save()\n dataDict = {'id_num': '', 'name': '', 'desc': '',\n 'museum_name': '', 'nationality_name': '', 'image_uri': ''}\n return Response(serializer.data)\n\n\n@api_view(['GET'])\ndef artifact_recommend(request):\n now = datetime.now()\n nowYear = now.year\n nowMonth = now.month\n nowDay = now.day\n daySum = 0\n if nowYear % 4 == 0 and nowYear % 100 != 0 or nowYear % 400 == 0:\n month = [31, 29, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]\n else:\n month = [31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]\n for i in range(nowMonth - 1):\n daySum += month[i]\n daySum += nowDay\n Recommended_list = RecommendedArtifact.objects.all()\n Recommended_artifact = Recommended_list[daySum]\n dataDict = {'id_num': Recommended_artifact.id_num, 'name':\n Recommended_artifact.name, 'desc': Recommended_artifact.desc,\n 'museum_name': Recommended_artifact.museum_name, 'nationality_name':\n Recommended_artifact.nationality_name, 'image_uri':\n Recommended_artifact.image_uri}\n return Response(dataDict)\n",
"step-3": "<mask token>\nservice_key = \"{jo's museum key}\"\n\n\n@api_view(['GET'])\ndef artifact_save_recommend(request, pageNo):\n artifact_url = (\n f'http://www.emuseum.go.kr/openapi/relic/list?serviceKey={service_key}&numOfRows=100&pageNo={pageNo}'\n )\n response = requests.get(artifact_url)\n response_dict = bs4.BeautifulSoup(response.content, 'html.parser')\n search_list = []\n for data in response_dict.findAll('data'):\n for item in data.findAll('item'):\n if item['key'] == 'id':\n id_num = item['value']\n search_list.append(id_num)\n detail_list = []\n dataDict = {'id_num': '', 'name': '', 'desc': '', 'museum_name': '',\n 'nationality_name': '', 'image_uri': ''}\n for i in range(len(search_list)):\n artifact_num = search_list[i]\n artifact_url = (\n f'http://www.emuseum.go.kr/openapi/relic/detail?serviceKey={service_key}&id={artifact_num}'\n )\n response = requests.get(artifact_url)\n response_dict = bs4.BeautifulSoup(response.content, 'html.parser')\n for data in response_dict.findAll('list'):\n for item in data.findAll('item'):\n if item['key'] == 'id':\n dataDict['id_num'] = item['value']\n elif item['key'] == 'desc':\n dataDict['desc'] = item['value']\n elif item['key'] == 'nameKr':\n dataDict['name'] = item['value']\n elif item['key'] == 'nationalityName2':\n dataDict['nationality_name'] = item['value']\n elif item['key'] == 'museumName2':\n dataDict['museum_name'] = item['value']\n elif item['key'] == 'imgThumUriM':\n dataDict['image_uri'] = item['value']\n if dataDict['desc'] != '':\n serializer = RecommendedArtifactSerialize(data=dataDict)\n if serializer.is_valid(raise_exception=True):\n serializer.save()\n dataDict = {'id_num': '', 'name': '', 'desc': '',\n 'museum_name': '', 'nationality_name': '', 'image_uri': ''}\n return Response(serializer.data)\n\n\n@api_view(['GET'])\ndef artifact_recommend(request):\n now = datetime.now()\n nowYear = now.year\n nowMonth = now.month\n nowDay = now.day\n daySum = 0\n if nowYear % 4 == 0 and nowYear % 100 != 0 or nowYear % 400 == 0:\n month = [31, 29, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]\n else:\n month = [31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]\n for i in range(nowMonth - 1):\n daySum += month[i]\n daySum += nowDay\n Recommended_list = RecommendedArtifact.objects.all()\n Recommended_artifact = Recommended_list[daySum]\n dataDict = {'id_num': Recommended_artifact.id_num, 'name':\n Recommended_artifact.name, 'desc': Recommended_artifact.desc,\n 'museum_name': Recommended_artifact.museum_name, 'nationality_name':\n Recommended_artifact.nationality_name, 'image_uri':\n Recommended_artifact.image_uri}\n return Response(dataDict)\n",
"step-4": "from .models import RecommendedArtifact\nfrom .serializers import RecommendedArtifactSerialize\nfrom rest_framework.decorators import api_view\nfrom rest_framework.response import Response\nfrom datetime import datetime\nimport requests, bs4\nservice_key = \"{jo's museum key}\"\n\n\n@api_view(['GET'])\ndef artifact_save_recommend(request, pageNo):\n artifact_url = (\n f'http://www.emuseum.go.kr/openapi/relic/list?serviceKey={service_key}&numOfRows=100&pageNo={pageNo}'\n )\n response = requests.get(artifact_url)\n response_dict = bs4.BeautifulSoup(response.content, 'html.parser')\n search_list = []\n for data in response_dict.findAll('data'):\n for item in data.findAll('item'):\n if item['key'] == 'id':\n id_num = item['value']\n search_list.append(id_num)\n detail_list = []\n dataDict = {'id_num': '', 'name': '', 'desc': '', 'museum_name': '',\n 'nationality_name': '', 'image_uri': ''}\n for i in range(len(search_list)):\n artifact_num = search_list[i]\n artifact_url = (\n f'http://www.emuseum.go.kr/openapi/relic/detail?serviceKey={service_key}&id={artifact_num}'\n )\n response = requests.get(artifact_url)\n response_dict = bs4.BeautifulSoup(response.content, 'html.parser')\n for data in response_dict.findAll('list'):\n for item in data.findAll('item'):\n if item['key'] == 'id':\n dataDict['id_num'] = item['value']\n elif item['key'] == 'desc':\n dataDict['desc'] = item['value']\n elif item['key'] == 'nameKr':\n dataDict['name'] = item['value']\n elif item['key'] == 'nationalityName2':\n dataDict['nationality_name'] = item['value']\n elif item['key'] == 'museumName2':\n dataDict['museum_name'] = item['value']\n elif item['key'] == 'imgThumUriM':\n dataDict['image_uri'] = item['value']\n if dataDict['desc'] != '':\n serializer = RecommendedArtifactSerialize(data=dataDict)\n if serializer.is_valid(raise_exception=True):\n serializer.save()\n dataDict = {'id_num': '', 'name': '', 'desc': '',\n 'museum_name': '', 'nationality_name': '', 'image_uri': ''}\n return Response(serializer.data)\n\n\n@api_view(['GET'])\ndef artifact_recommend(request):\n now = datetime.now()\n nowYear = now.year\n nowMonth = now.month\n nowDay = now.day\n daySum = 0\n if nowYear % 4 == 0 and nowYear % 100 != 0 or nowYear % 400 == 0:\n month = [31, 29, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]\n else:\n month = [31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]\n for i in range(nowMonth - 1):\n daySum += month[i]\n daySum += nowDay\n Recommended_list = RecommendedArtifact.objects.all()\n Recommended_artifact = Recommended_list[daySum]\n dataDict = {'id_num': Recommended_artifact.id_num, 'name':\n Recommended_artifact.name, 'desc': Recommended_artifact.desc,\n 'museum_name': Recommended_artifact.museum_name, 'nationality_name':\n Recommended_artifact.nationality_name, 'image_uri':\n Recommended_artifact.image_uri}\n return Response(dataDict)\n",
"step-5": "from .models import RecommendedArtifact\nfrom .serializers import RecommendedArtifactSerialize\nfrom rest_framework.decorators import api_view \nfrom rest_framework.response import Response\nfrom datetime import datetime\nimport requests, bs4\n\n# constant value\nservice_key = \"{jo's museum key}\"\n\n@api_view(['GET'])\ndef artifact_save_recommend(request,pageNo):\n \n # 1. 페이지 선정 및 페이지 내 모든 유물 정보 가져오기\n artifact_url = f\"http://www.emuseum.go.kr/openapi/relic/list?serviceKey={service_key}&numOfRows=100&pageNo={pageNo}\"\n #http://www.emuseum.go.kr/openapi/relic/list?serviceKey=DLuSbLjmCJIDKmhoSB7ELx3eVXXxg9ZBqh9oC8/eFWTcq2gDMqfQA7jrooSkvzWgYv/pd9a6fUJKG40K3VQXHg==&numOfRows=100&pageNo=1\n\n response = requests.get(artifact_url)\n response_dict = bs4.BeautifulSoup(response.content, 'html.parser')\n search_list = []\n\n for data in response_dict.findAll('data'):\n for item in data.findAll('item'):\n if item['key'] == 'id':\n id_num = item['value']\n search_list.append(id_num)\n\n # 2-1. 변수설정\n detail_list = []\n dataDict = {\n 'id_num': '',\n 'name': '',\n 'desc': '',\n 'museum_name': '',\n 'nationality_name': '',\n 'image_uri': '',\n }\n\n # 2-2. 모든 유물에서 desc있나 파악하기\n for i in range(len(search_list)):\n artifact_num = search_list[i]\n artifact_url = f\"http://www.emuseum.go.kr/openapi/relic/detail?serviceKey={service_key}&id={artifact_num}\"\n # http://www.emuseum.go.kr/openapi/relic/detail?serviceKey=DLuSbLjmCJIDKmhoSB7ELx3eVXXxg9ZBqh9oC8/eFWTcq2gDMqfQA7jrooSkvzWgYv/pd9a6fUJKG40K3VQXHg==&id=PS0100100100100021500000\n \n response = requests.get(artifact_url)\n response_dict = bs4.BeautifulSoup(response.content, 'html.parser')\n\n for data in response_dict.findAll('list'):\n for item in data.findAll('item'):\n if item['key'] == 'id':\n dataDict['id_num'] = item['value']\n\n elif item['key'] == 'desc':\n dataDict['desc'] = item['value']\n\n elif item['key'] == 'nameKr':\n dataDict['name'] = item['value']\n\n elif item['key'] == 'nationalityName2':\n dataDict['nationality_name'] = item['value']\n\n elif item['key'] == 'museumName2':\n dataDict['museum_name'] = item['value']\n\n elif item['key'] == 'imgThumUriM':\n dataDict['image_uri'] = item['value']\n\n # 2-3 db에 저장하기\n if dataDict['desc'] != '':\n serializer = RecommendedArtifactSerialize(data=dataDict)\n if serializer.is_valid(raise_exception=True):\n serializer.save()\n dataDict = {\n 'id_num': '',\n 'name': '',\n 'desc': '',\n 'museum_name': '',\n 'nationality_name': '',\n 'image_uri': '',\n } \n return Response(serializer.data)\n\n\n@api_view(['GET'])\ndef artifact_recommend(request):\n ## 오늘은 며칠째인가요??\n now = datetime.now()\n nowYear = now.year\n nowMonth = now.month\n nowDay = now.day\n daySum = 0\n\n if nowYear%4==0 and nowYear%100!=0 or nowYear%400==0:\n month = [31,29,31,30,31,30,31,31,30,31,30,31]\n else:\n month = [31,28,31,30,31,30,31,31,30,31,30,31]\n \n for i in range(nowMonth-1):\n daySum += month[i]\n\n daySum += nowDay\n\n Recommended_list = RecommendedArtifact.objects.all()\n Recommended_artifact = Recommended_list[daySum]\n dataDict = {\n 'id_num': Recommended_artifact.id_num,\n 'name': Recommended_artifact.name,\n 'desc': Recommended_artifact.desc,\n 'museum_name': Recommended_artifact.museum_name,\n 'nationality_name': Recommended_artifact.nationality_name,\n 'image_uri': Recommended_artifact.image_uri,\n } \n # print(Recommended_artifact.name)\n\n return Response(dataDict)\n",
"step-ids": [
0,
2,
3,
4,
5
]
}
|
[
0,
2,
3,
4,
5
] |
from pydis.datastruct.sds import SdsImp
class RPCStub(object):
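    """Minimal stub of an RPC command handler; SET currently just echoes its arguments."""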
def __init__(self):
pass
    def SET(self, key, value):
        # no real storage yet (the imported SdsImp is unused); simply echo the key/value pair
        print("{}: {}".format(key, value))
|
normal
|
{
"blob_id": "74f85732b4e1f4ef2b82a48818cbaedb18a56083",
"index": 8122,
"step-1": "<mask token>\n\n\nclass RPCStub(object):\n <mask token>\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass RPCStub(object):\n\n def __init__(self):\n pass\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass RPCStub(object):\n\n def __init__(self):\n pass\n\n def SET(self, key, value):\n self\n print('{}: {}'.format(key, value))\n",
"step-4": "from pydis.datastruct.sds import SdsImp\n\n\nclass RPCStub(object):\n\n def __init__(self):\n pass\n\n def SET(self, key, value):\n self\n print('{}: {}'.format(key, value))\n",
"step-5": "from pydis.datastruct.sds import SdsImp\n\n\nclass RPCStub(object):\n def __init__(self):\n pass\n\n def SET(self, key, value):\n self\n print(\"{}: {}\".format(key, value))\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |