Dataset columns: code (string, lengths 13 to 6.09M) | order_type (string, 2 classes) | original_example (dict) | step_ids (list, lengths 1 to 5)
from bs4 import BeautifulSoup
from aiounfurl.parsers import oembed
def test_oembed_not_match(oembed_providers):
oembed_url_extractor = oembed.OEmbedURLExtractor(oembed_providers)
url = 'http://test.com'
assert oembed_url_extractor.get_oembed_url(url) is None
def test_oembed_founded(oembed_providers):
oembed_url_extractor = oembed.OEmbedURLExtractor(oembed_providers)
url = 'https://www.instagram.com/p/BNHh2YJDdcY/'
oembed_url = oembed_url_extractor.get_oembed_url(url)
assert isinstance(oembed_url, str)
def test_oembed_discovery(oembed_providers, files_dir):
oembed_html = (files_dir / 'oembed_json.html').read_text()
soup = BeautifulSoup(oembed_html)
oembed_url_extractor = oembed.OEmbedURLExtractor(oembed_providers)
oembed_url = oembed_url_extractor.get_oembed_url_from_html(soup)
assert isinstance(oembed_url, str)
def test_oembed_params(oembed_providers):
oembed_url_extractor = oembed.OEmbedURLExtractor(oembed_providers,
params={'maxwidth': 200})
url = 'https://www.instagram.com/p/BNHh2YJDdcY/'
oembed_url = oembed_url_extractor.get_oembed_url(url)
assert isinstance(oembed_url, str)
assert 'maxwidth=200' in oembed_url
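
A note on the fixtures: the four tests above depend on pytest fixtures named oembed_providers and files_dir that are not part of this snippet. The conftest sketch below is only an assumption about how they could look; it guesses that OEmbedURLExtractor accepts the parsed provider list in the layout published at https://oembed.com/providers.json and that files_dir simply points at the directory holding the HTML fixture file.

# conftest.py (hypothetical, not from the original test suite)
import json
import pathlib

import pytest


@pytest.fixture
def files_dir():
    # Directory holding test fixtures such as oembed_json.html (assumed layout).
    return pathlib.Path(__file__).parent / 'files'


@pytest.fixture
def oembed_providers(files_dir):
    # A locally stored copy of https://oembed.com/providers.json (assumed format).
    return json.loads((files_dir / 'providers.json').read_text())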

order_type: flexible
{
"blob_id": "7b2ad0b4eca7b31b314e32ad57d51be82f0eaf61",
"index": 6979,
"step-1": "<mask token>\n\n\ndef test_oembed_founded(oembed_providers):\n oembed_url_extractor = oembed.OEmbedURLExtractor(oembed_providers)\n url = 'https://www.instagram.com/p/BNHh2YJDdcY/'\n oembed_url = oembed_url_extractor.get_oembed_url(url)\n assert isinstance(oembed_url, str)\n\n\ndef test_oembed_discovery(oembed_providers, files_dir):\n oembed_html = (files_dir / 'oembed_json.html').read_text()\n soup = BeautifulSoup(oembed_html)\n oembed_url_extractor = oembed.OEmbedURLExtractor(oembed_providers)\n oembed_url = oembed_url_extractor.get_oembed_url_from_html(soup)\n assert isinstance(oembed_url, str)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef test_oembed_not_match(oembed_providers):\n oembed_url_extractor = oembed.OEmbedURLExtractor(oembed_providers)\n url = 'http://test.com'\n assert oembed_url_extractor.get_oembed_url(url) is None\n\n\ndef test_oembed_founded(oembed_providers):\n oembed_url_extractor = oembed.OEmbedURLExtractor(oembed_providers)\n url = 'https://www.instagram.com/p/BNHh2YJDdcY/'\n oembed_url = oembed_url_extractor.get_oembed_url(url)\n assert isinstance(oembed_url, str)\n\n\ndef test_oembed_discovery(oembed_providers, files_dir):\n oembed_html = (files_dir / 'oembed_json.html').read_text()\n soup = BeautifulSoup(oembed_html)\n oembed_url_extractor = oembed.OEmbedURLExtractor(oembed_providers)\n oembed_url = oembed_url_extractor.get_oembed_url_from_html(soup)\n assert isinstance(oembed_url, str)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef test_oembed_not_match(oembed_providers):\n oembed_url_extractor = oembed.OEmbedURLExtractor(oembed_providers)\n url = 'http://test.com'\n assert oembed_url_extractor.get_oembed_url(url) is None\n\n\ndef test_oembed_founded(oembed_providers):\n oembed_url_extractor = oembed.OEmbedURLExtractor(oembed_providers)\n url = 'https://www.instagram.com/p/BNHh2YJDdcY/'\n oembed_url = oembed_url_extractor.get_oembed_url(url)\n assert isinstance(oembed_url, str)\n\n\ndef test_oembed_discovery(oembed_providers, files_dir):\n oembed_html = (files_dir / 'oembed_json.html').read_text()\n soup = BeautifulSoup(oembed_html)\n oembed_url_extractor = oembed.OEmbedURLExtractor(oembed_providers)\n oembed_url = oembed_url_extractor.get_oembed_url_from_html(soup)\n assert isinstance(oembed_url, str)\n\n\ndef test_oembed_params(oembed_providers):\n oembed_url_extractor = oembed.OEmbedURLExtractor(oembed_providers,\n params={'maxwidth': 200})\n url = 'https://www.instagram.com/p/BNHh2YJDdcY/'\n oembed_url = oembed_url_extractor.get_oembed_url(url)\n assert isinstance(oembed_url, str)\n assert 'maxwidth=200' in oembed_url\n",
"step-4": "from bs4 import BeautifulSoup\nfrom aiounfurl.parsers import oembed\n\n\ndef test_oembed_not_match(oembed_providers):\n oembed_url_extractor = oembed.OEmbedURLExtractor(oembed_providers)\n url = 'http://test.com'\n assert oembed_url_extractor.get_oembed_url(url) is None\n\n\ndef test_oembed_founded(oembed_providers):\n oembed_url_extractor = oembed.OEmbedURLExtractor(oembed_providers)\n url = 'https://www.instagram.com/p/BNHh2YJDdcY/'\n oembed_url = oembed_url_extractor.get_oembed_url(url)\n assert isinstance(oembed_url, str)\n\n\ndef test_oembed_discovery(oembed_providers, files_dir):\n oembed_html = (files_dir / 'oembed_json.html').read_text()\n soup = BeautifulSoup(oembed_html)\n oembed_url_extractor = oembed.OEmbedURLExtractor(oembed_providers)\n oembed_url = oembed_url_extractor.get_oembed_url_from_html(soup)\n assert isinstance(oembed_url, str)\n\n\ndef test_oembed_params(oembed_providers):\n oembed_url_extractor = oembed.OEmbedURLExtractor(oembed_providers,\n params={'maxwidth': 200})\n url = 'https://www.instagram.com/p/BNHh2YJDdcY/'\n oembed_url = oembed_url_extractor.get_oembed_url(url)\n assert isinstance(oembed_url, str)\n assert 'maxwidth=200' in oembed_url\n",
"step-5": null,
"step-ids": [
2,
3,
4,
5
]
}
step_ids: [2, 3, 4, 5]

#!/usr/bin/env python
# encoding: utf-8
import os
import argparse
import coaddBatchCutout as cbc
def run(args):
min = -0.0
max = 0.5
Q = 10
if os.path.isfile(args.incat):
cbc.coaddBatchCutFull(args.root, args.incat,
filter=args.filter,
idField=args.idField,
prefix=args.prefix,
zCutoutSize=args.zCutout,
zField=args.zField,
onlyColor=args.onlyColor,
noColor=args.noColor,
saveSrc=args.saveSrc,
makeDir=args.makeDir,
raField=args.raField,
decField=args.decField)
else:
raise Exception("### Can not find the input catalog: %s" % args.incat)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument("root", help="Root directory of data repository")
parser.add_argument("incat", help="The input catalog for cutout")
parser.add_argument("-s", '--size', dest='size', type=int,
help="Half size of the cutout box", default=200)
parser.add_argument('-f', '--filter', dest='filter', help="Filter",
default='HSC-I')
parser.add_argument('-cf', '--color-filters', dest='colorFilters',
help="Choice of filters for color images", default='riz')
parser.add_argument('-sf', '--size-field', dest='sizeField',
help="Column name for cutout size", default='cutout_size')
parser.add_argument('-info1', '--infoField1', dest='infoField1',
help="Column name for first extra information",
default=None)
parser.add_argument('-info2', '--infoField2', dest='infoField2',
help="Column name for second extra information",
default=None)
parser.add_argument('-oc', '--onlyColor', action="store_true", dest='onlyColor',
default=False)
parser.add_argument('-safe', '--safe', action="store_true", dest='safe',
default=False)
parser.add_argument('-clean', '--clean', action="store_true", dest='clean',
default=False)
parser.add_argument('-v', '--verbose', action="store_true", dest='verbose',
default=False)
parser.add_argument('-src', '--src', action="store_true", dest='saveSrc',
default=True)
parser.add_argument('-makeDir', '--makeDir', action="store_true", dest='makeDir',
default=True)
parser.add_argument('-zc', '--zCutoutSize', action="store_true", dest='zCutout',
default=True)
parser.add_argument('-nc', '--noColor', action="store_true", dest='noColor',
default=True)
parser.add_argument('-p', '--prefix', dest='prefix',
help='Prefix of the output file',
default='redBCG')
parser.add_argument('-id', '--id', dest='idField', help="Column name for ID",
default='ID_CLUSTER')
parser.add_argument('-ra', '--ra', dest='raField', help="Column name for RA",
default='RA_BCG')
parser.add_argument('-dec', '--dec', dest='decField', help="Column name for DEC",
default='DEC_BCG')
parser.add_argument('-z', '--redshift', dest='zField', help="Column name for z",
default='Z_LAMBDA')
args = parser.parse_args()
run(args)
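
A hedged usage sketch: run() can also be driven programmatically with an argparse.Namespace instead of the command line. The repository path and catalog name below are placeholders, the keyword values simply mirror the defaults registered above, and actually executing it requires the coaddBatchCutout module and an HSC data repository.

import argparse

example_args = argparse.Namespace(
    root='/path/to/data/repo',      # placeholder data repository
    incat='clusters_catalog.fits',  # placeholder input catalog
    size=200, filter='HSC-I', colorFilters='riz', sizeField='cutout_size',
    infoField1=None, infoField2=None, onlyColor=False, safe=False,
    clean=False, verbose=False, saveSrc=True, makeDir=True, zCutout=True,
    noColor=True, prefix='redBCG', idField='ID_CLUSTER',
    raField='RA_BCG', decField='DEC_BCG', zField='Z_LAMBDA')
# run(example_args)  # uncomment once coaddBatchCutout and the data are available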

order_type: flexible
{
"blob_id": "c0503536672aa824eaf0d19b9d4b5431ef910432",
"index": 1028,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef run(args):\n min = -0.0\n max = 0.5\n Q = 10\n if os.path.isfile(args.incat):\n cbc.coaddBatchCutFull(args.root, args.incat, filter=args.filter,\n idField=args.idField, prefix=args.prefix, zCutoutSize=args.\n zCutout, zField=args.zField, onlyColor=args.onlyColor, noColor=\n args.noColor, saveSrc=args.saveSrc, makeDir=args.makeDir,\n raField=args.raField, decField=args.decField)\n else:\n raise Exception('### Can not find the input catalog: %s' % args.incat)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef run(args):\n min = -0.0\n max = 0.5\n Q = 10\n if os.path.isfile(args.incat):\n cbc.coaddBatchCutFull(args.root, args.incat, filter=args.filter,\n idField=args.idField, prefix=args.prefix, zCutoutSize=args.\n zCutout, zField=args.zField, onlyColor=args.onlyColor, noColor=\n args.noColor, saveSrc=args.saveSrc, makeDir=args.makeDir,\n raField=args.raField, decField=args.decField)\n else:\n raise Exception('### Can not find the input catalog: %s' % args.incat)\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument('root', help='Root directory of data repository')\n parser.add_argument('incat', help='The input catalog for cutout')\n parser.add_argument('-s', '--size', dest='size', type=int, help=\n 'Half size of the cutout box', default=200)\n parser.add_argument('-f', '--filter', dest='filter', help='Filter',\n default='HSC-I')\n parser.add_argument('-cf', '--color-filters', dest='colorFilters', help\n ='Choice of filters for color images', default='riz')\n parser.add_argument('-sf', '--size-field', dest='sizeField', help=\n 'Column name for cutout size', default='cutout_size')\n parser.add_argument('-info1', '--infoField1', dest='infoField1', help=\n 'Column name for first extra information', default=None)\n parser.add_argument('-info2', '--infoField2', dest='infoField2', help=\n 'Column name for second extra information', default=None)\n parser.add_argument('-oc', '--onlyColor', action='store_true', dest=\n 'onlyColor', default=False)\n parser.add_argument('-safe', '--safe', action='store_true', dest='safe',\n default=False)\n parser.add_argument('-clean', '--clean', action='store_true', dest=\n 'clean', default=False)\n parser.add_argument('-v', '--verbose', action='store_true', dest=\n 'verbose', default=False)\n parser.add_argument('-src', '--src', action='store_true', dest=\n 'saveSrc', default=True)\n parser.add_argument('-makeDir', '--makeDir', action='store_true', dest=\n 'makeDir', default=True)\n parser.add_argument('-zc', '--zCutoutSize', action='store_true', dest=\n 'zCutout', default=True)\n parser.add_argument('-nc', '--noColor', action='store_true', dest=\n 'noColor', default=True)\n parser.add_argument('-p', '--prefix', dest='prefix', help=\n 'Prefix of the output file', default='redBCG')\n parser.add_argument('-id', '--id', dest='idField', help=\n 'Column name for ID', default='ID_CLUSTER')\n parser.add_argument('-ra', '--ra', dest='raField', help=\n 'Column name for RA', default='RA_BCG')\n parser.add_argument('-dec', '--dec', dest='decField', help=\n 'Column name for DEC', default='DEC_BCG')\n parser.add_argument('-z', '--redshift', dest='zField', help=\n 'Column name for z', default='Z_LAMBDA')\n args = parser.parse_args()\n run(args)\n",
"step-4": "import os\nimport argparse\nimport coaddBatchCutout as cbc\n\n\ndef run(args):\n min = -0.0\n max = 0.5\n Q = 10\n if os.path.isfile(args.incat):\n cbc.coaddBatchCutFull(args.root, args.incat, filter=args.filter,\n idField=args.idField, prefix=args.prefix, zCutoutSize=args.\n zCutout, zField=args.zField, onlyColor=args.onlyColor, noColor=\n args.noColor, saveSrc=args.saveSrc, makeDir=args.makeDir,\n raField=args.raField, decField=args.decField)\n else:\n raise Exception('### Can not find the input catalog: %s' % args.incat)\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument('root', help='Root directory of data repository')\n parser.add_argument('incat', help='The input catalog for cutout')\n parser.add_argument('-s', '--size', dest='size', type=int, help=\n 'Half size of the cutout box', default=200)\n parser.add_argument('-f', '--filter', dest='filter', help='Filter',\n default='HSC-I')\n parser.add_argument('-cf', '--color-filters', dest='colorFilters', help\n ='Choice of filters for color images', default='riz')\n parser.add_argument('-sf', '--size-field', dest='sizeField', help=\n 'Column name for cutout size', default='cutout_size')\n parser.add_argument('-info1', '--infoField1', dest='infoField1', help=\n 'Column name for first extra information', default=None)\n parser.add_argument('-info2', '--infoField2', dest='infoField2', help=\n 'Column name for second extra information', default=None)\n parser.add_argument('-oc', '--onlyColor', action='store_true', dest=\n 'onlyColor', default=False)\n parser.add_argument('-safe', '--safe', action='store_true', dest='safe',\n default=False)\n parser.add_argument('-clean', '--clean', action='store_true', dest=\n 'clean', default=False)\n parser.add_argument('-v', '--verbose', action='store_true', dest=\n 'verbose', default=False)\n parser.add_argument('-src', '--src', action='store_true', dest=\n 'saveSrc', default=True)\n parser.add_argument('-makeDir', '--makeDir', action='store_true', dest=\n 'makeDir', default=True)\n parser.add_argument('-zc', '--zCutoutSize', action='store_true', dest=\n 'zCutout', default=True)\n parser.add_argument('-nc', '--noColor', action='store_true', dest=\n 'noColor', default=True)\n parser.add_argument('-p', '--prefix', dest='prefix', help=\n 'Prefix of the output file', default='redBCG')\n parser.add_argument('-id', '--id', dest='idField', help=\n 'Column name for ID', default='ID_CLUSTER')\n parser.add_argument('-ra', '--ra', dest='raField', help=\n 'Column name for RA', default='RA_BCG')\n parser.add_argument('-dec', '--dec', dest='decField', help=\n 'Column name for DEC', default='DEC_BCG')\n parser.add_argument('-z', '--redshift', dest='zField', help=\n 'Column name for z', default='Z_LAMBDA')\n args = parser.parse_args()\n run(args)\n",
"step-5": "#!/usr/bin/env python\n# encoding: utf-8\n\nimport os\nimport argparse\nimport coaddBatchCutout as cbc\n\n\ndef run(args):\n\n min = -0.0\n max = 0.5\n Q = 10\n\n if os.path.isfile(args.incat):\n\n cbc.coaddBatchCutFull(args.root, args.incat,\n filter=args.filter,\n idField=args.idField,\n prefix=args.prefix,\n zCutoutSize=args.zCutout,\n zField=args.zField,\n onlyColor=args.onlyColor,\n noColor=args.noColor,\n saveSrc=args.saveSrc,\n makeDir=args.makeDir,\n raField=args.raField,\n decField=args.decField)\n else:\n raise Exception(\"### Can not find the input catalog: %s\" % args.incat)\n\n\nif __name__ == '__main__':\n\n parser = argparse.ArgumentParser()\n parser.add_argument(\"root\", help=\"Root directory of data repository\")\n parser.add_argument(\"incat\", help=\"The input catalog for cutout\")\n parser.add_argument(\"-s\", '--size', dest='size', type=int,\n help=\"Half size of the cutout box\", default=200)\n parser.add_argument('-f', '--filter', dest='filter', help=\"Filter\",\n default='HSC-I')\n parser.add_argument('-cf', '--color-filters', dest='colorFilters',\n help=\"Choice of filters for color images\", default='riz')\n parser.add_argument('-sf', '--size-field', dest='sizeField',\n help=\"Column name for cutout size\", default='cutout_size')\n parser.add_argument('-info1', '--infoField1', dest='infoField1',\n help=\"Column name for first extra information\",\n default=None)\n parser.add_argument('-info2', '--infoField2', dest='infoField2',\n help=\"Column name for second extra information\",\n default=None)\n parser.add_argument('-oc', '--onlyColor', action=\"store_true\", dest='onlyColor',\n default=False)\n parser.add_argument('-safe', '--safe', action=\"store_true\", dest='safe',\n default=False)\n parser.add_argument('-clean', '--clean', action=\"store_true\", dest='clean',\n default=False)\n parser.add_argument('-v', '--verbose', action=\"store_true\", dest='verbose',\n default=False)\n parser.add_argument('-src', '--src', action=\"store_true\", dest='saveSrc',\n default=True)\n parser.add_argument('-makeDir', '--makeDir', action=\"store_true\", dest='makeDir',\n default=True)\n parser.add_argument('-zc', '--zCutoutSize', action=\"store_true\", dest='zCutout',\n default=True)\n parser.add_argument('-nc', '--noColor', action=\"store_true\", dest='noColor',\n default=True)\n parser.add_argument('-p', '--prefix', dest='prefix',\n help='Prefix of the output file',\n default='redBCG')\n parser.add_argument('-id', '--id', dest='idField', help=\"Column name for ID\",\n default='ID_CLUSTER')\n parser.add_argument('-ra', '--ra', dest='raField', help=\"Column name for RA\",\n default='RA_BCG')\n parser.add_argument('-dec', '--dec', dest='decField', help=\"Column name for DEC\",\n default='DEC_BCG')\n parser.add_argument('-z', '--redshift', dest='zField', help=\"Column name for z\",\n default='Z_LAMBDA')\n args = parser.parse_args()\n\n run(args)\n\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
step_ids: [0, 1, 2, 3, 4]

from libs.storage.blocks.iterators.base import BaseBlockIterator
from libs.storage.const import SEPARATOR
class ContentsBlockIterator(BaseBlockIterator):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.contents = self.block_content.split(SEPARATOR)
self.titles = self.contents[0].split('\n')[1:]
def get(self, index):
return self.contents[index + 1]
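
A small illustration, not from the source, of the block layout this iterator assumes: the first SEPARATOR-delimited chunk is a header whose lines after the first are titles, and chunk i + 1 holds the contents for index i. The separator value below is a stand-in; the real constant lives in libs.storage.const.

SEPARATOR = '\x1d'  # assumed placeholder for libs.storage.const.SEPARATOR

block_content = ('header line\nfirst title\nsecond title' +
                 SEPARATOR + 'contents for index 0' +
                 SEPARATOR + 'contents for index 1')

contents = block_content.split(SEPARATOR)
titles = contents[0].split('\n')[1:]  # ['first title', 'second title']
print(titles, contents[0 + 1])        # contents[0 + 1] is what get(0) returns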

order_type: flexible
{
"blob_id": "b888745b3ce815f7c9eb18f5e76bacfadfbff3f5",
"index": 3153,
"step-1": "<mask token>\n\n\nclass ContentsBlockIterator(BaseBlockIterator):\n <mask token>\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass ContentsBlockIterator(BaseBlockIterator):\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.contents = self.block_content.split(SEPARATOR)\n self.titles = self.contents[0].split('\\n')[1:]\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass ContentsBlockIterator(BaseBlockIterator):\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.contents = self.block_content.split(SEPARATOR)\n self.titles = self.contents[0].split('\\n')[1:]\n\n def get(self, index):\n return self.contents[index + 1]\n",
"step-4": "from libs.storage.blocks.iterators.base import BaseBlockIterator\nfrom libs.storage.const import SEPARATOR\n\n\nclass ContentsBlockIterator(BaseBlockIterator):\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.contents = self.block_content.split(SEPARATOR)\n self.titles = self.contents[0].split('\\n')[1:]\n\n def get(self, index):\n return self.contents[index + 1]\n",
"step-5": null,
"step-ids": [
1,
2,
3,
4
]
}
step_ids: [1, 2, 3, 4]

import matplotlib.pyplot as plt
import cv2
# IMREAD_GRAYSCALE = 0
img = cv2.imread('test.jpg', cv2.IMREAD_GRAYSCALE)
# IMREAD_COLOR = 1
# IMREAD_UNCHANGED = -1
cv2.imshow('image', img)
cv2.waitKey(0)
cv2.destroyAllWindows()
# cv2.imwrite('watchgray.png', img)
plt.imshow(img, cmap='gray', interpolation='bicubic')
plt.show()
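
A small hedged addition: the commented-out imwrite call above can also be used to save the grayscale image to disk; the output filename is a placeholder.

ok = cv2.imwrite('watchgray.png', img)  # placeholder output name
print('saved' if ok else 'write failed')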

order_type: normal
{
"blob_id": "34ccaaf5eb47afd556588cd94cddbddaee1f0b53",
"index": 2851,
"step-1": "<mask token>\n",
"step-2": "<mask token>\ncv2.imshow('image', img)\ncv2.waitKey(0)\ncv2.destroyAllWindows()\nplt.imshow(img, cmap='gray', interpolation='bicubic')\nplt.show()\n",
"step-3": "<mask token>\nimg = cv2.imread('test.jpg', cv2.IMREAD_GRAYSCALE)\ncv2.imshow('image', img)\ncv2.waitKey(0)\ncv2.destroyAllWindows()\nplt.imshow(img, cmap='gray', interpolation='bicubic')\nplt.show()\n",
"step-4": "import matplotlib.pyplot as plt\nimport cv2\nimg = cv2.imread('test.jpg', cv2.IMREAD_GRAYSCALE)\ncv2.imshow('image', img)\ncv2.waitKey(0)\ncv2.destroyAllWindows()\nplt.imshow(img, cmap='gray', interpolation='bicubic')\nplt.show()\n",
"step-5": "import matplotlib.pyplot as plt\nimport cv2\n# 0\nimg = cv2.imread('test.jpg', cv2.IMREAD_GRAYSCALE)\n# IMREAD_COLOR = 1\n# IMREAD_UNCHANGED = -1\ncv2.imshow('image', img)\ncv2.waitKey(0)\ncv2.destroyAllWindows()\n# cv2.imwrite('watchgray,png', img)\n\nplt.imshow(img, cmap='gray', interpolation='bicubic')\nplt.show()\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
step_ids: [0, 1, 2, 3, 4]

# -*- coding: utf-8 -*-
"""Success request logging.
This logging is used by "CheckZope" to determine the amount
of work performed by Zope (in order not to bother it with monitor
probes when it is heavily active) and to detect an unreasonable
error rate.
This logging writes two files "<base>_good.<date>" and "<base>_bad.<date>".
For each request, a character is writen to either the good or
the bad logfile, depending on whether the request was successful or
unsuccessful. This means, that only the file size matters for
these logfiles.
Usually, response codes >= 500 are considered as unsuccessful requests.
You can register an "ISuccessFull" adapter, when you need
a different classification.
To activate this logging, both "successlogging.zcml" must be activated
and a "product-config" section with name "successlogging" must be defined
containing the key "filebase".
It specifies the basename of the logfiles (represented as "<base>" above).
"""
from .interfaces import IStatus
from .interfaces import ISuccessFull
from .Rotator import Rotator
from zope.processlifetime import IProcessStarting
from zope.component import adapter
from zope.component import provideHandler
from ZPublisher.interfaces import IPubFailure
from ZPublisher.interfaces import IPubSuccess
_log_good = _log_bad = None
@adapter(IProcessStarting)
def start_successlogging(unused):
"""start successlogging if configured."""
from App.config import getConfiguration
config = getConfiguration().product_config.get('successlogging')
if config is None:
return # not configured
global _log_good, _log_bad
_log_good = Rotator(config['filebase'] + '_good', lock=True)
_log_bad = Rotator(config['filebase'] + '_bad', lock=True)
# register publication observers
provideHandler(handle_request_success)
provideHandler(handle_request_failure)
@adapter(IPubSuccess)
def handle_request_success(event):
"""handle "IPubSuccess"."""
_log_good.write('*')
@adapter(IPubFailure)
def handle_request_failure(event):
"""handle "IPubFailure"."""
request = event.request
if event.retry:
handle_request_success(event)
else:
# Note: Zope forgets (at least sometimes)
# to inform the response about the exception.
# Work around this bug.
# When Zope3 views are used for error handling, they no longer
# communicate via exceptions with the ZPublisher. Instead, they seem
# to use 'setBody' which interferes with the 'exception' call below.
# We work around this problem by saving the response state and then
# restore it again. Of course, this no longer works around the Zope
# bug (forgetting to call 'exception') mentioned above.
response = request.response
saved = response.__dict__.copy()
response.setStatus(event.exc_info[0])
ok = ISuccessFull(response, None)
if ok is None:
status = IStatus(response, None)
if status is None:
status = response.getStatus()
else:
status = int(status)
ok = status < 500
if bool(ok):
handle_request_success(event)
else:
_log_bad.write('*')
response.__dict__.update(saved) # restore response again
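
A minimal sketch, not part of the module, of how a monitor such as the "CheckZope" probe mentioned in the docstring could consume these logs: one character is written per request, so the file size alone gives the request counts. The filebase path and the date suffix below are placeholders, since the exact suffix produced by Rotator is not shown here.

import os


def request_counts(filebase, date_suffix):
    # Sizes of the per-request marker files written by the handlers above.
    good = os.path.getsize('%s_good.%s' % (filebase, date_suffix))
    bad = os.path.getsize('%s_bad.%s' % (filebase, date_suffix))
    return good, bad


good, bad = request_counts('/var/log/zope/requests', '2015-06-01')
total = good + bad
error_rate = (100.0 * bad / total) if total else 0.0
print('requests=%d, error rate=%.1f%%' % (total, error_rate))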

order_type: flexible
{
"blob_id": "2edbf18c90da1ff40fd9abaf25a35dbdaf733bc1",
"index": 2786,
"step-1": "<mask token>\n\n\n@adapter(IProcessStarting)\ndef start_successlogging(unused):\n \"\"\"start successlogging if configured.\"\"\"\n from App.config import getConfiguration\n config = getConfiguration().product_config.get('successlogging')\n if config is None:\n return\n global _log_good, _log_bad\n _log_good = Rotator(config['filebase'] + '_good', lock=True)\n _log_bad = Rotator(config['filebase'] + '_bad', lock=True)\n provideHandler(handle_request_success)\n provideHandler(handle_request_failure)\n\n\n<mask token>\n\n\n@adapter(IPubFailure)\ndef handle_request_failure(event):\n \"\"\"handle \"IPubFailure\".\"\"\"\n request = event.request\n if event.retry:\n handle_request_success(event)\n else:\n response = request.response\n saved = response.__dict__.copy()\n response.setStatus(event.exc_info[0])\n ok = ISuccessFull(response, None)\n if ok is None:\n status = IStatus(response, None)\n if status is None:\n status = response.getStatus()\n else:\n status = int(status)\n ok = status < 500\n if bool(ok):\n handle_request_success(event)\n else:\n _log_bad.write('*')\n response.__dict__.update(saved)\n",
"step-2": "<mask token>\n\n\n@adapter(IProcessStarting)\ndef start_successlogging(unused):\n \"\"\"start successlogging if configured.\"\"\"\n from App.config import getConfiguration\n config = getConfiguration().product_config.get('successlogging')\n if config is None:\n return\n global _log_good, _log_bad\n _log_good = Rotator(config['filebase'] + '_good', lock=True)\n _log_bad = Rotator(config['filebase'] + '_bad', lock=True)\n provideHandler(handle_request_success)\n provideHandler(handle_request_failure)\n\n\n@adapter(IPubSuccess)\ndef handle_request_success(event):\n \"\"\"handle \"IPubSuccess\".\"\"\"\n _log_good.write('*')\n\n\n@adapter(IPubFailure)\ndef handle_request_failure(event):\n \"\"\"handle \"IPubFailure\".\"\"\"\n request = event.request\n if event.retry:\n handle_request_success(event)\n else:\n response = request.response\n saved = response.__dict__.copy()\n response.setStatus(event.exc_info[0])\n ok = ISuccessFull(response, None)\n if ok is None:\n status = IStatus(response, None)\n if status is None:\n status = response.getStatus()\n else:\n status = int(status)\n ok = status < 500\n if bool(ok):\n handle_request_success(event)\n else:\n _log_bad.write('*')\n response.__dict__.update(saved)\n",
"step-3": "<mask token>\n_log_good = _log_bad = None\n\n\n@adapter(IProcessStarting)\ndef start_successlogging(unused):\n \"\"\"start successlogging if configured.\"\"\"\n from App.config import getConfiguration\n config = getConfiguration().product_config.get('successlogging')\n if config is None:\n return\n global _log_good, _log_bad\n _log_good = Rotator(config['filebase'] + '_good', lock=True)\n _log_bad = Rotator(config['filebase'] + '_bad', lock=True)\n provideHandler(handle_request_success)\n provideHandler(handle_request_failure)\n\n\n@adapter(IPubSuccess)\ndef handle_request_success(event):\n \"\"\"handle \"IPubSuccess\".\"\"\"\n _log_good.write('*')\n\n\n@adapter(IPubFailure)\ndef handle_request_failure(event):\n \"\"\"handle \"IPubFailure\".\"\"\"\n request = event.request\n if event.retry:\n handle_request_success(event)\n else:\n response = request.response\n saved = response.__dict__.copy()\n response.setStatus(event.exc_info[0])\n ok = ISuccessFull(response, None)\n if ok is None:\n status = IStatus(response, None)\n if status is None:\n status = response.getStatus()\n else:\n status = int(status)\n ok = status < 500\n if bool(ok):\n handle_request_success(event)\n else:\n _log_bad.write('*')\n response.__dict__.update(saved)\n",
"step-4": "<mask token>\nfrom .interfaces import IStatus\nfrom .interfaces import ISuccessFull\nfrom .Rotator import Rotator\nfrom zope.processlifetime import IProcessStarting\nfrom zope.component import adapter\nfrom zope.component import provideHandler\nfrom ZPublisher.interfaces import IPubFailure\nfrom ZPublisher.interfaces import IPubSuccess\n_log_good = _log_bad = None\n\n\n@adapter(IProcessStarting)\ndef start_successlogging(unused):\n \"\"\"start successlogging if configured.\"\"\"\n from App.config import getConfiguration\n config = getConfiguration().product_config.get('successlogging')\n if config is None:\n return\n global _log_good, _log_bad\n _log_good = Rotator(config['filebase'] + '_good', lock=True)\n _log_bad = Rotator(config['filebase'] + '_bad', lock=True)\n provideHandler(handle_request_success)\n provideHandler(handle_request_failure)\n\n\n@adapter(IPubSuccess)\ndef handle_request_success(event):\n \"\"\"handle \"IPubSuccess\".\"\"\"\n _log_good.write('*')\n\n\n@adapter(IPubFailure)\ndef handle_request_failure(event):\n \"\"\"handle \"IPubFailure\".\"\"\"\n request = event.request\n if event.retry:\n handle_request_success(event)\n else:\n response = request.response\n saved = response.__dict__.copy()\n response.setStatus(event.exc_info[0])\n ok = ISuccessFull(response, None)\n if ok is None:\n status = IStatus(response, None)\n if status is None:\n status = response.getStatus()\n else:\n status = int(status)\n ok = status < 500\n if bool(ok):\n handle_request_success(event)\n else:\n _log_bad.write('*')\n response.__dict__.update(saved)\n",
"step-5": "# -*- coding: utf-8 -*-\n\"\"\"Success request logging.\n\nThis logging is used by \"CheckZope\" to determine the amount\nof work performed by Zope (in order not to bother it with monitor\nprobes when it is heavily active) and to detect an unreasonable\nerror rate.\n\nThis logging writes two files \"<base>_good.<date>\" and \"<base>_bad.<date>\".\nFor each request, a character is writen to either the good or\nthe bad logfile, depending on whether the request was successful or\nunsuccessful. This means, that only the file size matters for\nthese logfiles.\n\nUsually, response codes >= 500 are considered as unsuccessful requests.\nYou can register an \"ISuccessFull\" adapter, when you need\na different classification.\n\nTo activate this logging, both \"successlogging.zcml\" must be activated\nand a \"product-config\" section with name \"successlogging\" must be defined\ncontaining the key \"filebase\".\nIt specifies the basename of the logfiles (represented as \"<base>\" above).\n\"\"\"\n\nfrom .interfaces import IStatus\nfrom .interfaces import ISuccessFull\nfrom .Rotator import Rotator\nfrom zope.processlifetime import IProcessStarting\nfrom zope.component import adapter\nfrom zope.component import provideHandler\nfrom ZPublisher.interfaces import IPubFailure\nfrom ZPublisher.interfaces import IPubSuccess\n\n_log_good = _log_bad = None\n\n\n@adapter(IProcessStarting)\ndef start_successlogging(unused):\n \"\"\"start successlogging if configured.\"\"\"\n from App.config import getConfiguration\n config = getConfiguration().product_config.get('successlogging')\n if config is None:\n return # not configured\n global _log_good, _log_bad\n _log_good = Rotator(config['filebase'] + '_good', lock=True)\n _log_bad = Rotator(config['filebase'] + '_bad', lock=True)\n # register publication observers\n provideHandler(handle_request_success)\n provideHandler(handle_request_failure)\n\n\n@adapter(IPubSuccess)\ndef handle_request_success(event):\n \"\"\"handle \"IPubSuccess\".\"\"\"\n _log_good.write('*')\n\n\n@adapter(IPubFailure)\ndef handle_request_failure(event):\n \"\"\"handle \"IPubFailure\".\"\"\"\n request = event.request\n if event.retry:\n handle_request_success(event)\n else:\n # Note: Zope forgets (at least sometimes)\n # to inform the response about the exception.\n # Work around this bug.\n # When Zope3 views are used for error handling, they no longer\n # communicate via exceptions with the ZPublisher. Instead, they seem\n # to use 'setBody' which interferes with the 'exception' call below.\n # We work around this problem by saving the response state and then\n # restore it again. Of course, this no longer works around the Zope\n # bug (forgetting to call 'exception') mentioned above.\n response = request.response\n saved = response.__dict__.copy()\n response.setStatus(event.exc_info[0])\n ok = ISuccessFull(response, None)\n if ok is None:\n status = IStatus(response, None)\n if status is None:\n status = response.getStatus()\n else:\n status = int(status)\n ok = status < 500\n if bool(ok):\n handle_request_success(event)\n else:\n _log_bad.write('*')\n response.__dict__.update(saved) # restore response again\n",
"step-ids": [
2,
3,
4,
5,
6
]
}
step_ids: [2, 3, 4, 5, 6]

from rest_framework.permissions import BasePermission, SAFE_METHODS
class IsOwnerOrStaffOrReadOnly(BasePermission):
def has_object_permission(self, request, view, obj):
"""
Override the object-level access permissions.

Write access is granted only to the owner or an administrator;
read access is granted to everyone.
Returns `True` if permission is granted, `False` otherwise.
"""
return bool(request.method in SAFE_METHODS or request.user and
request.user.is_authenticated and (obj.owner == request.user or
request.user.is_superuser))
def has_permission(self, request, view):
return bool(request.method in SAFE_METHODS or request.user and
request.user.is_authenticated)
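
A hedged usage sketch, not from the source: the permission class is wired into a view through DRF's permission_classes setting. The viewset, model, and serializer names below are placeholders for whatever the project defines.

from rest_framework.viewsets import ModelViewSet


class ArticleViewSet(ModelViewSet):
    queryset = Article.objects.all()        # placeholder model
    serializer_class = ArticleSerializer    # placeholder serializer
    permission_classes = [IsOwnerOrStaffOrReadOnly]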

order_type: flexible
{
"blob_id": "4488612164435ab062ca66000f0d7dc3ccd89da2",
"index": 8150,
"step-1": "<mask token>\n\n\nclass IsOwnerOrStaffOrReadOnly(BasePermission):\n <mask token>\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass IsOwnerOrStaffOrReadOnly(BasePermission):\n\n def has_object_permission(self, request, view, obj):\n \"\"\"\n Переопределяем права доступа.\n\n Даем все права на запись, только владельцу или\n администратору, на чтение даем право всем.\n Возвращаем `True` если есть разрешение, `False` если нет.\n \"\"\"\n return bool(request.method in SAFE_METHODS or request.user and\n request.user.is_authenticated and (obj.owner == request.user or\n request.user.is_superuser))\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass IsOwnerOrStaffOrReadOnly(BasePermission):\n\n def has_object_permission(self, request, view, obj):\n \"\"\"\n Переопределяем права доступа.\n\n Даем все права на запись, только владельцу или\n администратору, на чтение даем право всем.\n Возвращаем `True` если есть разрешение, `False` если нет.\n \"\"\"\n return bool(request.method in SAFE_METHODS or request.user and\n request.user.is_authenticated and (obj.owner == request.user or\n request.user.is_superuser))\n\n def has_permission(self, request, view):\n return bool(request.method in SAFE_METHODS or request.user and\n request.user.is_authenticated)\n",
"step-4": "from rest_framework.permissions import BasePermission, SAFE_METHODS\n\n\nclass IsOwnerOrStaffOrReadOnly(BasePermission):\n\n def has_object_permission(self, request, view, obj):\n \"\"\"\n Переопределяем права доступа.\n\n Даем все права на запись, только владельцу или\n администратору, на чтение даем право всем.\n Возвращаем `True` если есть разрешение, `False` если нет.\n \"\"\"\n return bool(request.method in SAFE_METHODS or request.user and\n request.user.is_authenticated and (obj.owner == request.user or\n request.user.is_superuser))\n\n def has_permission(self, request, view):\n return bool(request.method in SAFE_METHODS or request.user and\n request.user.is_authenticated)\n",
"step-5": null,
"step-ids": [
1,
2,
3,
4
]
}
step_ids: [1, 2, 3, 4]

def check_compound_set(description_mol, validate_dict):
y_m_d = description_mol.GetProp('generation_date').split('-')
submitter_dict = {'submitter__name': description_mol.GetProp(
'submitter_name'), 'submitter__email': description_mol.GetProp(
'submitter_email'), 'submitter__institution': description_mol.
GetProp('submitter_institution'), 'submitter__generation_date':
datetime.date(int(y_m_d[0]), int(y_m_d[1]), int(y_m_d[2])),
'submitter__method': description_mol.GetProp('method')}
query = CompoundSet.objects.filter(**submitter_dict)
if len(query) != 0:
validate_dict = add_warning(molecule_name='File error', field=
'compound set', warning_string=
'a compound set with the auto_generated name ' + query[0].
submitter.unique_name +
' already exists (change method name in blank mol method field)',
validate_dict=validate_dict)
return validate_dict
def check_SMILES(mol, validate_dict):
"""
Checks if SMILES can be read by rdkit
:mol: rdkit mol read from SD file
:return: Updates validate dictionary with pass/fail message
"""
smi_check = mol.GetProp('original SMILES')
m = Chem.MolFromSmiles(smi_check, sanitize=False)
if m is None:
validate_dict = add_warning(molecule_name=mol.GetProp('_Name'),
field='original SMILES', warning_string='invalid SMILES %s' % (
smi_check,), validate_dict=validate_dict)
return validate_dict
def check_ver_name(blank_mol, version, validate_dict):
"""
Checks if blank mol:
The name (title line) of this molecule should be the
file format specification version e.g. ver_1.0 (as defined in this document)
:blank_mol: rdkit mol of blank mol from an SD file
:return: Updates validate dictionary with pass/fail message
"""
ver_name = blank_mol.GetProp('_Name')
if ver_name != version:
validate_dict = add_warning(molecule_name=blank_mol.GetProp('_Name'
), field='_Name', warning_string=
'illegal version: %s found. Should be %s' % (ver_name, version),
validate_dict=validate_dict)
return validate_dict
def check_blank_mol_props(mol, validate_dict):
fields = ['ref_url', 'submitter_name', 'submitter_email',
'submitter_institution', 'generation_date', 'method']
for field in fields:
validate_dict = missing_field_check(mol, field, validate_dict)
return validate_dict
def check_blank_prop(blank_mol, validate_dict):
"""
Checks if blank mol properties have a description
:blank_mol: rdkit mol of blank mol from an SD file
:return: Updates validate dictionary with pass/fail message
"""
property_dict = blank_mol.GetPropsAsDict()
prop_ignore_list = ['ref_mols', 'ref_pdb']
for key, value in zip(property_dict.keys(), property_dict.values()):
if value == '' and key not in prop_ignore_list:
validate_dict = add_warning(molecule_name=blank_mol.GetProp(
'_Name'), field=key, warning_string=
'description for %s missing' % (key,), validate_dict=
validate_dict)
if key == 'ref_url' and check_url(value) == False:
validate_dict = add_warning(molecule_name=blank_mol.GetProp(
'_Name'), field=key, warning_string=
'illegal URL %s provided' % (value,), validate_dict=
validate_dict)
return validate_dict
def check_url(value):
"""
Checks if url provided exists. No internet connection required.
Checks URL using Validators package
:value: value associated with 'ref_url' key
:return: False if URL can not be validated
"""
valid = validators.url(value)
if valid != True:
return False
def check_name_characters(name, validate_dict):
legal_non_alnum = ['-', '_', '.']
for char in name:
if not char.isalnum() and char not in legal_non_alnum:
validate_dict = add_warning(molecule_name=name, field='_Name',
warning_string='illegal character %s found' % (char,),
validate_dict=validate_dict)
return validate_dict
def missing_field_check(mol, field, validate_dict):
props_dict = mol.GetPropsAsDict()
if not field in props_dict.keys():
validate_dict = add_warning(molecule_name=mol.GetProp('_Name'),
field=field, warning_string='%s field not found!' % (field,),
validate_dict=validate_dict)
return validate_dict
def validate(sdf_file, target=None, zfile=None):
validated = True
validate_dict = {'molecule_name': [], 'field': [], 'warning_string': []}
validate_dict = check_sdf(sdf_file, validate_dict)
suppl = Chem.SDMolSupplier(sdf_file)
print('%d mols detected (including blank mol)' % (len(suppl),))
blank_mol = suppl[0]
if blank_mol is None:
validate_dict = add_warning(molecule_name='Blank Mol', field='N/A',
warning_string=
'your blank molecule could not be read by rdkit. The molecule must have at least one atom! No other checks were done'
, validate_dict=validate_dict)
validated = False
return validate_dict, validated
validate_dict = check_compound_set(blank_mol, validate_dict)
other_mols = []
for i in range(1, len(suppl)):
other_mols.append(suppl[i])
all_props = []
for mol in suppl:
all_props.extend([key for key in mol.GetPropsAsDict().keys()])
unique_props = list(set(all_props))
for mol in suppl:
props = [key for key in mol.GetPropsAsDict().keys()]
diff_list = np.setdiff1d(props, unique_props)
for diff in diff_list:
add_warning(molecule_name=mol.GetProp('_Name'), field=
'property (missing)', warning_string=
'%s property is missing from this molecule' % (diff,),
validate_dict=validate_dict)
validate_dict = check_ver_name(blank_mol, version, validate_dict)
validate_dict = check_blank_mol_props(blank_mol, validate_dict)
validate_dict = check_blank_prop(blank_mol, validate_dict)
for m in other_mols:
validate_dict = check_mol_props(m, validate_dict)
validate_dict = check_name_characters(m.GetProp('_Name'), validate_dict
)
validate_dict = check_pdb(m, validate_dict, target, zfile)
validate_dict = check_refmol(m, validate_dict, target)
validate_dict = check_field_populated(m, validate_dict)
validate_dict = check_SMILES(m, validate_dict)
if len(validate_dict['molecule_name']) != 0:
validated = False
return validate_dict, validated
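

# A hedged usage sketch (not in the source): calling the validate() entry point
# above on an SD file for a fragalysis target. The file name and target are
# placeholders; the loop just reports the parallel warning lists it returns.
validate_dict, validated = validate('compound_set.sdf', target='Mpro')
if not validated:
    for name, field, warning in zip(validate_dict['molecule_name'],
                                    validate_dict['field'],
                                    validate_dict['warning_string']):
        print('%s | %s | %s' % (name, field, warning))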
def check_compound_set(description_mol, validate_dict):
y_m_d = description_mol.GetProp('generation_date').split('-')
submitter_dict = {'submitter__name': description_mol.GetProp(
'submitter_name'), 'submitter__email': description_mol.GetProp(
'submitter_email'), 'submitter__institution': description_mol.
GetProp('submitter_institution'), 'submitter__generation_date':
datetime.date(int(y_m_d[0]), int(y_m_d[1]), int(y_m_d[2])),
'submitter__method': description_mol.GetProp('method')}
query = CompoundSet.objects.filter(**submitter_dict)
if len(query) != 0:
validate_dict = add_warning(molecule_name='File error', field=
'compound set', warning_string=
'a compound set with the auto_generated name ' + query[0].
submitter.unique_name +
' already exists (change method name in blank mol method field)',
validate_dict=validate_dict)
return validate_dict
<|reserved_special_token_0|>
def check_refmol(mol, validate_dict, target=None):
if target:
refmols = mol.GetProp('ref_mols').split(',')
for ref in refmols:
query = Protein.objects.filter(code__contains=target + '-' +
ref.strip())
if len(query) == 0:
validate_dict = add_warning(molecule_name=mol.GetProp(
'_Name'), field='ref_mol', warning_string=
'molecule for ' + str(ref.strip()) +
' does not exist in fragalysis (make sure the code is exactly as it appears in fragalysis - e.g. x0123_0)'
, validate_dict=validate_dict)
return validate_dict
<|reserved_special_token_0|>
def check_SMILES(mol, validate_dict):
"""
Checks if SMILES can be read by rdkit
:mol: rdkit mol read from SD file
:return: Updates validate dictionary with pass/fail message
"""
smi_check = mol.GetProp('original SMILES')
m = Chem.MolFromSmiles(smi_check, sanitize=False)
if m is None:
validate_dict = add_warning(molecule_name=mol.GetProp('_Name'),
field='original SMILES', warning_string='invalid SMILES %s' % (
smi_check,), validate_dict=validate_dict)
return validate_dict
def check_ver_name(blank_mol, version, validate_dict):
"""
Checks if blank mol:
The name (title line) of this molecule should be the
file format specification version e.g. ver_1.0 (as defined in this document)
:blank_mol: rdkit mol of blank mol from an SD file
:return: Updates validate dictionary with pass/fail message
"""
ver_name = blank_mol.GetProp('_Name')
if ver_name != version:
validate_dict = add_warning(molecule_name=blank_mol.GetProp('_Name'
), field='_Name', warning_string=
'illegal version: %s found. Should be %s' % (ver_name, version),
validate_dict=validate_dict)
return validate_dict
def check_blank_mol_props(mol, validate_dict):
fields = ['ref_url', 'submitter_name', 'submitter_email',
'submitter_institution', 'generation_date', 'method']
for field in fields:
validate_dict = missing_field_check(mol, field, validate_dict)
return validate_dict
def check_blank_prop(blank_mol, validate_dict):
"""
Checks if blank mol properties have a description
:blank_mol: rdkit mol of blank mol from an SD file
:return: Updates validate dictionary with pass/fail message
"""
property_dict = blank_mol.GetPropsAsDict()
prop_ignore_list = ['ref_mols', 'ref_pdb']
for key, value in zip(property_dict.keys(), property_dict.values()):
if value == '' and key not in prop_ignore_list:
validate_dict = add_warning(molecule_name=blank_mol.GetProp(
'_Name'), field=key, warning_string=
'description for %s missing' % (key,), validate_dict=
validate_dict)
if key == 'ref_url' and check_url(value) == False:
validate_dict = add_warning(molecule_name=blank_mol.GetProp(
'_Name'), field=key, warning_string=
'illegal URL %s provided' % (value,), validate_dict=
validate_dict)
return validate_dict
<|reserved_special_token_0|>
def check_url(value):
"""
Checks if url provided exists. No internet connection required.
Checks URL using Validators package
:value: value associated with 'ref_url' key
:return: False if URL can not be validated
"""
valid = validators.url(value)
if valid != True:
return False
def check_name_characters(name, validate_dict):
legal_non_alnum = ['-', '_', '.']
for char in name:
if not char.isalnum() and char not in legal_non_alnum:
validate_dict = add_warning(molecule_name=name, field='_Name',
warning_string='illegal character %s found' % (char,),
validate_dict=validate_dict)
return validate_dict
def missing_field_check(mol, field, validate_dict):
props_dict = mol.GetPropsAsDict()
if not field in props_dict.keys():
validate_dict = add_warning(molecule_name=mol.GetProp('_Name'),
field=field, warning_string='%s field not found!' % (field,),
validate_dict=validate_dict)
return validate_dict
<|reserved_special_token_0|>
def validate(sdf_file, target=None, zfile=None):
validated = True
validate_dict = {'molecule_name': [], 'field': [], 'warning_string': []}
validate_dict = check_sdf(sdf_file, validate_dict)
suppl = Chem.SDMolSupplier(sdf_file)
print('%d mols detected (including blank mol)' % (len(suppl),))
blank_mol = suppl[0]
if blank_mol is None:
validate_dict = add_warning(molecule_name='Blank Mol', field='N/A',
warning_string=
'your blank molecule could not be read by rdkit. The molecule must have at least one atom! No other checks were done'
, validate_dict=validate_dict)
validated = False
return validate_dict, validated
validate_dict = check_compound_set(blank_mol, validate_dict)
other_mols = []
for i in range(1, len(suppl)):
other_mols.append(suppl[i])
all_props = []
for mol in suppl:
all_props.extend([key for key in mol.GetPropsAsDict().keys()])
unique_props = list(set(all_props))
for mol in suppl:
props = [key for key in mol.GetPropsAsDict().keys()]
diff_list = np.setdiff1d(props, unique_props)
for diff in diff_list:
add_warning(molecule_name=mol.GetProp('_Name'), field=
'property (missing)', warning_string=
'%s property is missing from this molecule' % (diff,),
validate_dict=validate_dict)
validate_dict = check_ver_name(blank_mol, version, validate_dict)
validate_dict = check_blank_mol_props(blank_mol, validate_dict)
validate_dict = check_blank_prop(blank_mol, validate_dict)
for m in other_mols:
validate_dict = check_mol_props(m, validate_dict)
validate_dict = check_name_characters(m.GetProp('_Name'), validate_dict
)
validate_dict = check_pdb(m, validate_dict, target, zfile)
validate_dict = check_refmol(m, validate_dict, target)
validate_dict = check_field_populated(m, validate_dict)
validate_dict = check_SMILES(m, validate_dict)
if len(validate_dict['molecule_name']) != 0:
validated = False
return validate_dict, validated
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def check_compound_set(description_mol, validate_dict):
y_m_d = description_mol.GetProp('generation_date').split('-')
submitter_dict = {'submitter__name': description_mol.GetProp(
'submitter_name'), 'submitter__email': description_mol.GetProp(
'submitter_email'), 'submitter__institution': description_mol.
GetProp('submitter_institution'), 'submitter__generation_date':
datetime.date(int(y_m_d[0]), int(y_m_d[1]), int(y_m_d[2])),
'submitter__method': description_mol.GetProp('method')}
query = CompoundSet.objects.filter(**submitter_dict)
if len(query) != 0:
validate_dict = add_warning(molecule_name='File error', field=
'compound set', warning_string=
'a compound set with the auto_generated name ' + query[0].
submitter.unique_name +
' already exists (change method name in blank mol method field)',
validate_dict=validate_dict)
return validate_dict
def add_warning(molecule_name, field, warning_string, validate_dict):
validate_dict['molecule_name'].append(molecule_name)
validate_dict['field'].append(field)
validate_dict['warning_string'].append(warning_string)
return validate_dict
<|reserved_special_token_0|>
def check_refmol(mol, validate_dict, target=None):
if target:
refmols = mol.GetProp('ref_mols').split(',')
for ref in refmols:
query = Protein.objects.filter(code__contains=target + '-' +
ref.strip())
if len(query) == 0:
validate_dict = add_warning(molecule_name=mol.GetProp(
'_Name'), field='ref_mol', warning_string=
'molecule for ' + str(ref.strip()) +
' does not exist in fragalysis (make sure the code is exactly as it appears in fragalysis - e.g. x0123_0)'
, validate_dict=validate_dict)
return validate_dict
def check_pdb(mol, validate_dict, target=None, zfile=None):
"""
Checks if .pdb file can be read
:mol: rdkit mol read from SD file
:return: Updates validate dictionary with pass/fail message
"""
test_fp = mol.GetProp('ref_pdb')
if zfile:
pdb_option = mol.GetProp('ref_pdb')
if zfile:
if pdb_option not in zfile['zf_list']:
validate_dict = add_warning(molecule_name=mol.GetProp(
'_Name'), field='ref_pdb', warning_string='path ' + str
(pdb_option) +
" can't be found in uploaded zip file (list: " + str(
zfile['zf_list']) + ')', validate_dict=validate_dict)
if target and not test_fp.endswith('.pdb'):
query = Protein.objects.filter(code__contains=str(target + '-' +
test_fp))
if len(query) == 0:
validate_dict = add_warning(molecule_name=mol.GetProp('_Name'),
field='ref_pdb', warning_string='pdb for ' + str(test_fp) +
' does not exist in fragalysis', validate_dict=validate_dict)
return validate_dict
def check_SMILES(mol, validate_dict):
"""
Checks if SMILES can be read by rdkit
:mol: rdkit mol read from SD file
:return: Updates validate dictionary with pass/fail message
"""
smi_check = mol.GetProp('original SMILES')
m = Chem.MolFromSmiles(smi_check, sanitize=False)
if m is None:
validate_dict = add_warning(molecule_name=mol.GetProp('_Name'),
field='original SMILES', warning_string='invalid SMILES %s' % (
smi_check,), validate_dict=validate_dict)
return validate_dict
def check_ver_name(blank_mol, version, validate_dict):
"""
Checks if blank mol:
The name (title line) of this molecule should be the
file format specification version e.g. ver_1.0 (as defined in this document)
:blank_mol: rdkit mol of blank mol from an SD file
:return: Updates validate dictionary with pass/fail message
"""
ver_name = blank_mol.GetProp('_Name')
if ver_name != version:
validate_dict = add_warning(molecule_name=blank_mol.GetProp('_Name'
), field='_Name', warning_string=
'illegal version: %s found. Should be %s' % (ver_name, version),
validate_dict=validate_dict)
return validate_dict
def check_blank_mol_props(mol, validate_dict):
fields = ['ref_url', 'submitter_name', 'submitter_email',
'submitter_institution', 'generation_date', 'method']
for field in fields:
validate_dict = missing_field_check(mol, field, validate_dict)
return validate_dict
def check_blank_prop(blank_mol, validate_dict):
"""
Checks if blank mol properties have a description
:blank_mol: rdkit mol of blank mol from an SD file
:return: Updates validate dictionary with pass/fail message
"""
property_dict = blank_mol.GetPropsAsDict()
prop_ignore_list = ['ref_mols', 'ref_pdb']
for key, value in zip(property_dict.keys(), property_dict.values()):
if value == '' and key not in prop_ignore_list:
validate_dict = add_warning(molecule_name=blank_mol.GetProp(
'_Name'), field=key, warning_string=
'description for %s missing' % (key,), validate_dict=
validate_dict)
if key == 'ref_url' and check_url(value) == False:
validate_dict = add_warning(molecule_name=blank_mol.GetProp(
'_Name'), field=key, warning_string=
'illegal URL %s provided' % (value,), validate_dict=
validate_dict)
return validate_dict
def check_field_populated(mol, validate_dict):
"""
Checks if all compulsory fields are populated:
1. ref_mols - a comma separated list of the fragments
2. ref_pdb - either (a) a filepath (relative to the sdf file)
to an uploaded pdb file
3. original SMILES - the original smiles of the compound
before any computation was carried out
:mol: rdkit mol other than blank_mol
:return: Updates validate dictionary with pass/fail message
"""
compulsory_fields = ['ref_pdb', 'ref_mols', 'original SMILES']
property_dict = mol.GetPropsAsDict()
for key, value in zip(property_dict.keys(), property_dict.values()):
if value == '' and key in compulsory_fields:
validate_dict = add_warning(molecule_name=mol.GetProp('_Name'),
field=key, warning_string='value for %s missing' % (key,),
validate_dict=validate_dict)
return validate_dict
def check_url(value):
"""
Checks if url provided exists. No internet connection required.
Checks URL using Validators package
:value: value associated with 'ref_url' key
:return: False if URL can not be validated
"""
valid = validators.url(value)
if valid != True:
return False
def check_name_characters(name, validate_dict):
legal_non_alnum = ['-', '_', '.']
for char in name:
if not char.isalnum() and char not in legal_non_alnum:
validate_dict = add_warning(molecule_name=name, field='_Name',
warning_string='illegal character %s found' % (char,),
validate_dict=validate_dict)
return validate_dict
def missing_field_check(mol, field, validate_dict):
props_dict = mol.GetPropsAsDict()
if not field in props_dict.keys():
validate_dict = add_warning(molecule_name=mol.GetProp('_Name'),
field=field, warning_string='%s field not found!' % (field,),
validate_dict=validate_dict)
return validate_dict
def check_mol_props(mol, validate_dict):
fields = ['ref_pdb', 'ref_mols', 'original SMILES']
for field in fields:
validate_dict = missing_field_check(mol, field, validate_dict)
return validate_dict
def validate(sdf_file, target=None, zfile=None):
validated = True
validate_dict = {'molecule_name': [], 'field': [], 'warning_string': []}
validate_dict = check_sdf(sdf_file, validate_dict)
suppl = Chem.SDMolSupplier(sdf_file)
print('%d mols detected (including blank mol)' % (len(suppl),))
blank_mol = suppl[0]
if blank_mol is None:
validate_dict = add_warning(molecule_name='Blank Mol', field='N/A',
warning_string=
'your blank molecule could not be read by rdkit. The molecule must have at least one atom! No other checks were done'
, validate_dict=validate_dict)
validated = False
return validate_dict, validated
validate_dict = check_compound_set(blank_mol, validate_dict)
other_mols = []
for i in range(1, len(suppl)):
other_mols.append(suppl[i])
all_props = []
for mol in suppl:
all_props.extend([key for key in mol.GetPropsAsDict().keys()])
unique_props = list(set(all_props))
for mol in suppl:
props = [key for key in mol.GetPropsAsDict().keys()]
diff_list = np.setdiff1d(props, unique_props)
for diff in diff_list:
add_warning(molecule_name=mol.GetProp('_Name'), field=
'property (missing)', warning_string=
'%s property is missing from this molecule' % (diff,),
validate_dict=validate_dict)
validate_dict = check_ver_name(blank_mol, version, validate_dict)
validate_dict = check_blank_mol_props(blank_mol, validate_dict)
validate_dict = check_blank_prop(blank_mol, validate_dict)
for m in other_mols:
validate_dict = check_mol_props(m, validate_dict)
validate_dict = check_name_characters(m.GetProp('_Name'), validate_dict
)
validate_dict = check_pdb(m, validate_dict, target, zfile)
validate_dict = check_refmol(m, validate_dict, target)
validate_dict = check_field_populated(m, validate_dict)
validate_dict = check_SMILES(m, validate_dict)
if len(validate_dict['molecule_name']) != 0:
validated = False
return validate_dict, validated
<|reserved_special_token_1|>
<|reserved_special_token_0|>
version = 'ver_1.2'
def check_compound_set(description_mol, validate_dict):
y_m_d = description_mol.GetProp('generation_date').split('-')
submitter_dict = {'submitter__name': description_mol.GetProp(
'submitter_name'), 'submitter__email': description_mol.GetProp(
'submitter_email'), 'submitter__institution': description_mol.
GetProp('submitter_institution'), 'submitter__generation_date':
datetime.date(int(y_m_d[0]), int(y_m_d[1]), int(y_m_d[2])),
'submitter__method': description_mol.GetProp('method')}
query = CompoundSet.objects.filter(**submitter_dict)
if len(query) != 0:
validate_dict = add_warning(molecule_name='File error', field=
'compound set', warning_string=
'a compound set with the auto_generated name ' + query[0].
submitter.unique_name +
' already exists (change method name in blank mol method field)',
validate_dict=validate_dict)
return validate_dict
def add_warning(molecule_name, field, warning_string, validate_dict):
validate_dict['molecule_name'].append(molecule_name)
validate_dict['field'].append(field)
validate_dict['warning_string'].append(warning_string)
return validate_dict
def check_sdf(sdf_file, validate_dict):
"""
Checks if .sdf file can be read and follows naming format:
'compound-set_<name>.sdf' with <name> replaced with
the name you wish to give it. e.g. compound-set_fragmenstein.sdf
:sdf_file: is the sdf in the specified format
:return: Updates validate dictionary with pass/fail message
"""
if sdf_file.startswith('compound-set_') and sdf_file.endswith('.sdf'
) is False:
validate_dict = add_warning(molecule_name='File error', field=
'_File_name', warning_string='illegal filename: ' + str(
sdf_file) + ' found', validate_dict=validate_dict)
return validate_dict
def check_refmol(mol, validate_dict, target=None):
if target:
refmols = mol.GetProp('ref_mols').split(',')
for ref in refmols:
query = Protein.objects.filter(code__contains=target + '-' +
ref.strip())
if len(query) == 0:
validate_dict = add_warning(molecule_name=mol.GetProp(
'_Name'), field='ref_mol', warning_string=
'molecule for ' + str(ref.strip()) +
' does not exist in fragalysis (make sure the code is exactly as it appears in fragalysis - e.g. x0123_0)'
, validate_dict=validate_dict)
return validate_dict
def check_pdb(mol, validate_dict, target=None, zfile=None):
"""
Checks if .pdb file can be read
:mol: rdkit mol read from SD file
:return: Updates validate dictionary with pass/fail message
"""
test_fp = mol.GetProp('ref_pdb')
if zfile:
pdb_option = mol.GetProp('ref_pdb')
if zfile:
if pdb_option not in zfile['zf_list']:
validate_dict = add_warning(molecule_name=mol.GetProp(
'_Name'), field='ref_pdb', warning_string='path ' + str
(pdb_option) +
" can't be found in uploaded zip file (list: " + str(
zfile['zf_list']) + ')', validate_dict=validate_dict)
if target and not test_fp.endswith('.pdb'):
query = Protein.objects.filter(code__contains=str(target + '-' +
test_fp))
if len(query) == 0:
validate_dict = add_warning(molecule_name=mol.GetProp('_Name'),
field='ref_pdb', warning_string='pdb for ' + str(test_fp) +
' does not exist in fragalysis', validate_dict=validate_dict)
return validate_dict
def check_SMILES(mol, validate_dict):
"""
Checks if SMILES can be read by rdkit
:mol: rdkit mol read from SD file
:return: Updates validate dictionary with pass/fail message
"""
smi_check = mol.GetProp('original SMILES')
m = Chem.MolFromSmiles(smi_check, sanitize=False)
if m is None:
validate_dict = add_warning(molecule_name=mol.GetProp('_Name'),
field='original SMILES', warning_string='invalid SMILES %s' % (
smi_check,), validate_dict=validate_dict)
return validate_dict
def check_ver_name(blank_mol, version, validate_dict):
"""
Checks if blank mol:
The name (title line) of this molecule should be the
file format specification version e.g. ver_1.0 (as defined in this document)
:blank_mol: rdkit mol of blank mol from an SD file
:return: Updates validate dictionary with pass/fail message
"""
ver_name = blank_mol.GetProp('_Name')
if ver_name != version:
validate_dict = add_warning(molecule_name=blank_mol.GetProp('_Name'
), field='_Name', warning_string=
'illegal version: %s found. Should be %s' % (ver_name, version),
validate_dict=validate_dict)
return validate_dict
def check_blank_mol_props(mol, validate_dict):
fields = ['ref_url', 'submitter_name', 'submitter_email',
'submitter_institution', 'generation_date', 'method']
for field in fields:
validate_dict = missing_field_check(mol, field, validate_dict)
return validate_dict
def check_blank_prop(blank_mol, validate_dict):
"""
Checks if blank mol properties have a description
:blank_mol: rdkit mol of blank mol from an SD file
:return: Updates validate dictionary with pass/fail message
"""
property_dict = blank_mol.GetPropsAsDict()
prop_ignore_list = ['ref_mols', 'ref_pdb']
for key, value in zip(property_dict.keys(), property_dict.values()):
if value == '' and key not in prop_ignore_list:
validate_dict = add_warning(molecule_name=blank_mol.GetProp(
'_Name'), field=key, warning_string=
'description for %s missing' % (key,), validate_dict=
validate_dict)
if key == 'ref_url' and check_url(value) == False:
validate_dict = add_warning(molecule_name=blank_mol.GetProp(
'_Name'), field=key, warning_string=
'illegal URL %s provided' % (value,), validate_dict=
validate_dict)
return validate_dict
def check_field_populated(mol, validate_dict):
"""
Checks if all compulsory fields are populated:
1. ref_mols - a comma separated list of the fragments
2. ref_pdb - either (a) a filepath (relative to the sdf file)
to an uploaded pdb file
3. original SMILES - the original smiles of the compound
before any computation was carried out
:mol: rdkit mol other than blank_mol
:return: Updates validate dictionary with pass/fail message
"""
compulsory_fields = ['ref_pdb', 'ref_mols', 'original SMILES']
property_dict = mol.GetPropsAsDict()
for key, value in zip(property_dict.keys(), property_dict.values()):
if value == '' and key in compulsory_fields:
validate_dict = add_warning(molecule_name=mol.GetProp('_Name'),
field=key, warning_string='value for %s missing' % (key,),
validate_dict=validate_dict)
return validate_dict
def check_url(value):
"""
Checks if url provided exists. No internet connection required.
Checks URL using Validators package
:value: value associated with 'ref_url' key
:return: False if URL can not be validated
"""
valid = validators.url(value)
if valid != True:
return False
def check_name_characters(name, validate_dict):
legal_non_alnum = ['-', '_', '.']
for char in name:
if not char.isalnum() and char not in legal_non_alnum:
validate_dict = add_warning(molecule_name=name, field='_Name',
warning_string='illegal character %s found' % (char,),
validate_dict=validate_dict)
return validate_dict
def missing_field_check(mol, field, validate_dict):
props_dict = mol.GetPropsAsDict()
if not field in props_dict.keys():
validate_dict = add_warning(molecule_name=mol.GetProp('_Name'),
field=field, warning_string='%s field not found!' % (field,),
validate_dict=validate_dict)
return validate_dict
def check_mol_props(mol, validate_dict):
fields = ['ref_pdb', 'ref_mols', 'original SMILES']
for field in fields:
validate_dict = missing_field_check(mol, field, validate_dict)
return validate_dict
def validate(sdf_file, target=None, zfile=None):
validated = True
validate_dict = {'molecule_name': [], 'field': [], 'warning_string': []}
validate_dict = check_sdf(sdf_file, validate_dict)
suppl = Chem.SDMolSupplier(sdf_file)
print('%d mols detected (including blank mol)' % (len(suppl),))
blank_mol = suppl[0]
if blank_mol is None:
validate_dict = add_warning(molecule_name='Blank Mol', field='N/A',
warning_string=
'your blank molecule could not be read by rdkit. The molecule must have at least one atom! No other checks were done'
, validate_dict=validate_dict)
validated = False
return validate_dict, validated
validate_dict = check_compound_set(blank_mol, validate_dict)
other_mols = []
for i in range(1, len(suppl)):
other_mols.append(suppl[i])
all_props = []
for mol in suppl:
all_props.extend([key for key in mol.GetPropsAsDict().keys()])
unique_props = list(set(all_props))
for mol in suppl:
props = [key for key in mol.GetPropsAsDict().keys()]
diff_list = np.setdiff1d(props, unique_props)
for diff in diff_list:
add_warning(molecule_name=mol.GetProp('_Name'), field=
'property (missing)', warning_string=
'%s property is missing from this molecule' % (diff,),
validate_dict=validate_dict)
validate_dict = check_ver_name(blank_mol, version, validate_dict)
validate_dict = check_blank_mol_props(blank_mol, validate_dict)
validate_dict = check_blank_prop(blank_mol, validate_dict)
for m in other_mols:
validate_dict = check_mol_props(m, validate_dict)
validate_dict = check_name_characters(m.GetProp('_Name'), validate_dict
)
validate_dict = check_pdb(m, validate_dict, target, zfile)
validate_dict = check_refmol(m, validate_dict, target)
validate_dict = check_field_populated(m, validate_dict)
validate_dict = check_SMILES(m, validate_dict)
if len(validate_dict['molecule_name']) != 0:
validated = False
return validate_dict, validated
<|reserved_special_token_1|>
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Apr 22 13:19:51 2020
@author: Warren
Script to check sdf file format for Fragalysis upload
"""
from rdkit import Chem
import validators
import numpy as np
import os
from viewer.models import Protein, CompoundSet
import datetime
# Set .sdf format version here
version = 'ver_1.2'
def check_compound_set(description_mol, validate_dict):
    """
    Checks that a compound set with the same submitter details (name, email,
    institution, generation date and method) does not already exist
    :description_mol: rdkit mol of the blank (description) mol from an SD file
    :return: Updates validate dictionary with pass/fail message
    """
y_m_d = description_mol.GetProp('generation_date').split('-')
submitter_dict = {'submitter__name': description_mol.GetProp('submitter_name'),
'submitter__email': description_mol.GetProp('submitter_email'),
'submitter__institution': description_mol.GetProp('submitter_institution'),
'submitter__generation_date': datetime.date(int(y_m_d[0]), int(y_m_d[1]), int(y_m_d[2])),
'submitter__method': description_mol.GetProp('method')}
query = CompoundSet.objects.filter(**submitter_dict)
    if len(query) != 0:
validate_dict = add_warning(molecule_name='File error',
field='compound set',
warning_string="a compound set with the auto_generated name " + query[0].submitter.unique_name + " already exists (change method name in blank mol method field)",
validate_dict=validate_dict)
return validate_dict
def add_warning(molecule_name, field, warning_string, validate_dict):
    """
    Appends a single warning to the validate dictionary, keeping the
    molecule_name, field and warning_string lists index-aligned
    :return: Updates validate dictionary with the new warning
    """
validate_dict['molecule_name'].append(molecule_name)
validate_dict['field'].append(field)
validate_dict['warning_string'].append(warning_string)
return validate_dict
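# Illustrative sketch of how warnings accumulate (values are hypothetical):
#   d = {'molecule_name': [], 'field': [], 'warning_string': []}
#   d = add_warning('my_mol', 'ref_pdb', 'pdb for x0123_0 does not exist in fragalysis', d)
#   # d['molecule_name'] == ['my_mol']; the three lists stay index-aligned.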
def check_sdf(sdf_file, validate_dict):
"""
Checks if .sdf file can be read and follows naming format:
'compound-set_<name>.sdf' with <name> replaced with
the name you wish to give it. e.g. compound-set_fragmenstein.sdf
:sdf_file: is the sdf in the specified format
:return: Updates validate dictionary with pass/fail message
"""
    # Check filename: must start with 'compound-set_' and end with '.sdf'
    if not (sdf_file.startswith("compound-set_") and sdf_file.endswith(".sdf")):
validate_dict = add_warning(molecule_name='File error',
field='_File_name',
warning_string="illegal filename: " + str(sdf_file) + " found",
validate_dict=validate_dict)
return validate_dict
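# Illustrative filename behaviour (hypothetical inputs), assuming the corrected
# condition above: 'compound-set_fragmenstein.sdf' adds no warning, while
# 'fragmenstein.sdf' or 'compound-set_fragmenstein.mol' each add a '_File_name' warning.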
def check_refmol(mol, validate_dict, target=None):
    """
    Checks that every code in the ref_mols property corresponds to a
    protein that exists in fragalysis for the given target
    :mol: rdkit mol read from SD file
    :return: Updates validate dictionary with pass/fail message
    """
if target:
refmols = mol.GetProp('ref_mols').split(',')
for ref in refmols:
query = Protein.objects.filter(code__contains=target + '-' + ref.strip())
if len(query)==0:
validate_dict = add_warning(molecule_name=mol.GetProp('_Name'),
field='ref_mol',
warning_string="molecule for " + str(ref.strip()) + " does not exist in fragalysis (make sure the code is exactly as it appears in fragalysis - e.g. x0123_0)",
validate_dict=validate_dict)
return validate_dict
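# Sketch with hypothetical values: for target='Mpro' and a molecule whose ref_mols
# property is 'x0107_0, x0434_0', each code is stripped and looked up as a Protein
# whose code contains 'Mpro-x0107_0' / 'Mpro-x0434_0'; any miss adds a warning.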
def check_pdb(mol, validate_dict, target=None, zfile=None):
"""
    Checks that the ref_pdb reference resolves, either to a file in the
    uploaded zip or to a protein that exists in fragalysis
:mol: rdkit mol read from SD file
:return: Updates validate dictionary with pass/fail message
"""
    # Check that the ref_pdb reference can be resolved
    test_fp = mol.GetProp('ref_pdb')
    # zfile, when given, looks like {'zip_obj': zf, 'zf_list': zip_names}
    if zfile:
        pdb_option = mol.GetProp('ref_pdb')
        if pdb_option not in zfile['zf_list']:
            validate_dict = add_warning(molecule_name=mol.GetProp('_Name'),
                                        field='ref_pdb',
                                        warning_string="path " + str(pdb_option) + " can't be found in uploaded zip file (list: " + str(zfile['zf_list']) + ")",
                                        validate_dict=validate_dict)
if target and not test_fp.endswith(".pdb"):
query = Protein.objects.filter(code__contains=str(target + '-' + test_fp))
if len(query)==0:
validate_dict = add_warning(molecule_name=mol.GetProp('_Name'),
field='ref_pdb',
warning_string="pdb for " + str(test_fp) + " does not exist in fragalysis",
validate_dict=validate_dict)
return validate_dict
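# Sketch with hypothetical values: ref_pdb may be a path inside the uploaded zip,
# e.g. 'pdbs/my_protein.pdb' (checked against zfile['zf_list']), or a fragalysis
# protein code such as 'x0123_0' (checked against Protein.objects when a target is
# given and the value does not end in '.pdb').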
def check_SMILES(mol, validate_dict):
"""
Checks if SMILES can be read by rdkit
:mol: rdkit mol read from SD file
:return: Updates validate dictionary with pass/fail message
"""
# Check SMILES
smi_check = mol.GetProp('original SMILES')
m = Chem.MolFromSmiles(smi_check, sanitize=False)
if m is None:
validate_dict = add_warning(molecule_name=mol.GetProp('_Name'),
field='original SMILES',
warning_string="invalid SMILES %s" % (smi_check,),
validate_dict=validate_dict)
return validate_dict
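# Note: MolFromSmiles is called with sanitize=False, so only parse errors are caught,
# not valence problems. Sketch: 'CCO' passes, while a malformed string such as
# 'C1CC(' would add an 'invalid SMILES' warning.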
def check_ver_name(blank_mol, version, validate_dict):
"""
    Checks that the name (title line) of the blank molecule is the
    file format specification version, e.g. ver_1.2 (as defined in the
    upload specification document)
:blank_mol: rdkit mol of blank mol from an SD file
:return: Updates validate dictionary with pass/fail message
"""
ver_name = blank_mol.GetProp('_Name')
if ver_name != version:
validate_dict = add_warning(molecule_name=blank_mol.GetProp('_Name'),
field='_Name',
warning_string='illegal version: %s found. Should be %s' % (ver_name, version),
validate_dict=validate_dict)
return validate_dict
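# Sketch: with version = 'ver_1.2' (set above), a blank molecule whose title line is
# 'ver_1.2' passes; a title of 'ver_1.0' adds an 'illegal version' warning.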
def check_blank_mol_props(mol, validate_dict):
# check for compulsory fields in blank mols
fields = ['ref_url', 'submitter_name', 'submitter_email', 'submitter_institution', 'generation_date', 'method']
for field in fields:
validate_dict = missing_field_check(mol, field, validate_dict)
return validate_dict
def check_blank_prop(blank_mol, validate_dict):
"""
Checks if blank mol properties have a description
:blank_mol: rdkit mol of blank mol from an SD file
:return: Updates validate dictionary with pass/fail message
"""
# Check if properties populated
property_dict = blank_mol.GetPropsAsDict()
# Properties to ignore
prop_ignore_list = ['ref_mols', 'ref_pdb']
    for key, value in property_dict.items():
if value == '' and key not in prop_ignore_list:
validate_dict = add_warning(molecule_name=blank_mol.GetProp('_Name'),
field=key,
warning_string='description for %s missing' % (key,),
validate_dict=validate_dict)
if key == 'ref_url' and check_url(value) == False:
validate_dict = add_warning(molecule_name=blank_mol.GetProp('_Name'),
field=key,
warning_string='illegal URL %s provided' % (value,),
validate_dict=validate_dict)
return validate_dict
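# Sketch with hypothetical values: a blank mol carrying submitter_name='Jane Doe',
# method='' and ref_url='not a url' would add a 'description for method missing'
# warning and an 'illegal URL' warning; empty ref_mols / ref_pdb values on the
# blank mol are ignored.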
def check_field_populated(mol, validate_dict):
"""
Checks if all compulsory fields are populated:
1. ref_mols - a comma separated list of the fragments
    2. ref_pdb - either a filepath (relative to the sdf file) to an
    uploaded pdb file, or the code of a protein that exists in fragalysis
3. original SMILES - the original smiles of the compound
before any computation was carried out
:mol: rdkit mol other than blank_mol
:return: Updates validate dictionary with pass/fail message
"""
    # Compulsory fields
    compulsory_fields = ['ref_pdb', 'ref_mols', 'original SMILES']
    property_dict = mol.GetPropsAsDict()
    for key, value in property_dict.items():
if value == '' and key in compulsory_fields:
validate_dict = add_warning(molecule_name=mol.GetProp('_Name'),
field=key,
warning_string='value for %s missing' % (key,),
validate_dict=validate_dict)
return validate_dict
def check_url(value):
"""
    Checks that the provided URL is a validly formatted URL.
    Only the format is checked (using the validators package), so no
    internet connection is required; the URL is not actually fetched.
    :value: value associated with 'ref_url' key
    :return: False if the URL is not valid, True otherwise
    """
    if not validators.url(value):
        return False
    return True
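# Sketch: check_url('not a url') returns False, while a well-formed URL such as
# 'https://example.com' passes the validators check and returns True.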
def check_name_characters(name, validate_dict):
    """
    Checks that the molecule name only contains alphanumeric characters
    or the legal non-alphanumeric characters '-', '_' and '.'
    :name: the _Name property of an rdkit mol read from SD file
    :return: Updates validate dictionary with pass/fail message
    """
legal_non_alnum = ['-', '_', '.']
for char in name:
if not char.isalnum() and char not in legal_non_alnum:
validate_dict = add_warning(molecule_name=name,
field='_Name',
warning_string='illegal character %s found' % (char,),
validate_dict=validate_dict)
return validate_dict
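# Sketch: a name such as 'my-mol_1.0' passes (alphanumerics plus '-', '_' and '.'),
# while 'my mol!' would add warnings for the space and the '!' character.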
def missing_field_check(mol, field, validate_dict):
    """
    Checks that the given property field is present on the molecule
    :mol: rdkit mol read from SD file
    :return: Updates validate dictionary with pass/fail message
    """
    props_dict = mol.GetPropsAsDict()
    if field not in props_dict:
validate_dict = add_warning(molecule_name=mol.GetProp('_Name'),
field=field,
warning_string='%s field not found!' % (field,),
validate_dict=validate_dict)
return validate_dict
def check_mol_props(mol, validate_dict):
# check for missing fields
fields = ['ref_pdb', 'ref_mols', 'original SMILES']
for field in fields:
validate_dict = missing_field_check(mol, field, validate_dict)
return validate_dict
def validate(sdf_file, target=None, zfile=None):
validated = True
validate_dict = {'molecule_name': [],
'field': [],
'warning_string': []}
# Check sdf filename & can be read
validate_dict = check_sdf(sdf_file, validate_dict)
suppl = Chem.SDMolSupplier(sdf_file)
print('%d mols detected (including blank mol)' % (len(suppl),))
blank_mol = suppl[0]
if blank_mol is None:
validate_dict = add_warning(molecule_name='Blank Mol',
field='N/A',
warning_string='your blank molecule could not be read by rdkit. The molecule must have at least one atom! No other checks were done',
validate_dict=validate_dict)
validated = False
return validate_dict, validated
validate_dict = check_compound_set(blank_mol, validate_dict)
other_mols = []
for i in range(1, len(suppl)):
other_mols.append(suppl[i])
# all mol checks
# - all mols have the same properties
all_props = []
for mol in suppl:
all_props.extend([key for key in mol.GetPropsAsDict().keys()])
unique_props = list(set(all_props))
for mol in suppl:
props = [key for key in mol.GetPropsAsDict().keys()]
        diff_list = np.setdiff1d(unique_props, props)
for diff in diff_list:
add_warning(molecule_name=mol.GetProp('_Name'),
field='property (missing)',
warning_string='%s property is missing from this molecule' % (diff,),
validate_dict=validate_dict)
# Check version in blank mol
validate_dict = check_ver_name(blank_mol, version, validate_dict)
    # Check compulsory fields in blank mol props
validate_dict = check_blank_mol_props(blank_mol, validate_dict)
# Check properties have been described and validate url
validate_dict = check_blank_prop(blank_mol, validate_dict)
# main mols checks
    # - missing compulsory fields
    # - check name characters
    # - check pdb assignment and if pdb filepath exists
    # - check compulsory fields are populated
    # - check SMILES can be opened by rdkit
    # (the fragalysis API is checked for the pdb when a target is given)
for m in other_mols:
validate_dict = check_mol_props(m, validate_dict)
validate_dict = check_name_characters(m.GetProp('_Name'), validate_dict)
validate_dict = check_pdb(m, validate_dict, target, zfile)
validate_dict = check_refmol(m, validate_dict, target)
validate_dict = check_field_populated(m, validate_dict)
validate_dict = check_SMILES(m, validate_dict)
if len(validate_dict['molecule_name']) != 0:
validated = False
return validate_dict, validated
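# Minimal usage sketch. This assumes a configured Django environment (the viewer
# models are imported at module level); the file and target names are hypothetical.
if __name__ == '__main__':
    warnings, ok = validate('compound-set_example.sdf', target='Mpro')
    if not ok:
        for name, field, msg in zip(warnings['molecule_name'],
                                    warnings['field'],
                                    warnings['warning_string']):
            print('%s | %s | %s' % (name, field, msg))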
No other checks were done'\n , validate_dict=validate_dict)\n validated = False\n return validate_dict, validated\n validate_dict = check_compound_set(blank_mol, validate_dict)\n other_mols = []\n for i in range(1, len(suppl)):\n other_mols.append(suppl[i])\n all_props = []\n for mol in suppl:\n all_props.extend([key for key in mol.GetPropsAsDict().keys()])\n unique_props = list(set(all_props))\n for mol in suppl:\n props = [key for key in mol.GetPropsAsDict().keys()]\n diff_list = np.setdiff1d(props, unique_props)\n for diff in diff_list:\n add_warning(molecule_name=mol.GetProp('_Name'), field=\n 'property (missing)', warning_string=\n '%s property is missing from this molecule' % (diff,),\n validate_dict=validate_dict)\n validate_dict = check_ver_name(blank_mol, version, validate_dict)\n validate_dict = check_blank_mol_props(blank_mol, validate_dict)\n validate_dict = check_blank_prop(blank_mol, validate_dict)\n for m in other_mols:\n validate_dict = check_mol_props(m, validate_dict)\n validate_dict = check_name_characters(m.GetProp('_Name'), validate_dict\n )\n validate_dict = check_pdb(m, validate_dict, target, zfile)\n validate_dict = check_refmol(m, validate_dict, target)\n validate_dict = check_field_populated(m, validate_dict)\n validate_dict = check_SMILES(m, validate_dict)\n if len(validate_dict['molecule_name']) != 0:\n validated = False\n return validate_dict, validated\n",
"step-5": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Apr 22 13:19:51 2020\n@author: Warren\nScript to check sdf file format for Fragalysis upload\n\"\"\"\n\nfrom rdkit import Chem\nimport validators\nimport numpy as np\nimport os\nfrom viewer.models import Protein, CompoundSet\nimport datetime\n\n# Set .sdf format version here\nversion = 'ver_1.2'\n\ndef check_compound_set(description_mol, validate_dict):\n y_m_d = description_mol.GetProp('generation_date').split('-')\n\n submitter_dict = {'submitter__name': description_mol.GetProp('submitter_name'),\n 'submitter__email': description_mol.GetProp('submitter_email'),\n 'submitter__institution': description_mol.GetProp('submitter_institution'),\n 'submitter__generation_date': datetime.date(int(y_m_d[0]), int(y_m_d[1]), int(y_m_d[2])),\n 'submitter__method': description_mol.GetProp('method')}\n\n query = CompoundSet.objects.filter(**submitter_dict)\n\n if len(query)!=0:\n validate_dict = add_warning(molecule_name='File error',\n field='compound set',\n warning_string=\"a compound set with the auto_generated name \" + query[0].submitter.unique_name + \" already exists (change method name in blank mol method field)\",\n validate_dict=validate_dict)\n\n return validate_dict\n\n\ndef add_warning(molecule_name, field, warning_string, validate_dict):\n validate_dict['molecule_name'].append(molecule_name)\n validate_dict['field'].append(field)\n validate_dict['warning_string'].append(warning_string)\n\n return validate_dict\n\n\ndef check_sdf(sdf_file, validate_dict):\n \"\"\"\n Checks if .sdf file can be read and follows naming format:\n 'compound-set_<name>.sdf' with <name> replaced with\n the name you wish to give it. e.g. compound-set_fragmenstein.sdf\n\n :sdf_file: is the sdf in the specified format\n :return: Updates validate dictionary with pass/fail message\n \"\"\"\n # Check filename\n if sdf_file.startswith(\"compound-set_\") and sdf_file.endswith(\".sdf\") is False:\n validate_dict = add_warning(molecule_name='File error',\n field='_File_name',\n warning_string=\"illegal filename: \" + str(sdf_file) + \" found\",\n validate_dict=validate_dict)\n\n return validate_dict\n\n\ndef check_refmol(mol, validate_dict, target=None):\n if target:\n refmols = mol.GetProp('ref_mols').split(',')\n for ref in refmols:\n query = Protein.objects.filter(code__contains=target + '-' + ref.strip())\n if len(query)==0:\n validate_dict = add_warning(molecule_name=mol.GetProp('_Name'),\n field='ref_mol',\n warning_string=\"molecule for \" + str(ref.strip()) + \" does not exist in fragalysis (make sure the code is exactly as it appears in fragalysis - e.g. 
x0123_0)\",\n validate_dict=validate_dict)\n return validate_dict\n \n\ndef check_pdb(mol, validate_dict, target=None, zfile=None):\n \"\"\"\n Checks if .pdb file can be read\n\n :mol: rdkit mol read from SD file\n :return: Updates validate dictionary with pass/fail message\n \"\"\"\n\n # Check if pdb filepath given and exists\n test_fp = mol.GetProp('ref_pdb')\n\n # {'zip_obj': zf, 'zf_list': zip_names}\n\n if zfile:\n pdb_option = mol.GetProp('ref_pdb')\n # name = pdb_option.split('/')[-1]\n if zfile:\n if pdb_option not in zfile['zf_list']:\n validate_dict = add_warning(molecule_name=mol.GetProp('_Name'),\n field='ref_pdb',\n warning_string=\"path \" + str(pdb_option) + \" can't be found in uploaded zip file (list: \" + str(zfile['zf_list']) + \")\",\n validate_dict=validate_dict)\n\n # else:\n if target and not test_fp.endswith(\".pdb\"):\n query = Protein.objects.filter(code__contains=str(target + '-' + test_fp))\n if len(query)==0:\n validate_dict = add_warning(molecule_name=mol.GetProp('_Name'),\n field='ref_pdb',\n warning_string=\"pdb for \" + str(test_fp) + \" does not exist in fragalysis\",\n validate_dict=validate_dict)\n\n return validate_dict\n\n\ndef check_SMILES(mol, validate_dict):\n \"\"\"\n Checks if SMILES can be read by rdkit\n\n :mol: rdkit mol read from SD file\n :return: Updates validate dictionary with pass/fail message\n \"\"\"\n # Check SMILES\n smi_check = mol.GetProp('original SMILES')\n\n m = Chem.MolFromSmiles(smi_check, sanitize=False)\n if m is None:\n validate_dict = add_warning(molecule_name=mol.GetProp('_Name'),\n field='original SMILES',\n warning_string=\"invalid SMILES %s\" % (smi_check,),\n validate_dict=validate_dict)\n\n return validate_dict\n\n\ndef check_ver_name(blank_mol, version, validate_dict):\n \"\"\"\n Checks if blank mol:\n The name (title line) of this molecule should be the\n file format specification version e.g. ver_1.0 (as defined in this document)\n\n :blank_mol: rdkit mol of blank mol from an SD file\n :return: Updates validate dictionary with pass/fail message\n \"\"\"\n\n ver_name = blank_mol.GetProp('_Name')\n if ver_name != version:\n validate_dict = add_warning(molecule_name=blank_mol.GetProp('_Name'),\n field='_Name',\n warning_string='illegal version: %s found. 
Should be %s' % (ver_name, version),\n validate_dict=validate_dict)\n\n return validate_dict\n\n\ndef check_blank_mol_props(mol, validate_dict):\n # check for compulsory fields in blank mols\n fields = ['ref_url', 'submitter_name', 'submitter_email', 'submitter_institution', 'generation_date', 'method']\n for field in fields:\n validate_dict = missing_field_check(mol, field, validate_dict)\n\n return validate_dict\n\n\ndef check_blank_prop(blank_mol, validate_dict):\n \"\"\"\n Checks if blank mol properties have a description\n\n :blank_mol: rdkit mol of blank mol from an SD file\n :return: Updates validate dictionary with pass/fail message\n \"\"\"\n\n # Check if properties populated\n property_dict = blank_mol.GetPropsAsDict()\n\n # Properties to ignore\n prop_ignore_list = ['ref_mols', 'ref_pdb']\n\n for key, value in zip(property_dict.keys(), property_dict.values()):\n if value == '' and key not in prop_ignore_list:\n validate_dict = add_warning(molecule_name=blank_mol.GetProp('_Name'),\n field=key,\n warning_string='description for %s missing' % (key,),\n validate_dict=validate_dict)\n if key == 'ref_url' and check_url(value) == False:\n validate_dict = add_warning(molecule_name=blank_mol.GetProp('_Name'),\n field=key,\n warning_string='illegal URL %s provided' % (value,),\n validate_dict=validate_dict)\n\n return validate_dict\n\n\ndef check_field_populated(mol, validate_dict):\n \"\"\"\n Checks if all compulsory fields are populated:\n 1. ref_mols - a comma separated list of the fragments\n 2. ref_pdb - either (a) a filepath (relative to the sdf file)\n to an uploaded pdb file\n 3. original SMILES - the original smiles of the compound\n before any computation was carried out\n\n :mol: rdkit mol other than blank_mol\n :return: Updates validate dictionary with pass/fail message\n \"\"\"\n\n # Compuslory fields\n compulsory_fields = ['ref_pdb', 'ref_mols', 'original SMILES']\n\n property_dict = mol.GetPropsAsDict()\n for key, value in zip(property_dict.keys(), property_dict.values()):\n if value == '' and key in compulsory_fields:\n validate_dict = add_warning(molecule_name=mol.GetProp('_Name'),\n field=key,\n warning_string='value for %s missing' % (key,),\n validate_dict=validate_dict)\n\n return validate_dict\n\n\ndef check_url(value):\n \"\"\"\n Checks if url provided exists. No internet connection required.\n Checks URL using Validators package\n\n :value: value associated with 'ref_url' key\n :return: False if URL can not be validated\n \"\"\"\n\n valid = validators.url(value)\n if valid != True:\n return False\n\n\ndef check_name_characters(name, validate_dict):\n legal_non_alnum = ['-', '_', '.']\n for char in name:\n if not char.isalnum() and char not in legal_non_alnum:\n validate_dict = add_warning(molecule_name=name,\n field='_Name',\n warning_string='illegal character %s found' % (char,),\n validate_dict=validate_dict)\n\n return validate_dict\n\n\ndef missing_field_check(mol, field, validate_dict):\n props_dict = mol.GetPropsAsDict()\n if not field in props_dict.keys():\n validate_dict = add_warning(molecule_name=mol.GetProp('_Name'),\n field=field,\n warning_string='%s field not found!' 
% (field,),\n validate_dict=validate_dict)\n\n return validate_dict\n\n\ndef check_mol_props(mol, validate_dict):\n # check for missing fields\n fields = ['ref_pdb', 'ref_mols', 'original SMILES']\n for field in fields:\n validate_dict = missing_field_check(mol, field, validate_dict)\n\n return validate_dict\n\n\ndef validate(sdf_file, target=None, zfile=None):\n validated = True\n validate_dict = {'molecule_name': [],\n 'field': [],\n 'warning_string': []}\n\n # Check sdf filename & can be read\n validate_dict = check_sdf(sdf_file, validate_dict)\n\n suppl = Chem.SDMolSupplier(sdf_file)\n print('%d mols detected (including blank mol)' % (len(suppl),))\n blank_mol = suppl[0]\n if blank_mol is None:\n validate_dict = add_warning(molecule_name='Blank Mol',\n field='N/A',\n warning_string='your blank molecule could not be read by rdkit. The molecule must have at least one atom! No other checks were done',\n validate_dict=validate_dict)\n validated = False\n return validate_dict, validated\n validate_dict = check_compound_set(blank_mol, validate_dict)\n other_mols = []\n for i in range(1, len(suppl)):\n other_mols.append(suppl[i])\n\n # all mol checks\n # - all mols have the same properties\n all_props = []\n for mol in suppl:\n all_props.extend([key for key in mol.GetPropsAsDict().keys()])\n unique_props = list(set(all_props))\n for mol in suppl:\n props = [key for key in mol.GetPropsAsDict().keys()]\n diff_list = np.setdiff1d(props, unique_props)\n for diff in diff_list:\n add_warning(molecule_name=mol.GetProp('_Name'),\n field='property (missing)',\n warning_string='%s property is missing from this molecule' % (diff,),\n validate_dict=validate_dict)\n\n # Check version in blank mol\n validate_dict = check_ver_name(blank_mol, version, validate_dict)\n\n # Check compuslory fields in blank mol props\n validate_dict = check_blank_mol_props(blank_mol, validate_dict)\n\n # Check properties have been described and validate url\n validate_dict = check_blank_prop(blank_mol, validate_dict)\n\n # main mols checks\n # - missing compulsary fields\n # - check name characters\n # - check pdb assignment and if pdb filepath exists\n # - check compulsory field populated\n # - check SMILES can be opended by rdkit\n # (check api for pdb if fragalysis)\n for m in other_mols:\n validate_dict = check_mol_props(m, validate_dict)\n validate_dict = check_name_characters(m.GetProp('_Name'), validate_dict)\n validate_dict = check_pdb(m, validate_dict, target, zfile)\n validate_dict = check_refmol(m, validate_dict, target)\n validate_dict = check_field_populated(m, validate_dict)\n validate_dict = check_SMILES(m, validate_dict)\n\n if len(validate_dict['molecule_name']) != 0:\n validated = False\n\n return validate_dict, validated\n",
"step-ids": [
9,
10,
14,
16,
18
]
}
|
[
9,
10,
14,
16,
18
] |
####
# This is the script for storing the schema of your TerminusDB
# database for your project.
# Use 'terminusdb commit' to commit changes to the database and
# use 'terminusdb sync' to change this file according to
# the existing database schema
####
from typing import List
from terminusdb_client.woqlschema import (
DocumentTemplate,
EnumTemplate,
RandomKey,
ValueHashKey,
)


class Address(DocumentTemplate):
    _subdocument = []
    city: "City"
    coordinates: List["Coordinates"]
    postal_code: str
    street: str


class Brewery(DocumentTemplate):
    _key = RandomKey()
    address_of: "Address"
    name: str
    phone: str
    type_of: "Brewery_Type"
    website_url: str


class Brewery_Type(EnumTemplate):
    micro = ()
    nano = ()
    regional = ()
    brewpub = ()
    large = ()
    planning = ()
    bar = ()
    contract = ()
    proprietor = ()
    closed = ()
    taproom = ()


class City(DocumentTemplate):
    _key = ValueHashKey()
    name: str
    state: "State"


class Coordinates(DocumentTemplate):
    _key = RandomKey()
    latitude: float
    longitude: float


class Country(DocumentTemplate):
    _key = ValueHashKey()
    name: str


class State(DocumentTemplate):
    _key = ValueHashKey()
    country: "Country"
    name: str
|
normal
|
{
"blob_id": "f702cdef3782ddc96244f3cf8e2026581d60baa9",
"index": 1537,
"step-1": "<mask token>\n\n\nclass State(DocumentTemplate):\n _key = ValueHashKey()\n country: 'Country'\n name: str\n",
"step-2": "<mask token>\n\n\nclass Address(DocumentTemplate):\n <mask token>\n city: 'City'\n coordinates: List['Coordinates']\n postal_code: str\n street: str\n\n\nclass Brewery(DocumentTemplate):\n _key = RandomKey()\n address_of: 'Address'\n name: str\n phone: str\n type_of: 'Brewery_Type'\n website_url: str\n\n\nclass Brewery_Type(EnumTemplate):\n micro = ()\n nano = ()\n regional = ()\n brewpub = ()\n large = ()\n planning = ()\n bar = ()\n contract = ()\n proprietor = ()\n closed = ()\n taproom = ()\n\n\nclass City(DocumentTemplate):\n _key = ValueHashKey()\n name: str\n state: 'State'\n\n\nclass Coordinates(DocumentTemplate):\n _key = RandomKey()\n latitude: float\n longitude: float\n\n\nclass Country(DocumentTemplate):\n _key = ValueHashKey()\n name: str\n\n\nclass State(DocumentTemplate):\n _key = ValueHashKey()\n country: 'Country'\n name: str\n",
"step-3": "<mask token>\n\n\nclass Address(DocumentTemplate):\n _subdocument = []\n city: 'City'\n coordinates: List['Coordinates']\n postal_code: str\n street: str\n\n\nclass Brewery(DocumentTemplate):\n _key = RandomKey()\n address_of: 'Address'\n name: str\n phone: str\n type_of: 'Brewery_Type'\n website_url: str\n\n\nclass Brewery_Type(EnumTemplate):\n micro = ()\n nano = ()\n regional = ()\n brewpub = ()\n large = ()\n planning = ()\n bar = ()\n contract = ()\n proprietor = ()\n closed = ()\n taproom = ()\n\n\nclass City(DocumentTemplate):\n _key = ValueHashKey()\n name: str\n state: 'State'\n\n\nclass Coordinates(DocumentTemplate):\n _key = RandomKey()\n latitude: float\n longitude: float\n\n\nclass Country(DocumentTemplate):\n _key = ValueHashKey()\n name: str\n\n\nclass State(DocumentTemplate):\n _key = ValueHashKey()\n country: 'Country'\n name: str\n",
"step-4": "from typing import List\nfrom terminusdb_client.woqlschema import DocumentTemplate, EnumTemplate, RandomKey, ValueHashKey\n\n\nclass Address(DocumentTemplate):\n _subdocument = []\n city: 'City'\n coordinates: List['Coordinates']\n postal_code: str\n street: str\n\n\nclass Brewery(DocumentTemplate):\n _key = RandomKey()\n address_of: 'Address'\n name: str\n phone: str\n type_of: 'Brewery_Type'\n website_url: str\n\n\nclass Brewery_Type(EnumTemplate):\n micro = ()\n nano = ()\n regional = ()\n brewpub = ()\n large = ()\n planning = ()\n bar = ()\n contract = ()\n proprietor = ()\n closed = ()\n taproom = ()\n\n\nclass City(DocumentTemplate):\n _key = ValueHashKey()\n name: str\n state: 'State'\n\n\nclass Coordinates(DocumentTemplate):\n _key = RandomKey()\n latitude: float\n longitude: float\n\n\nclass Country(DocumentTemplate):\n _key = ValueHashKey()\n name: str\n\n\nclass State(DocumentTemplate):\n _key = ValueHashKey()\n country: 'Country'\n name: str\n",
"step-5": "####\n# This is the script for storing the schema of your TerminusDB\n# database for your project.\n# Use 'terminusdb commit' to commit changes to the database and\n# use 'terminusdb sync' to change this file according to\n# the exsisting database schema\n####\n\nfrom typing import List\n\nfrom terminusdb_client.woqlschema import (\n DocumentTemplate,\n EnumTemplate,\n RandomKey,\n ValueHashKey,\n)\n\n\nclass Address(DocumentTemplate):\n _subdocument = []\n city: \"City\"\n coordinates: List[\"Coordinates\"]\n postal_code: str\n street: str\n\n\nclass Brewery(DocumentTemplate):\n _key = RandomKey()\n address_of: \"Address\"\n name: str\n phone: str\n type_of: \"Brewery_Type\"\n website_url: str\n\n\nclass Brewery_Type(EnumTemplate):\n micro = ()\n nano = ()\n regional = ()\n brewpub = ()\n large = ()\n planning = ()\n bar = ()\n contract = ()\n proprietor = ()\n closed = ()\n taproom = ()\n\n\nclass City(DocumentTemplate):\n _key = ValueHashKey()\n name: str\n state: \"State\"\n\n\nclass Coordinates(DocumentTemplate):\n _key = RandomKey()\n latitude: float\n longitude: float\n\n\nclass Country(DocumentTemplate):\n _key = ValueHashKey()\n name: str\n\n\nclass State(DocumentTemplate):\n _key = ValueHashKey()\n country: \"Country\"\n name: str\n",
"step-ids": [
2,
13,
14,
15,
16
]
}
|
[
2,
13,
14,
15,
16
] |
<|reserved_special_token_0|>
class AllabolaSpider(scrapy.Spider):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
f.write('fn,ln,zip,ct,st,country,dob,doby,gen,age,uid')
f.write('\n')
f.close()
try:
connection = MySQLdb.connect(host, user, password, DB_name, charset
='utf8')
cursor = connection.cursor()
except Exception as e:
print(str(e))
try:
strquery2 = """CREATE TABLE tbl_allabola(Id INT NOT NULL AUTO_INCREMENT,
Registration_no varchar(250) DEFAULT NULL,
First_name varchar(250) DEFAULT NULL,
Middle_name varchar(250) DEFAULT NULL,
Famaily_name varchar(250) DEFAULT NULL,
Gender longtext DEFAULT NULL,
Year longtext DEFAULT NULL,
Board_member longtext DEFAULT NULL,
PRIMARY KEY (`Id`))"""
cursor.execute(strquery2)
except Exception as e:
print(str(e))
<|reserved_special_token_0|>
def parse(self, response):
Post_Code = response.meta['Post_Code']
Registration_no = response.url
Registration_no = Registration_no.split('.se/')[1]
Registration_no = Registration_no.split('/')[0]
print(Registration_no)
ALl_data = response.xpath(
'//*[@class="list--personnel accordion-body"]/li')
for datas in ALl_data:
gender = datas.xpath(
".//div[1]/span[contains(@class,'male')]/@class"
).extract_first()
gender = gender.split('--')[1]
gender = gender.encode('utf-8')
if gender == 'male':
gender = 'm'
elif gender == 'female':
gender = 'f'
name = datas.xpath('.//div[2]/a/text()').extract_first()
name = name.strip()
name = name.split(' (f. ')
year = name[1].replace(')', '')
if year != None:
age = str(2019 - int(year))
fullname = name[0]
fullname = fullname.split(' ')
firstname = ''
middlename = ''
familyname = ''
if len(fullname) == 3:
firstname = fullname[0]
middlename = fullname[1]
familyname = fullname[2]
elif len(fullname) == 2:
firstname = fullname[0]
middlename = fullname[1]
elif len(fullname) > 3:
firstname = fullname[0]
familyname = fullname[-1]
middlename = ''
for k in range(1, len(fullname) - 1):
if middlename == '':
middlename = fullname[k]
else:
middlename = middlename + ' ' + fullname[k]
type = datas.xpath('.//div[2]/text()').extract()[2]
Board_member = type.replace('\n', '').strip()
if gender != '':
f = open('Facebook_Auidance.csv', 'a')
try:
f.write(firstname + ',' + familyname + ',' + Post_Code +
',' + '' + ',' + '' + ',' + 'Sweden' + ',' + '' +
',' + year + ',' + gender + ',' + age + ',' + '')
except Exception as e:
f.close()
try:
f.write('\n')
f.close()
except Exception as e:
""""""
if gender != '':
try:
reload(sys)
sys.setdefaultencoding('utf8')
self.cursor.execute(
'INSERT INTO tbl_allabola(Registration_no,First_name,Middle_name,Famaily_name,Gender,Year,Board_member)VALUES (%s,%s,%s,%s,%s,%s,%s)'
, (Registration_no, firstname, middlename,
familyname, gender, year, Board_member))
self.connection.commit()
except Exception as e:
print(e)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class AllabolaSpider(scrapy.Spider):
name = 'allabola'
allowed_domains = ['https://www.allabolag.se']
start_urls = []
host = '104.197.180.57'
user = 'root'
password = 'root'
DB_name = 'db_allabolag'
f = open('Facebook_Auidance.csv', 'w')
f.write('fn,ln,zip,ct,st,country,dob,doby,gen,age,uid')
f.write('\n')
f.close()
try:
connection = MySQLdb.connect(host, user, password, DB_name, charset
='utf8')
cursor = connection.cursor()
except Exception as e:
print(str(e))
try:
strquery2 = """CREATE TABLE tbl_allabola(Id INT NOT NULL AUTO_INCREMENT,
Registration_no varchar(250) DEFAULT NULL,
First_name varchar(250) DEFAULT NULL,
Middle_name varchar(250) DEFAULT NULL,
Famaily_name varchar(250) DEFAULT NULL,
Gender longtext DEFAULT NULL,
Year longtext DEFAULT NULL,
Board_member longtext DEFAULT NULL,
PRIMARY KEY (`Id`))"""
cursor.execute(strquery2)
except Exception as e:
print(str(e))
def start_requests(self):
try:
wb = openpyxl.load_workbook('/home//Business_numbers.xlsx')
ws = wb.get_active_sheet()
row_count = ws.max_row
for h in range(2, row_count):
regi_number = ws.cell(row=h, column=2).value
Post_Code = ws.cell(row=h, column=4).value
main_link = 'https://www.allabolag.se/' + str(regi_number
) + '/befattningar'
yield scrapy.FormRequest(main_link, callback=self.parse,
dont_filter=True, meta={'Post_Code': Post_Code})
except Exception as e:
print(e)
def parse(self, response):
Post_Code = response.meta['Post_Code']
Registration_no = response.url
Registration_no = Registration_no.split('.se/')[1]
Registration_no = Registration_no.split('/')[0]
print(Registration_no)
ALl_data = response.xpath(
'//*[@class="list--personnel accordion-body"]/li')
for datas in ALl_data:
gender = datas.xpath(
".//div[1]/span[contains(@class,'male')]/@class"
).extract_first()
gender = gender.split('--')[1]
gender = gender.encode('utf-8')
if gender == 'male':
gender = 'm'
elif gender == 'female':
gender = 'f'
name = datas.xpath('.//div[2]/a/text()').extract_first()
name = name.strip()
name = name.split(' (f. ')
year = name[1].replace(')', '')
if year != None:
age = str(2019 - int(year))
fullname = name[0]
fullname = fullname.split(' ')
firstname = ''
middlename = ''
familyname = ''
if len(fullname) == 3:
firstname = fullname[0]
middlename = fullname[1]
familyname = fullname[2]
elif len(fullname) == 2:
firstname = fullname[0]
middlename = fullname[1]
elif len(fullname) > 3:
firstname = fullname[0]
familyname = fullname[-1]
middlename = ''
for k in range(1, len(fullname) - 1):
if middlename == '':
middlename = fullname[k]
else:
middlename = middlename + ' ' + fullname[k]
type = datas.xpath('.//div[2]/text()').extract()[2]
Board_member = type.replace('\n', '').strip()
if gender != '':
f = open('Facebook_Auidance.csv', 'a')
try:
f.write(firstname + ',' + familyname + ',' + Post_Code +
',' + '' + ',' + '' + ',' + 'Sweden' + ',' + '' +
',' + year + ',' + gender + ',' + age + ',' + '')
except Exception as e:
f.close()
try:
f.write('\n')
f.close()
except Exception as e:
""""""
if gender != '':
try:
reload(sys)
sys.setdefaultencoding('utf8')
self.cursor.execute(
'INSERT INTO tbl_allabola(Registration_no,First_name,Middle_name,Famaily_name,Gender,Year,Board_member)VALUES (%s,%s,%s,%s,%s,%s,%s)'
, (Registration_no, firstname, middlename,
familyname, gender, year, Board_member))
self.connection.commit()
except Exception as e:
print(e)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class AllabolaSpider(scrapy.Spider):
name = 'allabola'
allowed_domains = ['https://www.allabolag.se']
start_urls = []
host = '104.197.180.57'
user = 'root'
password = 'root'
DB_name = 'db_allabolag'
f = open('Facebook_Auidance.csv', 'w')
f.write('fn,ln,zip,ct,st,country,dob,doby,gen,age,uid')
f.write('\n')
f.close()
try:
connection = MySQLdb.connect(host, user, password, DB_name, charset
='utf8')
cursor = connection.cursor()
except Exception as e:
print(str(e))
try:
strquery2 = """CREATE TABLE tbl_allabola(Id INT NOT NULL AUTO_INCREMENT,
Registration_no varchar(250) DEFAULT NULL,
First_name varchar(250) DEFAULT NULL,
Middle_name varchar(250) DEFAULT NULL,
Famaily_name varchar(250) DEFAULT NULL,
Gender longtext DEFAULT NULL,
Year longtext DEFAULT NULL,
Board_member longtext DEFAULT NULL,
PRIMARY KEY (`Id`))"""
cursor.execute(strquery2)
except Exception as e:
print(str(e))
def start_requests(self):
try:
wb = openpyxl.load_workbook('/home//Business_numbers.xlsx')
ws = wb.get_active_sheet()
row_count = ws.max_row
for h in range(2, row_count):
regi_number = ws.cell(row=h, column=2).value
Post_Code = ws.cell(row=h, column=4).value
main_link = 'https://www.allabolag.se/' + str(regi_number
) + '/befattningar'
yield scrapy.FormRequest(main_link, callback=self.parse,
dont_filter=True, meta={'Post_Code': Post_Code})
except Exception as e:
print(e)
def parse(self, response):
Post_Code = response.meta['Post_Code']
Registration_no = response.url
Registration_no = Registration_no.split('.se/')[1]
Registration_no = Registration_no.split('/')[0]
print(Registration_no)
ALl_data = response.xpath(
'//*[@class="list--personnel accordion-body"]/li')
for datas in ALl_data:
gender = datas.xpath(
".//div[1]/span[contains(@class,'male')]/@class"
).extract_first()
gender = gender.split('--')[1]
gender = gender.encode('utf-8')
if gender == 'male':
gender = 'm'
elif gender == 'female':
gender = 'f'
name = datas.xpath('.//div[2]/a/text()').extract_first()
name = name.strip()
name = name.split(' (f. ')
year = name[1].replace(')', '')
if year != None:
age = str(2019 - int(year))
fullname = name[0]
fullname = fullname.split(' ')
firstname = ''
middlename = ''
familyname = ''
if len(fullname) == 3:
firstname = fullname[0]
middlename = fullname[1]
familyname = fullname[2]
elif len(fullname) == 2:
firstname = fullname[0]
middlename = fullname[1]
elif len(fullname) > 3:
firstname = fullname[0]
familyname = fullname[-1]
middlename = ''
for k in range(1, len(fullname) - 1):
if middlename == '':
middlename = fullname[k]
else:
middlename = middlename + ' ' + fullname[k]
type = datas.xpath('.//div[2]/text()').extract()[2]
Board_member = type.replace('\n', '').strip()
if gender != '':
f = open('Facebook_Auidance.csv', 'a')
try:
f.write(firstname + ',' + familyname + ',' + Post_Code +
',' + '' + ',' + '' + ',' + 'Sweden' + ',' + '' +
',' + year + ',' + gender + ',' + age + ',' + '')
except Exception as e:
f.close()
try:
f.write('\n')
f.close()
except Exception as e:
""""""
if gender != '':
try:
reload(sys)
sys.setdefaultencoding('utf8')
self.cursor.execute(
'INSERT INTO tbl_allabola(Registration_no,First_name,Middle_name,Famaily_name,Gender,Year,Board_member)VALUES (%s,%s,%s,%s,%s,%s,%s)'
, (Registration_no, firstname, middlename,
familyname, gender, year, Board_member))
self.connection.commit()
except Exception as e:
print(e)
<|reserved_special_token_0|>
process.crawl(AllabolaSpider)
try:
process.start()
except:
pass
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class AllabolaSpider(scrapy.Spider):
name = 'allabola'
allowed_domains = ['https://www.allabolag.se']
start_urls = []
host = '104.197.180.57'
user = 'root'
password = 'root'
DB_name = 'db_allabolag'
f = open('Facebook_Auidance.csv', 'w')
f.write('fn,ln,zip,ct,st,country,dob,doby,gen,age,uid')
f.write('\n')
f.close()
try:
connection = MySQLdb.connect(host, user, password, DB_name, charset
='utf8')
cursor = connection.cursor()
except Exception as e:
print(str(e))
try:
strquery2 = """CREATE TABLE tbl_allabola(Id INT NOT NULL AUTO_INCREMENT,
Registration_no varchar(250) DEFAULT NULL,
First_name varchar(250) DEFAULT NULL,
Middle_name varchar(250) DEFAULT NULL,
Famaily_name varchar(250) DEFAULT NULL,
Gender longtext DEFAULT NULL,
Year longtext DEFAULT NULL,
Board_member longtext DEFAULT NULL,
PRIMARY KEY (`Id`))"""
cursor.execute(strquery2)
except Exception as e:
print(str(e))
def start_requests(self):
try:
wb = openpyxl.load_workbook('/home//Business_numbers.xlsx')
ws = wb.get_active_sheet()
row_count = ws.max_row
for h in range(2, row_count):
regi_number = ws.cell(row=h, column=2).value
Post_Code = ws.cell(row=h, column=4).value
main_link = 'https://www.allabolag.se/' + str(regi_number
) + '/befattningar'
yield scrapy.FormRequest(main_link, callback=self.parse,
dont_filter=True, meta={'Post_Code': Post_Code})
except Exception as e:
print(e)
def parse(self, response):
Post_Code = response.meta['Post_Code']
Registration_no = response.url
Registration_no = Registration_no.split('.se/')[1]
Registration_no = Registration_no.split('/')[0]
print(Registration_no)
ALl_data = response.xpath(
'//*[@class="list--personnel accordion-body"]/li')
for datas in ALl_data:
gender = datas.xpath(
".//div[1]/span[contains(@class,'male')]/@class"
).extract_first()
gender = gender.split('--')[1]
gender = gender.encode('utf-8')
if gender == 'male':
gender = 'm'
elif gender == 'female':
gender = 'f'
name = datas.xpath('.//div[2]/a/text()').extract_first()
name = name.strip()
name = name.split(' (f. ')
year = name[1].replace(')', '')
if year != None:
age = str(2019 - int(year))
fullname = name[0]
fullname = fullname.split(' ')
firstname = ''
middlename = ''
familyname = ''
if len(fullname) == 3:
firstname = fullname[0]
middlename = fullname[1]
familyname = fullname[2]
elif len(fullname) == 2:
firstname = fullname[0]
middlename = fullname[1]
elif len(fullname) > 3:
firstname = fullname[0]
familyname = fullname[-1]
middlename = ''
for k in range(1, len(fullname) - 1):
if middlename == '':
middlename = fullname[k]
else:
middlename = middlename + ' ' + fullname[k]
type = datas.xpath('.//div[2]/text()').extract()[2]
Board_member = type.replace('\n', '').strip()
if gender != '':
f = open('Facebook_Auidance.csv', 'a')
try:
f.write(firstname + ',' + familyname + ',' + Post_Code +
',' + '' + ',' + '' + ',' + 'Sweden' + ',' + '' +
',' + year + ',' + gender + ',' + age + ',' + '')
except Exception as e:
f.close()
try:
f.write('\n')
f.close()
except Exception as e:
""""""
if gender != '':
try:
reload(sys)
sys.setdefaultencoding('utf8')
self.cursor.execute(
'INSERT INTO tbl_allabola(Registration_no,First_name,Middle_name,Famaily_name,Gender,Year,Board_member)VALUES (%s,%s,%s,%s,%s,%s,%s)'
, (Registration_no, firstname, middlename,
familyname, gender, year, Board_member))
self.connection.commit()
except Exception as e:
print(e)
process = CrawlerProcess({'LOG_ENABLED': False})
process.crawl(AllabolaSpider)
try:
process.start()
except:
pass
<|reserved_special_token_1|>
# -*- coding: utf-8 -*-
import scrapy
import MySQLdb
import openpyxl
from scrapy.crawler import CrawlerProcess
import sys
class AllabolaSpider(scrapy.Spider):
name = 'allabola'
allowed_domains = ['https://www.allabolag.se']
start_urls = []
#'https://www.allabolag.se/7696250484/befattningar'
host = '104.197.180.57'
user = 'root'
password = 'root'
DB_name = "db_allabolag"
f = open('Facebook_Auidance.csv', 'w')
f.write('fn,ln,zip,ct,st,country,dob,doby,gen,age,uid')
f.write('\n')
f.close()
try:
connection = MySQLdb.connect(host, user, password,DB_name ,charset='utf8')
cursor = connection.cursor()
except Exception as e:
print(str(e))
try:
        strquery2 = """CREATE TABLE tbl_allabola(Id INT NOT NULL AUTO_INCREMENT,
Registration_no varchar(250) DEFAULT NULL,
First_name varchar(250) DEFAULT NULL,
Middle_name varchar(250) DEFAULT NULL,
Famaily_name varchar(250) DEFAULT NULL,
Gender longtext DEFAULT NULL,
Year longtext DEFAULT NULL,
Board_member longtext DEFAULT NULL,
PRIMARY KEY (`Id`))"""
cursor.execute(strquery2)
except Exception as e:
print(str(e))
def start_requests(self):
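        # Read registration numbers and postcodes from Business_numbers.xlsx and queue one befattningar request per company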
try:
wb = openpyxl.load_workbook(
'/home//Business_numbers.xlsx')
ws = wb.get_active_sheet()
row_count = ws.max_row
for h in range(2,row_count):
regi_number = ws.cell(row=h, column=2).value
Post_Code = ws.cell(row=h, column=4).value
main_link = 'https://www.allabolag.se/'+str(regi_number)+'/befattningar'
yield scrapy.FormRequest(main_link,callback=self.parse,dont_filter=True,meta={'Post_Code':Post_Code})
except Exception as e:
print(e)
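    # Scrape the befattningar (board positions) page of one company and collect its board members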
def parse(self, response):
Post_Code = response.meta['Post_Code']
Registration_no = response.url
Registration_no = Registration_no.split('.se/')[1]
Registration_no = Registration_no.split('/')[0]
print(Registration_no)
ALl_data = response.xpath('//*[@class="list--personnel accordion-body"]/li')
for datas in ALl_data:
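            # Each list item holds one person: a gender icon class, a "Name (f. YYYY)" link and a role line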
gender = datas.xpath(".//div[1]/span[contains(@class,'male')]/@class").extract_first()
gender = gender.split('--')[1]
gender = gender.encode('utf-8')
if gender == 'male':
gender = 'm'
elif gender == 'female':
gender = 'f'
name = datas.xpath('.//div[2]/a/text()').extract_first()
name = name.strip()
name = name.split(' (f. ')
year = name[1].replace(')','')
if year != None:
age = str(2019 - int(year))
fullname = name[0]
# try:
# fullname = str(fullname)
# except Exception as e:
# print e
fullname = fullname.split(' ')
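            # Assign first / middle / family name parts; any extra middle tokens are joined with spaces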
firstname = ''
middlename = ''
familyname = ''
if len(fullname) == 3:
firstname = fullname[0]
middlename = fullname[1]
familyname = fullname[2]
elif len(fullname) == 2:
firstname = fullname[0]
middlename = fullname[1]
elif len(fullname) > 3:
firstname = fullname[0]
familyname = fullname[-1]
middlename = ''
for k in range(1,len(fullname)-1):
if middlename == '':
middlename = fullname[k]
else:
middlename = middlename + ' ' + fullname[k]
type = datas.xpath('.//div[2]/text()').extract()[2]
Board_member = type.replace('\n','').strip()
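            # Append a row to Facebook_Auidance.csv and insert the person into the tbl_allabola MySQL table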
if gender != '':
f = open('Facebook_Auidance.csv', 'a')
try:
f.write(firstname+','+familyname+','+Post_Code+','+''+','+''+','+'Sweden'+','+''+','+year+','+gender+','+age+','+'')
except Exception as e:
f.close()
try:
f.write('\n')
f.close()
except Exception as e:
''
if gender != '':
try:
reload(sys)
sys.setdefaultencoding('utf8')
self.cursor.execute(
"""INSERT INTO tbl_allabola(Registration_no,First_name,Middle_name,Famaily_name,Gender,Year,Board_member)VALUES (%s,%s,%s,%s,%s,%s,%s)""",
(Registration_no, firstname, middlename,familyname,gender,year,Board_member))
self.connection.commit()
except Exception as e:
print(e)
process = CrawlerProcess({'LOG_ENABLED': False})
process.crawl(AllabolaSpider)
try:
process.start()
except:
pass
|
flexible
|
{
"blob_id": "d60a2100127db859162890204655d313cdc2a4a5",
"index": 4614,
"step-1": "<mask token>\n\n\nclass AllabolaSpider(scrapy.Spider):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n f.write('fn,ln,zip,ct,st,country,dob,doby,gen,age,uid')\n f.write('\\n')\n f.close()\n try:\n connection = MySQLdb.connect(host, user, password, DB_name, charset\n ='utf8')\n cursor = connection.cursor()\n except Exception as e:\n print(str(e))\n try:\n strquery2 = \"\"\"CREATE TABLE tbl_allabola(Id INT NOT NULL AUTO_INCREMENT,\n Registration_no varchar(250) DEFAULT NULL,\n First_name varchar(250) DEFAULT NULL,\n Middle_name varchar(250) DEFAULT NULL,\n Famaily_name varchar(250) DEFAULT NULL,\n Gender longtext DEFAULT NULL,\n Year longtext DEFAULT NULL,\n Board_member longtext DEFAULT NULL,\n PRIMARY KEY (`Id`))\"\"\"\n cursor.execute(strquery2)\n except Exception as e:\n print(str(e))\n <mask token>\n\n def parse(self, response):\n Post_Code = response.meta['Post_Code']\n Registration_no = response.url\n Registration_no = Registration_no.split('.se/')[1]\n Registration_no = Registration_no.split('/')[0]\n print(Registration_no)\n ALl_data = response.xpath(\n '//*[@class=\"list--personnel accordion-body\"]/li')\n for datas in ALl_data:\n gender = datas.xpath(\n \".//div[1]/span[contains(@class,'male')]/@class\"\n ).extract_first()\n gender = gender.split('--')[1]\n gender = gender.encode('utf-8')\n if gender == 'male':\n gender = 'm'\n elif gender == 'female':\n gender = 'f'\n name = datas.xpath('.//div[2]/a/text()').extract_first()\n name = name.strip()\n name = name.split(' (f. ')\n year = name[1].replace(')', '')\n if year != None:\n age = str(2019 - int(year))\n fullname = name[0]\n fullname = fullname.split(' ')\n firstname = ''\n middlename = ''\n familyname = ''\n if len(fullname) == 3:\n firstname = fullname[0]\n middlename = fullname[1]\n familyname = fullname[2]\n elif len(fullname) == 2:\n firstname = fullname[0]\n middlename = fullname[1]\n elif len(fullname) > 3:\n firstname = fullname[0]\n familyname = fullname[-1]\n middlename = ''\n for k in range(1, len(fullname) - 1):\n if middlename == '':\n middlename = fullname[k]\n else:\n middlename = middlename + ' ' + fullname[k]\n type = datas.xpath('.//div[2]/text()').extract()[2]\n Board_member = type.replace('\\n', '').strip()\n if gender != '':\n f = open('Facebook_Auidance.csv', 'a')\n try:\n f.write(firstname + ',' + familyname + ',' + Post_Code +\n ',' + '' + ',' + '' + ',' + 'Sweden' + ',' + '' +\n ',' + year + ',' + gender + ',' + age + ',' + '')\n except Exception as e:\n f.close()\n try:\n f.write('\\n')\n f.close()\n except Exception as e:\n \"\"\"\"\"\"\n if gender != '':\n try:\n reload(sys)\n sys.setdefaultencoding('utf8')\n self.cursor.execute(\n 'INSERT INTO tbl_allabola(Registration_no,First_name,Middle_name,Famaily_name,Gender,Year,Board_member)VALUES (%s,%s,%s,%s,%s,%s,%s)'\n , (Registration_no, firstname, middlename,\n familyname, gender, year, Board_member))\n self.connection.commit()\n except Exception as e:\n print(e)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass AllabolaSpider(scrapy.Spider):\n name = 'allabola'\n allowed_domains = ['https://www.allabolag.se']\n start_urls = []\n host = '104.197.180.57'\n user = 'root'\n password = 'root'\n DB_name = 'db_allabolag'\n f = open('Facebook_Auidance.csv', 'w')\n f.write('fn,ln,zip,ct,st,country,dob,doby,gen,age,uid')\n f.write('\\n')\n f.close()\n try:\n connection = MySQLdb.connect(host, user, password, DB_name, charset\n ='utf8')\n cursor = connection.cursor()\n except Exception as e:\n print(str(e))\n try:\n strquery2 = \"\"\"CREATE TABLE tbl_allabola(Id INT NOT NULL AUTO_INCREMENT,\n Registration_no varchar(250) DEFAULT NULL,\n First_name varchar(250) DEFAULT NULL,\n Middle_name varchar(250) DEFAULT NULL,\n Famaily_name varchar(250) DEFAULT NULL,\n Gender longtext DEFAULT NULL,\n Year longtext DEFAULT NULL,\n Board_member longtext DEFAULT NULL,\n PRIMARY KEY (`Id`))\"\"\"\n cursor.execute(strquery2)\n except Exception as e:\n print(str(e))\n\n def start_requests(self):\n try:\n wb = openpyxl.load_workbook('/home//Business_numbers.xlsx')\n ws = wb.get_active_sheet()\n row_count = ws.max_row\n for h in range(2, row_count):\n regi_number = ws.cell(row=h, column=2).value\n Post_Code = ws.cell(row=h, column=4).value\n main_link = 'https://www.allabolag.se/' + str(regi_number\n ) + '/befattningar'\n yield scrapy.FormRequest(main_link, callback=self.parse,\n dont_filter=True, meta={'Post_Code': Post_Code})\n except Exception as e:\n print(e)\n\n def parse(self, response):\n Post_Code = response.meta['Post_Code']\n Registration_no = response.url\n Registration_no = Registration_no.split('.se/')[1]\n Registration_no = Registration_no.split('/')[0]\n print(Registration_no)\n ALl_data = response.xpath(\n '//*[@class=\"list--personnel accordion-body\"]/li')\n for datas in ALl_data:\n gender = datas.xpath(\n \".//div[1]/span[contains(@class,'male')]/@class\"\n ).extract_first()\n gender = gender.split('--')[1]\n gender = gender.encode('utf-8')\n if gender == 'male':\n gender = 'm'\n elif gender == 'female':\n gender = 'f'\n name = datas.xpath('.//div[2]/a/text()').extract_first()\n name = name.strip()\n name = name.split(' (f. 
')\n year = name[1].replace(')', '')\n if year != None:\n age = str(2019 - int(year))\n fullname = name[0]\n fullname = fullname.split(' ')\n firstname = ''\n middlename = ''\n familyname = ''\n if len(fullname) == 3:\n firstname = fullname[0]\n middlename = fullname[1]\n familyname = fullname[2]\n elif len(fullname) == 2:\n firstname = fullname[0]\n middlename = fullname[1]\n elif len(fullname) > 3:\n firstname = fullname[0]\n familyname = fullname[-1]\n middlename = ''\n for k in range(1, len(fullname) - 1):\n if middlename == '':\n middlename = fullname[k]\n else:\n middlename = middlename + ' ' + fullname[k]\n type = datas.xpath('.//div[2]/text()').extract()[2]\n Board_member = type.replace('\\n', '').strip()\n if gender != '':\n f = open('Facebook_Auidance.csv', 'a')\n try:\n f.write(firstname + ',' + familyname + ',' + Post_Code +\n ',' + '' + ',' + '' + ',' + 'Sweden' + ',' + '' +\n ',' + year + ',' + gender + ',' + age + ',' + '')\n except Exception as e:\n f.close()\n try:\n f.write('\\n')\n f.close()\n except Exception as e:\n \"\"\"\"\"\"\n if gender != '':\n try:\n reload(sys)\n sys.setdefaultencoding('utf8')\n self.cursor.execute(\n 'INSERT INTO tbl_allabola(Registration_no,First_name,Middle_name,Famaily_name,Gender,Year,Board_member)VALUES (%s,%s,%s,%s,%s,%s,%s)'\n , (Registration_no, firstname, middlename,\n familyname, gender, year, Board_member))\n self.connection.commit()\n except Exception as e:\n print(e)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass AllabolaSpider(scrapy.Spider):\n name = 'allabola'\n allowed_domains = ['https://www.allabolag.se']\n start_urls = []\n host = '104.197.180.57'\n user = 'root'\n password = 'root'\n DB_name = 'db_allabolag'\n f = open('Facebook_Auidance.csv', 'w')\n f.write('fn,ln,zip,ct,st,country,dob,doby,gen,age,uid')\n f.write('\\n')\n f.close()\n try:\n connection = MySQLdb.connect(host, user, password, DB_name, charset\n ='utf8')\n cursor = connection.cursor()\n except Exception as e:\n print(str(e))\n try:\n strquery2 = \"\"\"CREATE TABLE tbl_allabola(Id INT NOT NULL AUTO_INCREMENT,\n Registration_no varchar(250) DEFAULT NULL,\n First_name varchar(250) DEFAULT NULL,\n Middle_name varchar(250) DEFAULT NULL,\n Famaily_name varchar(250) DEFAULT NULL,\n Gender longtext DEFAULT NULL,\n Year longtext DEFAULT NULL,\n Board_member longtext DEFAULT NULL,\n PRIMARY KEY (`Id`))\"\"\"\n cursor.execute(strquery2)\n except Exception as e:\n print(str(e))\n\n def start_requests(self):\n try:\n wb = openpyxl.load_workbook('/home//Business_numbers.xlsx')\n ws = wb.get_active_sheet()\n row_count = ws.max_row\n for h in range(2, row_count):\n regi_number = ws.cell(row=h, column=2).value\n Post_Code = ws.cell(row=h, column=4).value\n main_link = 'https://www.allabolag.se/' + str(regi_number\n ) + '/befattningar'\n yield scrapy.FormRequest(main_link, callback=self.parse,\n dont_filter=True, meta={'Post_Code': Post_Code})\n except Exception as e:\n print(e)\n\n def parse(self, response):\n Post_Code = response.meta['Post_Code']\n Registration_no = response.url\n Registration_no = Registration_no.split('.se/')[1]\n Registration_no = Registration_no.split('/')[0]\n print(Registration_no)\n ALl_data = response.xpath(\n '//*[@class=\"list--personnel accordion-body\"]/li')\n for datas in ALl_data:\n gender = datas.xpath(\n \".//div[1]/span[contains(@class,'male')]/@class\"\n ).extract_first()\n gender = gender.split('--')[1]\n gender = gender.encode('utf-8')\n if gender == 'male':\n gender = 'm'\n elif gender == 'female':\n gender = 'f'\n name = datas.xpath('.//div[2]/a/text()').extract_first()\n name = name.strip()\n name = name.split(' (f. 
')\n year = name[1].replace(')', '')\n if year != None:\n age = str(2019 - int(year))\n fullname = name[0]\n fullname = fullname.split(' ')\n firstname = ''\n middlename = ''\n familyname = ''\n if len(fullname) == 3:\n firstname = fullname[0]\n middlename = fullname[1]\n familyname = fullname[2]\n elif len(fullname) == 2:\n firstname = fullname[0]\n middlename = fullname[1]\n elif len(fullname) > 3:\n firstname = fullname[0]\n familyname = fullname[-1]\n middlename = ''\n for k in range(1, len(fullname) - 1):\n if middlename == '':\n middlename = fullname[k]\n else:\n middlename = middlename + ' ' + fullname[k]\n type = datas.xpath('.//div[2]/text()').extract()[2]\n Board_member = type.replace('\\n', '').strip()\n if gender != '':\n f = open('Facebook_Auidance.csv', 'a')\n try:\n f.write(firstname + ',' + familyname + ',' + Post_Code +\n ',' + '' + ',' + '' + ',' + 'Sweden' + ',' + '' +\n ',' + year + ',' + gender + ',' + age + ',' + '')\n except Exception as e:\n f.close()\n try:\n f.write('\\n')\n f.close()\n except Exception as e:\n \"\"\"\"\"\"\n if gender != '':\n try:\n reload(sys)\n sys.setdefaultencoding('utf8')\n self.cursor.execute(\n 'INSERT INTO tbl_allabola(Registration_no,First_name,Middle_name,Famaily_name,Gender,Year,Board_member)VALUES (%s,%s,%s,%s,%s,%s,%s)'\n , (Registration_no, firstname, middlename,\n familyname, gender, year, Board_member))\n self.connection.commit()\n except Exception as e:\n print(e)\n\n\n<mask token>\nprocess.crawl(AllabolaSpider)\ntry:\n process.start()\nexcept:\n pass\n",
"step-4": "<mask token>\n\n\nclass AllabolaSpider(scrapy.Spider):\n name = 'allabola'\n allowed_domains = ['https://www.allabolag.se']\n start_urls = []\n host = '104.197.180.57'\n user = 'root'\n password = 'root'\n DB_name = 'db_allabolag'\n f = open('Facebook_Auidance.csv', 'w')\n f.write('fn,ln,zip,ct,st,country,dob,doby,gen,age,uid')\n f.write('\\n')\n f.close()\n try:\n connection = MySQLdb.connect(host, user, password, DB_name, charset\n ='utf8')\n cursor = connection.cursor()\n except Exception as e:\n print(str(e))\n try:\n strquery2 = \"\"\"CREATE TABLE tbl_allabola(Id INT NOT NULL AUTO_INCREMENT,\n Registration_no varchar(250) DEFAULT NULL,\n First_name varchar(250) DEFAULT NULL,\n Middle_name varchar(250) DEFAULT NULL,\n Famaily_name varchar(250) DEFAULT NULL,\n Gender longtext DEFAULT NULL,\n Year longtext DEFAULT NULL,\n Board_member longtext DEFAULT NULL,\n PRIMARY KEY (`Id`))\"\"\"\n cursor.execute(strquery2)\n except Exception as e:\n print(str(e))\n\n def start_requests(self):\n try:\n wb = openpyxl.load_workbook('/home//Business_numbers.xlsx')\n ws = wb.get_active_sheet()\n row_count = ws.max_row\n for h in range(2, row_count):\n regi_number = ws.cell(row=h, column=2).value\n Post_Code = ws.cell(row=h, column=4).value\n main_link = 'https://www.allabolag.se/' + str(regi_number\n ) + '/befattningar'\n yield scrapy.FormRequest(main_link, callback=self.parse,\n dont_filter=True, meta={'Post_Code': Post_Code})\n except Exception as e:\n print(e)\n\n def parse(self, response):\n Post_Code = response.meta['Post_Code']\n Registration_no = response.url\n Registration_no = Registration_no.split('.se/')[1]\n Registration_no = Registration_no.split('/')[0]\n print(Registration_no)\n ALl_data = response.xpath(\n '//*[@class=\"list--personnel accordion-body\"]/li')\n for datas in ALl_data:\n gender = datas.xpath(\n \".//div[1]/span[contains(@class,'male')]/@class\"\n ).extract_first()\n gender = gender.split('--')[1]\n gender = gender.encode('utf-8')\n if gender == 'male':\n gender = 'm'\n elif gender == 'female':\n gender = 'f'\n name = datas.xpath('.//div[2]/a/text()').extract_first()\n name = name.strip()\n name = name.split(' (f. 
')\n year = name[1].replace(')', '')\n if year != None:\n age = str(2019 - int(year))\n fullname = name[0]\n fullname = fullname.split(' ')\n firstname = ''\n middlename = ''\n familyname = ''\n if len(fullname) == 3:\n firstname = fullname[0]\n middlename = fullname[1]\n familyname = fullname[2]\n elif len(fullname) == 2:\n firstname = fullname[0]\n middlename = fullname[1]\n elif len(fullname) > 3:\n firstname = fullname[0]\n familyname = fullname[-1]\n middlename = ''\n for k in range(1, len(fullname) - 1):\n if middlename == '':\n middlename = fullname[k]\n else:\n middlename = middlename + ' ' + fullname[k]\n type = datas.xpath('.//div[2]/text()').extract()[2]\n Board_member = type.replace('\\n', '').strip()\n if gender != '':\n f = open('Facebook_Auidance.csv', 'a')\n try:\n f.write(firstname + ',' + familyname + ',' + Post_Code +\n ',' + '' + ',' + '' + ',' + 'Sweden' + ',' + '' +\n ',' + year + ',' + gender + ',' + age + ',' + '')\n except Exception as e:\n f.close()\n try:\n f.write('\\n')\n f.close()\n except Exception as e:\n \"\"\"\"\"\"\n if gender != '':\n try:\n reload(sys)\n sys.setdefaultencoding('utf8')\n self.cursor.execute(\n 'INSERT INTO tbl_allabola(Registration_no,First_name,Middle_name,Famaily_name,Gender,Year,Board_member)VALUES (%s,%s,%s,%s,%s,%s,%s)'\n , (Registration_no, firstname, middlename,\n familyname, gender, year, Board_member))\n self.connection.commit()\n except Exception as e:\n print(e)\n\n\nprocess = CrawlerProcess({'LOG_ENABLED': False})\nprocess.crawl(AllabolaSpider)\ntry:\n process.start()\nexcept:\n pass\n",
"step-5": "# -*- coding: utf-8 -*-\nimport scrapy\nimport MySQLdb\nimport openpyxl\nfrom scrapy.crawler import CrawlerProcess\nimport sys\n\n\nclass AllabolaSpider(scrapy.Spider):\n name = 'allabola'\n allowed_domains = ['https://www.allabolag.se']\n start_urls = []\n #'https://www.allabolag.se/7696250484/befattningar'\n host = '104.197.180.57'\n user = 'root'\n password = 'root'\n DB_name = \"db_allabolag\"\n f = open('Facebook_Auidance.csv', 'w')\n f.write('fn,ln,zip,ct,st,country,dob,doby,gen,age,uid')\n f.write('\\n')\n f.close()\n try:\n connection = MySQLdb.connect(host, user, password,DB_name ,charset='utf8')\n cursor = connection.cursor()\n except Exception as e:\n print(str(e))\n\n try:\n strquery2 = \"CREATE TABLE tbl_allabola\"\"\"\"(Id INT NOT NULL AUTO_INCREMENT,\n Registration_no varchar(250) DEFAULT NULL,\n First_name varchar(250) DEFAULT NULL,\n Middle_name varchar(250) DEFAULT NULL,\n Famaily_name varchar(250) DEFAULT NULL,\n Gender longtext DEFAULT NULL,\n Year longtext DEFAULT NULL,\n Board_member longtext DEFAULT NULL,\n PRIMARY KEY (`Id`))\"\"\"\n\n cursor.execute(strquery2)\n except Exception as e:\n print(str(e))\n\n def start_requests(self):\n try:\n\n wb = openpyxl.load_workbook(\n '/home//Business_numbers.xlsx')\n ws = wb.get_active_sheet()\n\n row_count = ws.max_row\n\n\n\n for h in range(2,row_count):\n regi_number = ws.cell(row=h, column=2).value\n Post_Code = ws.cell(row=h, column=4).value\n main_link = 'https://www.allabolag.se/'+str(regi_number)+'/befattningar'\n yield scrapy.FormRequest(main_link,callback=self.parse,dont_filter=True,meta={'Post_Code':Post_Code})\n except Exception as e:\n print(e)\n\n def parse(self, response):\n\n Post_Code = response.meta['Post_Code']\n Registration_no = response.url\n Registration_no = Registration_no.split('.se/')[1]\n Registration_no = Registration_no.split('/')[0]\n print(Registration_no)\n ALl_data = response.xpath('//*[@class=\"list--personnel accordion-body\"]/li')\n\n for datas in ALl_data:\n\n gender = datas.xpath(\".//div[1]/span[contains(@class,'male')]/@class\").extract_first()\n gender = gender.split('--')[1]\n gender = gender.encode('utf-8')\n if gender == 'male':\n gender = 'm'\n elif gender == 'female':\n gender = 'f'\n\n name = datas.xpath('.//div[2]/a/text()').extract_first()\n name = name.strip()\n name = name.split(' (f. 
')\n year = name[1].replace(')','')\n if year != None:\n age = str(2019 - int(year))\n fullname = name[0]\n # try:\n # fullname = str(fullname)\n # except Exception as e:\n # print e\n fullname = fullname.split(' ')\n firstname = ''\n middlename = ''\n familyname = ''\n if len(fullname) == 3:\n firstname = fullname[0]\n middlename = fullname[1]\n familyname = fullname[2]\n elif len(fullname) == 2:\n firstname = fullname[0]\n middlename = fullname[1]\n elif len(fullname) > 3:\n firstname = fullname[0]\n familyname = fullname[-1]\n middlename = ''\n for k in range(1,len(fullname)-1):\n if middlename == '':\n middlename = fullname[k]\n else:\n middlename = middlename + ' ' + fullname[k]\n\n\n type = datas.xpath('.//div[2]/text()').extract()[2]\n Board_member = type.replace('\\n','').strip()\n if gender != '':\n\n f = open('Facebook_Auidance.csv', 'a')\n try:\n f.write(firstname+','+familyname+','+Post_Code+','+''+','+''+','+'Sweden'+','+''+','+year+','+gender+','+age+','+'')\n except Exception as e:\n f.close()\n try:\n f.write('\\n')\n f.close()\n except Exception as e:\n ''\n\n if gender != '':\n try:\n reload(sys)\n sys.setdefaultencoding('utf8')\n self.cursor.execute(\n \"\"\"INSERT INTO tbl_allabola(Registration_no,First_name,Middle_name,Famaily_name,Gender,Year,Board_member)VALUES (%s,%s,%s,%s,%s,%s,%s)\"\"\",\n (Registration_no, firstname, middlename,familyname,gender,year,Board_member))\n self.connection.commit()\n except Exception as e:\n print(e)\n\n\nprocess = CrawlerProcess({'LOG_ENABLED': False})\nprocess.crawl(AllabolaSpider)\ntry:\n process.start()\nexcept:\n pass\n\n\n",
"step-ids": [
2,
4,
5,
6,
8
]
}
|
[
2,
4,
5,
6,
8
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class ScenesMiddleware(BaseMiddleware):
<|reserved_special_token_0|>
async def on_post_process_message(self, message: types.Message, results:
tuple, data: dict):
if data:
return
user_scene_name = await self._get_scene_name(message)
for scene_model in self._loader.handlers_storage.get_message_scene(
user_scene_name):
if content_type_checker(message, scene_model.config.get(
'content_types')):
await scene_model.handler(message)
else:
otherwise_handler = scene_model.config.get('otherwise_handler')
if otherwise_handler is not None:
await otherwise_handler(message)
async def on_post_process_callback_query(self, callback_query: types.
CallbackQuery, results: tuple, data: dict):
if data:
return
user_scene_name = await self._get_scene_name(callback_query)
for scene_model in self._loader.handlers_storage.get_callback_query_scene(
user_scene_name):
await scene_model.handler(callback_query)
async def _get_scene_name(self, ctx) ->Any:
user_id = ctx.from_user.id
user_scene = await self._storage.get(user_id)
if user_scene is None:
await self._storage.put(user_id, self._default_scene_name)
user_scene = self._default_scene_name
return user_scene
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class ScenesMiddleware(BaseMiddleware):
def __init__(self, *, loader: Optional[Loader]=None, default_scene_name:
Optional[str]=None):
self._default_scene_name = default_scene_name or 'start'
self._loader = loader or Loader.get_current()
if self._loader is None:
self._loader = Loader()
if not self._loader.is_scenes_loaded:
self._loader.load_scenes()
self._storage = self._loader.data_storage
super().__init__()
async def on_post_process_message(self, message: types.Message, results:
tuple, data: dict):
if data:
return
user_scene_name = await self._get_scene_name(message)
for scene_model in self._loader.handlers_storage.get_message_scene(
user_scene_name):
if content_type_checker(message, scene_model.config.get(
'content_types')):
await scene_model.handler(message)
else:
otherwise_handler = scene_model.config.get('otherwise_handler')
if otherwise_handler is not None:
await otherwise_handler(message)
async def on_post_process_callback_query(self, callback_query: types.
CallbackQuery, results: tuple, data: dict):
if data:
return
user_scene_name = await self._get_scene_name(callback_query)
for scene_model in self._loader.handlers_storage.get_callback_query_scene(
user_scene_name):
await scene_model.handler(callback_query)
async def _get_scene_name(self, ctx) ->Any:
user_id = ctx.from_user.id
user_scene = await self._storage.get(user_id)
if user_scene is None:
await self._storage.put(user_id, self._default_scene_name)
user_scene = self._default_scene_name
return user_scene
<|reserved_special_token_1|>
from typing import Any, Optional
from aiogram import types
from aiogram.dispatcher.middlewares import BaseMiddleware
from scene_manager.loader.loader import Loader
from scene_manager.utils import content_type_checker
class ScenesMiddleware(BaseMiddleware):
def __init__(self, *, loader: Optional[Loader]=None, default_scene_name:
Optional[str]=None):
self._default_scene_name = default_scene_name or 'start'
self._loader = loader or Loader.get_current()
if self._loader is None:
self._loader = Loader()
if not self._loader.is_scenes_loaded:
self._loader.load_scenes()
self._storage = self._loader.data_storage
super().__init__()
async def on_post_process_message(self, message: types.Message, results:
tuple, data: dict):
if data:
return
user_scene_name = await self._get_scene_name(message)
for scene_model in self._loader.handlers_storage.get_message_scene(
user_scene_name):
if content_type_checker(message, scene_model.config.get(
'content_types')):
await scene_model.handler(message)
else:
otherwise_handler = scene_model.config.get('otherwise_handler')
if otherwise_handler is not None:
await otherwise_handler(message)
async def on_post_process_callback_query(self, callback_query: types.
CallbackQuery, results: tuple, data: dict):
if data:
return
user_scene_name = await self._get_scene_name(callback_query)
for scene_model in self._loader.handlers_storage.get_callback_query_scene(
user_scene_name):
await scene_model.handler(callback_query)
async def _get_scene_name(self, ctx) ->Any:
user_id = ctx.from_user.id
user_scene = await self._storage.get(user_id)
if user_scene is None:
await self._storage.put(user_id, self._default_scene_name)
user_scene = self._default_scene_name
return user_scene
<|reserved_special_token_1|>
from typing import Any, Optional
from aiogram import types
from aiogram.dispatcher.middlewares import BaseMiddleware
from scene_manager.loader.loader import Loader
from scene_manager.utils import content_type_checker
class ScenesMiddleware(BaseMiddleware):
def __init__(self, *, loader: Optional[Loader] = None, default_scene_name: Optional[str] = None):
self._default_scene_name = default_scene_name or "start"
self._loader = loader or Loader.get_current()
if self._loader is None:
self._loader = Loader()
if not self._loader.is_scenes_loaded:
self._loader.load_scenes()
self._storage = self._loader.data_storage
super().__init__()
async def on_post_process_message(self, message: types.Message, results: tuple, data: dict):
if data:
return
user_scene_name = await self._get_scene_name(message)
for scene_model in self._loader.handlers_storage.get_message_scene(user_scene_name):
if content_type_checker(message, scene_model.config.get("content_types")):
await scene_model.handler(message)
else:
otherwise_handler = scene_model.config.get("otherwise_handler")
if otherwise_handler is not None:
await otherwise_handler(message)
async def on_post_process_callback_query(
self, callback_query: types.CallbackQuery, results: tuple, data: dict
):
if data:
return
user_scene_name = await self._get_scene_name(callback_query)
for scene_model in self._loader.handlers_storage.get_callback_query_scene(user_scene_name):
await scene_model.handler(callback_query)
async def _get_scene_name(self, ctx) -> Any:
user_id = ctx.from_user.id
user_scene = await self._storage.get(user_id)
if user_scene is None:
await self._storage.put(user_id, self._default_scene_name)
user_scene = self._default_scene_name
return user_scene
|
flexible
|
{
"blob_id": "11db76cba3dd76cad0d660a0e189d3e4c465071b",
"index": 8836,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass ScenesMiddleware(BaseMiddleware):\n <mask token>\n\n async def on_post_process_message(self, message: types.Message, results:\n tuple, data: dict):\n if data:\n return\n user_scene_name = await self._get_scene_name(message)\n for scene_model in self._loader.handlers_storage.get_message_scene(\n user_scene_name):\n if content_type_checker(message, scene_model.config.get(\n 'content_types')):\n await scene_model.handler(message)\n else:\n otherwise_handler = scene_model.config.get('otherwise_handler')\n if otherwise_handler is not None:\n await otherwise_handler(message)\n\n async def on_post_process_callback_query(self, callback_query: types.\n CallbackQuery, results: tuple, data: dict):\n if data:\n return\n user_scene_name = await self._get_scene_name(callback_query)\n for scene_model in self._loader.handlers_storage.get_callback_query_scene(\n user_scene_name):\n await scene_model.handler(callback_query)\n\n async def _get_scene_name(self, ctx) ->Any:\n user_id = ctx.from_user.id\n user_scene = await self._storage.get(user_id)\n if user_scene is None:\n await self._storage.put(user_id, self._default_scene_name)\n user_scene = self._default_scene_name\n return user_scene\n",
"step-3": "<mask token>\n\n\nclass ScenesMiddleware(BaseMiddleware):\n\n def __init__(self, *, loader: Optional[Loader]=None, default_scene_name:\n Optional[str]=None):\n self._default_scene_name = default_scene_name or 'start'\n self._loader = loader or Loader.get_current()\n if self._loader is None:\n self._loader = Loader()\n if not self._loader.is_scenes_loaded:\n self._loader.load_scenes()\n self._storage = self._loader.data_storage\n super().__init__()\n\n async def on_post_process_message(self, message: types.Message, results:\n tuple, data: dict):\n if data:\n return\n user_scene_name = await self._get_scene_name(message)\n for scene_model in self._loader.handlers_storage.get_message_scene(\n user_scene_name):\n if content_type_checker(message, scene_model.config.get(\n 'content_types')):\n await scene_model.handler(message)\n else:\n otherwise_handler = scene_model.config.get('otherwise_handler')\n if otherwise_handler is not None:\n await otherwise_handler(message)\n\n async def on_post_process_callback_query(self, callback_query: types.\n CallbackQuery, results: tuple, data: dict):\n if data:\n return\n user_scene_name = await self._get_scene_name(callback_query)\n for scene_model in self._loader.handlers_storage.get_callback_query_scene(\n user_scene_name):\n await scene_model.handler(callback_query)\n\n async def _get_scene_name(self, ctx) ->Any:\n user_id = ctx.from_user.id\n user_scene = await self._storage.get(user_id)\n if user_scene is None:\n await self._storage.put(user_id, self._default_scene_name)\n user_scene = self._default_scene_name\n return user_scene\n",
"step-4": "from typing import Any, Optional\nfrom aiogram import types\nfrom aiogram.dispatcher.middlewares import BaseMiddleware\nfrom scene_manager.loader.loader import Loader\nfrom scene_manager.utils import content_type_checker\n\n\nclass ScenesMiddleware(BaseMiddleware):\n\n def __init__(self, *, loader: Optional[Loader]=None, default_scene_name:\n Optional[str]=None):\n self._default_scene_name = default_scene_name or 'start'\n self._loader = loader or Loader.get_current()\n if self._loader is None:\n self._loader = Loader()\n if not self._loader.is_scenes_loaded:\n self._loader.load_scenes()\n self._storage = self._loader.data_storage\n super().__init__()\n\n async def on_post_process_message(self, message: types.Message, results:\n tuple, data: dict):\n if data:\n return\n user_scene_name = await self._get_scene_name(message)\n for scene_model in self._loader.handlers_storage.get_message_scene(\n user_scene_name):\n if content_type_checker(message, scene_model.config.get(\n 'content_types')):\n await scene_model.handler(message)\n else:\n otherwise_handler = scene_model.config.get('otherwise_handler')\n if otherwise_handler is not None:\n await otherwise_handler(message)\n\n async def on_post_process_callback_query(self, callback_query: types.\n CallbackQuery, results: tuple, data: dict):\n if data:\n return\n user_scene_name = await self._get_scene_name(callback_query)\n for scene_model in self._loader.handlers_storage.get_callback_query_scene(\n user_scene_name):\n await scene_model.handler(callback_query)\n\n async def _get_scene_name(self, ctx) ->Any:\n user_id = ctx.from_user.id\n user_scene = await self._storage.get(user_id)\n if user_scene is None:\n await self._storage.put(user_id, self._default_scene_name)\n user_scene = self._default_scene_name\n return user_scene\n",
"step-5": "from typing import Any, Optional\n\nfrom aiogram import types\nfrom aiogram.dispatcher.middlewares import BaseMiddleware\n\nfrom scene_manager.loader.loader import Loader\nfrom scene_manager.utils import content_type_checker\n\n\nclass ScenesMiddleware(BaseMiddleware):\n def __init__(self, *, loader: Optional[Loader] = None, default_scene_name: Optional[str] = None):\n self._default_scene_name = default_scene_name or \"start\"\n self._loader = loader or Loader.get_current()\n if self._loader is None:\n self._loader = Loader()\n if not self._loader.is_scenes_loaded:\n self._loader.load_scenes()\n self._storage = self._loader.data_storage\n super().__init__()\n\n async def on_post_process_message(self, message: types.Message, results: tuple, data: dict):\n if data:\n return\n user_scene_name = await self._get_scene_name(message)\n for scene_model in self._loader.handlers_storage.get_message_scene(user_scene_name):\n if content_type_checker(message, scene_model.config.get(\"content_types\")):\n await scene_model.handler(message)\n else:\n otherwise_handler = scene_model.config.get(\"otherwise_handler\")\n if otherwise_handler is not None:\n await otherwise_handler(message)\n\n async def on_post_process_callback_query(\n self, callback_query: types.CallbackQuery, results: tuple, data: dict\n ):\n if data:\n return\n user_scene_name = await self._get_scene_name(callback_query)\n for scene_model in self._loader.handlers_storage.get_callback_query_scene(user_scene_name):\n await scene_model.handler(callback_query)\n\n async def _get_scene_name(self, ctx) -> Any:\n user_id = ctx.from_user.id\n user_scene = await self._storage.get(user_id)\n if user_scene is None:\n await self._storage.put(user_id, self._default_scene_name)\n user_scene = self._default_scene_name\n return user_scene\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
@app.route('/', methods=['GET', 'POST'])
def home():
if request.method == 'POST':
entry_content = request.form.get('content')
output = client.specific_resource_analysis(body={'document': {
'text': entry_content}}, params={'language': language,
'resource': 'relevants'})
database2.create_entryss(entry_content, datetime.datetime.today().
strftime('%b %d'))
for lemma in output.main_lemmas:
print(lemma.value)
video = getYT.searchVideoForKeyword(lemma.value)
for indivvideo in video:
database.create_entry(entry_content, datetime.datetime.
today().strftime('%b %d'), indivvideo)
videos.append(f'{indivvideo}')
return render_template('home.html')
@app.route('/feedback', methods=['GET', 'POST'])
def feedback():
if request.method == 'POST':
entry_contents = request.form.get('contents')
output = client.specific_resource_analysis(body={'document': {
'text': entry_contents}}, params={'language': language,
'resource': 'sentiment'})
database1.create_entrys(entry_contents, datetime.datetime.today().
strftime('%b %d'), output.sentiment.overall)
print(output.sentiment.overall)
return render_template('feedback.html')
@app.route('/recommendation', methods=['GET', 'POST'])
def recommendation():
return render_template('index.html', videos=videos, entries=database.
retrieve_entries(), entrie=database2.retrieve_entriee())
@app.route('/negative', methods=['GET', 'POST'])
def negative():
return render_template('negative.html', entries=database1.retrieve_entrie()
)
@app.route('/positive', methods=['GET', 'POST'])
def positive():
return render_template('positive.html', entries=database1.retrieve_entrie()
)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
database.create_tables()
database1.create_table()
database2.create_tablee()
<|reserved_special_token_0|>
@app.route('/', methods=['GET', 'POST'])
def home():
if request.method == 'POST':
entry_content = request.form.get('content')
output = client.specific_resource_analysis(body={'document': {
'text': entry_content}}, params={'language': language,
'resource': 'relevants'})
database2.create_entryss(entry_content, datetime.datetime.today().
strftime('%b %d'))
for lemma in output.main_lemmas:
print(lemma.value)
video = getYT.searchVideoForKeyword(lemma.value)
for indivvideo in video:
database.create_entry(entry_content, datetime.datetime.
today().strftime('%b %d'), indivvideo)
videos.append(f'{indivvideo}')
return render_template('home.html')
@app.route('/feedback', methods=['GET', 'POST'])
def feedback():
if request.method == 'POST':
entry_contents = request.form.get('contents')
output = client.specific_resource_analysis(body={'document': {
'text': entry_contents}}, params={'language': language,
'resource': 'sentiment'})
database1.create_entrys(entry_contents, datetime.datetime.today().
strftime('%b %d'), output.sentiment.overall)
print(output.sentiment.overall)
return render_template('feedback.html')
@app.route('/recommendation', methods=['GET', 'POST'])
def recommendation():
return render_template('index.html', videos=videos, entries=database.
retrieve_entries(), entrie=database2.retrieve_entriee())
@app.route('/negative', methods=['GET', 'POST'])
def negative():
return render_template('negative.html', entries=database1.retrieve_entrie()
)
@app.route('/positive', methods=['GET', 'POST'])
def positive():
return render_template('positive.html', entries=database1.retrieve_entrie()
)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
os.environ['EAI_USERNAME'] = '[email protected]'
os.environ['EAI_PASSWORD'] = 'Testqwerty1!'
<|reserved_special_token_0|>
client = ExpertAiClient()
app = Flask(__name__)
database.create_tables()
database1.create_table()
database2.create_tablee()
language = 'en'
videos = []
@app.route('/', methods=['GET', 'POST'])
def home():
if request.method == 'POST':
entry_content = request.form.get('content')
output = client.specific_resource_analysis(body={'document': {
'text': entry_content}}, params={'language': language,
'resource': 'relevants'})
database2.create_entryss(entry_content, datetime.datetime.today().
strftime('%b %d'))
for lemma in output.main_lemmas:
print(lemma.value)
video = getYT.searchVideoForKeyword(lemma.value)
for indivvideo in video:
database.create_entry(entry_content, datetime.datetime.
today().strftime('%b %d'), indivvideo)
videos.append(f'{indivvideo}')
return render_template('home.html')
@app.route('/feedback', methods=['GET', 'POST'])
def feedback():
if request.method == 'POST':
entry_contents = request.form.get('contents')
output = client.specific_resource_analysis(body={'document': {
'text': entry_contents}}, params={'language': language,
'resource': 'sentiment'})
database1.create_entrys(entry_contents, datetime.datetime.today().
strftime('%b %d'), output.sentiment.overall)
print(output.sentiment.overall)
return render_template('feedback.html')
@app.route('/recommendation', methods=['GET', 'POST'])
def recommendation():
return render_template('index.html', videos=videos, entries=database.
retrieve_entries(), entrie=database2.retrieve_entriee())
@app.route('/negative', methods=['GET', 'POST'])
def negative():
return render_template('negative.html', entries=database1.retrieve_entrie()
)
@app.route('/positive', methods=['GET', 'POST'])
def positive():
return render_template('positive.html', entries=database1.retrieve_entrie()
)
<|reserved_special_token_1|>
import datetime
from flask import Flask, render_template, request
import database
import database1
import database2
import getYoutubeVideoLinks as getYT
import os
os.environ['EAI_USERNAME'] = '[email protected]'
os.environ['EAI_PASSWORD'] = 'Testqwerty1!'
from expertai.nlapi.cloud.client import ExpertAiClient
client = ExpertAiClient()
app = Flask(__name__)
database.create_tables()
database1.create_table()
database2.create_tablee()
language = 'en'
videos = []
@app.route('/', methods=['GET', 'POST'])
def home():
if request.method == 'POST':
entry_content = request.form.get('content')
output = client.specific_resource_analysis(body={'document': {
'text': entry_content}}, params={'language': language,
'resource': 'relevants'})
database2.create_entryss(entry_content, datetime.datetime.today().
strftime('%b %d'))
for lemma in output.main_lemmas:
print(lemma.value)
video = getYT.searchVideoForKeyword(lemma.value)
for indivvideo in video:
database.create_entry(entry_content, datetime.datetime.
today().strftime('%b %d'), indivvideo)
videos.append(f'{indivvideo}')
return render_template('home.html')
@app.route('/feedback', methods=['GET', 'POST'])
def feedback():
if request.method == 'POST':
entry_contents = request.form.get('contents')
output = client.specific_resource_analysis(body={'document': {
'text': entry_contents}}, params={'language': language,
'resource': 'sentiment'})
database1.create_entrys(entry_contents, datetime.datetime.today().
strftime('%b %d'), output.sentiment.overall)
print(output.sentiment.overall)
return render_template('feedback.html')
@app.route('/recommendation', methods=['GET', 'POST'])
def recommendation():
return render_template('index.html', videos=videos, entries=database.
retrieve_entries(), entrie=database2.retrieve_entriee())
@app.route('/negative', methods=['GET', 'POST'])
def negative():
return render_template('negative.html', entries=database1.retrieve_entrie()
)
@app.route('/positive', methods=['GET', 'POST'])
def positive():
return render_template('positive.html', entries=database1.retrieve_entrie()
)
<|reserved_special_token_1|>
import datetime
from flask import Flask, render_template, request
import database
import database1
import database2
import getYoutubeVideoLinks as getYT
import os
os.environ["EAI_USERNAME"] = '[email protected]'
os.environ["EAI_PASSWORD"] = 'Testqwerty1!'
from expertai.nlapi.cloud.client import ExpertAiClient
client = ExpertAiClient()
# Output overall sentiment
app = Flask(__name__)
database.create_tables()
database1.create_table()
database2.create_tablee()
language= 'en'
videos = []
@app.route("/", methods=["GET", "POST"])
def home():
if request.method == "POST":
entry_content = request.form.get("content")
output = client.specific_resource_analysis(body={"document": {"text": entry_content}}, params={'language': language, 'resource': 'relevants'})
database2.create_entryss(entry_content, datetime.datetime.today().strftime("%b %d"))
for lemma in output.main_lemmas:
print(lemma.value)
video = getYT.searchVideoForKeyword(lemma.value)
for indivvideo in video:
database.create_entry(entry_content, datetime.datetime.today().strftime("%b %d"), indivvideo)
videos.append(f'{indivvideo}')
return render_template("home.html")
@app.route("/feedback", methods=["GET", "POST"])
def feedback():
if request.method == "POST":
entry_contents = request.form.get("contents")
output = client.specific_resource_analysis(body={"document": {"text": entry_contents}},params={'language': language, 'resource': 'sentiment'})
database1.create_entrys(entry_contents, datetime.datetime.today().strftime("%b %d"), output.sentiment.overall)
print(output.sentiment.overall)
return render_template("feedback.html")
@app.route("/recommendation", methods=["GET", "POST"])
def recommendation():
return render_template('index.html', videos=videos, entries=database.retrieve_entries(), entrie=database2.retrieve_entriee())
@app.route('/negative', methods=["GET", "POST"])
def negative():
return render_template("negative.html", entries=database1.retrieve_entrie())
@app.route('/positive', methods=["GET", "POST"])
def positive():
return render_template("positive.html", entries=database1.retrieve_entrie())
|
flexible
|
{
"blob_id": "d0f2d47a786b85367f96897e7cd8c2ef8c577e2b",
"index": 2961,
"step-1": "<mask token>\n\n\[email protected]('/', methods=['GET', 'POST'])\ndef home():\n if request.method == 'POST':\n entry_content = request.form.get('content')\n output = client.specific_resource_analysis(body={'document': {\n 'text': entry_content}}, params={'language': language,\n 'resource': 'relevants'})\n database2.create_entryss(entry_content, datetime.datetime.today().\n strftime('%b %d'))\n for lemma in output.main_lemmas:\n print(lemma.value)\n video = getYT.searchVideoForKeyword(lemma.value)\n for indivvideo in video:\n database.create_entry(entry_content, datetime.datetime.\n today().strftime('%b %d'), indivvideo)\n videos.append(f'{indivvideo}')\n return render_template('home.html')\n\n\[email protected]('/feedback', methods=['GET', 'POST'])\ndef feedback():\n if request.method == 'POST':\n entry_contents = request.form.get('contents')\n output = client.specific_resource_analysis(body={'document': {\n 'text': entry_contents}}, params={'language': language,\n 'resource': 'sentiment'})\n database1.create_entrys(entry_contents, datetime.datetime.today().\n strftime('%b %d'), output.sentiment.overall)\n print(output.sentiment.overall)\n return render_template('feedback.html')\n\n\[email protected]('/recommendation', methods=['GET', 'POST'])\ndef recommendation():\n return render_template('index.html', videos=videos, entries=database.\n retrieve_entries(), entrie=database2.retrieve_entriee())\n\n\[email protected]('/negative', methods=['GET', 'POST'])\ndef negative():\n return render_template('negative.html', entries=database1.retrieve_entrie()\n )\n\n\[email protected]('/positive', methods=['GET', 'POST'])\ndef positive():\n return render_template('positive.html', entries=database1.retrieve_entrie()\n )\n",
"step-2": "<mask token>\ndatabase.create_tables()\ndatabase1.create_table()\ndatabase2.create_tablee()\n<mask token>\n\n\[email protected]('/', methods=['GET', 'POST'])\ndef home():\n if request.method == 'POST':\n entry_content = request.form.get('content')\n output = client.specific_resource_analysis(body={'document': {\n 'text': entry_content}}, params={'language': language,\n 'resource': 'relevants'})\n database2.create_entryss(entry_content, datetime.datetime.today().\n strftime('%b %d'))\n for lemma in output.main_lemmas:\n print(lemma.value)\n video = getYT.searchVideoForKeyword(lemma.value)\n for indivvideo in video:\n database.create_entry(entry_content, datetime.datetime.\n today().strftime('%b %d'), indivvideo)\n videos.append(f'{indivvideo}')\n return render_template('home.html')\n\n\[email protected]('/feedback', methods=['GET', 'POST'])\ndef feedback():\n if request.method == 'POST':\n entry_contents = request.form.get('contents')\n output = client.specific_resource_analysis(body={'document': {\n 'text': entry_contents}}, params={'language': language,\n 'resource': 'sentiment'})\n database1.create_entrys(entry_contents, datetime.datetime.today().\n strftime('%b %d'), output.sentiment.overall)\n print(output.sentiment.overall)\n return render_template('feedback.html')\n\n\[email protected]('/recommendation', methods=['GET', 'POST'])\ndef recommendation():\n return render_template('index.html', videos=videos, entries=database.\n retrieve_entries(), entrie=database2.retrieve_entriee())\n\n\[email protected]('/negative', methods=['GET', 'POST'])\ndef negative():\n return render_template('negative.html', entries=database1.retrieve_entrie()\n )\n\n\[email protected]('/positive', methods=['GET', 'POST'])\ndef positive():\n return render_template('positive.html', entries=database1.retrieve_entrie()\n )\n",
"step-3": "<mask token>\nos.environ['EAI_USERNAME'] = '[email protected]'\nos.environ['EAI_PASSWORD'] = 'Testqwerty1!'\n<mask token>\nclient = ExpertAiClient()\napp = Flask(__name__)\ndatabase.create_tables()\ndatabase1.create_table()\ndatabase2.create_tablee()\nlanguage = 'en'\nvideos = []\n\n\[email protected]('/', methods=['GET', 'POST'])\ndef home():\n if request.method == 'POST':\n entry_content = request.form.get('content')\n output = client.specific_resource_analysis(body={'document': {\n 'text': entry_content}}, params={'language': language,\n 'resource': 'relevants'})\n database2.create_entryss(entry_content, datetime.datetime.today().\n strftime('%b %d'))\n for lemma in output.main_lemmas:\n print(lemma.value)\n video = getYT.searchVideoForKeyword(lemma.value)\n for indivvideo in video:\n database.create_entry(entry_content, datetime.datetime.\n today().strftime('%b %d'), indivvideo)\n videos.append(f'{indivvideo}')\n return render_template('home.html')\n\n\[email protected]('/feedback', methods=['GET', 'POST'])\ndef feedback():\n if request.method == 'POST':\n entry_contents = request.form.get('contents')\n output = client.specific_resource_analysis(body={'document': {\n 'text': entry_contents}}, params={'language': language,\n 'resource': 'sentiment'})\n database1.create_entrys(entry_contents, datetime.datetime.today().\n strftime('%b %d'), output.sentiment.overall)\n print(output.sentiment.overall)\n return render_template('feedback.html')\n\n\[email protected]('/recommendation', methods=['GET', 'POST'])\ndef recommendation():\n return render_template('index.html', videos=videos, entries=database.\n retrieve_entries(), entrie=database2.retrieve_entriee())\n\n\[email protected]('/negative', methods=['GET', 'POST'])\ndef negative():\n return render_template('negative.html', entries=database1.retrieve_entrie()\n )\n\n\[email protected]('/positive', methods=['GET', 'POST'])\ndef positive():\n return render_template('positive.html', entries=database1.retrieve_entrie()\n )\n",
"step-4": "import datetime\nfrom flask import Flask, render_template, request\nimport database\nimport database1\nimport database2\nimport getYoutubeVideoLinks as getYT\nimport os\nos.environ['EAI_USERNAME'] = '[email protected]'\nos.environ['EAI_PASSWORD'] = 'Testqwerty1!'\nfrom expertai.nlapi.cloud.client import ExpertAiClient\nclient = ExpertAiClient()\napp = Flask(__name__)\ndatabase.create_tables()\ndatabase1.create_table()\ndatabase2.create_tablee()\nlanguage = 'en'\nvideos = []\n\n\[email protected]('/', methods=['GET', 'POST'])\ndef home():\n if request.method == 'POST':\n entry_content = request.form.get('content')\n output = client.specific_resource_analysis(body={'document': {\n 'text': entry_content}}, params={'language': language,\n 'resource': 'relevants'})\n database2.create_entryss(entry_content, datetime.datetime.today().\n strftime('%b %d'))\n for lemma in output.main_lemmas:\n print(lemma.value)\n video = getYT.searchVideoForKeyword(lemma.value)\n for indivvideo in video:\n database.create_entry(entry_content, datetime.datetime.\n today().strftime('%b %d'), indivvideo)\n videos.append(f'{indivvideo}')\n return render_template('home.html')\n\n\[email protected]('/feedback', methods=['GET', 'POST'])\ndef feedback():\n if request.method == 'POST':\n entry_contents = request.form.get('contents')\n output = client.specific_resource_analysis(body={'document': {\n 'text': entry_contents}}, params={'language': language,\n 'resource': 'sentiment'})\n database1.create_entrys(entry_contents, datetime.datetime.today().\n strftime('%b %d'), output.sentiment.overall)\n print(output.sentiment.overall)\n return render_template('feedback.html')\n\n\[email protected]('/recommendation', methods=['GET', 'POST'])\ndef recommendation():\n return render_template('index.html', videos=videos, entries=database.\n retrieve_entries(), entrie=database2.retrieve_entriee())\n\n\[email protected]('/negative', methods=['GET', 'POST'])\ndef negative():\n return render_template('negative.html', entries=database1.retrieve_entrie()\n )\n\n\[email protected]('/positive', methods=['GET', 'POST'])\ndef positive():\n return render_template('positive.html', entries=database1.retrieve_entrie()\n )\n",
"step-5": "import datetime\nfrom flask import Flask, render_template, request\nimport database\nimport database1\nimport database2\nimport getYoutubeVideoLinks as getYT\n\nimport os\nos.environ[\"EAI_USERNAME\"] = '[email protected]'\nos.environ[\"EAI_PASSWORD\"] = 'Testqwerty1!'\n\nfrom expertai.nlapi.cloud.client import ExpertAiClient\nclient = ExpertAiClient()\n\n# Output overall sentiment\n\n\napp = Flask(__name__)\n\ndatabase.create_tables()\ndatabase1.create_table()\ndatabase2.create_tablee()\n\nlanguage= 'en'\n\nvideos = []\n\[email protected](\"/\", methods=[\"GET\", \"POST\"])\ndef home():\n \n if request.method == \"POST\":\n entry_content = request.form.get(\"content\")\n output = client.specific_resource_analysis(body={\"document\": {\"text\": entry_content}}, params={'language': language, 'resource': 'relevants'})\n database2.create_entryss(entry_content, datetime.datetime.today().strftime(\"%b %d\"))\n for lemma in output.main_lemmas:\n print(lemma.value)\n video = getYT.searchVideoForKeyword(lemma.value)\n for indivvideo in video:\n database.create_entry(entry_content, datetime.datetime.today().strftime(\"%b %d\"), indivvideo)\n videos.append(f'{indivvideo}')\n \n return render_template(\"home.html\")\n\n\n\[email protected](\"/feedback\", methods=[\"GET\", \"POST\"])\ndef feedback():\n if request.method == \"POST\":\n entry_contents = request.form.get(\"contents\")\n output = client.specific_resource_analysis(body={\"document\": {\"text\": entry_contents}},params={'language': language, 'resource': 'sentiment'})\n \n database1.create_entrys(entry_contents, datetime.datetime.today().strftime(\"%b %d\"), output.sentiment.overall)\n print(output.sentiment.overall)\n\n return render_template(\"feedback.html\")\n\n\n\n\[email protected](\"/recommendation\", methods=[\"GET\", \"POST\"])\ndef recommendation(): \n return render_template('index.html', videos=videos, entries=database.retrieve_entries(), entrie=database2.retrieve_entriee())\n\n\[email protected]('/negative', methods=[\"GET\", \"POST\"])\ndef negative():\n return render_template(\"negative.html\", entries=database1.retrieve_entrie())\n\n\[email protected]('/positive', methods=[\"GET\", \"POST\"])\ndef positive():\n return render_template(\"positive.html\", entries=database1.retrieve_entrie())\n\n\n\n",
"step-ids": [
5,
6,
7,
8,
9
]
}
|
[
5,
6,
7,
8,
9
] |
<|reserved_special_token_0|>
def solution2(a, b):
answer = [(a[i] * b[i]) for i in range(len(a))]
return sum(answer)
<|reserved_special_token_0|>
def solution5(a, b):
answer = sum([(i * j) for i, j in zip(a, b)])
return answer
<|reserved_special_token_1|>
def solution(a, b):
answer = 0
for i in range(len(a)):
answer += a[i] * b[i]
return answer
def solution2(a, b):
answer = [(a[i] * b[i]) for i in range(len(a))]
return sum(answer)
<|reserved_special_token_0|>
def solution5(a, b):
answer = sum([(i * j) for i, j in zip(a, b)])
return answer
<|reserved_special_token_1|>
def solution(a, b):
answer = 0
for i in range(len(a)):
answer += a[i] * b[i]
return answer
def solution2(a, b):
answer = [(a[i] * b[i]) for i in range(len(a))]
return sum(answer)
def solution3(a, b):
return sum(map(lambda x, y: x * y, a, b))
<|reserved_special_token_0|>
def solution5(a, b):
answer = sum([(i * j) for i, j in zip(a, b)])
return answer
<|reserved_special_token_1|>
def solution(a, b):
answer = 0
for i in range(len(a)):
answer += a[i] * b[i]
return answer
def solution2(a, b):
answer = [(a[i] * b[i]) for i in range(len(a))]
return sum(answer)
def solution3(a, b):
return sum(map(lambda x, y: x * y, a, b))
def solution4(a, b):
answer = 0
for i, j in zip(a, b):
answer += i * j
return answer
def solution5(a, b):
answer = sum([(i * j) for i, j in zip(a, b)])
return answer
<|reserved_special_token_1|>
# Problem description
# Two 1-dimensional integer arrays a and b of equal length are given as parameters. Complete the solution function so that it returns the dot product of a and b.
# Here, the dot product of a and b is a[0]*b[0] + a[1]*b[1] + ... + a[n-1]*b[n-1]. (n is the length of a and b)
# Constraints
# The lengths of a and b are between 1 and 1,000 inclusive.
# Every number in a and b is between -1,000 and 1,000 inclusive.
# Example input/output
# a             b               result
# [1,2,3,4]     [-3,-1,0,2]     3
# [-1,0,1]      [1,0,-1]        -2
# Example explanations
# Example #1
# The dot product of a and b is 1*(-3) + 2*(-1) + 3*0 + 4*2 = 3.
# Example #2
# The dot product of a and b is (-1)*1 + 0*0 + 1*(-1) = -2.
def solution(a, b):
answer = 0
for i in range(len(a)):
answer += a[i] * b[i]
return answer
# Using a list comprehension
def solution2(a, b):
answer = [a[i] * b[i] for i in range(len(a))]
return sum(answer)
# Using a lambda
def solution3(a, b):
    return sum(map(lambda x, y: x * y, a, b))
# Using zip
def solution4(a, b):
answer = 0
for i,j in zip(a,b):
answer += i * j
return answer
# Using zip + a list comprehension
def solution5(a, b):
answer = sum([i * j for i,j in zip(a,b)])
return answer
|
flexible
|
{
"blob_id": "5b8322761975ebec76d1dccd0290c0fb1da404e5",
"index": 5999,
"step-1": "<mask token>\n\n\ndef solution2(a, b):\n answer = [(a[i] * b[i]) for i in range(len(a))]\n return sum(answer)\n\n\n<mask token>\n\n\ndef solution5(a, b):\n answer = sum([(i * j) for i, j in zip(a, b)])\n return answer\n",
"step-2": "def solution(a, b):\n answer = 0\n for i in range(len(a)):\n answer += a[i] * b[i]\n return answer\n\n\ndef solution2(a, b):\n answer = [(a[i] * b[i]) for i in range(len(a))]\n return sum(answer)\n\n\n<mask token>\n\n\ndef solution5(a, b):\n answer = sum([(i * j) for i, j in zip(a, b)])\n return answer\n",
"step-3": "def solution(a, b):\n answer = 0\n for i in range(len(a)):\n answer += a[i] * b[i]\n return answer\n\n\ndef solution2(a, b):\n answer = [(a[i] * b[i]) for i in range(len(a))]\n return sum(answer)\n\n\ndef solution3(a, b):\n return sum(map(lambda x, y: x * y, a, b))\n\n\n<mask token>\n\n\ndef solution5(a, b):\n answer = sum([(i * j) for i, j in zip(a, b)])\n return answer\n",
"step-4": "def solution(a, b):\n answer = 0\n for i in range(len(a)):\n answer += a[i] * b[i]\n return answer\n\n\ndef solution2(a, b):\n answer = [(a[i] * b[i]) for i in range(len(a))]\n return sum(answer)\n\n\ndef solution3(a, b):\n return sum(map(lambda x, y: x * y, a, b))\n\n\ndef solution4(a, b):\n answer = 0\n for i, j in zip(a, b):\n answer += i * j\n return answer\n\n\ndef solution5(a, b):\n answer = sum([(i * j) for i, j in zip(a, b)])\n return answer\n",
"step-5": "# 문제 설명\n# 길이가 같은 두 1차원 정수 배열 a, b가 매개변수로 주어집니다. a와 b의 내적을 return 하도록 solution 함수를 완성해주세요.\n\n# 이때, a와 b의 내적은 a[0]*b[0] + a[1]*b[1] + ... + a[n-1]*b[n-1] 입니다. (n은 a, b의 길이)\n\n# 제한사항\n# a, b의 길이는 1 이상 1,000 이하입니다.\n# a, b의 모든 수는 -1,000 이상 1,000 이하입니다.\n# 입출력 예\n# a\tb\tresult\n# [1,2,3,4]\t[-3,-1,0,2]\t3\n# [-1,0,1]\t[1,0,-1]\t-2\n# 입출력 예 설명\n# 입출력 예 #1\n\n# a와 b의 내적은 1*(-3) + 2*(-1) + 3*0 + 4*2 = 3 입니다.\n# 입출력 예 #2\n\n# a와 b의 내적은 (-1)*1 + 0*0 + 1*(-1) = -2 입니다.\n\ndef solution(a, b):\n answer = 0\n for i in range(len(a)):\n answer += a[i] * b[i]\n return answer\n\n# 리스트 컨프리헨션 사용\n\ndef solution2(a, b):\n answer = [a[i] * b[i] for i in range(len(a))]\n return sum(answer)\n\n\n# 람다 사용\n\ndef solution3(a, b):\n return sum(map(lambda x,y: x * y , a, b))\n\n\n# zip 사용\n\ndef solution4(a, b):\n answer = 0\n for i,j in zip(a,b):\n answer += i * j\n \n return answer\n\n\n# zip + 리스트 컨프리헨션 사용\n\ndef solution5(a, b):\n answer = sum([i * j for i,j in zip(a,b)])\n \n return answer",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
# import draw as p
# if there is no __init__.py
# from draw.point import Point
from draw import Rectangle
from draw import Point
from draw import ShapeUtils
if __name__ == '__main__':
pn1 = Point(9,8)
pn2 = Point(6,4)
    print(f'dist: {pn1} and {pn2} = {ShapeUtils.distance(pn1,pn2)}')
rc1 = Rectangle(40,20,120,300)
rc2 = Rectangle(30,21,350,400)
    print(f'dist: {rc1} and {rc2} = {ShapeUtils.distance(rc1,rc2)}')
if ShapeUtils.compare(pn1,pn2) > 0:
print(f'{pn1} > {pn2}')
|
normal
|
{
"blob_id": "b984dc052201748a88fa51d25c3bd3c22404fa96",
"index": 6571,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nif __name__ == '__main__':\n pn1 = Point(9, 8)\n pn2 = Point(6, 4)\n print(f'dist: {pn1} and {pn1} = {ShapeUtils.distance(pn1, pn2)}')\n rc1 = Rectangle(40, 20, 120, 300)\n rc2 = Rectangle(30, 21, 350, 400)\n print(f'dist: {rc1} and {rc1} = {ShapeUtils.distance(rc1, rc2)}')\n if ShapeUtils.compare(pn1, pn2) > 0:\n print(f'{pn1} > {pn2}')\n",
"step-3": "from draw import Rectangle\nfrom draw import Point\nfrom draw import ShapeUtils\nif __name__ == '__main__':\n pn1 = Point(9, 8)\n pn2 = Point(6, 4)\n print(f'dist: {pn1} and {pn1} = {ShapeUtils.distance(pn1, pn2)}')\n rc1 = Rectangle(40, 20, 120, 300)\n rc2 = Rectangle(30, 21, 350, 400)\n print(f'dist: {rc1} and {rc1} = {ShapeUtils.distance(rc1, rc2)}')\n if ShapeUtils.compare(pn1, pn2) > 0:\n print(f'{pn1} > {pn2}')\n",
"step-4": "\n# import draw as p\n\n# ако няма __init__.py\n# from draw.point import Point \n\nfrom draw import Rectangle\nfrom draw import Point\nfrom draw import ShapeUtils\n\n\n\nif __name__ == '__main__':\n pn1 = Point(9,8)\n pn2 = Point(6,4)\n\n print(f'dist: {pn1} and {pn1} = {ShapeUtils.distance(pn1,pn2)}')\n\n rc1 = Rectangle(40,20,120,300)\n rc2 = Rectangle(30,21,350,400)\n\n print(f'dist: {rc1} and {rc1} = {ShapeUtils.distance(rc1,rc2)}')\n\n if ShapeUtils.compare(pn1,pn2) > 0:\n print(f'{pn1} > {pn2}')",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
def first_append_to_last(arr):
return arr + [arr[0]]
<|reserved_special_token_0|>
def RMS(arr):
n = len(arr)
sq_sum = sum(a ** 2 for a in arr)
return (sq_sum / n) ** 0.5
<|reserved_special_token_0|>
def L1(P1, P2):
x1, y1 = P1
x2, y2 = P2
return abs(x2 - x1) + abs(y2 - y1)
<|reserved_special_token_0|>
def state_to_str(x, y, v, o):
return '%d, %d, %d, %d' % (x, y, v, o)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def cell_borders(BL, Δxy):
[x_bl, y_bl] = BL
Δx = Δxy[0]
Δy = Δxy[1]
x_border = [x_bl, x_bl + Δx, x_bl + Δx, x_bl]
y_border = [y_bl, y_bl, y_bl + Δy, y_bl + Δy]
return [x_border, y_border]
<|reserved_special_token_0|>
def first_append_to_last(arr):
return arr + [arr[0]]
<|reserved_special_token_0|>
def RMS(arr):
n = len(arr)
sq_sum = sum(a ** 2 for a in arr)
return (sq_sum / n) ** 0.5
<|reserved_special_token_0|>
def L1(P1, P2):
x1, y1 = P1
x2, y2 = P2
return abs(x2 - x1) + abs(y2 - y1)
<|reserved_special_token_0|>
def state_to_str(x, y, v, o):
return '%d, %d, %d, %d' % (x, y, v, o)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def continuous_location(PD, loc_min, Δxy):
x = PD[0] * Δxy[0] + loc_min[0]
y = PD[1] * Δxy[1] + loc_min[1]
return [x, y]
<|reserved_special_token_0|>
def cell_borders(BL, Δxy):
[x_bl, y_bl] = BL
Δx = Δxy[0]
Δy = Δxy[1]
x_border = [x_bl, x_bl + Δx, x_bl + Δx, x_bl]
y_border = [y_bl, y_bl, y_bl + Δy, y_bl + Δy]
return [x_border, y_border]
<|reserved_special_token_0|>
def first_append_to_last(arr):
return arr + [arr[0]]
<|reserved_special_token_0|>
def RMS(arr):
n = len(arr)
sq_sum = sum(a ** 2 for a in arr)
return (sq_sum / n) ** 0.5
<|reserved_special_token_0|>
def L1(P1, P2):
x1, y1 = P1
x2, y2 = P2
return abs(x2 - x1) + abs(y2 - y1)
<|reserved_special_token_0|>
def state_to_str(x, y, v, o):
return '%d, %d, %d, %d' % (x, y, v, o)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def discretize_location(P, loc_min, Δxy):
x_from_start = P[0] - loc_min[0]
y_from_start = P[1] - loc_min[1]
xd = int(x_from_start // Δxy[0])
yd = int(y_from_start // Δxy[1])
return [xd, yd]
<|reserved_special_token_0|>
def continuous_location(PD, loc_min, Δxy):
x = PD[0] * Δxy[0] + loc_min[0]
y = PD[1] * Δxy[1] + loc_min[1]
return [x, y]
<|reserved_special_token_0|>
def cell_borders(BL, Δxy):
[x_bl, y_bl] = BL
Δx = Δxy[0]
Δy = Δxy[1]
x_border = [x_bl, x_bl + Δx, x_bl + Δx, x_bl]
y_border = [y_bl, y_bl, y_bl + Δy, y_bl + Δy]
return [x_border, y_border]
<|reserved_special_token_0|>
def first_append_to_last(arr):
return arr + [arr[0]]
<|reserved_special_token_0|>
def RMS(arr):
n = len(arr)
sq_sum = sum(a ** 2 for a in arr)
return (sq_sum / n) ** 0.5
<|reserved_special_token_0|>
def L1(P1, P2):
x1, y1 = P1
x2, y2 = P2
return abs(x2 - x1) + abs(y2 - y1)
<|reserved_special_token_0|>
def state_to_str(x, y, v, o):
return '%d, %d, %d, %d' % (x, y, v, o)
<|reserved_special_token_1|>
"""
SUMMARY
Auxiliary functions, provided here to avoid clutter
"""
"""
Transforms a point (P = [x, y]) using the x, y intervals (Δxy = [Δx, Δy]) into the corresponding discrete point (D = [xd, yd])
loc_min = [x_min, y_min]
"""
def discretize_location(P, loc_min, Δxy):
x_from_start = P[0] - loc_min[0]
y_from_start = P[1] - loc_min[1]
xd = int(x_from_start//Δxy[0])
yd = int(y_from_start//Δxy[1])
return [xd, yd]
"""
Transforms a discretized point (PD = [xd, yd]) using the x, y intervals (Δxy = [Δx, Δy]) into the corresponding point (P = [x, d])
loc_min = [x_min, y_min]
"""
def continuous_location(PD, loc_min, Δxy):
x = PD[0]*Δxy[0] + loc_min[0]
y = PD[1]*Δxy[1] + loc_min[1]
return [x, y]
"""
Obtains the points in the border of a cell (starting at bottom left (BL = [x_bl, y_bl])), starting point not repeated
"""
def cell_borders(BL, Δxy):
[x_bl, y_bl] = BL
Δx = Δxy[0]
Δy = Δxy[1]
x_border = [x_bl, x_bl + Δx, x_bl + Δx, x_bl]
y_border = [y_bl, y_bl, y_bl + Δy, y_bl + Δy]
return [x_border, y_border]
"""
Appends the first element of the array to the end, useful when plotting
"""
def first_append_to_last(arr):
return arr + [arr[0]]
"""
Calculates the RMS (root mean square) value of an array
"""
def RMS(arr):
n = len(arr)
sq_sum = sum(a**2 for a in arr)
return (sq_sum/n)**0.5
"""
Calculates the L1 norm (Manhattan distance) between P1 = [x1, y1] and P2 = [x2, y2]
"""
def L1(P1, P2):
x1, y1 = P1
x2, y2 = P2
return abs(x2 - x1) + abs(y2 - y1)
"""
Turns x, y, o, v into a string of the form "x, y, v, o"
"""
def state_to_str(x, y, v, o):
return "%d, %d, %d, %d" % (x, y, v, o)
|
flexible
|
{
"blob_id": "8bbc929e2ff2321b97195031fa675fbdab269fcb",
"index": 3288,
"step-1": "<mask token>\n\n\ndef first_append_to_last(arr):\n return arr + [arr[0]]\n\n\n<mask token>\n\n\ndef RMS(arr):\n n = len(arr)\n sq_sum = sum(a ** 2 for a in arr)\n return (sq_sum / n) ** 0.5\n\n\n<mask token>\n\n\ndef L1(P1, P2):\n x1, y1 = P1\n x2, y2 = P2\n return abs(x2 - x1) + abs(y2 - y1)\n\n\n<mask token>\n\n\ndef state_to_str(x, y, v, o):\n return '%d, %d, %d, %d' % (x, y, v, o)\n",
"step-2": "<mask token>\n\n\ndef cell_borders(BL, Δxy):\n [x_bl, y_bl] = BL\n Δx = Δxy[0]\n Δy = Δxy[1]\n x_border = [x_bl, x_bl + Δx, x_bl + Δx, x_bl]\n y_border = [y_bl, y_bl, y_bl + Δy, y_bl + Δy]\n return [x_border, y_border]\n\n\n<mask token>\n\n\ndef first_append_to_last(arr):\n return arr + [arr[0]]\n\n\n<mask token>\n\n\ndef RMS(arr):\n n = len(arr)\n sq_sum = sum(a ** 2 for a in arr)\n return (sq_sum / n) ** 0.5\n\n\n<mask token>\n\n\ndef L1(P1, P2):\n x1, y1 = P1\n x2, y2 = P2\n return abs(x2 - x1) + abs(y2 - y1)\n\n\n<mask token>\n\n\ndef state_to_str(x, y, v, o):\n return '%d, %d, %d, %d' % (x, y, v, o)\n",
"step-3": "<mask token>\n\n\ndef continuous_location(PD, loc_min, Δxy):\n x = PD[0] * Δxy[0] + loc_min[0]\n y = PD[1] * Δxy[1] + loc_min[1]\n return [x, y]\n\n\n<mask token>\n\n\ndef cell_borders(BL, Δxy):\n [x_bl, y_bl] = BL\n Δx = Δxy[0]\n Δy = Δxy[1]\n x_border = [x_bl, x_bl + Δx, x_bl + Δx, x_bl]\n y_border = [y_bl, y_bl, y_bl + Δy, y_bl + Δy]\n return [x_border, y_border]\n\n\n<mask token>\n\n\ndef first_append_to_last(arr):\n return arr + [arr[0]]\n\n\n<mask token>\n\n\ndef RMS(arr):\n n = len(arr)\n sq_sum = sum(a ** 2 for a in arr)\n return (sq_sum / n) ** 0.5\n\n\n<mask token>\n\n\ndef L1(P1, P2):\n x1, y1 = P1\n x2, y2 = P2\n return abs(x2 - x1) + abs(y2 - y1)\n\n\n<mask token>\n\n\ndef state_to_str(x, y, v, o):\n return '%d, %d, %d, %d' % (x, y, v, o)\n",
"step-4": "<mask token>\n\n\ndef discretize_location(P, loc_min, Δxy):\n x_from_start = P[0] - loc_min[0]\n y_from_start = P[1] - loc_min[1]\n xd = int(x_from_start // Δxy[0])\n yd = int(y_from_start // Δxy[1])\n return [xd, yd]\n\n\n<mask token>\n\n\ndef continuous_location(PD, loc_min, Δxy):\n x = PD[0] * Δxy[0] + loc_min[0]\n y = PD[1] * Δxy[1] + loc_min[1]\n return [x, y]\n\n\n<mask token>\n\n\ndef cell_borders(BL, Δxy):\n [x_bl, y_bl] = BL\n Δx = Δxy[0]\n Δy = Δxy[1]\n x_border = [x_bl, x_bl + Δx, x_bl + Δx, x_bl]\n y_border = [y_bl, y_bl, y_bl + Δy, y_bl + Δy]\n return [x_border, y_border]\n\n\n<mask token>\n\n\ndef first_append_to_last(arr):\n return arr + [arr[0]]\n\n\n<mask token>\n\n\ndef RMS(arr):\n n = len(arr)\n sq_sum = sum(a ** 2 for a in arr)\n return (sq_sum / n) ** 0.5\n\n\n<mask token>\n\n\ndef L1(P1, P2):\n x1, y1 = P1\n x2, y2 = P2\n return abs(x2 - x1) + abs(y2 - y1)\n\n\n<mask token>\n\n\ndef state_to_str(x, y, v, o):\n return '%d, %d, %d, %d' % (x, y, v, o)\n",
"step-5": "\"\"\"\nSUMMARY\n\nAuxiliary functions, provided here to avoid clutter\n\"\"\"\n\n\n\"\"\"\nTransforms a point (P = [x, y]) using the x, y intervals (Δxy = [Δx, Δy]) into the corresponding discrete point (D = [xd, yd])\nloc_min = [x_min, y_min]\n\"\"\"\ndef discretize_location(P, loc_min, Δxy):\n x_from_start = P[0] - loc_min[0]\n y_from_start = P[1] - loc_min[1]\n\n xd = int(x_from_start//Δxy[0])\n yd = int(y_from_start//Δxy[1])\n\n return [xd, yd]\n\n\n\n\"\"\"\nTransforms a discretized point (PD = [xd, yd]) using the x, y intervals (Δxy = [Δx, Δy]) into the corresponding point (P = [x, d])\nloc_min = [x_min, y_min]\n\"\"\"\ndef continuous_location(PD, loc_min, Δxy):\n\n x = PD[0]*Δxy[0] + loc_min[0]\n y = PD[1]*Δxy[1] + loc_min[1]\n\n return [x, y]\n\n\n\n\"\"\"\nObtains the points in the border of a cell (starting at bottom left (BL = [x_bl, y_bl])), starting point not repeated\n\"\"\"\ndef cell_borders(BL, Δxy):\n [x_bl, y_bl] = BL\n Δx = Δxy[0]\n Δy = Δxy[1]\n\n x_border = [x_bl, x_bl + Δx, x_bl + Δx, x_bl]\n y_border = [y_bl, y_bl, y_bl + Δy, y_bl + Δy]\n\n return [x_border, y_border]\n\n\n\"\"\"\nAppends the first element of the array to the end, useful when plotting\n\"\"\"\ndef first_append_to_last(arr):\n return arr + [arr[0]]\n\n\n\n\"\"\"\nCalculates the RMS (root mean square) value of an array\n\"\"\"\ndef RMS(arr):\n n = len(arr)\n sq_sum = sum(a**2 for a in arr)\n return (sq_sum/n)**0.5\n\n\n\n\"\"\"\nCalculates the L1 norm (Manhattan distance) between P1 = [x1, y1] and P2 = [x2, y2]\n\"\"\"\ndef L1(P1, P2):\n x1, y1 = P1\n x2, y2 = P2\n\n return abs(x2 - x1) + abs(y2 - y1)\n\n\n\n\"\"\"\nTurns x, y, o, v into a string of the form \"x, y, v, o\"\n\"\"\"\ndef state_to_str(x, y, v, o):\n return \"%d, %d, %d, %d\" % (x, y, v, o)\n",
"step-ids": [
4,
5,
6,
7,
8
]
}
|
[
4,
5,
6,
7,
8
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
np.put(x, ind=idx, v=1)
print(x)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
x = np.zeros(10)
idx = [1, 4, 5, 9]
np.put(x, ind=idx, v=1)
print(x)
<|reserved_special_token_1|>
import numpy as np
x = np.zeros(10)
idx = [1, 4, 5, 9]
np.put(x, ind=idx, v=1)
print(x)
|
flexible
|
{
"blob_id": "9e2485554a5a8de07dd3df39cc255f2a1ea2f164",
"index": 4769,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nnp.put(x, ind=idx, v=1)\nprint(x)\n",
"step-3": "<mask token>\nx = np.zeros(10)\nidx = [1, 4, 5, 9]\nnp.put(x, ind=idx, v=1)\nprint(x)\n",
"step-4": "import numpy as np\nx = np.zeros(10)\nidx = [1, 4, 5, 9]\nnp.put(x, ind=idx, v=1)\nprint(x)\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
def RED(t):
GPIO.output(21, 1)
time.sleep(1)
GPIO.output(21, 0)
<|reserved_special_token_0|>
def dataTransfer(conn):
while True:
data = conn.recv(1024)
data = data.decode('utf-8')
dataMessage = data.split(' ', 1)
command = dataMessage[0]
para = dataMessage[1]
y = int(para)
if len(command) > 0:
print(command)
if command == 'RED':
RED(y)
elif command == 'GREEN':
GREEN(y)
elif command == 'KILL':
print('Our server is shutting down.')
s.close()
break
else:
print('Unknown Command')
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def setupServer():
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
print('Socket created.')
try:
s.bind((host, port))
except socket.error as msg:
print(msg)
print('Socket bind comlete.')
return s
def setupConnection():
s.listen(1)
conn, address = s.accept()
print('Connected to: ' + address[0] + ':' + str(address[1]))
return conn
def RED(t):
GPIO.output(21, 1)
time.sleep(1)
GPIO.output(21, 0)
<|reserved_special_token_0|>
def dataTransfer(conn):
while True:
data = conn.recv(1024)
data = data.decode('utf-8')
dataMessage = data.split(' ', 1)
command = dataMessage[0]
para = dataMessage[1]
y = int(para)
if len(command) > 0:
print(command)
if command == 'RED':
RED(y)
elif command == 'GREEN':
GREEN(y)
elif command == 'KILL':
print('Our server is shutting down.')
s.close()
break
else:
print('Unknown Command')
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
GPIO.setmode(GPIO.BCM)
GPIO.setup(20, GPIO.OUT, initial=GPIO.LOW)
GPIO.setup(21, GPIO.OUT, initial=GPIO.LOW)
GPIO.setwarnings(False)
<|reserved_special_token_0|>
def setupServer():
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
print('Socket created.')
try:
s.bind((host, port))
except socket.error as msg:
print(msg)
print('Socket bind comlete.')
return s
def setupConnection():
s.listen(1)
conn, address = s.accept()
print('Connected to: ' + address[0] + ':' + str(address[1]))
return conn
def RED(t):
GPIO.output(21, 1)
time.sleep(1)
GPIO.output(21, 0)
def GREEN(t):
    GPIO.output(20, 1)
time.sleep(t)
GPIO.output(20, 0)
def dataTransfer(conn):
while True:
data = conn.recv(1024)
data = data.decode('utf-8')
dataMessage = data.split(' ', 1)
command = dataMessage[0]
para = dataMessage[1]
y = int(para)
if len(command) > 0:
print(command)
if command == 'RED':
RED(y)
elif command == 'GREEN':
GREEN(y)
elif command == 'KILL':
print('Our server is shutting down.')
s.close()
break
else:
print('Unknown Command')
<|reserved_special_token_0|>
def main():
try:
while True:
try:
conn = setupConnection()
dataTransfer(conn)
except:
break
except KeyboardInterrupt:
print('program terminated')
finally:
GPIO.cleanup()
conn.close()
if __name__ == '__main__':
main()
<|reserved_special_token_1|>
import socket
import RPi.GPIO as GPIO
import time
GPIO.setmode(GPIO.BCM)
GPIO.setup(20, GPIO.OUT, initial=GPIO.LOW)
GPIO.setup(21, GPIO.OUT, initial=GPIO.LOW)
GPIO.setwarnings(False)
host = '192.168.87.191'
port = 5560
def setupServer():
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
print('Socket created.')
try:
s.bind((host, port))
except socket.error as msg:
print(msg)
print('Socket bind comlete.')
return s
def setupConnection():
s.listen(1)
conn, address = s.accept()
print('Connected to: ' + address[0] + ':' + str(address[1]))
return conn
def RED(t):
GPIO.output(21, 1)
time.sleep(1)
GPIO.output(21, 0)
def GREEN(t):
    GPIO.output(20, 1)
time.sleep(t)
GPIO.output(20, 0)
def dataTransfer(conn):
while True:
data = conn.recv(1024)
data = data.decode('utf-8')
dataMessage = data.split(' ', 1)
command = dataMessage[0]
para = dataMessage[1]
y = int(para)
if len(command) > 0:
print(command)
if command == 'RED':
RED(y)
elif command == 'GREEN':
GREEN(y)
elif command == 'KILL':
print('Our server is shutting down.')
s.close()
break
else:
print('Unknown Command')
s = setupServer()
def main():
try:
while True:
try:
conn = setupConnection()
dataTransfer(conn)
except:
break
except KeyboardInterrupt:
print('program terminated')
finally:
GPIO.cleanup()
conn.close()
if __name__ == '__main__':
main()
<|reserved_special_token_1|>
import socket
import RPi.GPIO as GPIO
import time
GPIO.setmode(GPIO.BCM)
GPIO.setup(20,GPIO.OUT,initial=GPIO.LOW) #green
GPIO.setup(21,GPIO.OUT,initial=GPIO.LOW) #red
GPIO.setwarnings(False)
host = '192.168.87.191'
port = 5560
def setupServer():
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
print("Socket created.")
try:
s.bind((host, port))
except socket.error as msg:
print(msg)
print("Socket bind comlete.")
return s
def setupConnection():
s.listen(1) # Allows one connection at a time.
conn, address = s.accept()
print("Connected to: " + address[0] + ":" + str(address[1]))
return conn
def RED(t):
#Red LED
GPIO.output(21,1)
time.sleep(1)
GPIO.output(21,0)
def GREEN(t):
#GREEN LED
    GPIO.output(20,1)
time.sleep(t)
GPIO.output(20,0)
def dataTransfer(conn):
# A big loop that receives data until told not to.
while True:
# Receive the data
data = conn.recv(1024) # receive the data
data = data.decode('utf-8')
# Split the data such that you separate the command
# from the rest of the data.
dataMessage = data.split(' ', 1)
# Command
command = dataMessage[0]
# parameter
para=dataMessage[1]
y=int(para)
if len(command)>0:
print(command)
if command == 'RED':
RED(y)
elif command == 'GREEN':
GREEN(y)
elif command == 'KILL':
print("Our server is shutting down.")
s.close()
break
else:
print('Unknown Command')
#conn.close()
s = setupServer()
#while True:
# try:
# conn = setupConnection()
# dataTransfer(conn)
# except:
# break
def main():
try:
while True:
try:
conn = setupConnection()
dataTransfer(conn)
except:
break
except KeyboardInterrupt:
print("program terminated")
finally:
GPIO.cleanup()
conn.close()
#Runs Main Function
if __name__=="__main__":
main()
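
# --- Companion client sketch (added for illustration; not part of the original source above) ---
# A minimal client for the LED server above, assuming the same host/port and the
# space-separated "<COMMAND> <seconds>" format that dataTransfer() parses,
# e.g. "GREEN 2" or "KILL 0". Run it from a separate machine or process.
import socket
import time

with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as client:
    client.connect(('192.168.87.191', 5560))
    client.sendall(b'GREEN 2')   # light the green LED for 2 seconds
    time.sleep(3)                # give the server time to handle the command
    client.sendall(b'KILL 0')    # ask the server to shut down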
|
flexible
|
{
"blob_id": "78efe97d838774cb831ef205186db29f392e1953",
"index": 1584,
"step-1": "<mask token>\n\n\ndef RED(t):\n GPIO.output(21, 1)\n time.sleep(1)\n GPIO.output(21, 0)\n\n\n<mask token>\n\n\ndef dataTransfer(conn):\n while True:\n data = conn.recv(1024)\n data = data.decode('utf-8')\n dataMessage = data.split(' ', 1)\n command = dataMessage[0]\n para = dataMessage[1]\n y = int(para)\n if len(command) > 0:\n print(command)\n if command == 'RED':\n RED(y)\n elif command == 'GREEN':\n GREEN(y)\n elif command == 'KILL':\n print('Our server is shutting down.')\n s.close()\n break\n else:\n print('Unknown Command')\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef setupServer():\n s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n print('Socket created.')\n try:\n s.bind((host, port))\n except socket.error as msg:\n print(msg)\n print('Socket bind comlete.')\n return s\n\n\ndef setupConnection():\n s.listen(1)\n conn, address = s.accept()\n print('Connected to: ' + address[0] + ':' + str(address[1]))\n return conn\n\n\ndef RED(t):\n GPIO.output(21, 1)\n time.sleep(1)\n GPIO.output(21, 0)\n\n\n<mask token>\n\n\ndef dataTransfer(conn):\n while True:\n data = conn.recv(1024)\n data = data.decode('utf-8')\n dataMessage = data.split(' ', 1)\n command = dataMessage[0]\n para = dataMessage[1]\n y = int(para)\n if len(command) > 0:\n print(command)\n if command == 'RED':\n RED(y)\n elif command == 'GREEN':\n GREEN(y)\n elif command == 'KILL':\n print('Our server is shutting down.')\n s.close()\n break\n else:\n print('Unknown Command')\n\n\n<mask token>\n",
"step-3": "<mask token>\nGPIO.setmode(GPIO.BCM)\nGPIO.setup(20, GPIO.OUT, initial=GPIO.LOW)\nGPIO.setup(21, GPIO.OUT, initial=GPIO.LOW)\nGPIO.setwarnings(False)\n<mask token>\n\n\ndef setupServer():\n s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n print('Socket created.')\n try:\n s.bind((host, port))\n except socket.error as msg:\n print(msg)\n print('Socket bind comlete.')\n return s\n\n\ndef setupConnection():\n s.listen(1)\n conn, address = s.accept()\n print('Connected to: ' + address[0] + ':' + str(address[1]))\n return conn\n\n\ndef RED(t):\n GPIO.output(21, 1)\n time.sleep(1)\n GPIO.output(21, 0)\n\n\ndef GREEN(t):\n GPIO.outdefput(20, 1)\n time.sleep(t)\n GPIO.output(20, 0)\n\n\ndef dataTransfer(conn):\n while True:\n data = conn.recv(1024)\n data = data.decode('utf-8')\n dataMessage = data.split(' ', 1)\n command = dataMessage[0]\n para = dataMessage[1]\n y = int(para)\n if len(command) > 0:\n print(command)\n if command == 'RED':\n RED(y)\n elif command == 'GREEN':\n GREEN(y)\n elif command == 'KILL':\n print('Our server is shutting down.')\n s.close()\n break\n else:\n print('Unknown Command')\n\n\n<mask token>\n\n\ndef main():\n try:\n while True:\n try:\n conn = setupConnection()\n dataTransfer(conn)\n except:\n break\n except KeyboardInterrupt:\n print('program terminated')\n finally:\n GPIO.cleanup()\n conn.close()\n\n\nif __name__ == '__main__':\n main()\n",
"step-4": "import socket\nimport RPi.GPIO as GPIO\nimport time\nGPIO.setmode(GPIO.BCM)\nGPIO.setup(20, GPIO.OUT, initial=GPIO.LOW)\nGPIO.setup(21, GPIO.OUT, initial=GPIO.LOW)\nGPIO.setwarnings(False)\nhost = '192.168.87.191'\nport = 5560\n\n\ndef setupServer():\n s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n print('Socket created.')\n try:\n s.bind((host, port))\n except socket.error as msg:\n print(msg)\n print('Socket bind comlete.')\n return s\n\n\ndef setupConnection():\n s.listen(1)\n conn, address = s.accept()\n print('Connected to: ' + address[0] + ':' + str(address[1]))\n return conn\n\n\ndef RED(t):\n GPIO.output(21, 1)\n time.sleep(1)\n GPIO.output(21, 0)\n\n\ndef GREEN(t):\n GPIO.outdefput(20, 1)\n time.sleep(t)\n GPIO.output(20, 0)\n\n\ndef dataTransfer(conn):\n while True:\n data = conn.recv(1024)\n data = data.decode('utf-8')\n dataMessage = data.split(' ', 1)\n command = dataMessage[0]\n para = dataMessage[1]\n y = int(para)\n if len(command) > 0:\n print(command)\n if command == 'RED':\n RED(y)\n elif command == 'GREEN':\n GREEN(y)\n elif command == 'KILL':\n print('Our server is shutting down.')\n s.close()\n break\n else:\n print('Unknown Command')\n\n\ns = setupServer()\n\n\ndef main():\n try:\n while True:\n try:\n conn = setupConnection()\n dataTransfer(conn)\n except:\n break\n except KeyboardInterrupt:\n print('program terminated')\n finally:\n GPIO.cleanup()\n conn.close()\n\n\nif __name__ == '__main__':\n main()\n",
"step-5": "import socket\nimport RPi.GPIO as GPIO\nimport time\nGPIO.setmode(GPIO.BCM)\nGPIO.setup(20,GPIO.OUT,initial=GPIO.LOW) #green\nGPIO.setup(21,GPIO.OUT,initial=GPIO.LOW) #red\nGPIO.setwarnings(False)\n\nhost = '192.168.87.191'\nport = 5560\n\ndef setupServer():\n s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n print(\"Socket created.\")\n try:\n s.bind((host, port))\n except socket.error as msg:\n print(msg)\n print(\"Socket bind comlete.\")\n return s\n\ndef setupConnection():\n s.listen(1) # Allows one connection at a time.\n conn, address = s.accept()\n print(\"Connected to: \" + address[0] + \":\" + str(address[1]))\n return conn\n\ndef RED(t):\n #Red LED\n GPIO.output(21,1)\n time.sleep(1)\n GPIO.output(21,0)\n\ndef GREEN(t):\n #GREEN LED\n GPIO.outdefput(20,1)\n time.sleep(t)\n GPIO.output(20,0)\n\ndef dataTransfer(conn):\n # A big loop that receives data until told not to.\n\n while True:\n # Receive the data\n data = conn.recv(1024) # receive the data\n data = data.decode('utf-8')\n\n # Split the data such that you separate the command\n # from the rest of the data.\n dataMessage = data.split(' ', 1)\n # Command\n command = dataMessage[0]\n # parameter\n para=dataMessage[1]\n y=int(para)\n if len(command)>0:\n print(command)\n if command == 'RED':\n RED(y)\n elif command == 'GREEN':\n GREEN(y)\n elif command == 'KILL':\n print(\"Our server is shutting down.\")\n s.close()\n break\n else:\n print('Unknown Command')\n #conn.close()\ns = setupServer()\n#while True:\n# try:\n# conn = setupConnection()\n# dataTransfer(conn)\n# except:\n# break\ndef main():\n try:\n while True:\n try:\n conn = setupConnection()\n dataTransfer(conn)\n except:\n break\n except KeyboardInterrupt:\n print(\"program terminated\")\n finally:\n GPIO.cleanup()\n conn.close()\n#Runs Main Function\nif __name__==\"__main__\":\n main()\n\n",
"step-ids": [
2,
4,
7,
9,
10
]
}
|
[
2,
4,
7,
9,
10
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
with open(join(here, 'VERSION')) as VERSION_FILE:
__versionstr__ = VERSION_FILE.read().strip()
with open(join(here, 'requirements.txt')) as REQUIREMENTS:
INSTALL_REQUIRES = REQUIREMENTS.read().split('\n')
with io.open(join(here, 'README.md'), encoding='utf-8') as f:
long_description = f.read()
setup(name='sumologic-sdk', version=__versionstr__, packages=find_packages(
), install_requires=INSTALL_REQUIRES, author=
'SumoLogic, Yoway Buorn, Melchi Salins', author_email=
'[email protected], [email protected], [email protected]',
description='Sumo Logic Python SDK', license='PSF', long_description=
long_description, long_description_content_type='text/markdown',
keywords=
'sumologic python sdk rest api log management analytics logreduce security siem collector forwarder'
, url='https://github.com/SumoLogic/sumologic-python-sdk', zip_safe=True)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
here = abspath(dirname(__file__))
with open(join(here, 'VERSION')) as VERSION_FILE:
__versionstr__ = VERSION_FILE.read().strip()
with open(join(here, 'requirements.txt')) as REQUIREMENTS:
INSTALL_REQUIRES = REQUIREMENTS.read().split('\n')
with io.open(join(here, 'README.md'), encoding='utf-8') as f:
long_description = f.read()
setup(name='sumologic-sdk', version=__versionstr__, packages=find_packages(
), install_requires=INSTALL_REQUIRES, author=
'SumoLogic, Yoway Buorn, Melchi Salins', author_email=
'[email protected], [email protected], [email protected]',
description='Sumo Logic Python SDK', license='PSF', long_description=
long_description, long_description_content_type='text/markdown',
keywords=
'sumologic python sdk rest api log management analytics logreduce security siem collector forwarder'
, url='https://github.com/SumoLogic/sumologic-python-sdk', zip_safe=True)
<|reserved_special_token_1|>
from setuptools import setup, find_packages
from os.path import join, dirname, abspath
import io
here = abspath(dirname(__file__))
with open(join(here, 'VERSION')) as VERSION_FILE:
__versionstr__ = VERSION_FILE.read().strip()
with open(join(here, 'requirements.txt')) as REQUIREMENTS:
INSTALL_REQUIRES = REQUIREMENTS.read().split('\n')
with io.open(join(here, 'README.md'), encoding='utf-8') as f:
long_description = f.read()
setup(name='sumologic-sdk', version=__versionstr__, packages=find_packages(
), install_requires=INSTALL_REQUIRES, author=
'SumoLogic, Yoway Buorn, Melchi Salins', author_email=
'[email protected], [email protected], [email protected]',
description='Sumo Logic Python SDK', license='PSF', long_description=
long_description, long_description_content_type='text/markdown',
keywords=
'sumologic python sdk rest api log management analytics logreduce security siem collector forwarder'
, url='https://github.com/SumoLogic/sumologic-python-sdk', zip_safe=True)
<|reserved_special_token_1|>
from setuptools import setup, find_packages
from os.path import join, dirname, abspath
import io
here = abspath(dirname(__file__))
with open(join(here, 'VERSION')) as VERSION_FILE:
__versionstr__ = VERSION_FILE.read().strip()
with open(join(here, 'requirements.txt')) as REQUIREMENTS:
INSTALL_REQUIRES = REQUIREMENTS.read().split('\n')
with io.open(join(here, 'README.md'), encoding='utf-8') as f:
long_description = f.read()
setup(
name="sumologic-sdk",
version=__versionstr__,
packages=find_packages(),
install_requires=INSTALL_REQUIRES,
# PyPI metadata
author="SumoLogic, Yoway Buorn, Melchi Salins",
author_email="[email protected], [email protected], [email protected]",
description="Sumo Logic Python SDK",
license="PSF",
long_description=long_description,
long_description_content_type='text/markdown',
keywords="sumologic python sdk rest api log management analytics logreduce security siem collector forwarder",
url="https://github.com/SumoLogic/sumologic-python-sdk",
zip_safe=True
)
|
flexible
|
{
"blob_id": "8d5978bc579115eb3065dce1bae08f1790f2d83c",
"index": 2832,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nwith open(join(here, 'VERSION')) as VERSION_FILE:\n __versionstr__ = VERSION_FILE.read().strip()\nwith open(join(here, 'requirements.txt')) as REQUIREMENTS:\n INSTALL_REQUIRES = REQUIREMENTS.read().split('\\n')\nwith io.open(join(here, 'README.md'), encoding='utf-8') as f:\n long_description = f.read()\nsetup(name='sumologic-sdk', version=__versionstr__, packages=find_packages(\n ), install_requires=INSTALL_REQUIRES, author=\n 'SumoLogic, Yoway Buorn, Melchi Salins', author_email=\n '[email protected], [email protected], [email protected]',\n description='Sumo Logic Python SDK', license='PSF', long_description=\n long_description, long_description_content_type='text/markdown',\n keywords=\n 'sumologic python sdk rest api log management analytics logreduce security siem collector forwarder'\n , url='https://github.com/SumoLogic/sumologic-python-sdk', zip_safe=True)\n",
"step-3": "<mask token>\nhere = abspath(dirname(__file__))\nwith open(join(here, 'VERSION')) as VERSION_FILE:\n __versionstr__ = VERSION_FILE.read().strip()\nwith open(join(here, 'requirements.txt')) as REQUIREMENTS:\n INSTALL_REQUIRES = REQUIREMENTS.read().split('\\n')\nwith io.open(join(here, 'README.md'), encoding='utf-8') as f:\n long_description = f.read()\nsetup(name='sumologic-sdk', version=__versionstr__, packages=find_packages(\n ), install_requires=INSTALL_REQUIRES, author=\n 'SumoLogic, Yoway Buorn, Melchi Salins', author_email=\n '[email protected], [email protected], [email protected]',\n description='Sumo Logic Python SDK', license='PSF', long_description=\n long_description, long_description_content_type='text/markdown',\n keywords=\n 'sumologic python sdk rest api log management analytics logreduce security siem collector forwarder'\n , url='https://github.com/SumoLogic/sumologic-python-sdk', zip_safe=True)\n",
"step-4": "from setuptools import setup, find_packages\nfrom os.path import join, dirname, abspath\nimport io\nhere = abspath(dirname(__file__))\nwith open(join(here, 'VERSION')) as VERSION_FILE:\n __versionstr__ = VERSION_FILE.read().strip()\nwith open(join(here, 'requirements.txt')) as REQUIREMENTS:\n INSTALL_REQUIRES = REQUIREMENTS.read().split('\\n')\nwith io.open(join(here, 'README.md'), encoding='utf-8') as f:\n long_description = f.read()\nsetup(name='sumologic-sdk', version=__versionstr__, packages=find_packages(\n ), install_requires=INSTALL_REQUIRES, author=\n 'SumoLogic, Yoway Buorn, Melchi Salins', author_email=\n '[email protected], [email protected], [email protected]',\n description='Sumo Logic Python SDK', license='PSF', long_description=\n long_description, long_description_content_type='text/markdown',\n keywords=\n 'sumologic python sdk rest api log management analytics logreduce security siem collector forwarder'\n , url='https://github.com/SumoLogic/sumologic-python-sdk', zip_safe=True)\n",
"step-5": "from setuptools import setup, find_packages\nfrom os.path import join, dirname, abspath\nimport io\n\nhere = abspath(dirname(__file__))\n\nwith open(join(here, 'VERSION')) as VERSION_FILE:\n __versionstr__ = VERSION_FILE.read().strip()\n\n\nwith open(join(here, 'requirements.txt')) as REQUIREMENTS:\n INSTALL_REQUIRES = REQUIREMENTS.read().split('\\n')\n\n\nwith io.open(join(here, 'README.md'), encoding='utf-8') as f:\n long_description = f.read()\n\nsetup(\n name=\"sumologic-sdk\",\n version=__versionstr__,\n packages=find_packages(),\n install_requires=INSTALL_REQUIRES,\n # PyPI metadata\n author=\"SumoLogic, Yoway Buorn, Melchi Salins\",\n author_email=\"[email protected], [email protected], [email protected]\",\n description=\"Sumo Logic Python SDK\",\n license=\"PSF\",\n long_description=long_description,\n long_description_content_type='text/markdown',\n keywords=\"sumologic python sdk rest api log management analytics logreduce security siem collector forwarder\",\n url=\"https://github.com/SumoLogic/sumologic-python-sdk\",\n zip_safe=True\n)\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
class AdaBoostClassifier:
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def predict(self, X, threshold=0):
"""Predict the catagories for geven samples.
Args:
X: An ndarray indicating the samples to be predicted, which shape should be (n_samples,n_features).
            threshold: The demarcation number for dividing the samples into two parts.
Returns:
            An ndarray consisting of predicted labels, which shape should be (n_samples,1).
"""
predictYList = []
for i in range(len(self.finalClassifierList)):
tempY = self.finalClassifierList[i].predict(X)
predictYList.append(tempY)
predicYArray = np.transpose(np.array(predictYList))
alphaArray = np.array(self.alphaList)
temp = predicYArray * alphaArray
predictY = np.sum(temp, axis=1)
for i in range(len(predictY)):
if predictY[i] > threshold:
predictY[i] = 1
else:
predictY[i] = -1
return predictY
<|reserved_special_token_0|>
@staticmethod
def load(filename):
with open(filename, 'rb') as f:
return pickle.load(f)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class AdaBoostClassifier:
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def is_good_enough(self):
"""Optional"""
pass
<|reserved_special_token_0|>
def fit(self, X, y):
"""Build a boosted classifier from the training set (X, y).
Args:
X: An ndarray indicating the samples to be trained, which shape should be (n_samples,n_features).
            y: An ndarray indicating the ground-truth labels corresponding to X, which shape should be (n_samples,1).
"""
row, col = X.shape
weightArray = [1 / row] * row
self.alphaList = []
self.finalClassifierList = []
for i in range(self.iteration):
clf = self.weakClassifier(max_depth=2)
clf.fit(X, y, weightArray)
predictY = clf.predict(X)
error = self.calculateError(y, predictY, weightArray)
if error > 0.5:
break
else:
self.finalClassifierList.append(clf)
alpha = 0.5 * math.log((1 - error) / error)
self.alphaList.append(alpha)
aYH = alpha * y * predictY * -1
tempWeights = weightArray * np.exp(aYH)
tempSum = np.sum(tempWeights)
weightArray = tempWeights / tempSum
def predict_scores(self, X):
"""Calculate the weighted sum score of the whole base classifiers for given samples.
Args:
X: An ndarray indicating the samples to be predicted, which shape should be (n_samples,n_features).
Returns:
            A one-dimensional ndarray indicating the scores of different samples, which shape should be (n_samples,1).
"""
pass
def predict(self, X, threshold=0):
"""Predict the catagories for geven samples.
Args:
X: An ndarray indicating the samples to be predicted, which shape should be (n_samples,n_features).
            threshold: The demarcation number for dividing the samples into two parts.
Returns:
            An ndarray consisting of predicted labels, which shape should be (n_samples,1).
"""
predictYList = []
for i in range(len(self.finalClassifierList)):
tempY = self.finalClassifierList[i].predict(X)
predictYList.append(tempY)
predicYArray = np.transpose(np.array(predictYList))
alphaArray = np.array(self.alphaList)
temp = predicYArray * alphaArray
predictY = np.sum(temp, axis=1)
for i in range(len(predictY)):
if predictY[i] > threshold:
predictY[i] = 1
else:
predictY[i] = -1
return predictY
@staticmethod
def save(model, filename):
with open(filename, 'wb') as f:
pickle.dump(model, f)
@staticmethod
def load(filename):
with open(filename, 'rb') as f:
return pickle.load(f)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class AdaBoostClassifier:
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def is_good_enough(self):
"""Optional"""
pass
def calculateError(self, y, predictY, weights):
"""
        Purpose: compute the weighted classification error.
        :param y: list of ground-truth labels
        :param predictY: list of predicted values
        :param weights: list of sample weights
        :return: the weighted error
"""
error = 0
for i in range(len(y)):
if y[i] != predictY[i]:
error += weights[i]
return error
def fit(self, X, y):
"""Build a boosted classifier from the training set (X, y).
Args:
X: An ndarray indicating the samples to be trained, which shape should be (n_samples,n_features).
            y: An ndarray indicating the ground-truth labels corresponding to X, which shape should be (n_samples,1).
"""
row, col = X.shape
weightArray = [1 / row] * row
self.alphaList = []
self.finalClassifierList = []
for i in range(self.iteration):
clf = self.weakClassifier(max_depth=2)
clf.fit(X, y, weightArray)
predictY = clf.predict(X)
error = self.calculateError(y, predictY, weightArray)
if error > 0.5:
break
else:
self.finalClassifierList.append(clf)
alpha = 0.5 * math.log((1 - error) / error)
self.alphaList.append(alpha)
aYH = alpha * y * predictY * -1
tempWeights = weightArray * np.exp(aYH)
tempSum = np.sum(tempWeights)
weightArray = tempWeights / tempSum
def predict_scores(self, X):
"""Calculate the weighted sum score of the whole base classifiers for given samples.
Args:
X: An ndarray indicating the samples to be predicted, which shape should be (n_samples,n_features).
Returns:
            A one-dimensional ndarray indicating the scores of different samples, which shape should be (n_samples,1).
"""
pass
def predict(self, X, threshold=0):
"""Predict the catagories for geven samples.
Args:
X: An ndarray indicating the samples to be predicted, which shape should be (n_samples,n_features).
            threshold: The demarcation number for dividing the samples into two parts.
Returns:
            An ndarray consisting of predicted labels, which shape should be (n_samples,1).
"""
predictYList = []
for i in range(len(self.finalClassifierList)):
tempY = self.finalClassifierList[i].predict(X)
predictYList.append(tempY)
predicYArray = np.transpose(np.array(predictYList))
alphaArray = np.array(self.alphaList)
temp = predicYArray * alphaArray
predictY = np.sum(temp, axis=1)
for i in range(len(predictY)):
if predictY[i] > threshold:
predictY[i] = 1
else:
predictY[i] = -1
return predictY
@staticmethod
def save(model, filename):
with open(filename, 'wb') as f:
pickle.dump(model, f)
@staticmethod
def load(filename):
with open(filename, 'rb') as f:
return pickle.load(f)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class AdaBoostClassifier:
<|reserved_special_token_0|>
def __init__(self, weak_classifier, n_weakers_limit):
"""Initialize AdaBoostClassifier
Args:
            weak_classifier: The class of weak classifier, which is recommended to be sklearn.tree.DecisionTreeClassifier.
            n_weakers_limit: The maximum number of weak classifiers the model can use.
"""
self.weakClassifier = weak_classifier
self.iteration = n_weakers_limit
def is_good_enough(self):
"""Optional"""
pass
def calculateError(self, y, predictY, weights):
"""
        Purpose: compute the weighted classification error.
        :param y: list of ground-truth labels
        :param predictY: list of predicted values
        :param weights: list of sample weights
        :return: the weighted error
"""
error = 0
for i in range(len(y)):
if y[i] != predictY[i]:
error += weights[i]
return error
def fit(self, X, y):
"""Build a boosted classifier from the training set (X, y).
Args:
X: An ndarray indicating the samples to be trained, which shape should be (n_samples,n_features).
            y: An ndarray indicating the ground-truth labels corresponding to X, which shape should be (n_samples,1).
"""
row, col = X.shape
weightArray = [1 / row] * row
self.alphaList = []
self.finalClassifierList = []
for i in range(self.iteration):
clf = self.weakClassifier(max_depth=2)
clf.fit(X, y, weightArray)
predictY = clf.predict(X)
error = self.calculateError(y, predictY, weightArray)
if error > 0.5:
break
else:
self.finalClassifierList.append(clf)
alpha = 0.5 * math.log((1 - error) / error)
self.alphaList.append(alpha)
aYH = alpha * y * predictY * -1
tempWeights = weightArray * np.exp(aYH)
tempSum = np.sum(tempWeights)
weightArray = tempWeights / tempSum
def predict_scores(self, X):
"""Calculate the weighted sum score of the whole base classifiers for given samples.
Args:
X: An ndarray indicating the samples to be predicted, which shape should be (n_samples,n_features).
Returns:
            A one-dimensional ndarray indicating the scores of different samples, which shape should be (n_samples,1).
"""
pass
def predict(self, X, threshold=0):
"""Predict the catagories for geven samples.
Args:
X: An ndarray indicating the samples to be predicted, which shape should be (n_samples,n_features).
            threshold: The demarcation number for dividing the samples into two parts.
Returns:
            An ndarray consisting of predicted labels, which shape should be (n_samples,1).
"""
predictYList = []
for i in range(len(self.finalClassifierList)):
tempY = self.finalClassifierList[i].predict(X)
predictYList.append(tempY)
predicYArray = np.transpose(np.array(predictYList))
alphaArray = np.array(self.alphaList)
temp = predicYArray * alphaArray
predictY = np.sum(temp, axis=1)
for i in range(len(predictY)):
if predictY[i] > threshold:
predictY[i] = 1
else:
predictY[i] = -1
return predictY
@staticmethod
def save(model, filename):
with open(filename, 'wb') as f:
pickle.dump(model, f)
@staticmethod
def load(filename):
with open(filename, 'rb') as f:
return pickle.load(f)
<|reserved_special_token_1|>
import pickle
import numpy as np
import math
class AdaBoostClassifier:
'''A simple AdaBoost Classifier.'''
def __init__(self, weak_classifier, n_weakers_limit):
'''Initialize AdaBoostClassifier
Args:
            weak_classifier: The class of weak classifier, which is recommended to be sklearn.tree.DecisionTreeClassifier.
            n_weakers_limit: The maximum number of weak classifiers the model can use.
'''
self.weakClassifier = weak_classifier
self.iteration = n_weakers_limit
def is_good_enough(self):
'''Optional'''
pass
def calculateError(self, y, predictY, weights):
"""
        Purpose: compute the weighted classification error.
        :param y: list of ground-truth labels
        :param predictY: list of predicted values
        :param weights: list of sample weights
        :return: the weighted error
"""
error = 0
for i in range(len(y)):
if y[i] != predictY[i]:
error += weights[i]
return error
def fit(self,X,y):
'''Build a boosted classifier from the training set (X, y).
Args:
X: An ndarray indicating the samples to be trained, which shape should be (n_samples,n_features).
            y: An ndarray indicating the ground-truth labels corresponding to X, which shape should be (n_samples,1).
'''
row, col = X.shape
weightArray = [(1 / row)] * row
self.alphaList = []
self.finalClassifierList = []
for i in range(self.iteration):
clf = self.weakClassifier(max_depth=2)
clf.fit(X,y,weightArray)
predictY = clf.predict(X)
error = self.calculateError(y, predictY, weightArray)
if error > 0.5:
break
else:
self.finalClassifierList.append(clf)
alpha = 0.5 * math.log((1-error) / error)
self.alphaList.append(alpha)
aYH = alpha * y * predictY * (-1)
tempWeights = weightArray * np.exp(aYH)
tempSum = np.sum(tempWeights)
weightArray = tempWeights / tempSum
def predict_scores(self, X):
'''Calculate the weighted sum score of the whole base classifiers for given samples.
Args:
X: An ndarray indicating the samples to be predicted, which shape should be (n_samples,n_features).
Returns:
            A one-dimensional ndarray indicating the scores of different samples, which shape should be (n_samples,1).
'''
pass
def predict(self, X, threshold=0):
        '''Predict the categories for given samples.
Args:
X: An ndarray indicating the samples to be predicted, which shape should be (n_samples,n_features).
            threshold: The demarcation number for dividing the samples into two parts.
Returns:
            An ndarray consisting of predicted labels, which shape should be (n_samples,1).
'''
predictYList = []
for i in range(len(self.finalClassifierList)):
tempY = self.finalClassifierList[i].predict(X)
predictYList.append(tempY)
predicYArray = np.transpose(np.array(predictYList))
alphaArray = np.array(self.alphaList)
temp = predicYArray * alphaArray
predictY = np.sum(temp, axis = 1)
for i in range(len(predictY)):
if predictY[i] > threshold:
predictY[i] = 1
else:
predictY[i] = -1
return predictY
@staticmethod
def save(model, filename):
with open(filename, "wb") as f:
pickle.dump(model, f)
@staticmethod
def load(filename):
with open(filename, "rb") as f:
return pickle.load(f)
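
# --- Illustrative usage sketch (added; not part of the original source above) ---
# A minimal sketch assuming scikit-learn is available and that labels are
# encoded as -1/+1, which is what fit() and predict() above expect.
import numpy as np
from sklearn.datasets import make_classification
from sklearn.tree import DecisionTreeClassifier

X, y = make_classification(n_samples=200, n_features=10, random_state=0)
y = np.where(y == 0, -1, 1)          # map the {0, 1} labels to {-1, +1}

booster = AdaBoostClassifier(DecisionTreeClassifier, n_weakers_limit=10)
booster.fit(X, y)
train_acc = np.mean(booster.predict(X) == y)
print('training accuracy: %.3f' % train_acc)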
|
flexible
|
{
"blob_id": "905d8be76ef245a2b8fcfb3f806f8922d351ecf0",
"index": 8877,
"step-1": "<mask token>\n\n\nclass AdaBoostClassifier:\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def predict(self, X, threshold=0):\n \"\"\"Predict the catagories for geven samples.\n\n Args:\n X: An ndarray indicating the samples to be predicted, which shape should be (n_samples,n_features).\n threshold: The demarcation number of deviding the samples into two parts.\n\n Returns:\n An ndarray consists of predicted labels, which shape should be (n_samples,1).\n \"\"\"\n predictYList = []\n for i in range(len(self.finalClassifierList)):\n tempY = self.finalClassifierList[i].predict(X)\n predictYList.append(tempY)\n predicYArray = np.transpose(np.array(predictYList))\n alphaArray = np.array(self.alphaList)\n temp = predicYArray * alphaArray\n predictY = np.sum(temp, axis=1)\n for i in range(len(predictY)):\n if predictY[i] > threshold:\n predictY[i] = 1\n else:\n predictY[i] = -1\n return predictY\n <mask token>\n\n @staticmethod\n def load(filename):\n with open(filename, 'rb') as f:\n return pickle.load(f)\n",
"step-2": "<mask token>\n\n\nclass AdaBoostClassifier:\n <mask token>\n <mask token>\n\n def is_good_enough(self):\n \"\"\"Optional\"\"\"\n pass\n <mask token>\n\n def fit(self, X, y):\n \"\"\"Build a boosted classifier from the training set (X, y).\n\n Args:\n X: An ndarray indicating the samples to be trained, which shape should be (n_samples,n_features).\n y: An ndarray indicating the ground-truth labels correspond to X, which shape should be (n_samples,1).\n \"\"\"\n row, col = X.shape\n weightArray = [1 / row] * row\n self.alphaList = []\n self.finalClassifierList = []\n for i in range(self.iteration):\n clf = self.weakClassifier(max_depth=2)\n clf.fit(X, y, weightArray)\n predictY = clf.predict(X)\n error = self.calculateError(y, predictY, weightArray)\n if error > 0.5:\n break\n else:\n self.finalClassifierList.append(clf)\n alpha = 0.5 * math.log((1 - error) / error)\n self.alphaList.append(alpha)\n aYH = alpha * y * predictY * -1\n tempWeights = weightArray * np.exp(aYH)\n tempSum = np.sum(tempWeights)\n weightArray = tempWeights / tempSum\n\n def predict_scores(self, X):\n \"\"\"Calculate the weighted sum score of the whole base classifiers for given samples.\n\n Args:\n X: An ndarray indicating the samples to be predicted, which shape should be (n_samples,n_features).\n\n Returns:\n An one-dimension ndarray indicating the scores of differnt samples, which shape should be (n_samples,1).\n \"\"\"\n pass\n\n def predict(self, X, threshold=0):\n \"\"\"Predict the catagories for geven samples.\n\n Args:\n X: An ndarray indicating the samples to be predicted, which shape should be (n_samples,n_features).\n threshold: The demarcation number of deviding the samples into two parts.\n\n Returns:\n An ndarray consists of predicted labels, which shape should be (n_samples,1).\n \"\"\"\n predictYList = []\n for i in range(len(self.finalClassifierList)):\n tempY = self.finalClassifierList[i].predict(X)\n predictYList.append(tempY)\n predicYArray = np.transpose(np.array(predictYList))\n alphaArray = np.array(self.alphaList)\n temp = predicYArray * alphaArray\n predictY = np.sum(temp, axis=1)\n for i in range(len(predictY)):\n if predictY[i] > threshold:\n predictY[i] = 1\n else:\n predictY[i] = -1\n return predictY\n\n @staticmethod\n def save(model, filename):\n with open(filename, 'wb') as f:\n pickle.dump(model, f)\n\n @staticmethod\n def load(filename):\n with open(filename, 'rb') as f:\n return pickle.load(f)\n",
"step-3": "<mask token>\n\n\nclass AdaBoostClassifier:\n <mask token>\n <mask token>\n\n def is_good_enough(self):\n \"\"\"Optional\"\"\"\n pass\n\n def calculateError(self, y, predictY, weights):\n \"\"\"\n\t\t函数作用:计算误差\n :param y:列表,标签\n :param predictY:列表,元素是预测值\n :param weights:列表,权重值\n :return:误差\n \"\"\"\n error = 0\n for i in range(len(y)):\n if y[i] != predictY[i]:\n error += weights[i]\n return error\n\n def fit(self, X, y):\n \"\"\"Build a boosted classifier from the training set (X, y).\n\n Args:\n X: An ndarray indicating the samples to be trained, which shape should be (n_samples,n_features).\n y: An ndarray indicating the ground-truth labels correspond to X, which shape should be (n_samples,1).\n \"\"\"\n row, col = X.shape\n weightArray = [1 / row] * row\n self.alphaList = []\n self.finalClassifierList = []\n for i in range(self.iteration):\n clf = self.weakClassifier(max_depth=2)\n clf.fit(X, y, weightArray)\n predictY = clf.predict(X)\n error = self.calculateError(y, predictY, weightArray)\n if error > 0.5:\n break\n else:\n self.finalClassifierList.append(clf)\n alpha = 0.5 * math.log((1 - error) / error)\n self.alphaList.append(alpha)\n aYH = alpha * y * predictY * -1\n tempWeights = weightArray * np.exp(aYH)\n tempSum = np.sum(tempWeights)\n weightArray = tempWeights / tempSum\n\n def predict_scores(self, X):\n \"\"\"Calculate the weighted sum score of the whole base classifiers for given samples.\n\n Args:\n X: An ndarray indicating the samples to be predicted, which shape should be (n_samples,n_features).\n\n Returns:\n An one-dimension ndarray indicating the scores of differnt samples, which shape should be (n_samples,1).\n \"\"\"\n pass\n\n def predict(self, X, threshold=0):\n \"\"\"Predict the catagories for geven samples.\n\n Args:\n X: An ndarray indicating the samples to be predicted, which shape should be (n_samples,n_features).\n threshold: The demarcation number of deviding the samples into two parts.\n\n Returns:\n An ndarray consists of predicted labels, which shape should be (n_samples,1).\n \"\"\"\n predictYList = []\n for i in range(len(self.finalClassifierList)):\n tempY = self.finalClassifierList[i].predict(X)\n predictYList.append(tempY)\n predicYArray = np.transpose(np.array(predictYList))\n alphaArray = np.array(self.alphaList)\n temp = predicYArray * alphaArray\n predictY = np.sum(temp, axis=1)\n for i in range(len(predictY)):\n if predictY[i] > threshold:\n predictY[i] = 1\n else:\n predictY[i] = -1\n return predictY\n\n @staticmethod\n def save(model, filename):\n with open(filename, 'wb') as f:\n pickle.dump(model, f)\n\n @staticmethod\n def load(filename):\n with open(filename, 'rb') as f:\n return pickle.load(f)\n",
"step-4": "<mask token>\n\n\nclass AdaBoostClassifier:\n <mask token>\n\n def __init__(self, weak_classifier, n_weakers_limit):\n \"\"\"Initialize AdaBoostClassifier\n\n Args:\n weak_classifier: The class of weak classifier, which is recommend to be sklearn.tree.DecisionTreeClassifier.\n n_weakers_limit: The maximum number of weak classifier the model can use.\n \"\"\"\n self.weakClassifier = weak_classifier\n self.iteration = n_weakers_limit\n\n def is_good_enough(self):\n \"\"\"Optional\"\"\"\n pass\n\n def calculateError(self, y, predictY, weights):\n \"\"\"\n\t\t函数作用:计算误差\n :param y:列表,标签\n :param predictY:列表,元素是预测值\n :param weights:列表,权重值\n :return:误差\n \"\"\"\n error = 0\n for i in range(len(y)):\n if y[i] != predictY[i]:\n error += weights[i]\n return error\n\n def fit(self, X, y):\n \"\"\"Build a boosted classifier from the training set (X, y).\n\n Args:\n X: An ndarray indicating the samples to be trained, which shape should be (n_samples,n_features).\n y: An ndarray indicating the ground-truth labels correspond to X, which shape should be (n_samples,1).\n \"\"\"\n row, col = X.shape\n weightArray = [1 / row] * row\n self.alphaList = []\n self.finalClassifierList = []\n for i in range(self.iteration):\n clf = self.weakClassifier(max_depth=2)\n clf.fit(X, y, weightArray)\n predictY = clf.predict(X)\n error = self.calculateError(y, predictY, weightArray)\n if error > 0.5:\n break\n else:\n self.finalClassifierList.append(clf)\n alpha = 0.5 * math.log((1 - error) / error)\n self.alphaList.append(alpha)\n aYH = alpha * y * predictY * -1\n tempWeights = weightArray * np.exp(aYH)\n tempSum = np.sum(tempWeights)\n weightArray = tempWeights / tempSum\n\n def predict_scores(self, X):\n \"\"\"Calculate the weighted sum score of the whole base classifiers for given samples.\n\n Args:\n X: An ndarray indicating the samples to be predicted, which shape should be (n_samples,n_features).\n\n Returns:\n An one-dimension ndarray indicating the scores of differnt samples, which shape should be (n_samples,1).\n \"\"\"\n pass\n\n def predict(self, X, threshold=0):\n \"\"\"Predict the catagories for geven samples.\n\n Args:\n X: An ndarray indicating the samples to be predicted, which shape should be (n_samples,n_features).\n threshold: The demarcation number of deviding the samples into two parts.\n\n Returns:\n An ndarray consists of predicted labels, which shape should be (n_samples,1).\n \"\"\"\n predictYList = []\n for i in range(len(self.finalClassifierList)):\n tempY = self.finalClassifierList[i].predict(X)\n predictYList.append(tempY)\n predicYArray = np.transpose(np.array(predictYList))\n alphaArray = np.array(self.alphaList)\n temp = predicYArray * alphaArray\n predictY = np.sum(temp, axis=1)\n for i in range(len(predictY)):\n if predictY[i] > threshold:\n predictY[i] = 1\n else:\n predictY[i] = -1\n return predictY\n\n @staticmethod\n def save(model, filename):\n with open(filename, 'wb') as f:\n pickle.dump(model, f)\n\n @staticmethod\n def load(filename):\n with open(filename, 'rb') as f:\n return pickle.load(f)\n",
"step-5": "import pickle\nimport numpy as np\nimport math\n\nclass AdaBoostClassifier:\n '''A simple AdaBoost Classifier.'''\n\n def __init__(self, weak_classifier, n_weakers_limit):\n '''Initialize AdaBoostClassifier\n\n Args:\n weak_classifier: The class of weak classifier, which is recommend to be sklearn.tree.DecisionTreeClassifier.\n n_weakers_limit: The maximum number of weak classifier the model can use.\n '''\n self.weakClassifier = weak_classifier\n self.iteration = n_weakers_limit\n\n def is_good_enough(self):\n '''Optional'''\n pass\n\n def calculateError(self, y, predictY, weights):\n \"\"\"\n\t\t函数作用:计算误差\n :param y:列表,标签\n :param predictY:列表,元素是预测值\n :param weights:列表,权重值\n :return:误差\n \"\"\"\n error = 0\n for i in range(len(y)):\n if y[i] != predictY[i]:\n error += weights[i]\n return error\n\n def fit(self,X,y):\n '''Build a boosted classifier from the training set (X, y).\n\n Args:\n X: An ndarray indicating the samples to be trained, which shape should be (n_samples,n_features).\n y: An ndarray indicating the ground-truth labels correspond to X, which shape should be (n_samples,1).\n '''\n row, col = X.shape\n weightArray = [(1 / row)] * row\n self.alphaList = []\n self.finalClassifierList = []\n for i in range(self.iteration):\n clf = self.weakClassifier(max_depth=2)\n clf.fit(X,y,weightArray)\n predictY = clf.predict(X)\n error = self.calculateError(y, predictY, weightArray)\n if error > 0.5:\n break\n else:\n self.finalClassifierList.append(clf)\n alpha = 0.5 * math.log((1-error) / error)\n self.alphaList.append(alpha)\n aYH = alpha * y * predictY * (-1)\n tempWeights = weightArray * np.exp(aYH)\n tempSum = np.sum(tempWeights)\n weightArray = tempWeights / tempSum\n\n def predict_scores(self, X):\n '''Calculate the weighted sum score of the whole base classifiers for given samples.\n\n Args:\n X: An ndarray indicating the samples to be predicted, which shape should be (n_samples,n_features).\n\n Returns:\n An one-dimension ndarray indicating the scores of differnt samples, which shape should be (n_samples,1).\n '''\n\n pass\n\n def predict(self, X, threshold=0):\n '''Predict the catagories for geven samples.\n\n Args:\n X: An ndarray indicating the samples to be predicted, which shape should be (n_samples,n_features).\n threshold: The demarcation number of deviding the samples into two parts.\n\n Returns:\n An ndarray consists of predicted labels, which shape should be (n_samples,1).\n '''\n predictYList = []\n for i in range(len(self.finalClassifierList)):\n tempY = self.finalClassifierList[i].predict(X)\n predictYList.append(tempY)\n predicYArray = np.transpose(np.array(predictYList))\n alphaArray = np.array(self.alphaList)\n temp = predicYArray * alphaArray\n predictY = np.sum(temp, axis = 1)\n for i in range(len(predictY)):\n if predictY[i] > threshold:\n predictY[i] = 1\n else:\n predictY[i] = -1\n return predictY\n\n @staticmethod\n def save(model, filename):\n with open(filename, \"wb\") as f:\n pickle.dump(model, f)\n\n @staticmethod\n def load(filename):\n with open(filename, \"rb\") as f:\n return pickle.load(f)\n",
"step-ids": [
3,
7,
8,
9,
12
]
}
|
[
3,
7,
8,
9,
12
] |
import gc
import unittest
import numpy as np
from pydrake.autodiffutils import AutoDiffXd
from pydrake.common import RandomDistribution, RandomGenerator
from pydrake.common.test_utilities import numpy_compare
from pydrake.common.test_utilities.deprecation import catch_drake_warnings
from pydrake.common.value import Value
from pydrake.symbolic import Expression, Variable
from pydrake.systems.framework import (
BasicVector,
DiagramBuilder,
DiagramBuilder_,
InputPort,
TriggerType,
VectorBase,
)
from pydrake.systems.test.test_util import (
MyVector2,
)
from pydrake.systems.primitives import (
Adder, Adder_,
AddRandomInputs,
AffineSystem, AffineSystem_,
ConstantValueSource, ConstantValueSource_,
ConstantVectorSource, ConstantVectorSource_,
ControllabilityMatrix,
Demultiplexer, Demultiplexer_,
DiscreteDerivative, DiscreteDerivative_,
DiscreteTimeDelay, DiscreteTimeDelay_,
FirstOrderLowPassFilter,
FirstOrderTaylorApproximation,
Gain, Gain_,
Integrator, Integrator_,
IsControllable,
IsDetectable,
IsObservable,
IsStabilizable,
Linearize,
LinearSystem, LinearSystem_,
LinearTransformDensity, LinearTransformDensity_,
LogVectorOutput,
MatrixGain,
Multiplexer, Multiplexer_,
MultilayerPerceptron, MultilayerPerceptron_,
ObservabilityMatrix,
PassThrough, PassThrough_,
PerceptronActivationType,
PortSwitch, PortSwitch_,
RandomSource,
Saturation, Saturation_,
SharedPointerSystem, SharedPointerSystem_,
Sine, Sine_,
StateInterpolatorWithDiscreteDerivative,
StateInterpolatorWithDiscreteDerivative_,
SymbolicVectorSystem, SymbolicVectorSystem_,
TrajectoryAffineSystem, TrajectoryAffineSystem_,
TrajectoryLinearSystem, TrajectoryLinearSystem_,
TrajectorySource, TrajectorySource_,
VectorLog, VectorLogSink, VectorLogSink_,
WrapToSystem, WrapToSystem_,
ZeroOrderHold, ZeroOrderHold_,
)
from pydrake.trajectories import PiecewisePolynomial
def compare_value(test, a, b):
# Compares a vector or abstract value.
if isinstance(a, VectorBase):
test.assertTrue(np.allclose(a.get_value(), b.get_value()))
else:
test.assertEqual(type(a.get_value()), type(b.get_value()))
test.assertEqual(a.get_value(), b.get_value())
class TestGeneral(unittest.TestCase):
def _check_instantiations(self, template, supports_symbolic=True):
default_cls = template[None]
self.assertTrue(template[float] is default_cls)
self.assertTrue(template[AutoDiffXd] is not default_cls)
if supports_symbolic:
self.assertTrue(template[Expression] is not default_cls)
def test_instantiations(self):
# TODO(eric.cousineau): Refine tests once NumPy functionality is
# resolved for dtype=object, or dtype=custom is used.
self._check_instantiations(Adder_)
self._check_instantiations(AffineSystem_)
self._check_instantiations(ConstantValueSource_)
self._check_instantiations(ConstantVectorSource_)
self._check_instantiations(Demultiplexer_)
self._check_instantiations(DiscreteDerivative_)
self._check_instantiations(DiscreteTimeDelay_)
self._check_instantiations(Gain_)
self._check_instantiations(Integrator_)
self._check_instantiations(LinearSystem_)
self._check_instantiations(LinearTransformDensity_,
supports_symbolic=False)
self._check_instantiations(Multiplexer_)
self._check_instantiations(MultilayerPerceptron_)
self._check_instantiations(PassThrough_)
self._check_instantiations(PortSwitch_)
self._check_instantiations(Saturation_)
self._check_instantiations(SharedPointerSystem_)
self._check_instantiations(Sine_)
self._check_instantiations(StateInterpolatorWithDiscreteDerivative_)
self._check_instantiations(SymbolicVectorSystem_)
self._check_instantiations(TrajectoryAffineSystem_,
supports_symbolic=False)
self._check_instantiations(TrajectoryLinearSystem_,
supports_symbolic=False)
self._check_instantiations(TrajectorySource_)
self._check_instantiations(VectorLogSink_)
self._check_instantiations(WrapToSystem_)
self._check_instantiations(ZeroOrderHold_)
def test_linear_affine_system(self):
# Just make sure linear system is spelled correctly.
A = np.identity(2)
B = np.array([[0], [1]])
f0 = np.array([[0], [0]])
C = np.array([[0, 1]])
D = [1]
y0 = [0]
system = LinearSystem(A, B, C, D)
context = system.CreateDefaultContext()
self.assertEqual(system.get_input_port(0).size(), 1)
self.assertEqual(context
.get_mutable_continuous_state_vector().size(), 2)
self.assertEqual(system.get_output_port(0).size(), 1)
self.assertTrue((system.A() == A).all())
self.assertTrue((system.B() == B).all())
self.assertTrue((system.f0() == f0).all())
self.assertTrue((system.C() == C).all())
self.assertEqual(system.D(), D)
self.assertEqual(system.y0(), y0)
self.assertEqual(system.time_period(), 0.)
x0 = np.array([1, 2])
system.configure_default_state(x0=x0)
system.SetDefaultContext(context)
np.testing.assert_equal(
context.get_continuous_state_vector().CopyToVector(), x0)
generator = RandomGenerator()
system.SetRandomContext(context, generator)
np.testing.assert_equal(
context.get_continuous_state_vector().CopyToVector(), x0)
system.configure_random_state(covariance=np.eye(2))
system.SetRandomContext(context, generator)
self.assertNotEqual(
context.get_continuous_state_vector().CopyToVector()[1], x0[1])
Co = ControllabilityMatrix(system)
self.assertEqual(Co.shape, (2, 2))
self.assertFalse(IsControllable(system))
self.assertFalse(IsControllable(system, 1e-6))
self.assertFalse(IsStabilizable(sys=system))
self.assertFalse(IsStabilizable(sys=system, threshold=1e-6))
Ob = ObservabilityMatrix(system)
self.assertEqual(Ob.shape, (2, 2))
self.assertFalse(IsObservable(system))
self.assertFalse(IsDetectable(sys=system))
self.assertFalse(IsDetectable(sys=system, threshold=1e-6))
system = AffineSystem(A, B, f0, C, D, y0, .1)
self.assertEqual(system.get_input_port(0), system.get_input_port())
self.assertEqual(system.get_output_port(0), system.get_output_port())
context = system.CreateDefaultContext()
self.assertEqual(system.get_input_port(0).size(), 1)
self.assertEqual(context.get_discrete_state_vector().size(), 2)
self.assertEqual(system.get_output_port(0).size(), 1)
self.assertTrue((system.A() == A).all())
self.assertTrue((system.B() == B).all())
self.assertTrue((system.f0() == f0).all())
self.assertTrue((system.C() == C).all())
self.assertEqual(system.D(), D)
self.assertEqual(system.y0(), y0)
self.assertEqual(system.time_period(), .1)
system.get_input_port(0).FixValue(context, 0)
linearized = Linearize(system, context)
self.assertTrue((linearized.A() == A).all())
taylor = FirstOrderTaylorApproximation(system, context)
self.assertTrue((taylor.y0() == y0).all())
new_A = np.array([[1, 2], [3, 4]])
new_B = np.array([[5], [6]])
new_f0 = np.array([[7], [8]])
new_C = np.array([[9, 10]])
new_D = np.array([[11]])
new_y0 = np.array([12])
system.UpdateCoefficients(
A=new_A, B=new_B, f0=new_f0, C=new_C, D=new_D, y0=new_y0
)
np.testing.assert_equal(new_A, system.A())
np.testing.assert_equal(new_B, system.B())
np.testing.assert_equal(new_f0.flatten(), system.f0())
np.testing.assert_equal(new_C, system.C())
np.testing.assert_equal(new_D, system.D())
np.testing.assert_equal(new_y0, system.y0())
system = MatrixGain(D=A)
self.assertTrue((system.D() == A).all())
system = TrajectoryAffineSystem(
PiecewisePolynomial(A),
PiecewisePolynomial(B),
PiecewisePolynomial(f0),
PiecewisePolynomial(C),
PiecewisePolynomial(D),
PiecewisePolynomial(y0),
.1)
self.assertEqual(system.get_input_port(0), system.get_input_port())
self.assertEqual(system.get_output_port(0), system.get_output_port())
context = system.CreateDefaultContext()
self.assertEqual(system.get_input_port(0).size(), 1)
self.assertEqual(context.get_discrete_state_vector().size(), 2)
self.assertEqual(system.get_output_port(0).size(), 1)
for t in np.linspace(0., 1., 5):
self.assertTrue((system.A(t) == A).all())
self.assertTrue((system.B(t) == B).all())
self.assertTrue((system.f0(t) == f0).all())
self.assertTrue((system.C(t) == C).all())
self.assertEqual(system.D(t), D)
self.assertEqual(system.y0(t), y0)
self.assertEqual(system.time_period(), .1)
x0 = np.array([1, 2])
system.configure_default_state(x0=x0)
system.SetDefaultContext(context)
np.testing.assert_equal(
context.get_discrete_state_vector().CopyToVector(), x0)
generator = RandomGenerator()
system.SetRandomContext(context, generator)
np.testing.assert_equal(
context.get_discrete_state_vector().CopyToVector(), x0)
system.configure_random_state(covariance=np.eye(2))
system.SetRandomContext(context, generator)
self.assertNotEqual(
context.get_discrete_state_vector().CopyToVector()[1], x0[1])
system = TrajectoryLinearSystem(
A=PiecewisePolynomial(A),
B=PiecewisePolynomial(B),
C=PiecewisePolynomial(C),
D=PiecewisePolynomial(D),
time_period=0.1)
self.assertEqual(system.time_period(), .1)
system.configure_default_state(x0=np.array([1, 2]))
system.configure_random_state(covariance=np.eye(2))
def test_linear_affine_system_empty_matrices(self):
# Confirm the default values for the system matrices in the
# constructor.
def CheckSizes(system, num_states, num_inputs, num_outputs):
self.assertEqual(system.num_continuous_states(), num_states)
self.assertEqual(system.num_inputs(), num_inputs)
self.assertEqual(system.num_outputs(), num_outputs)
# A constant vector system.
system = AffineSystem(y0=[2, 1])
CheckSizes(system, num_states=0, num_inputs=0, num_outputs=2)
# A matrix gain.
system = AffineSystem(D=np.eye(2))
CheckSizes(system, num_states=0, num_inputs=2, num_outputs=2)
system = LinearSystem(D=np.eye(2))
CheckSizes(system, num_states=0, num_inputs=2, num_outputs=2)
# Add an offset.
system = AffineSystem(D=np.eye(2), y0=[1, 2])
CheckSizes(system, num_states=0, num_inputs=2, num_outputs=2)
# An integrator.
system = LinearSystem(B=np.eye(2))
CheckSizes(system, num_states=2, num_inputs=2, num_outputs=0)
def test_linear_system_zero_size(self):
# Explicitly test #12633.
num_x = 0
num_y = 2
num_u = 2
A = np.zeros((num_x, num_x))
B = np.zeros((num_x, num_u))
C = np.zeros((num_y, num_x))
D = np.zeros((num_y, num_u))
self.assertIsNotNone(LinearSystem(A, B, C, D))
@numpy_compare.check_nonsymbolic_types
def test_linear_transform_density(self, T):
dut = LinearTransformDensity_[T](
distribution=RandomDistribution.kGaussian,
input_size=3,
output_size=3)
w_in = np.array([T(0.5), T(0.1), T(1.5)])
context = dut.CreateDefaultContext()
dut.get_input_port_w_in().FixValue(context, w_in)
self.assertEqual(dut.get_input_port_A().size(), 9)
self.assertEqual(dut.get_input_port_b().size(), 3)
self.assertEqual(dut.get_distribution(), RandomDistribution.kGaussian)
A = np.array([
[T(0.5), T(1), T(2)], [T(1), T(2), T(3)], [T(3), T(4), T(5)]])
dut.FixConstantA(context=context, A=A)
b = np.array([T(1), T(2), T(3)])
dut.FixConstantB(context=context, b=b)
dut.CalcDensity(context=context)
self.assertEqual(dut.get_output_port_w_out().size(), 3)
self.assertEqual(dut.get_output_port_w_out_density().size(), 1)
def test_vector_pass_through(self):
model_value = BasicVector([1., 2, 3])
system = PassThrough(vector_size=model_value.size())
context = system.CreateDefaultContext()
system.get_input_port(0).FixValue(context, model_value)
output = system.AllocateOutput()
input_eval = system.EvalVectorInput(context, 0)
compare_value(self, input_eval, model_value)
system.CalcOutput(context, output)
output_value = output.get_vector_data(0)
compare_value(self, output_value, model_value)
def test_default_vector_pass_through(self):
model_value = [1., 2, 3]
system = PassThrough(value=model_value)
context = system.CreateDefaultContext()
np.testing.assert_array_equal(
model_value, system.get_output_port().Eval(context))
def test_abstract_pass_through(self):
model_value = Value("Hello world")
system = PassThrough(abstract_model_value=model_value)
context = system.CreateDefaultContext()
system.get_input_port(0).FixValue(context, model_value)
output = system.AllocateOutput()
input_eval = system.EvalAbstractInput(context, 0)
compare_value(self, input_eval, model_value)
system.CalcOutput(context, output)
output_value = output.get_data(0)
compare_value(self, output_value, model_value)
def test_port_switch(self):
system = PortSwitch(vector_size=2)
a = system.DeclareInputPort(name="a")
system.DeclareInputPort(name="b")
context = system.CreateDefaultContext()
self.assertIsInstance(a, InputPort)
system.get_port_selector_input_port().FixValue(context, a.get_index())
def test_first_order_low_pass_filter(self):
filter1 = FirstOrderLowPassFilter(time_constant=3.0, size=4)
self.assertEqual(filter1.get_time_constant(), 3.0)
alpha = np.array([1, 2, 3])
filter2 = FirstOrderLowPassFilter(time_constants=alpha)
np.testing.assert_array_equal(filter2.get_time_constants_vector(),
alpha)
context = filter2.CreateDefaultContext()
filter2.set_initial_output_value(context, [0., -0.2, 0.4])
def test_gain(self):
k = 42.
input_size = 10
systems = [Gain(k=k, size=input_size),
Gain(k=k*np.ones(input_size))]
for system in systems:
context = system.CreateDefaultContext()
output = system.AllocateOutput()
def mytest(input, expected):
system.get_input_port(0).FixValue(context, input)
system.CalcOutput(context, output)
self.assertTrue(np.allclose(output.get_vector_data(
0).CopyToVector(), expected))
test_input = np.arange(input_size)
mytest(np.arange(input_size), k*np.arange(input_size))
def test_saturation(self):
system = Saturation((0., -1., 3.), (1., 2., 4.))
context = system.CreateDefaultContext()
output = system.AllocateOutput()
def mytest(input, expected):
system.get_input_port(0).FixValue(context, input)
system.CalcOutput(context, output)
self.assertTrue(np.allclose(output.get_vector_data(
0).CopyToVector(), expected))
mytest((-5., 5., 4.), (0., 2., 4.))
mytest((.4, 0., 3.5), (.4, 0., 3.5))
def test_trajectory_source(self):
ppt = PiecewisePolynomial.FirstOrderHold(
[0., 1.], [[2., 3.], [2., 1.]])
system = TrajectorySource(trajectory=ppt,
output_derivative_order=0,
zero_derivatives_beyond_limits=True)
context = system.CreateDefaultContext()
output = system.AllocateOutput()
def mytest(input, expected):
context.SetTime(input)
system.CalcOutput(context, output)
self.assertTrue(np.allclose(output.get_vector_data(
0).CopyToVector(), expected))
mytest(0.0, (2.0, 2.0))
mytest(0.5, (2.5, 1.5))
mytest(1.0, (3.0, 1.0))
ppt2 = PiecewisePolynomial.FirstOrderHold(
[0., 1.], [[4., 6.], [4., 2.]])
system.UpdateTrajectory(trajectory=ppt2)
mytest(0.0, (4.0, 4.0))
mytest(0.5, (5.0, 3.0))
mytest(1.0, (6.0, 2.0))
def test_symbolic_vector_system(self):
t = Variable("t")
x = [Variable("x0"), Variable("x1")]
u = [Variable("u0"), Variable("u1")]
system = SymbolicVectorSystem(time=t, state=x, input=u,
dynamics=[x[0] + x[1], t],
output=[u[1]],
time_period=0.0)
context = system.CreateDefaultContext()
self.assertEqual(context.num_continuous_states(), 2)
self.assertEqual(context.num_discrete_state_groups(), 0)
self.assertEqual(system.get_input_port(0).size(), 2)
self.assertEqual(system.get_output_port(0).size(), 1)
self.assertEqual(context.num_abstract_parameters(), 0)
self.assertEqual(context.num_numeric_parameter_groups(), 0)
self.assertTrue(system.dynamics_for_variable(x[0])
.EqualTo(x[0] + x[1]))
self.assertTrue(system.dynamics_for_variable(x[1])
.EqualTo(t))
def test_symbolic_vector_system_parameters(self):
t = Variable("t")
x = [Variable("x0"), Variable("x1")]
u = [Variable("u0"), Variable("u1")]
p = [Variable("p0"), Variable("p1")]
system = SymbolicVectorSystem(time=t, state=x, input=u,
parameter=p,
dynamics=[p[0] * x[0] + x[1] + p[1], t],
output=[u[1]],
time_period=0.0)
context = system.CreateDefaultContext()
self.assertEqual(context.num_continuous_states(), 2)
self.assertEqual(context.num_discrete_state_groups(), 0)
self.assertEqual(system.get_input_port(0).size(), 2)
self.assertEqual(system.get_output_port(0).size(), 1)
self.assertEqual(context.num_abstract_parameters(), 0)
self.assertEqual(context.num_numeric_parameter_groups(), 1)
self.assertEqual(context.get_numeric_parameter(0).size(), 2)
self.assertTrue(system.dynamics_for_variable(x[0])
.EqualTo(p[0] * x[0] + x[1] + p[1]))
self.assertTrue(system.dynamics_for_variable(x[1])
.EqualTo(t))
def test_wrap_to_system(self):
system = WrapToSystem(2)
system.set_interval(1, 1., 2.)
context = system.CreateDefaultContext()
output = system.AllocateOutput()
def mytest(input, expected):
system.get_input_port(0).FixValue(context, input)
system.CalcOutput(context, output)
self.assertTrue(np.allclose(output.get_vector_data(
0).CopyToVector(), expected))
mytest((-1.5, 0.5), (-1.5, 1.5))
mytest((.2, .3), (.2, 1.3))
def test_demultiplexer(self):
# Test demultiplexer with scalar outputs.
demux = Demultiplexer(size=4)
context = demux.CreateDefaultContext()
self.assertEqual(demux.num_input_ports(), 1)
self.assertEqual(demux.num_output_ports(), 4)
numpy_compare.assert_equal(demux.get_output_ports_sizes(),
[1, 1, 1, 1])
input_vec = np.array([1., 2., 3., 4.])
demux.get_input_port(0).FixValue(context, input_vec)
output = demux.AllocateOutput()
demux.CalcOutput(context, output)
for i in range(4):
self.assertTrue(
np.allclose(output.get_vector_data(i).get_value(),
input_vec[i]))
# Test demultiplexer with vector outputs.
demux = Demultiplexer(size=4, output_ports_size=2)
context = demux.CreateDefaultContext()
self.assertEqual(demux.num_input_ports(), 1)
self.assertEqual(demux.num_output_ports(), 2)
numpy_compare.assert_equal(demux.get_output_ports_sizes(), [2, 2])
demux.get_input_port(0).FixValue(context, input_vec)
output = demux.AllocateOutput()
demux.CalcOutput(context, output)
for i in range(2):
self.assertTrue(
np.allclose(output.get_vector_data(i).get_value(),
input_vec[2*i:2*i+2]))
# Test demultiplexer with different output port sizes.
output_ports_sizes = np.array([1, 2, 1])
num_output_ports = output_ports_sizes.size
input_vec = np.array([1., 2., 3., 4.])
demux = Demultiplexer(output_ports_sizes=output_ports_sizes)
context = demux.CreateDefaultContext()
self.assertEqual(demux.num_input_ports(), 1)
self.assertEqual(demux.num_output_ports(), num_output_ports)
numpy_compare.assert_equal(demux.get_output_ports_sizes(),
output_ports_sizes)
demux.get_input_port(0).FixValue(context, input_vec)
output = demux.AllocateOutput()
demux.CalcOutput(context, output)
output_port_start = 0
for i in range(num_output_ports):
output_port_size = output.get_vector_data(i).size()
self.assertTrue(
np.allclose(output.get_vector_data(i).get_value(),
input_vec[output_port_start:
output_port_start+output_port_size]))
output_port_start += output_port_size
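
    # Multiplexer concatenates its vector inputs into a single output port; it
    # can be sized by num_scalar_inputs, by explicit input_sizes, or by a
    # model_vector, in which case the output carries the model vector's type.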
def test_multiplexer(self):
my_vector = MyVector2(data=[1., 2.])
test_cases = [
dict(has_vector=False, mux=Multiplexer(num_scalar_inputs=4),
data=[[5.], [3.], [4.], [2.]]),
dict(has_vector=False, mux=Multiplexer(input_sizes=[2, 3]),
data=[[8., 4.], [3., 6., 9.]]),
dict(has_vector=True, mux=Multiplexer(model_vector=my_vector),
data=[[42.], [3.]]),
]
for case in test_cases:
mux = case['mux']
port_size = sum([len(vec) for vec in case['data']])
self.assertEqual(mux.get_output_port(0).size(), port_size)
context = mux.CreateDefaultContext()
output = mux.AllocateOutput()
num_ports = len(case['data'])
self.assertEqual(context.num_input_ports(), num_ports)
for j, vec in enumerate(case['data']):
mux.get_input_port(j).FixValue(context, vec)
mux.CalcOutput(context, output)
self.assertTrue(
np.allclose(output.get_vector_data(0).get_value(),
[elem for vec in case['data'] for elem in vec]))
if case['has_vector']:
# Check the type matches MyVector2.
value = output.get_vector_data(0)
self.assertTrue(isinstance(value, MyVector2))
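
    # MultilayerPerceptron stores its weights and biases as context
    # parameters; the Get*/Set* accessors below work either on a context or
    # directly on a raw parameter vector.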
def test_multilayer_perceptron(self):
mlp = MultilayerPerceptron(
layers=[1, 2, 3], activation_type=PerceptronActivationType.kReLU)
self.assertEqual(mlp.get_input_port().size(), 1)
self.assertEqual(mlp.get_output_port().size(), 3)
context = mlp.CreateDefaultContext()
params = np.zeros((mlp.num_parameters(), 1))
self.assertEqual(mlp.num_parameters(), 13)
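        # 13 parameters = W0 (2x1) + b0 (2) for the hidden layer, plus
        # W1 (3x2) + b1 (3) for the output layer.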
self.assertEqual(mlp.layers(), [1, 2, 3])
self.assertEqual(mlp.activation_type(layer=0),
PerceptronActivationType.kReLU)
self.assertEqual(len(mlp.GetParameters(context=context)),
mlp.num_parameters())
mlp.SetWeights(context=context, layer=0, W=np.array([[1], [2]]))
mlp.SetBiases(context=context, layer=0, b=[3, 4])
np.testing.assert_array_equal(
mlp.GetWeights(context=context, layer=0), np.array([[1], [2]]))
np.testing.assert_array_equal(
mlp.GetBiases(context=context, layer=0), np.array([3, 4]))
params = np.zeros(mlp.num_parameters())
mlp.SetWeights(params=params, layer=0, W=np.array([[1], [2]]))
mlp.SetBiases(params=params, layer=0, b=[3, 4])
np.testing.assert_array_equal(
mlp.GetWeights(params=params, layer=0), np.array([[1], [2]]))
np.testing.assert_array_equal(
mlp.GetBiases(params=params, layer=0), np.array([3, 4]))
mutable_params = mlp.GetMutableParameters(context=context)
mutable_params[:] = 3.0
np.testing.assert_array_equal(mlp.GetParameters(context),
np.full(mlp.num_parameters(), 3.0))
global called_loss
called_loss = False
def silly_loss(Y, dloss_dY):
global called_loss
called_loss = True
# We must be careful to update the dloss in place, rather than bind
# a new matrix to the same variable name.
dloss_dY[:] = 1
# dloss_dY = np.array(...etc...) # <== wrong
return Y.sum()
dloss_dparams = np.zeros((13,))
generator = RandomGenerator(23)
mlp.SetRandomContext(context, generator)
mlp.Backpropagation(context=context,
X=np.array([1, 3, 4]).reshape((1, 3)),
loss=silly_loss,
dloss_dparams=dloss_dparams)
self.assertTrue(called_loss)
self.assertTrue(dloss_dparams.any()) # No longer all zero.
dloss_dparams = np.zeros((13,))
mlp.BackpropagationMeanSquaredError(context=context,
X=np.array([1, 3, 4]).reshape(
(1, 3)),
Y_desired=np.eye(3),
dloss_dparams=dloss_dparams)
self.assertTrue(dloss_dparams.any()) # No longer all zero.
Y = np.asfortranarray(np.eye(3))
mlp.BatchOutput(context=context, X=np.array([[0.1, 0.3, 0.4]]), Y=Y)
self.assertFalse(np.allclose(Y, np.eye(3)))
Y2 = mlp.BatchOutput(context=context, X=np.array([[0.1, 0.3, 0.4]]))
np.testing.assert_array_equal(Y, Y2)
mlp2 = MultilayerPerceptron(layers=[3, 2, 1],
activation_types=[
PerceptronActivationType.kReLU,
PerceptronActivationType.kTanh
])
self.assertEqual(mlp2.activation_type(0),
PerceptronActivationType.kReLU)
self.assertEqual(mlp2.activation_type(1),
PerceptronActivationType.kTanh)
Y = np.asfortranarray(np.full((1, 3), 2.4))
dYdX = np.asfortranarray(np.full((3, 3), 5.3))
context2 = mlp2.CreateDefaultContext()
mlp2.BatchOutput(context=context2, X=np.eye(3), Y=Y, dYdX=dYdX)
# The default context sets the weights and biases to zero, so the
# output (and gradients) should be zero.
np.testing.assert_array_almost_equal(Y, np.zeros((1, 3)))
np.testing.assert_array_almost_equal(dYdX, np.zeros((3, 3)))
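
        # With use_sin_cos_for_input=[True, False], the first input enters the
        # network as (sin, cos) features while the second passes through, so
        # the input port has size 2 but the first layer sees 3 values.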
mlp = MultilayerPerceptron(use_sin_cos_for_input=[True, False],
remaining_layers=[3, 2],
activation_types=[
PerceptronActivationType.kReLU,
PerceptronActivationType.kTanh
])
self.assertEqual(mlp.get_input_port().size(), 2)
np.testing.assert_array_equal(mlp.layers(), [3, 3, 2])
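
    # RandomSource emits samples from the given distribution, holding each
    # draw constant for sampling_interval_sec; AddRandomInputs attaches such
    # sources to any unconnected random input ports in the builder.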
def test_random_source(self):
source = RandomSource(distribution=RandomDistribution.kUniform,
num_outputs=2, sampling_interval_sec=0.01)
self.assertEqual(source.get_output_port(0).size(), 2)
builder = DiagramBuilder()
# Note: There are no random inputs to add to the empty diagram, but it
# confirms the API works.
AddRandomInputs(sampling_interval_sec=0.01, builder=builder)
builder_ad = DiagramBuilder_[AutoDiffXd]()
AddRandomInputs(sampling_interval_sec=0.01, builder=builder_ad)
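
    # ConstantVectorSource outputs a fixed vector; the accessors below
    # smoke-test reading the (mutable) source value held in the context.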
def test_constant_vector_source(self):
source = ConstantVectorSource(source_value=[1., 2.])
context = source.CreateDefaultContext()
source.get_source_value(context)
source.get_mutable_source_value(context)
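
    # Constructor-only smoke tests. The legacy `delay_timesteps` spelling of
    # the DiscreteTimeDelay argument still works but emits a deprecation
    # warning, hence catch_drake_warnings(expected_count=2) around those calls.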
def test_ctor_api(self):
"""Tests construction of systems for systems whose executions semantics
are not tested above.
"""
ConstantValueSource(Value("Hello world"))
DiscreteTimeDelay(update_sec=0.1, delay_time_steps=5, vector_size=2)
DiscreteTimeDelay(
update_sec=0.1, delay_time_steps=5,
abstract_model_value=Value("Hello world"))
with catch_drake_warnings(expected_count=2) as w:
DiscreteTimeDelay(update_sec=0.1, delay_timesteps=5, vector_size=2)
DiscreteTimeDelay(
update_sec=0.1, delay_timesteps=5,
abstract_model_value=Value("Hello world"))
ZeroOrderHold(period_sec=0.1, offset_sec=0.0, vector_size=2)
dut = ZeroOrderHold(period_sec=1.0, offset_sec=0.25,
abstract_model_value=Value("Hello world"))
self.assertEqual(dut.period(), 1.0)
self.assertEqual(dut.offset(), 0.25)
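
    # SharedPointerSystem keeps an arbitrary Python object alive for the
    # lifetime of the system (or of the diagram it is added to); get() returns
    # the held value.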
def test_shared_pointer_system_ctor(self):
dut = SharedPointerSystem(value_to_hold=[1, 2, 3])
readback = dut.get()
self.assertListEqual(readback, [1, 2, 3])
del dut
self.assertListEqual(readback, [1, 2, 3])
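
    # AddToBuilder constructs the holder system, adds it to the builder, and
    # returns the held value; the value stays reachable through the diagram
    # after the builder is gone.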
def test_shared_pointer_system_builder(self):
builder = DiagramBuilder()
self.assertListEqual(
SharedPointerSystem.AddToBuilder(
builder=builder, value_to_hold=[1, 2, 3]),
[1, 2, 3])
diagram = builder.Build()
del builder
readback = diagram.GetSystems()[0].get()
self.assertListEqual(readback, [1, 2, 3])
del diagram
self.assertListEqual(readback, [1, 2, 3])
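
    # Sine exposes three output ports: the sinusoid itself plus its first and
    # second time derivatives, each sized to the configured signal width.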
def test_sine(self):
# Test scalar output.
sine_source = Sine(amplitude=1, frequency=2, phase=3,
size=1, is_time_based=True)
self.assertEqual(sine_source.get_output_port(0).size(), 1)
self.assertEqual(sine_source.get_output_port(1).size(), 1)
self.assertEqual(sine_source.get_output_port(2).size(), 1)
# Test vector output.
sine_source = Sine(amplitude=1, frequency=2, phase=3,
size=3, is_time_based=True)
self.assertEqual(sine_source.get_output_port(0).size(), 3)
self.assertEqual(sine_source.get_output_port(1).size(), 3)
self.assertEqual(sine_source.get_output_port(2).size(), 3)
sine_source = Sine(amplitudes=np.ones(2), frequencies=np.ones(2),
phases=np.ones(2), is_time_based=True)
self.assertEqual(sine_source.get_output_port(0).size(), 2)
self.assertEqual(sine_source.get_output_port(1).size(), 2)
self.assertEqual(sine_source.get_output_port(2).size(), 2)
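
    # DiscreteDerivative estimates du/dt by first differences at the given
    # time_step; suppress_initial_transient defaults to True unless disabled.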
def test_discrete_derivative(self):
discrete_derivative = DiscreteDerivative(num_inputs=5, time_step=0.5)
self.assertEqual(discrete_derivative.get_input_port(0).size(), 5)
self.assertEqual(discrete_derivative.get_output_port(0).size(), 5)
self.assertEqual(discrete_derivative.time_step(), 0.5)
self.assertTrue(discrete_derivative.suppress_initial_transient())
discrete_derivative = DiscreteDerivative(
num_inputs=5, time_step=0.5, suppress_initial_transient=False)
self.assertFalse(discrete_derivative.suppress_initial_transient())
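
    # StateInterpolatorWithDiscreteDerivative stacks the position input with a
    # discrete-derivative velocity estimate, so the output size is
    # 2 * num_positions; set_initial_position() writes the given position into
    # both discrete state groups (via either a context or a state).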
def test_state_interpolator_with_discrete_derivative(self):
state_interpolator = StateInterpolatorWithDiscreteDerivative(
num_positions=5, time_step=0.4)
self.assertEqual(state_interpolator.get_input_port(0).size(), 5)
self.assertEqual(state_interpolator.get_output_port(0).size(), 10)
self.assertTrue(state_interpolator.suppress_initial_transient())
# test set_initial_position using context
context = state_interpolator.CreateDefaultContext()
state_interpolator.set_initial_position(
context=context, position=5*[1.1])
np.testing.assert_array_equal(
context.get_discrete_state(0).CopyToVector(),
np.array(5*[1.1]))
np.testing.assert_array_equal(
context.get_discrete_state(1).CopyToVector(),
np.array(5*[1.1]))
# test set_initial_position using state
context = state_interpolator.CreateDefaultContext()
state_interpolator.set_initial_position(
state=context.get_state(), position=5*[1.3])
np.testing.assert_array_equal(
context.get_discrete_state(0).CopyToVector(),
np.array(5*[1.3]))
np.testing.assert_array_equal(
context.get_discrete_state(1).CopyToVector(),
np.array(5*[1.3]))
state_interpolator = StateInterpolatorWithDiscreteDerivative(
num_positions=5, time_step=0.4, suppress_initial_transient=True)
self.assertTrue(state_interpolator.suppress_initial_transient())
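
    # LogVectorOutput attaches a logger to an output port; the overloads below
    # cover the default trigger, an explicit publish period, explicit trigger
    # sets, and combinations thereof.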
@numpy_compare.check_nonsymbolic_types
def test_log_vector_output(self, T):
# Add various redundant loggers to a system, to exercise the
# LogVectorOutput bindings.
builder = DiagramBuilder_[T]()
kSize = 1
integrator = builder.AddSystem(Integrator_[T](kSize))
port = integrator.get_output_port(0)
loggers = []
loggers.append(LogVectorOutput(port, builder))
loggers.append(LogVectorOutput(src=port, builder=builder))
loggers.append(LogVectorOutput(port, builder, 0.125))
loggers.append(LogVectorOutput(
src=port, builder=builder, publish_period=0.125))
loggers.append(LogVectorOutput(port, builder, {TriggerType.kForced}))
loggers.append(LogVectorOutput(
src=port, builder=builder, publish_triggers={TriggerType.kForced}))
loggers.append(LogVectorOutput(
port, builder, {TriggerType.kPeriodic}, 0.125))
loggers.append(LogVectorOutput(
src=port, builder=builder,
publish_triggers={TriggerType.kPeriodic}, publish_period=0.125))
# Check the returned loggers by calling some trivial methods.
diagram = builder.Build()
context = diagram.CreateDefaultContext()
self.assertTrue(all(logger.FindLog(context).num_samples() == 0
for logger in loggers))
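
    # VectorLog is the logger's in-memory storage: it records (time, value)
    # samples, can be cleared, and can preallocate capacity via Reserve().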
@numpy_compare.check_nonsymbolic_types
def test_vector_log(self, T):
kSize = 1
dut = VectorLog(kSize)
self.assertEqual(dut.get_input_size(), kSize)
dut.AddData(0.1, [22.22])
self.assertEqual(dut.num_samples(), 1)
self.assertEqual(dut.sample_times(), [0.1])
self.assertEqual(dut.data(), [22.22])
dut.Clear()
self.assertEqual(dut.num_samples(), 0)
        # There is no good way from Python to test the semantics of Reserve(),
# but test the binding anyway.
dut.Reserve(VectorLog.kDefaultCapacity * 3)
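
    # VectorLogSink mirrors the LogVectorOutput overloads at construction
    # time; FindLog()/FindMutableLog() take the root (diagram) context, while
    # GetLog()/GetMutableLog() take the sink's own subcontext.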
@numpy_compare.check_nonsymbolic_types
def test_vector_log_sink(self, T):
        # Add various redundant loggers to a system, to exercise the
        # VectorLogSink constructor bindings.
builder = DiagramBuilder_[T]()
kSize = 1
constructors = [VectorLogSink_[T]]
loggers = []
if T == float:
constructors.append(VectorLogSink)
for constructor in constructors:
loggers.append(builder.AddSystem(constructor(kSize)))
loggers.append(builder.AddSystem(constructor(input_size=kSize)))
loggers.append(builder.AddSystem(constructor(kSize, 0.125)))
loggers.append(builder.AddSystem(
constructor(input_size=kSize, publish_period=0.125)))
loggers.append(builder.AddSystem(
constructor(kSize, {TriggerType.kForced})))
loggers.append(builder.AddSystem(
constructor(input_size=kSize,
publish_triggers={TriggerType.kForced})))
loggers.append(builder.AddSystem(
constructor(kSize, {TriggerType.kPeriodic}, 0.125)))
loggers.append(builder.AddSystem(
constructor(input_size=kSize,
publish_triggers={TriggerType.kPeriodic},
publish_period=0.125)))
# Exercise all of the log access methods.
diagram = builder.Build()
context = diagram.CreateDefaultContext()
# FindLog and FindMutableLog find the same object.
self.assertTrue(
all(logger.FindLog(context) == logger.FindMutableLog(context)
for logger in loggers))
# Build a list of pairs of loggers and their local contexts.
loggers_and_contexts = [(x, x.GetMyContextFromRoot(context))
for x in loggers]
# GetLog and GetMutableLog find the same object.
self.assertTrue(
all(logger.GetLog(logger_context)
== logger.GetMutableLog(logger_context)
for logger, logger_context in loggers_and_contexts))
# GetLog and FindLog find the same object, given the proper contexts.
self.assertTrue(
all(logger.GetLog(logger_context) == logger.FindLog(context)
for logger, logger_context in loggers_and_contexts))
Demultiplexer(size=4)\n context = demux.CreateDefaultContext()\n self.assertEqual(demux.num_input_ports(), 1)\n self.assertEqual(demux.num_output_ports(), 4)\n numpy_compare.assert_equal(demux.get_output_ports_sizes(), [1, 1, 1, 1]\n )\n input_vec = np.array([1.0, 2.0, 3.0, 4.0])\n demux.get_input_port(0).FixValue(context, input_vec)\n output = demux.AllocateOutput()\n demux.CalcOutput(context, output)\n for i in range(4):\n self.assertTrue(np.allclose(output.get_vector_data(i).get_value\n (), input_vec[i]))\n demux = Demultiplexer(size=4, output_ports_size=2)\n context = demux.CreateDefaultContext()\n self.assertEqual(demux.num_input_ports(), 1)\n self.assertEqual(demux.num_output_ports(), 2)\n numpy_compare.assert_equal(demux.get_output_ports_sizes(), [2, 2])\n demux.get_input_port(0).FixValue(context, input_vec)\n output = demux.AllocateOutput()\n demux.CalcOutput(context, output)\n for i in range(2):\n self.assertTrue(np.allclose(output.get_vector_data(i).get_value\n (), input_vec[2 * i:2 * i + 2]))\n output_ports_sizes = np.array([1, 2, 1])\n num_output_ports = output_ports_sizes.size\n input_vec = np.array([1.0, 2.0, 3.0, 4.0])\n demux = Demultiplexer(output_ports_sizes=output_ports_sizes)\n context = demux.CreateDefaultContext()\n self.assertEqual(demux.num_input_ports(), 1)\n self.assertEqual(demux.num_output_ports(), num_output_ports)\n numpy_compare.assert_equal(demux.get_output_ports_sizes(),\n output_ports_sizes)\n demux.get_input_port(0).FixValue(context, input_vec)\n output = demux.AllocateOutput()\n demux.CalcOutput(context, output)\n output_port_start = 0\n for i in range(num_output_ports):\n output_port_size = output.get_vector_data(i).size()\n self.assertTrue(np.allclose(output.get_vector_data(i).get_value\n (), input_vec[output_port_start:output_port_start +\n output_port_size]))\n output_port_start += output_port_size\n <mask token>\n\n def test_multilayer_perceptron(self):\n mlp = MultilayerPerceptron(layers=[1, 2, 3], activation_type=\n PerceptronActivationType.kReLU)\n self.assertEqual(mlp.get_input_port().size(), 1)\n self.assertEqual(mlp.get_output_port().size(), 3)\n context = mlp.CreateDefaultContext()\n params = np.zeros((mlp.num_parameters(), 1))\n self.assertEqual(mlp.num_parameters(), 13)\n self.assertEqual(mlp.layers(), [1, 2, 3])\n self.assertEqual(mlp.activation_type(layer=0),\n PerceptronActivationType.kReLU)\n self.assertEqual(len(mlp.GetParameters(context=context)), mlp.\n num_parameters())\n mlp.SetWeights(context=context, layer=0, W=np.array([[1], [2]]))\n mlp.SetBiases(context=context, layer=0, b=[3, 4])\n np.testing.assert_array_equal(mlp.GetWeights(context=context, layer\n =0), np.array([[1], [2]]))\n np.testing.assert_array_equal(mlp.GetBiases(context=context, layer=\n 0), np.array([3, 4]))\n params = np.zeros(mlp.num_parameters())\n mlp.SetWeights(params=params, layer=0, W=np.array([[1], [2]]))\n mlp.SetBiases(params=params, layer=0, b=[3, 4])\n np.testing.assert_array_equal(mlp.GetWeights(params=params, layer=0\n ), np.array([[1], [2]]))\n np.testing.assert_array_equal(mlp.GetBiases(params=params, layer=0),\n np.array([3, 4]))\n mutable_params = mlp.GetMutableParameters(context=context)\n mutable_params[:] = 3.0\n np.testing.assert_array_equal(mlp.GetParameters(context), np.full(\n mlp.num_parameters(), 3.0))\n global called_loss\n called_loss = False\n\n def silly_loss(Y, dloss_dY):\n global called_loss\n called_loss = True\n dloss_dY[:] = 1\n return Y.sum()\n dloss_dparams = np.zeros((13,))\n generator = RandomGenerator(23)\n 
mlp.SetRandomContext(context, generator)\n mlp.Backpropagation(context=context, X=np.array([1, 3, 4]).reshape(\n (1, 3)), loss=silly_loss, dloss_dparams=dloss_dparams)\n self.assertTrue(called_loss)\n self.assertTrue(dloss_dparams.any())\n dloss_dparams = np.zeros((13,))\n mlp.BackpropagationMeanSquaredError(context=context, X=np.array([1,\n 3, 4]).reshape((1, 3)), Y_desired=np.eye(3), dloss_dparams=\n dloss_dparams)\n self.assertTrue(dloss_dparams.any())\n Y = np.asfortranarray(np.eye(3))\n mlp.BatchOutput(context=context, X=np.array([[0.1, 0.3, 0.4]]), Y=Y)\n self.assertFalse(np.allclose(Y, np.eye(3)))\n Y2 = mlp.BatchOutput(context=context, X=np.array([[0.1, 0.3, 0.4]]))\n np.testing.assert_array_equal(Y, Y2)\n mlp2 = MultilayerPerceptron(layers=[3, 2, 1], activation_types=[\n PerceptronActivationType.kReLU, PerceptronActivationType.kTanh])\n self.assertEqual(mlp2.activation_type(0), PerceptronActivationType.\n kReLU)\n self.assertEqual(mlp2.activation_type(1), PerceptronActivationType.\n kTanh)\n Y = np.asfortranarray(np.full((1, 3), 2.4))\n dYdX = np.asfortranarray(np.full((3, 3), 5.3))\n context2 = mlp2.CreateDefaultContext()\n mlp2.BatchOutput(context=context2, X=np.eye(3), Y=Y, dYdX=dYdX)\n np.testing.assert_array_almost_equal(Y, np.zeros((1, 3)))\n np.testing.assert_array_almost_equal(dYdX, np.zeros((3, 3)))\n mlp = MultilayerPerceptron(use_sin_cos_for_input=[True, False],\n remaining_layers=[3, 2], activation_types=[\n PerceptronActivationType.kReLU, PerceptronActivationType.kTanh])\n self.assertEqual(mlp.get_input_port().size(), 2)\n np.testing.assert_array_equal(mlp.layers(), [3, 3, 2])\n\n def test_random_source(self):\n source = RandomSource(distribution=RandomDistribution.kUniform,\n num_outputs=2, sampling_interval_sec=0.01)\n self.assertEqual(source.get_output_port(0).size(), 2)\n builder = DiagramBuilder()\n AddRandomInputs(sampling_interval_sec=0.01, builder=builder)\n builder_ad = DiagramBuilder_[AutoDiffXd]()\n AddRandomInputs(sampling_interval_sec=0.01, builder=builder_ad)\n\n def test_constant_vector_source(self):\n source = ConstantVectorSource(source_value=[1.0, 2.0])\n context = source.CreateDefaultContext()\n source.get_source_value(context)\n source.get_mutable_source_value(context)\n\n def test_ctor_api(self):\n \"\"\"Tests construction of systems for systems whose executions semantics\n are not tested above.\n \"\"\"\n ConstantValueSource(Value('Hello world'))\n DiscreteTimeDelay(update_sec=0.1, delay_time_steps=5, vector_size=2)\n DiscreteTimeDelay(update_sec=0.1, delay_time_steps=5,\n abstract_model_value=Value('Hello world'))\n with catch_drake_warnings(expected_count=2) as w:\n DiscreteTimeDelay(update_sec=0.1, delay_timesteps=5, vector_size=2)\n DiscreteTimeDelay(update_sec=0.1, delay_timesteps=5,\n abstract_model_value=Value('Hello world'))\n ZeroOrderHold(period_sec=0.1, offset_sec=0.0, vector_size=2)\n dut = ZeroOrderHold(period_sec=1.0, offset_sec=0.25,\n abstract_model_value=Value('Hello world'))\n self.assertEqual(dut.period(), 1.0)\n self.assertEqual(dut.offset(), 0.25)\n\n def test_shared_pointer_system_ctor(self):\n dut = SharedPointerSystem(value_to_hold=[1, 2, 3])\n readback = dut.get()\n self.assertListEqual(readback, [1, 2, 3])\n del dut\n self.assertListEqual(readback, [1, 2, 3])\n\n def test_shared_pointer_system_builder(self):\n builder = DiagramBuilder()\n self.assertListEqual(SharedPointerSystem.AddToBuilder(builder=\n builder, value_to_hold=[1, 2, 3]), [1, 2, 3])\n diagram = builder.Build()\n del builder\n readback = 
diagram.GetSystems()[0].get()\n self.assertListEqual(readback, [1, 2, 3])\n del diagram\n self.assertListEqual(readback, [1, 2, 3])\n\n def test_sine(self):\n sine_source = Sine(amplitude=1, frequency=2, phase=3, size=1,\n is_time_based=True)\n self.assertEqual(sine_source.get_output_port(0).size(), 1)\n self.assertEqual(sine_source.get_output_port(1).size(), 1)\n self.assertEqual(sine_source.get_output_port(2).size(), 1)\n sine_source = Sine(amplitude=1, frequency=2, phase=3, size=3,\n is_time_based=True)\n self.assertEqual(sine_source.get_output_port(0).size(), 3)\n self.assertEqual(sine_source.get_output_port(1).size(), 3)\n self.assertEqual(sine_source.get_output_port(2).size(), 3)\n sine_source = Sine(amplitudes=np.ones(2), frequencies=np.ones(2),\n phases=np.ones(2), is_time_based=True)\n self.assertEqual(sine_source.get_output_port(0).size(), 2)\n self.assertEqual(sine_source.get_output_port(1).size(), 2)\n self.assertEqual(sine_source.get_output_port(2).size(), 2)\n\n def test_discrete_derivative(self):\n discrete_derivative = DiscreteDerivative(num_inputs=5, time_step=0.5)\n self.assertEqual(discrete_derivative.get_input_port(0).size(), 5)\n self.assertEqual(discrete_derivative.get_output_port(0).size(), 5)\n self.assertEqual(discrete_derivative.time_step(), 0.5)\n self.assertTrue(discrete_derivative.suppress_initial_transient())\n discrete_derivative = DiscreteDerivative(num_inputs=5, time_step=\n 0.5, suppress_initial_transient=False)\n self.assertFalse(discrete_derivative.suppress_initial_transient())\n <mask token>\n\n @numpy_compare.check_nonsymbolic_types\n def test_log_vector_output(self, T):\n builder = DiagramBuilder_[T]()\n kSize = 1\n integrator = builder.AddSystem(Integrator_[T](kSize))\n port = integrator.get_output_port(0)\n loggers = []\n loggers.append(LogVectorOutput(port, builder))\n loggers.append(LogVectorOutput(src=port, builder=builder))\n loggers.append(LogVectorOutput(port, builder, 0.125))\n loggers.append(LogVectorOutput(src=port, builder=builder,\n publish_period=0.125))\n loggers.append(LogVectorOutput(port, builder, {TriggerType.kForced}))\n loggers.append(LogVectorOutput(src=port, builder=builder,\n publish_triggers={TriggerType.kForced}))\n loggers.append(LogVectorOutput(port, builder, {TriggerType.\n kPeriodic}, 0.125))\n loggers.append(LogVectorOutput(src=port, builder=builder,\n publish_triggers={TriggerType.kPeriodic}, publish_period=0.125))\n diagram = builder.Build()\n context = diagram.CreateDefaultContext()\n self.assertTrue(all(logger.FindLog(context).num_samples() == 0 for\n logger in loggers))\n <mask token>\n\n @numpy_compare.check_nonsymbolic_types\n def test_vector_log_sink(self, T):\n builder = DiagramBuilder_[T]()\n kSize = 1\n constructors = [VectorLogSink_[T]]\n loggers = []\n if T == float:\n constructors.append(VectorLogSink)\n for constructor in constructors:\n loggers.append(builder.AddSystem(constructor(kSize)))\n loggers.append(builder.AddSystem(constructor(input_size=kSize)))\n loggers.append(builder.AddSystem(constructor(kSize, 0.125)))\n loggers.append(builder.AddSystem(constructor(input_size=kSize,\n publish_period=0.125)))\n loggers.append(builder.AddSystem(constructor(kSize, {\n TriggerType.kForced})))\n loggers.append(builder.AddSystem(constructor(input_size=kSize,\n publish_triggers={TriggerType.kForced})))\n loggers.append(builder.AddSystem(constructor(kSize, {\n TriggerType.kPeriodic}, 0.125)))\n loggers.append(builder.AddSystem(constructor(input_size=kSize,\n publish_triggers={TriggerType.kPeriodic}, 
publish_period=\n 0.125)))\n diagram = builder.Build()\n context = diagram.CreateDefaultContext()\n self.assertTrue(all(logger.FindLog(context) == logger.\n FindMutableLog(context) for logger in loggers))\n loggers_and_contexts = [(x, x.GetMyContextFromRoot(context)) for x in\n loggers]\n self.assertTrue(all(logger.GetLog(logger_context) == logger.\n GetMutableLog(logger_context) for logger, logger_context in\n loggers_and_contexts))\n self.assertTrue(all(logger.GetLog(logger_context) == logger.FindLog\n (context) for logger, logger_context in loggers_and_contexts))\n",
"step-4": "<mask token>\n\n\nclass TestGeneral(unittest.TestCase):\n\n def _check_instantiations(self, template, supports_symbolic=True):\n default_cls = template[None]\n self.assertTrue(template[float] is default_cls)\n self.assertTrue(template[AutoDiffXd] is not default_cls)\n if supports_symbolic:\n self.assertTrue(template[Expression] is not default_cls)\n\n def test_instantiations(self):\n self._check_instantiations(Adder_)\n self._check_instantiations(AffineSystem_)\n self._check_instantiations(ConstantValueSource_)\n self._check_instantiations(ConstantVectorSource_)\n self._check_instantiations(Demultiplexer_)\n self._check_instantiations(DiscreteDerivative_)\n self._check_instantiations(DiscreteTimeDelay_)\n self._check_instantiations(Gain_)\n self._check_instantiations(Integrator_)\n self._check_instantiations(LinearSystem_)\n self._check_instantiations(LinearTransformDensity_,\n supports_symbolic=False)\n self._check_instantiations(Multiplexer_)\n self._check_instantiations(MultilayerPerceptron_)\n self._check_instantiations(PassThrough_)\n self._check_instantiations(PortSwitch_)\n self._check_instantiations(Saturation_)\n self._check_instantiations(SharedPointerSystem_)\n self._check_instantiations(Sine_)\n self._check_instantiations(StateInterpolatorWithDiscreteDerivative_)\n self._check_instantiations(SymbolicVectorSystem_)\n self._check_instantiations(TrajectoryAffineSystem_,\n supports_symbolic=False)\n self._check_instantiations(TrajectoryLinearSystem_,\n supports_symbolic=False)\n self._check_instantiations(TrajectorySource_)\n self._check_instantiations(VectorLogSink_)\n self._check_instantiations(WrapToSystem_)\n self._check_instantiations(ZeroOrderHold_)\n\n def test_linear_affine_system(self):\n A = np.identity(2)\n B = np.array([[0], [1]])\n f0 = np.array([[0], [0]])\n C = np.array([[0, 1]])\n D = [1]\n y0 = [0]\n system = LinearSystem(A, B, C, D)\n context = system.CreateDefaultContext()\n self.assertEqual(system.get_input_port(0).size(), 1)\n self.assertEqual(context.get_mutable_continuous_state_vector().size\n (), 2)\n self.assertEqual(system.get_output_port(0).size(), 1)\n self.assertTrue((system.A() == A).all())\n self.assertTrue((system.B() == B).all())\n self.assertTrue((system.f0() == f0).all())\n self.assertTrue((system.C() == C).all())\n self.assertEqual(system.D(), D)\n self.assertEqual(system.y0(), y0)\n self.assertEqual(system.time_period(), 0.0)\n x0 = np.array([1, 2])\n system.configure_default_state(x0=x0)\n system.SetDefaultContext(context)\n np.testing.assert_equal(context.get_continuous_state_vector().\n CopyToVector(), x0)\n generator = RandomGenerator()\n system.SetRandomContext(context, generator)\n np.testing.assert_equal(context.get_continuous_state_vector().\n CopyToVector(), x0)\n system.configure_random_state(covariance=np.eye(2))\n system.SetRandomContext(context, generator)\n self.assertNotEqual(context.get_continuous_state_vector().\n CopyToVector()[1], x0[1])\n Co = ControllabilityMatrix(system)\n self.assertEqual(Co.shape, (2, 2))\n self.assertFalse(IsControllable(system))\n self.assertFalse(IsControllable(system, 1e-06))\n self.assertFalse(IsStabilizable(sys=system))\n self.assertFalse(IsStabilizable(sys=system, threshold=1e-06))\n Ob = ObservabilityMatrix(system)\n self.assertEqual(Ob.shape, (2, 2))\n self.assertFalse(IsObservable(system))\n self.assertFalse(IsDetectable(sys=system))\n self.assertFalse(IsDetectable(sys=system, threshold=1e-06))\n system = AffineSystem(A, B, f0, C, D, y0, 0.1)\n 
self.assertEqual(system.get_input_port(0), system.get_input_port())\n self.assertEqual(system.get_output_port(0), system.get_output_port())\n context = system.CreateDefaultContext()\n self.assertEqual(system.get_input_port(0).size(), 1)\n self.assertEqual(context.get_discrete_state_vector().size(), 2)\n self.assertEqual(system.get_output_port(0).size(), 1)\n self.assertTrue((system.A() == A).all())\n self.assertTrue((system.B() == B).all())\n self.assertTrue((system.f0() == f0).all())\n self.assertTrue((system.C() == C).all())\n self.assertEqual(system.D(), D)\n self.assertEqual(system.y0(), y0)\n self.assertEqual(system.time_period(), 0.1)\n system.get_input_port(0).FixValue(context, 0)\n linearized = Linearize(system, context)\n self.assertTrue((linearized.A() == A).all())\n taylor = FirstOrderTaylorApproximation(system, context)\n self.assertTrue((taylor.y0() == y0).all())\n new_A = np.array([[1, 2], [3, 4]])\n new_B = np.array([[5], [6]])\n new_f0 = np.array([[7], [8]])\n new_C = np.array([[9, 10]])\n new_D = np.array([[11]])\n new_y0 = np.array([12])\n system.UpdateCoefficients(A=new_A, B=new_B, f0=new_f0, C=new_C, D=\n new_D, y0=new_y0)\n np.testing.assert_equal(new_A, system.A())\n np.testing.assert_equal(new_B, system.B())\n np.testing.assert_equal(new_f0.flatten(), system.f0())\n np.testing.assert_equal(new_C, system.C())\n np.testing.assert_equal(new_D, system.D())\n np.testing.assert_equal(new_y0, system.y0())\n system = MatrixGain(D=A)\n self.assertTrue((system.D() == A).all())\n system = TrajectoryAffineSystem(PiecewisePolynomial(A),\n PiecewisePolynomial(B), PiecewisePolynomial(f0),\n PiecewisePolynomial(C), PiecewisePolynomial(D),\n PiecewisePolynomial(y0), 0.1)\n self.assertEqual(system.get_input_port(0), system.get_input_port())\n self.assertEqual(system.get_output_port(0), system.get_output_port())\n context = system.CreateDefaultContext()\n self.assertEqual(system.get_input_port(0).size(), 1)\n self.assertEqual(context.get_discrete_state_vector().size(), 2)\n self.assertEqual(system.get_output_port(0).size(), 1)\n for t in np.linspace(0.0, 1.0, 5):\n self.assertTrue((system.A(t) == A).all())\n self.assertTrue((system.B(t) == B).all())\n self.assertTrue((system.f0(t) == f0).all())\n self.assertTrue((system.C(t) == C).all())\n self.assertEqual(system.D(t), D)\n self.assertEqual(system.y0(t), y0)\n self.assertEqual(system.time_period(), 0.1)\n x0 = np.array([1, 2])\n system.configure_default_state(x0=x0)\n system.SetDefaultContext(context)\n np.testing.assert_equal(context.get_discrete_state_vector().\n CopyToVector(), x0)\n generator = RandomGenerator()\n system.SetRandomContext(context, generator)\n np.testing.assert_equal(context.get_discrete_state_vector().\n CopyToVector(), x0)\n system.configure_random_state(covariance=np.eye(2))\n system.SetRandomContext(context, generator)\n self.assertNotEqual(context.get_discrete_state_vector().\n CopyToVector()[1], x0[1])\n system = TrajectoryLinearSystem(A=PiecewisePolynomial(A), B=\n PiecewisePolynomial(B), C=PiecewisePolynomial(C), D=\n PiecewisePolynomial(D), time_period=0.1)\n self.assertEqual(system.time_period(), 0.1)\n system.configure_default_state(x0=np.array([1, 2]))\n system.configure_random_state(covariance=np.eye(2))\n\n def test_linear_affine_system_empty_matrices(self):\n\n def CheckSizes(system, num_states, num_inputs, num_outputs):\n self.assertEqual(system.num_continuous_states(), num_states)\n self.assertEqual(system.num_inputs(), num_inputs)\n self.assertEqual(system.num_outputs(), num_outputs)\n system = 
AffineSystem(y0=[2, 1])\n CheckSizes(system, num_states=0, num_inputs=0, num_outputs=2)\n system = AffineSystem(D=np.eye(2))\n CheckSizes(system, num_states=0, num_inputs=2, num_outputs=2)\n system = LinearSystem(D=np.eye(2))\n CheckSizes(system, num_states=0, num_inputs=2, num_outputs=2)\n system = AffineSystem(D=np.eye(2), y0=[1, 2])\n CheckSizes(system, num_states=0, num_inputs=2, num_outputs=2)\n system = LinearSystem(B=np.eye(2))\n CheckSizes(system, num_states=2, num_inputs=2, num_outputs=0)\n\n def test_linear_system_zero_size(self):\n num_x = 0\n num_y = 2\n num_u = 2\n A = np.zeros((num_x, num_x))\n B = np.zeros((num_x, num_u))\n C = np.zeros((num_y, num_x))\n D = np.zeros((num_y, num_u))\n self.assertIsNotNone(LinearSystem(A, B, C, D))\n\n @numpy_compare.check_nonsymbolic_types\n def test_linear_transform_density(self, T):\n dut = LinearTransformDensity_[T](distribution=RandomDistribution.\n kGaussian, input_size=3, output_size=3)\n w_in = np.array([T(0.5), T(0.1), T(1.5)])\n context = dut.CreateDefaultContext()\n dut.get_input_port_w_in().FixValue(context, w_in)\n self.assertEqual(dut.get_input_port_A().size(), 9)\n self.assertEqual(dut.get_input_port_b().size(), 3)\n self.assertEqual(dut.get_distribution(), RandomDistribution.kGaussian)\n A = np.array([[T(0.5), T(1), T(2)], [T(1), T(2), T(3)], [T(3), T(4),\n T(5)]])\n dut.FixConstantA(context=context, A=A)\n b = np.array([T(1), T(2), T(3)])\n dut.FixConstantB(context=context, b=b)\n dut.CalcDensity(context=context)\n self.assertEqual(dut.get_output_port_w_out().size(), 3)\n self.assertEqual(dut.get_output_port_w_out_density().size(), 1)\n\n def test_vector_pass_through(self):\n model_value = BasicVector([1.0, 2, 3])\n system = PassThrough(vector_size=model_value.size())\n context = system.CreateDefaultContext()\n system.get_input_port(0).FixValue(context, model_value)\n output = system.AllocateOutput()\n input_eval = system.EvalVectorInput(context, 0)\n compare_value(self, input_eval, model_value)\n system.CalcOutput(context, output)\n output_value = output.get_vector_data(0)\n compare_value(self, output_value, model_value)\n\n def test_default_vector_pass_through(self):\n model_value = [1.0, 2, 3]\n system = PassThrough(value=model_value)\n context = system.CreateDefaultContext()\n np.testing.assert_array_equal(model_value, system.get_output_port()\n .Eval(context))\n\n def test_abstract_pass_through(self):\n model_value = Value('Hello world')\n system = PassThrough(abstract_model_value=model_value)\n context = system.CreateDefaultContext()\n system.get_input_port(0).FixValue(context, model_value)\n output = system.AllocateOutput()\n input_eval = system.EvalAbstractInput(context, 0)\n compare_value(self, input_eval, model_value)\n system.CalcOutput(context, output)\n output_value = output.get_data(0)\n compare_value(self, output_value, model_value)\n\n def test_port_switch(self):\n system = PortSwitch(vector_size=2)\n a = system.DeclareInputPort(name='a')\n system.DeclareInputPort(name='b')\n context = system.CreateDefaultContext()\n self.assertIsInstance(a, InputPort)\n system.get_port_selector_input_port().FixValue(context, a.get_index())\n\n def test_first_order_low_pass_filter(self):\n filter1 = FirstOrderLowPassFilter(time_constant=3.0, size=4)\n self.assertEqual(filter1.get_time_constant(), 3.0)\n alpha = np.array([1, 2, 3])\n filter2 = FirstOrderLowPassFilter(time_constants=alpha)\n np.testing.assert_array_equal(filter2.get_time_constants_vector(),\n alpha)\n context = filter2.CreateDefaultContext()\n 
filter2.set_initial_output_value(context, [0.0, -0.2, 0.4])\n <mask token>\n\n def test_saturation(self):\n system = Saturation((0.0, -1.0, 3.0), (1.0, 2.0, 4.0))\n context = system.CreateDefaultContext()\n output = system.AllocateOutput()\n\n def mytest(input, expected):\n system.get_input_port(0).FixValue(context, input)\n system.CalcOutput(context, output)\n self.assertTrue(np.allclose(output.get_vector_data(0).\n CopyToVector(), expected))\n mytest((-5.0, 5.0, 4.0), (0.0, 2.0, 4.0))\n mytest((0.4, 0.0, 3.5), (0.4, 0.0, 3.5))\n\n def test_trajectory_source(self):\n ppt = PiecewisePolynomial.FirstOrderHold([0.0, 1.0], [[2.0, 3.0], [\n 2.0, 1.0]])\n system = TrajectorySource(trajectory=ppt, output_derivative_order=0,\n zero_derivatives_beyond_limits=True)\n context = system.CreateDefaultContext()\n output = system.AllocateOutput()\n\n def mytest(input, expected):\n context.SetTime(input)\n system.CalcOutput(context, output)\n self.assertTrue(np.allclose(output.get_vector_data(0).\n CopyToVector(), expected))\n mytest(0.0, (2.0, 2.0))\n mytest(0.5, (2.5, 1.5))\n mytest(1.0, (3.0, 1.0))\n ppt2 = PiecewisePolynomial.FirstOrderHold([0.0, 1.0], [[4.0, 6.0],\n [4.0, 2.0]])\n system.UpdateTrajectory(trajectory=ppt2)\n mytest(0.0, (4.0, 4.0))\n mytest(0.5, (5.0, 3.0))\n mytest(1.0, (6.0, 2.0))\n\n def test_symbolic_vector_system(self):\n t = Variable('t')\n x = [Variable('x0'), Variable('x1')]\n u = [Variable('u0'), Variable('u1')]\n system = SymbolicVectorSystem(time=t, state=x, input=u, dynamics=[x\n [0] + x[1], t], output=[u[1]], time_period=0.0)\n context = system.CreateDefaultContext()\n self.assertEqual(context.num_continuous_states(), 2)\n self.assertEqual(context.num_discrete_state_groups(), 0)\n self.assertEqual(system.get_input_port(0).size(), 2)\n self.assertEqual(system.get_output_port(0).size(), 1)\n self.assertEqual(context.num_abstract_parameters(), 0)\n self.assertEqual(context.num_numeric_parameter_groups(), 0)\n self.assertTrue(system.dynamics_for_variable(x[0]).EqualTo(x[0] + x[1])\n )\n self.assertTrue(system.dynamics_for_variable(x[1]).EqualTo(t))\n\n def test_symbolic_vector_system_parameters(self):\n t = Variable('t')\n x = [Variable('x0'), Variable('x1')]\n u = [Variable('u0'), Variable('u1')]\n p = [Variable('p0'), Variable('p1')]\n system = SymbolicVectorSystem(time=t, state=x, input=u, parameter=p,\n dynamics=[p[0] * x[0] + x[1] + p[1], t], output=[u[1]],\n time_period=0.0)\n context = system.CreateDefaultContext()\n self.assertEqual(context.num_continuous_states(), 2)\n self.assertEqual(context.num_discrete_state_groups(), 0)\n self.assertEqual(system.get_input_port(0).size(), 2)\n self.assertEqual(system.get_output_port(0).size(), 1)\n self.assertEqual(context.num_abstract_parameters(), 0)\n self.assertEqual(context.num_numeric_parameter_groups(), 1)\n self.assertEqual(context.get_numeric_parameter(0).size(), 2)\n self.assertTrue(system.dynamics_for_variable(x[0]).EqualTo(p[0] * x\n [0] + x[1] + p[1]))\n self.assertTrue(system.dynamics_for_variable(x[1]).EqualTo(t))\n\n def test_wrap_to_system(self):\n system = WrapToSystem(2)\n system.set_interval(1, 1.0, 2.0)\n context = system.CreateDefaultContext()\n output = system.AllocateOutput()\n\n def mytest(input, expected):\n system.get_input_port(0).FixValue(context, input)\n system.CalcOutput(context, output)\n self.assertTrue(np.allclose(output.get_vector_data(0).\n CopyToVector(), expected))\n mytest((-1.5, 0.5), (-1.5, 1.5))\n mytest((0.2, 0.3), (0.2, 1.3))\n\n def test_demultiplexer(self):\n demux = 
Demultiplexer(size=4)\n context = demux.CreateDefaultContext()\n self.assertEqual(demux.num_input_ports(), 1)\n self.assertEqual(demux.num_output_ports(), 4)\n numpy_compare.assert_equal(demux.get_output_ports_sizes(), [1, 1, 1, 1]\n )\n input_vec = np.array([1.0, 2.0, 3.0, 4.0])\n demux.get_input_port(0).FixValue(context, input_vec)\n output = demux.AllocateOutput()\n demux.CalcOutput(context, output)\n for i in range(4):\n self.assertTrue(np.allclose(output.get_vector_data(i).get_value\n (), input_vec[i]))\n demux = Demultiplexer(size=4, output_ports_size=2)\n context = demux.CreateDefaultContext()\n self.assertEqual(demux.num_input_ports(), 1)\n self.assertEqual(demux.num_output_ports(), 2)\n numpy_compare.assert_equal(demux.get_output_ports_sizes(), [2, 2])\n demux.get_input_port(0).FixValue(context, input_vec)\n output = demux.AllocateOutput()\n demux.CalcOutput(context, output)\n for i in range(2):\n self.assertTrue(np.allclose(output.get_vector_data(i).get_value\n (), input_vec[2 * i:2 * i + 2]))\n output_ports_sizes = np.array([1, 2, 1])\n num_output_ports = output_ports_sizes.size\n input_vec = np.array([1.0, 2.0, 3.0, 4.0])\n demux = Demultiplexer(output_ports_sizes=output_ports_sizes)\n context = demux.CreateDefaultContext()\n self.assertEqual(demux.num_input_ports(), 1)\n self.assertEqual(demux.num_output_ports(), num_output_ports)\n numpy_compare.assert_equal(demux.get_output_ports_sizes(),\n output_ports_sizes)\n demux.get_input_port(0).FixValue(context, input_vec)\n output = demux.AllocateOutput()\n demux.CalcOutput(context, output)\n output_port_start = 0\n for i in range(num_output_ports):\n output_port_size = output.get_vector_data(i).size()\n self.assertTrue(np.allclose(output.get_vector_data(i).get_value\n (), input_vec[output_port_start:output_port_start +\n output_port_size]))\n output_port_start += output_port_size\n\n def test_multiplexer(self):\n my_vector = MyVector2(data=[1.0, 2.0])\n test_cases = [dict(has_vector=False, mux=Multiplexer(\n num_scalar_inputs=4), data=[[5.0], [3.0], [4.0], [2.0]]), dict(\n has_vector=False, mux=Multiplexer(input_sizes=[2, 3]), data=[[\n 8.0, 4.0], [3.0, 6.0, 9.0]]), dict(has_vector=True, mux=\n Multiplexer(model_vector=my_vector), data=[[42.0], [3.0]])]\n for case in test_cases:\n mux = case['mux']\n port_size = sum([len(vec) for vec in case['data']])\n self.assertEqual(mux.get_output_port(0).size(), port_size)\n context = mux.CreateDefaultContext()\n output = mux.AllocateOutput()\n num_ports = len(case['data'])\n self.assertEqual(context.num_input_ports(), num_ports)\n for j, vec in enumerate(case['data']):\n mux.get_input_port(j).FixValue(context, vec)\n mux.CalcOutput(context, output)\n self.assertTrue(np.allclose(output.get_vector_data(0).get_value\n (), [elem for vec in case['data'] for elem in vec]))\n if case['has_vector']:\n value = output.get_vector_data(0)\n self.assertTrue(isinstance(value, MyVector2))\n\n def test_multilayer_perceptron(self):\n mlp = MultilayerPerceptron(layers=[1, 2, 3], activation_type=\n PerceptronActivationType.kReLU)\n self.assertEqual(mlp.get_input_port().size(), 1)\n self.assertEqual(mlp.get_output_port().size(), 3)\n context = mlp.CreateDefaultContext()\n params = np.zeros((mlp.num_parameters(), 1))\n self.assertEqual(mlp.num_parameters(), 13)\n self.assertEqual(mlp.layers(), [1, 2, 3])\n self.assertEqual(mlp.activation_type(layer=0),\n PerceptronActivationType.kReLU)\n self.assertEqual(len(mlp.GetParameters(context=context)), mlp.\n num_parameters())\n mlp.SetWeights(context=context, layer=0, 
W=np.array([[1], [2]]))\n mlp.SetBiases(context=context, layer=0, b=[3, 4])\n np.testing.assert_array_equal(mlp.GetWeights(context=context, layer\n =0), np.array([[1], [2]]))\n np.testing.assert_array_equal(mlp.GetBiases(context=context, layer=\n 0), np.array([3, 4]))\n params = np.zeros(mlp.num_parameters())\n mlp.SetWeights(params=params, layer=0, W=np.array([[1], [2]]))\n mlp.SetBiases(params=params, layer=0, b=[3, 4])\n np.testing.assert_array_equal(mlp.GetWeights(params=params, layer=0\n ), np.array([[1], [2]]))\n np.testing.assert_array_equal(mlp.GetBiases(params=params, layer=0),\n np.array([3, 4]))\n mutable_params = mlp.GetMutableParameters(context=context)\n mutable_params[:] = 3.0\n np.testing.assert_array_equal(mlp.GetParameters(context), np.full(\n mlp.num_parameters(), 3.0))\n global called_loss\n called_loss = False\n\n def silly_loss(Y, dloss_dY):\n global called_loss\n called_loss = True\n dloss_dY[:] = 1\n return Y.sum()\n dloss_dparams = np.zeros((13,))\n generator = RandomGenerator(23)\n mlp.SetRandomContext(context, generator)\n mlp.Backpropagation(context=context, X=np.array([1, 3, 4]).reshape(\n (1, 3)), loss=silly_loss, dloss_dparams=dloss_dparams)\n self.assertTrue(called_loss)\n self.assertTrue(dloss_dparams.any())\n dloss_dparams = np.zeros((13,))\n mlp.BackpropagationMeanSquaredError(context=context, X=np.array([1,\n 3, 4]).reshape((1, 3)), Y_desired=np.eye(3), dloss_dparams=\n dloss_dparams)\n self.assertTrue(dloss_dparams.any())\n Y = np.asfortranarray(np.eye(3))\n mlp.BatchOutput(context=context, X=np.array([[0.1, 0.3, 0.4]]), Y=Y)\n self.assertFalse(np.allclose(Y, np.eye(3)))\n Y2 = mlp.BatchOutput(context=context, X=np.array([[0.1, 0.3, 0.4]]))\n np.testing.assert_array_equal(Y, Y2)\n mlp2 = MultilayerPerceptron(layers=[3, 2, 1], activation_types=[\n PerceptronActivationType.kReLU, PerceptronActivationType.kTanh])\n self.assertEqual(mlp2.activation_type(0), PerceptronActivationType.\n kReLU)\n self.assertEqual(mlp2.activation_type(1), PerceptronActivationType.\n kTanh)\n Y = np.asfortranarray(np.full((1, 3), 2.4))\n dYdX = np.asfortranarray(np.full((3, 3), 5.3))\n context2 = mlp2.CreateDefaultContext()\n mlp2.BatchOutput(context=context2, X=np.eye(3), Y=Y, dYdX=dYdX)\n np.testing.assert_array_almost_equal(Y, np.zeros((1, 3)))\n np.testing.assert_array_almost_equal(dYdX, np.zeros((3, 3)))\n mlp = MultilayerPerceptron(use_sin_cos_for_input=[True, False],\n remaining_layers=[3, 2], activation_types=[\n PerceptronActivationType.kReLU, PerceptronActivationType.kTanh])\n self.assertEqual(mlp.get_input_port().size(), 2)\n np.testing.assert_array_equal(mlp.layers(), [3, 3, 2])\n\n def test_random_source(self):\n source = RandomSource(distribution=RandomDistribution.kUniform,\n num_outputs=2, sampling_interval_sec=0.01)\n self.assertEqual(source.get_output_port(0).size(), 2)\n builder = DiagramBuilder()\n AddRandomInputs(sampling_interval_sec=0.01, builder=builder)\n builder_ad = DiagramBuilder_[AutoDiffXd]()\n AddRandomInputs(sampling_interval_sec=0.01, builder=builder_ad)\n\n def test_constant_vector_source(self):\n source = ConstantVectorSource(source_value=[1.0, 2.0])\n context = source.CreateDefaultContext()\n source.get_source_value(context)\n source.get_mutable_source_value(context)\n\n def test_ctor_api(self):\n \"\"\"Tests construction of systems for systems whose executions semantics\n are not tested above.\n \"\"\"\n ConstantValueSource(Value('Hello world'))\n DiscreteTimeDelay(update_sec=0.1, delay_time_steps=5, vector_size=2)\n 
DiscreteTimeDelay(update_sec=0.1, delay_time_steps=5,\n abstract_model_value=Value('Hello world'))\n with catch_drake_warnings(expected_count=2) as w:\n DiscreteTimeDelay(update_sec=0.1, delay_timesteps=5, vector_size=2)\n DiscreteTimeDelay(update_sec=0.1, delay_timesteps=5,\n abstract_model_value=Value('Hello world'))\n ZeroOrderHold(period_sec=0.1, offset_sec=0.0, vector_size=2)\n dut = ZeroOrderHold(period_sec=1.0, offset_sec=0.25,\n abstract_model_value=Value('Hello world'))\n self.assertEqual(dut.period(), 1.0)\n self.assertEqual(dut.offset(), 0.25)\n\n def test_shared_pointer_system_ctor(self):\n dut = SharedPointerSystem(value_to_hold=[1, 2, 3])\n readback = dut.get()\n self.assertListEqual(readback, [1, 2, 3])\n del dut\n self.assertListEqual(readback, [1, 2, 3])\n\n def test_shared_pointer_system_builder(self):\n builder = DiagramBuilder()\n self.assertListEqual(SharedPointerSystem.AddToBuilder(builder=\n builder, value_to_hold=[1, 2, 3]), [1, 2, 3])\n diagram = builder.Build()\n del builder\n readback = diagram.GetSystems()[0].get()\n self.assertListEqual(readback, [1, 2, 3])\n del diagram\n self.assertListEqual(readback, [1, 2, 3])\n\n def test_sine(self):\n sine_source = Sine(amplitude=1, frequency=2, phase=3, size=1,\n is_time_based=True)\n self.assertEqual(sine_source.get_output_port(0).size(), 1)\n self.assertEqual(sine_source.get_output_port(1).size(), 1)\n self.assertEqual(sine_source.get_output_port(2).size(), 1)\n sine_source = Sine(amplitude=1, frequency=2, phase=3, size=3,\n is_time_based=True)\n self.assertEqual(sine_source.get_output_port(0).size(), 3)\n self.assertEqual(sine_source.get_output_port(1).size(), 3)\n self.assertEqual(sine_source.get_output_port(2).size(), 3)\n sine_source = Sine(amplitudes=np.ones(2), frequencies=np.ones(2),\n phases=np.ones(2), is_time_based=True)\n self.assertEqual(sine_source.get_output_port(0).size(), 2)\n self.assertEqual(sine_source.get_output_port(1).size(), 2)\n self.assertEqual(sine_source.get_output_port(2).size(), 2)\n\n def test_discrete_derivative(self):\n discrete_derivative = DiscreteDerivative(num_inputs=5, time_step=0.5)\n self.assertEqual(discrete_derivative.get_input_port(0).size(), 5)\n self.assertEqual(discrete_derivative.get_output_port(0).size(), 5)\n self.assertEqual(discrete_derivative.time_step(), 0.5)\n self.assertTrue(discrete_derivative.suppress_initial_transient())\n discrete_derivative = DiscreteDerivative(num_inputs=5, time_step=\n 0.5, suppress_initial_transient=False)\n self.assertFalse(discrete_derivative.suppress_initial_transient())\n\n def test_state_interpolator_with_discrete_derivative(self):\n state_interpolator = StateInterpolatorWithDiscreteDerivative(\n num_positions=5, time_step=0.4)\n self.assertEqual(state_interpolator.get_input_port(0).size(), 5)\n self.assertEqual(state_interpolator.get_output_port(0).size(), 10)\n self.assertTrue(state_interpolator.suppress_initial_transient())\n context = state_interpolator.CreateDefaultContext()\n state_interpolator.set_initial_position(context=context, position=5 *\n [1.1])\n np.testing.assert_array_equal(context.get_discrete_state(0).\n CopyToVector(), np.array(5 * [1.1]))\n np.testing.assert_array_equal(context.get_discrete_state(1).\n CopyToVector(), np.array(5 * [1.1]))\n context = state_interpolator.CreateDefaultContext()\n state_interpolator.set_initial_position(state=context.get_state(),\n position=5 * [1.3])\n np.testing.assert_array_equal(context.get_discrete_state(0).\n CopyToVector(), np.array(5 * [1.3]))\n 
np.testing.assert_array_equal(context.get_discrete_state(1).\n CopyToVector(), np.array(5 * [1.3]))\n state_interpolator = StateInterpolatorWithDiscreteDerivative(\n num_positions=5, time_step=0.4, suppress_initial_transient=True)\n self.assertTrue(state_interpolator.suppress_initial_transient())\n\n @numpy_compare.check_nonsymbolic_types\n def test_log_vector_output(self, T):\n builder = DiagramBuilder_[T]()\n kSize = 1\n integrator = builder.AddSystem(Integrator_[T](kSize))\n port = integrator.get_output_port(0)\n loggers = []\n loggers.append(LogVectorOutput(port, builder))\n loggers.append(LogVectorOutput(src=port, builder=builder))\n loggers.append(LogVectorOutput(port, builder, 0.125))\n loggers.append(LogVectorOutput(src=port, builder=builder,\n publish_period=0.125))\n loggers.append(LogVectorOutput(port, builder, {TriggerType.kForced}))\n loggers.append(LogVectorOutput(src=port, builder=builder,\n publish_triggers={TriggerType.kForced}))\n loggers.append(LogVectorOutput(port, builder, {TriggerType.\n kPeriodic}, 0.125))\n loggers.append(LogVectorOutput(src=port, builder=builder,\n publish_triggers={TriggerType.kPeriodic}, publish_period=0.125))\n diagram = builder.Build()\n context = diagram.CreateDefaultContext()\n self.assertTrue(all(logger.FindLog(context).num_samples() == 0 for\n logger in loggers))\n <mask token>\n\n @numpy_compare.check_nonsymbolic_types\n def test_vector_log_sink(self, T):\n builder = DiagramBuilder_[T]()\n kSize = 1\n constructors = [VectorLogSink_[T]]\n loggers = []\n if T == float:\n constructors.append(VectorLogSink)\n for constructor in constructors:\n loggers.append(builder.AddSystem(constructor(kSize)))\n loggers.append(builder.AddSystem(constructor(input_size=kSize)))\n loggers.append(builder.AddSystem(constructor(kSize, 0.125)))\n loggers.append(builder.AddSystem(constructor(input_size=kSize,\n publish_period=0.125)))\n loggers.append(builder.AddSystem(constructor(kSize, {\n TriggerType.kForced})))\n loggers.append(builder.AddSystem(constructor(input_size=kSize,\n publish_triggers={TriggerType.kForced})))\n loggers.append(builder.AddSystem(constructor(kSize, {\n TriggerType.kPeriodic}, 0.125)))\n loggers.append(builder.AddSystem(constructor(input_size=kSize,\n publish_triggers={TriggerType.kPeriodic}, publish_period=\n 0.125)))\n diagram = builder.Build()\n context = diagram.CreateDefaultContext()\n self.assertTrue(all(logger.FindLog(context) == logger.\n FindMutableLog(context) for logger in loggers))\n loggers_and_contexts = [(x, x.GetMyContextFromRoot(context)) for x in\n loggers]\n self.assertTrue(all(logger.GetLog(logger_context) == logger.\n GetMutableLog(logger_context) for logger, logger_context in\n loggers_and_contexts))\n self.assertTrue(all(logger.GetLog(logger_context) == logger.FindLog\n (context) for logger, logger_context in loggers_and_contexts))\n",
"step-5": "import gc\nimport unittest\nimport numpy as np\n\nfrom pydrake.autodiffutils import AutoDiffXd\nfrom pydrake.common import RandomDistribution, RandomGenerator\nfrom pydrake.common.test_utilities import numpy_compare\nfrom pydrake.common.test_utilities.deprecation import catch_drake_warnings\nfrom pydrake.common.value import Value\nfrom pydrake.symbolic import Expression, Variable\nfrom pydrake.systems.framework import (\n BasicVector,\n DiagramBuilder,\n DiagramBuilder_,\n InputPort,\n TriggerType,\n VectorBase,\n)\nfrom pydrake.systems.test.test_util import (\n MyVector2,\n)\nfrom pydrake.systems.primitives import (\n Adder, Adder_,\n AddRandomInputs,\n AffineSystem, AffineSystem_,\n ConstantValueSource, ConstantValueSource_,\n ConstantVectorSource, ConstantVectorSource_,\n ControllabilityMatrix,\n Demultiplexer, Demultiplexer_,\n DiscreteDerivative, DiscreteDerivative_,\n DiscreteTimeDelay, DiscreteTimeDelay_,\n FirstOrderLowPassFilter,\n FirstOrderTaylorApproximation,\n Gain, Gain_,\n Integrator, Integrator_,\n IsControllable,\n IsDetectable,\n IsObservable,\n IsStabilizable,\n Linearize,\n LinearSystem, LinearSystem_,\n LinearTransformDensity, LinearTransformDensity_,\n LogVectorOutput,\n MatrixGain,\n Multiplexer, Multiplexer_,\n MultilayerPerceptron, MultilayerPerceptron_,\n ObservabilityMatrix,\n PassThrough, PassThrough_,\n PerceptronActivationType,\n PortSwitch, PortSwitch_,\n RandomSource,\n Saturation, Saturation_,\n SharedPointerSystem, SharedPointerSystem_,\n Sine, Sine_,\n StateInterpolatorWithDiscreteDerivative,\n StateInterpolatorWithDiscreteDerivative_,\n SymbolicVectorSystem, SymbolicVectorSystem_,\n TrajectoryAffineSystem, TrajectoryAffineSystem_,\n TrajectoryLinearSystem, TrajectoryLinearSystem_,\n TrajectorySource, TrajectorySource_,\n VectorLog, VectorLogSink, VectorLogSink_,\n WrapToSystem, WrapToSystem_,\n ZeroOrderHold, ZeroOrderHold_,\n)\nfrom pydrake.trajectories import PiecewisePolynomial\n\n\ndef compare_value(test, a, b):\n # Compares a vector or abstract value.\n if isinstance(a, VectorBase):\n test.assertTrue(np.allclose(a.get_value(), b.get_value()))\n else:\n test.assertEqual(type(a.get_value()), type(b.get_value()))\n test.assertEqual(a.get_value(), b.get_value())\n\n\nclass TestGeneral(unittest.TestCase):\n def _check_instantiations(self, template, supports_symbolic=True):\n default_cls = template[None]\n self.assertTrue(template[float] is default_cls)\n self.assertTrue(template[AutoDiffXd] is not default_cls)\n if supports_symbolic:\n self.assertTrue(template[Expression] is not default_cls)\n\n def test_instantiations(self):\n # TODO(eric.cousineau): Refine tests once NumPy functionality is\n # resolved for dtype=object, or dtype=custom is used.\n self._check_instantiations(Adder_)\n self._check_instantiations(AffineSystem_)\n self._check_instantiations(ConstantValueSource_)\n self._check_instantiations(ConstantVectorSource_)\n self._check_instantiations(Demultiplexer_)\n self._check_instantiations(DiscreteDerivative_)\n self._check_instantiations(DiscreteTimeDelay_)\n self._check_instantiations(Gain_)\n self._check_instantiations(Integrator_)\n self._check_instantiations(LinearSystem_)\n self._check_instantiations(LinearTransformDensity_,\n supports_symbolic=False)\n self._check_instantiations(Multiplexer_)\n self._check_instantiations(MultilayerPerceptron_)\n self._check_instantiations(PassThrough_)\n self._check_instantiations(PortSwitch_)\n self._check_instantiations(Saturation_)\n self._check_instantiations(SharedPointerSystem_)\n 
self._check_instantiations(Sine_)\n self._check_instantiations(StateInterpolatorWithDiscreteDerivative_)\n self._check_instantiations(SymbolicVectorSystem_)\n self._check_instantiations(TrajectoryAffineSystem_,\n supports_symbolic=False)\n self._check_instantiations(TrajectoryLinearSystem_,\n supports_symbolic=False)\n self._check_instantiations(TrajectorySource_)\n self._check_instantiations(VectorLogSink_)\n self._check_instantiations(WrapToSystem_)\n self._check_instantiations(ZeroOrderHold_)\n\n def test_linear_affine_system(self):\n # Just make sure linear system is spelled correctly.\n A = np.identity(2)\n B = np.array([[0], [1]])\n f0 = np.array([[0], [0]])\n C = np.array([[0, 1]])\n D = [1]\n y0 = [0]\n system = LinearSystem(A, B, C, D)\n context = system.CreateDefaultContext()\n self.assertEqual(system.get_input_port(0).size(), 1)\n self.assertEqual(context\n .get_mutable_continuous_state_vector().size(), 2)\n self.assertEqual(system.get_output_port(0).size(), 1)\n self.assertTrue((system.A() == A).all())\n self.assertTrue((system.B() == B).all())\n self.assertTrue((system.f0() == f0).all())\n self.assertTrue((system.C() == C).all())\n self.assertEqual(system.D(), D)\n self.assertEqual(system.y0(), y0)\n self.assertEqual(system.time_period(), 0.)\n\n x0 = np.array([1, 2])\n system.configure_default_state(x0=x0)\n system.SetDefaultContext(context)\n np.testing.assert_equal(\n context.get_continuous_state_vector().CopyToVector(), x0)\n generator = RandomGenerator()\n system.SetRandomContext(context, generator)\n np.testing.assert_equal(\n context.get_continuous_state_vector().CopyToVector(), x0)\n system.configure_random_state(covariance=np.eye(2))\n system.SetRandomContext(context, generator)\n self.assertNotEqual(\n context.get_continuous_state_vector().CopyToVector()[1], x0[1])\n\n Co = ControllabilityMatrix(system)\n self.assertEqual(Co.shape, (2, 2))\n self.assertFalse(IsControllable(system))\n self.assertFalse(IsControllable(system, 1e-6))\n self.assertFalse(IsStabilizable(sys=system))\n self.assertFalse(IsStabilizable(sys=system, threshold=1e-6))\n Ob = ObservabilityMatrix(system)\n self.assertEqual(Ob.shape, (2, 2))\n self.assertFalse(IsObservable(system))\n self.assertFalse(IsDetectable(sys=system))\n self.assertFalse(IsDetectable(sys=system, threshold=1e-6))\n\n system = AffineSystem(A, B, f0, C, D, y0, .1)\n self.assertEqual(system.get_input_port(0), system.get_input_port())\n self.assertEqual(system.get_output_port(0), system.get_output_port())\n context = system.CreateDefaultContext()\n self.assertEqual(system.get_input_port(0).size(), 1)\n self.assertEqual(context.get_discrete_state_vector().size(), 2)\n self.assertEqual(system.get_output_port(0).size(), 1)\n self.assertTrue((system.A() == A).all())\n self.assertTrue((system.B() == B).all())\n self.assertTrue((system.f0() == f0).all())\n self.assertTrue((system.C() == C).all())\n self.assertEqual(system.D(), D)\n self.assertEqual(system.y0(), y0)\n self.assertEqual(system.time_period(), .1)\n\n system.get_input_port(0).FixValue(context, 0)\n linearized = Linearize(system, context)\n self.assertTrue((linearized.A() == A).all())\n taylor = FirstOrderTaylorApproximation(system, context)\n self.assertTrue((taylor.y0() == y0).all())\n\n new_A = np.array([[1, 2], [3, 4]])\n new_B = np.array([[5], [6]])\n new_f0 = np.array([[7], [8]])\n new_C = np.array([[9, 10]])\n new_D = np.array([[11]])\n new_y0 = np.array([12])\n system.UpdateCoefficients(\n A=new_A, B=new_B, f0=new_f0, C=new_C, D=new_D, y0=new_y0\n )\n 
np.testing.assert_equal(new_A, system.A())\n np.testing.assert_equal(new_B, system.B())\n np.testing.assert_equal(new_f0.flatten(), system.f0())\n np.testing.assert_equal(new_C, system.C())\n np.testing.assert_equal(new_D, system.D())\n np.testing.assert_equal(new_y0, system.y0())\n\n system = MatrixGain(D=A)\n self.assertTrue((system.D() == A).all())\n\n system = TrajectoryAffineSystem(\n PiecewisePolynomial(A),\n PiecewisePolynomial(B),\n PiecewisePolynomial(f0),\n PiecewisePolynomial(C),\n PiecewisePolynomial(D),\n PiecewisePolynomial(y0),\n .1)\n self.assertEqual(system.get_input_port(0), system.get_input_port())\n self.assertEqual(system.get_output_port(0), system.get_output_port())\n context = system.CreateDefaultContext()\n self.assertEqual(system.get_input_port(0).size(), 1)\n self.assertEqual(context.get_discrete_state_vector().size(), 2)\n self.assertEqual(system.get_output_port(0).size(), 1)\n for t in np.linspace(0., 1., 5):\n self.assertTrue((system.A(t) == A).all())\n self.assertTrue((system.B(t) == B).all())\n self.assertTrue((system.f0(t) == f0).all())\n self.assertTrue((system.C(t) == C).all())\n self.assertEqual(system.D(t), D)\n self.assertEqual(system.y0(t), y0)\n self.assertEqual(system.time_period(), .1)\n x0 = np.array([1, 2])\n system.configure_default_state(x0=x0)\n system.SetDefaultContext(context)\n np.testing.assert_equal(\n context.get_discrete_state_vector().CopyToVector(), x0)\n generator = RandomGenerator()\n system.SetRandomContext(context, generator)\n np.testing.assert_equal(\n context.get_discrete_state_vector().CopyToVector(), x0)\n system.configure_random_state(covariance=np.eye(2))\n system.SetRandomContext(context, generator)\n self.assertNotEqual(\n context.get_discrete_state_vector().CopyToVector()[1], x0[1])\n\n system = TrajectoryLinearSystem(\n A=PiecewisePolynomial(A),\n B=PiecewisePolynomial(B),\n C=PiecewisePolynomial(C),\n D=PiecewisePolynomial(D),\n time_period=0.1)\n self.assertEqual(system.time_period(), .1)\n system.configure_default_state(x0=np.array([1, 2]))\n system.configure_random_state(covariance=np.eye(2))\n\n def test_linear_affine_system_empty_matrices(self):\n # Confirm the default values for the system matrices in the\n # constructor.\n def CheckSizes(system, num_states, num_inputs, num_outputs):\n self.assertEqual(system.num_continuous_states(), num_states)\n self.assertEqual(system.num_inputs(), num_inputs)\n self.assertEqual(system.num_outputs(), num_outputs)\n\n # A constant vector system.\n system = AffineSystem(y0=[2, 1])\n CheckSizes(system, num_states=0, num_inputs=0, num_outputs=2)\n\n # A matrix gain.\n system = AffineSystem(D=np.eye(2))\n CheckSizes(system, num_states=0, num_inputs=2, num_outputs=2)\n system = LinearSystem(D=np.eye(2))\n CheckSizes(system, num_states=0, num_inputs=2, num_outputs=2)\n\n # Add an offset.\n system = AffineSystem(D=np.eye(2), y0=[1, 2])\n CheckSizes(system, num_states=0, num_inputs=2, num_outputs=2)\n\n # An integrator.\n system = LinearSystem(B=np.eye(2))\n CheckSizes(system, num_states=2, num_inputs=2, num_outputs=0)\n\n def test_linear_system_zero_size(self):\n # Explicitly test #12633.\n num_x = 0\n num_y = 2\n num_u = 2\n A = np.zeros((num_x, num_x))\n B = np.zeros((num_x, num_u))\n C = np.zeros((num_y, num_x))\n D = np.zeros((num_y, num_u))\n self.assertIsNotNone(LinearSystem(A, B, C, D))\n\n @numpy_compare.check_nonsymbolic_types\n def test_linear_transform_density(self, T):\n dut = LinearTransformDensity_[T](\n distribution=RandomDistribution.kGaussian,\n input_size=3,\n 
output_size=3)\n w_in = np.array([T(0.5), T(0.1), T(1.5)])\n context = dut.CreateDefaultContext()\n dut.get_input_port_w_in().FixValue(context, w_in)\n self.assertEqual(dut.get_input_port_A().size(), 9)\n self.assertEqual(dut.get_input_port_b().size(), 3)\n self.assertEqual(dut.get_distribution(), RandomDistribution.kGaussian)\n A = np.array([\n [T(0.5), T(1), T(2)], [T(1), T(2), T(3)], [T(3), T(4), T(5)]])\n dut.FixConstantA(context=context, A=A)\n b = np.array([T(1), T(2), T(3)])\n dut.FixConstantB(context=context, b=b)\n\n dut.CalcDensity(context=context)\n\n self.assertEqual(dut.get_output_port_w_out().size(), 3)\n self.assertEqual(dut.get_output_port_w_out_density().size(), 1)\n\n def test_vector_pass_through(self):\n model_value = BasicVector([1., 2, 3])\n system = PassThrough(vector_size=model_value.size())\n context = system.CreateDefaultContext()\n system.get_input_port(0).FixValue(context, model_value)\n output = system.AllocateOutput()\n input_eval = system.EvalVectorInput(context, 0)\n compare_value(self, input_eval, model_value)\n system.CalcOutput(context, output)\n output_value = output.get_vector_data(0)\n compare_value(self, output_value, model_value)\n\n def test_default_vector_pass_through(self):\n model_value = [1., 2, 3]\n system = PassThrough(value=model_value)\n context = system.CreateDefaultContext()\n np.testing.assert_array_equal(\n model_value, system.get_output_port().Eval(context))\n\n def test_abstract_pass_through(self):\n model_value = Value(\"Hello world\")\n system = PassThrough(abstract_model_value=model_value)\n context = system.CreateDefaultContext()\n system.get_input_port(0).FixValue(context, model_value)\n output = system.AllocateOutput()\n input_eval = system.EvalAbstractInput(context, 0)\n compare_value(self, input_eval, model_value)\n system.CalcOutput(context, output)\n output_value = output.get_data(0)\n compare_value(self, output_value, model_value)\n\n def test_port_switch(self):\n system = PortSwitch(vector_size=2)\n a = system.DeclareInputPort(name=\"a\")\n system.DeclareInputPort(name=\"b\")\n context = system.CreateDefaultContext()\n self.assertIsInstance(a, InputPort)\n system.get_port_selector_input_port().FixValue(context, a.get_index())\n\n def test_first_order_low_pass_filter(self):\n filter1 = FirstOrderLowPassFilter(time_constant=3.0, size=4)\n self.assertEqual(filter1.get_time_constant(), 3.0)\n\n alpha = np.array([1, 2, 3])\n filter2 = FirstOrderLowPassFilter(time_constants=alpha)\n np.testing.assert_array_equal(filter2.get_time_constants_vector(),\n alpha)\n\n context = filter2.CreateDefaultContext()\n filter2.set_initial_output_value(context, [0., -0.2, 0.4])\n\n def test_gain(self):\n k = 42.\n input_size = 10\n systems = [Gain(k=k, size=input_size),\n Gain(k=k*np.ones(input_size))]\n\n for system in systems:\n context = system.CreateDefaultContext()\n output = system.AllocateOutput()\n\n def mytest(input, expected):\n system.get_input_port(0).FixValue(context, input)\n system.CalcOutput(context, output)\n self.assertTrue(np.allclose(output.get_vector_data(\n 0).CopyToVector(), expected))\n\n test_input = np.arange(input_size)\n mytest(np.arange(input_size), k*np.arange(input_size))\n\n def test_saturation(self):\n system = Saturation((0., -1., 3.), (1., 2., 4.))\n context = system.CreateDefaultContext()\n output = system.AllocateOutput()\n\n def mytest(input, expected):\n system.get_input_port(0).FixValue(context, input)\n system.CalcOutput(context, output)\n self.assertTrue(np.allclose(output.get_vector_data(\n 
0).CopyToVector(), expected))\n\n mytest((-5., 5., 4.), (0., 2., 4.))\n mytest((.4, 0., 3.5), (.4, 0., 3.5))\n\n def test_trajectory_source(self):\n ppt = PiecewisePolynomial.FirstOrderHold(\n [0., 1.], [[2., 3.], [2., 1.]])\n system = TrajectorySource(trajectory=ppt,\n output_derivative_order=0,\n zero_derivatives_beyond_limits=True)\n context = system.CreateDefaultContext()\n output = system.AllocateOutput()\n\n def mytest(input, expected):\n context.SetTime(input)\n system.CalcOutput(context, output)\n self.assertTrue(np.allclose(output.get_vector_data(\n 0).CopyToVector(), expected))\n\n mytest(0.0, (2.0, 2.0))\n mytest(0.5, (2.5, 1.5))\n mytest(1.0, (3.0, 1.0))\n\n ppt2 = PiecewisePolynomial.FirstOrderHold(\n [0., 1.], [[4., 6.], [4., 2.]])\n system.UpdateTrajectory(trajectory=ppt2)\n mytest(0.0, (4.0, 4.0))\n mytest(0.5, (5.0, 3.0))\n mytest(1.0, (6.0, 2.0))\n\n def test_symbolic_vector_system(self):\n t = Variable(\"t\")\n x = [Variable(\"x0\"), Variable(\"x1\")]\n u = [Variable(\"u0\"), Variable(\"u1\")]\n system = SymbolicVectorSystem(time=t, state=x, input=u,\n dynamics=[x[0] + x[1], t],\n output=[u[1]],\n time_period=0.0)\n context = system.CreateDefaultContext()\n\n self.assertEqual(context.num_continuous_states(), 2)\n self.assertEqual(context.num_discrete_state_groups(), 0)\n self.assertEqual(system.get_input_port(0).size(), 2)\n self.assertEqual(system.get_output_port(0).size(), 1)\n self.assertEqual(context.num_abstract_parameters(), 0)\n self.assertEqual(context.num_numeric_parameter_groups(), 0)\n self.assertTrue(system.dynamics_for_variable(x[0])\n .EqualTo(x[0] + x[1]))\n self.assertTrue(system.dynamics_for_variable(x[1])\n .EqualTo(t))\n\n def test_symbolic_vector_system_parameters(self):\n t = Variable(\"t\")\n x = [Variable(\"x0\"), Variable(\"x1\")]\n u = [Variable(\"u0\"), Variable(\"u1\")]\n p = [Variable(\"p0\"), Variable(\"p1\")]\n system = SymbolicVectorSystem(time=t, state=x, input=u,\n parameter=p,\n dynamics=[p[0] * x[0] + x[1] + p[1], t],\n output=[u[1]],\n time_period=0.0)\n context = system.CreateDefaultContext()\n\n self.assertEqual(context.num_continuous_states(), 2)\n self.assertEqual(context.num_discrete_state_groups(), 0)\n self.assertEqual(system.get_input_port(0).size(), 2)\n self.assertEqual(system.get_output_port(0).size(), 1)\n self.assertEqual(context.num_abstract_parameters(), 0)\n self.assertEqual(context.num_numeric_parameter_groups(), 1)\n self.assertEqual(context.get_numeric_parameter(0).size(), 2)\n self.assertTrue(system.dynamics_for_variable(x[0])\n .EqualTo(p[0] * x[0] + x[1] + p[1]))\n self.assertTrue(system.dynamics_for_variable(x[1])\n .EqualTo(t))\n\n def test_wrap_to_system(self):\n system = WrapToSystem(2)\n system.set_interval(1, 1., 2.)\n context = system.CreateDefaultContext()\n output = system.AllocateOutput()\n\n def mytest(input, expected):\n system.get_input_port(0).FixValue(context, input)\n system.CalcOutput(context, output)\n self.assertTrue(np.allclose(output.get_vector_data(\n 0).CopyToVector(), expected))\n\n mytest((-1.5, 0.5), (-1.5, 1.5))\n mytest((.2, .3), (.2, 1.3))\n\n def test_demultiplexer(self):\n # Test demultiplexer with scalar outputs.\n demux = Demultiplexer(size=4)\n context = demux.CreateDefaultContext()\n self.assertEqual(demux.num_input_ports(), 1)\n self.assertEqual(demux.num_output_ports(), 4)\n numpy_compare.assert_equal(demux.get_output_ports_sizes(),\n [1, 1, 1, 1])\n\n input_vec = np.array([1., 2., 3., 4.])\n demux.get_input_port(0).FixValue(context, input_vec)\n output = 
demux.AllocateOutput()\n demux.CalcOutput(context, output)\n\n for i in range(4):\n self.assertTrue(\n np.allclose(output.get_vector_data(i).get_value(),\n input_vec[i]))\n\n # Test demultiplexer with vector outputs.\n demux = Demultiplexer(size=4, output_ports_size=2)\n context = demux.CreateDefaultContext()\n self.assertEqual(demux.num_input_ports(), 1)\n self.assertEqual(demux.num_output_ports(), 2)\n numpy_compare.assert_equal(demux.get_output_ports_sizes(), [2, 2])\n\n demux.get_input_port(0).FixValue(context, input_vec)\n output = demux.AllocateOutput()\n demux.CalcOutput(context, output)\n\n for i in range(2):\n self.assertTrue(\n np.allclose(output.get_vector_data(i).get_value(),\n input_vec[2*i:2*i+2]))\n\n # Test demultiplexer with different output port sizes.\n output_ports_sizes = np.array([1, 2, 1])\n num_output_ports = output_ports_sizes.size\n input_vec = np.array([1., 2., 3., 4.])\n demux = Demultiplexer(output_ports_sizes=output_ports_sizes)\n context = demux.CreateDefaultContext()\n self.assertEqual(demux.num_input_ports(), 1)\n self.assertEqual(demux.num_output_ports(), num_output_ports)\n numpy_compare.assert_equal(demux.get_output_ports_sizes(),\n output_ports_sizes)\n\n demux.get_input_port(0).FixValue(context, input_vec)\n output = demux.AllocateOutput()\n demux.CalcOutput(context, output)\n\n output_port_start = 0\n for i in range(num_output_ports):\n output_port_size = output.get_vector_data(i).size()\n self.assertTrue(\n np.allclose(output.get_vector_data(i).get_value(),\n input_vec[output_port_start:\n output_port_start+output_port_size]))\n output_port_start += output_port_size\n\n def test_multiplexer(self):\n my_vector = MyVector2(data=[1., 2.])\n test_cases = [\n dict(has_vector=False, mux=Multiplexer(num_scalar_inputs=4),\n data=[[5.], [3.], [4.], [2.]]),\n dict(has_vector=False, mux=Multiplexer(input_sizes=[2, 3]),\n data=[[8., 4.], [3., 6., 9.]]),\n dict(has_vector=True, mux=Multiplexer(model_vector=my_vector),\n data=[[42.], [3.]]),\n ]\n for case in test_cases:\n mux = case['mux']\n port_size = sum([len(vec) for vec in case['data']])\n self.assertEqual(mux.get_output_port(0).size(), port_size)\n context = mux.CreateDefaultContext()\n output = mux.AllocateOutput()\n num_ports = len(case['data'])\n self.assertEqual(context.num_input_ports(), num_ports)\n for j, vec in enumerate(case['data']):\n mux.get_input_port(j).FixValue(context, vec)\n mux.CalcOutput(context, output)\n self.assertTrue(\n np.allclose(output.get_vector_data(0).get_value(),\n [elem for vec in case['data'] for elem in vec]))\n if case['has_vector']:\n # Check the type matches MyVector2.\n value = output.get_vector_data(0)\n self.assertTrue(isinstance(value, MyVector2))\n\n def test_multilayer_perceptron(self):\n mlp = MultilayerPerceptron(\n layers=[1, 2, 3], activation_type=PerceptronActivationType.kReLU)\n self.assertEqual(mlp.get_input_port().size(), 1)\n self.assertEqual(mlp.get_output_port().size(), 3)\n context = mlp.CreateDefaultContext()\n params = np.zeros((mlp.num_parameters(), 1))\n self.assertEqual(mlp.num_parameters(), 13)\n self.assertEqual(mlp.layers(), [1, 2, 3])\n self.assertEqual(mlp.activation_type(layer=0),\n PerceptronActivationType.kReLU)\n self.assertEqual(len(mlp.GetParameters(context=context)),\n mlp.num_parameters())\n mlp.SetWeights(context=context, layer=0, W=np.array([[1], [2]]))\n mlp.SetBiases(context=context, layer=0, b=[3, 4])\n np.testing.assert_array_equal(\n mlp.GetWeights(context=context, layer=0), np.array([[1], [2]]))\n 
np.testing.assert_array_equal(\n mlp.GetBiases(context=context, layer=0), np.array([3, 4]))\n params = np.zeros(mlp.num_parameters())\n mlp.SetWeights(params=params, layer=0, W=np.array([[1], [2]]))\n mlp.SetBiases(params=params, layer=0, b=[3, 4])\n np.testing.assert_array_equal(\n mlp.GetWeights(params=params, layer=0), np.array([[1], [2]]))\n np.testing.assert_array_equal(\n mlp.GetBiases(params=params, layer=0), np.array([3, 4]))\n mutable_params = mlp.GetMutableParameters(context=context)\n mutable_params[:] = 3.0\n np.testing.assert_array_equal(mlp.GetParameters(context),\n np.full(mlp.num_parameters(), 3.0))\n\n global called_loss\n called_loss = False\n\n def silly_loss(Y, dloss_dY):\n global called_loss\n called_loss = True\n # We must be careful to update the dloss in place, rather than bind\n # a new matrix to the same variable name.\n dloss_dY[:] = 1\n # dloss_dY = np.array(...etc...) # <== wrong\n return Y.sum()\n\n dloss_dparams = np.zeros((13,))\n generator = RandomGenerator(23)\n mlp.SetRandomContext(context, generator)\n mlp.Backpropagation(context=context,\n X=np.array([1, 3, 4]).reshape((1, 3)),\n loss=silly_loss,\n dloss_dparams=dloss_dparams)\n self.assertTrue(called_loss)\n self.assertTrue(dloss_dparams.any()) # No longer all zero.\n\n dloss_dparams = np.zeros((13,))\n mlp.BackpropagationMeanSquaredError(context=context,\n X=np.array([1, 3, 4]).reshape(\n (1, 3)),\n Y_desired=np.eye(3),\n dloss_dparams=dloss_dparams)\n self.assertTrue(dloss_dparams.any()) # No longer all zero.\n\n Y = np.asfortranarray(np.eye(3))\n mlp.BatchOutput(context=context, X=np.array([[0.1, 0.3, 0.4]]), Y=Y)\n self.assertFalse(np.allclose(Y, np.eye(3)))\n Y2 = mlp.BatchOutput(context=context, X=np.array([[0.1, 0.3, 0.4]]))\n np.testing.assert_array_equal(Y, Y2)\n\n mlp2 = MultilayerPerceptron(layers=[3, 2, 1],\n activation_types=[\n PerceptronActivationType.kReLU,\n PerceptronActivationType.kTanh\n ])\n self.assertEqual(mlp2.activation_type(0),\n PerceptronActivationType.kReLU)\n self.assertEqual(mlp2.activation_type(1),\n PerceptronActivationType.kTanh)\n Y = np.asfortranarray(np.full((1, 3), 2.4))\n dYdX = np.asfortranarray(np.full((3, 3), 5.3))\n context2 = mlp2.CreateDefaultContext()\n mlp2.BatchOutput(context=context2, X=np.eye(3), Y=Y, dYdX=dYdX)\n # The default context sets the weights and biases to zero, so the\n # output (and gradients) should be zero.\n np.testing.assert_array_almost_equal(Y, np.zeros((1, 3)))\n np.testing.assert_array_almost_equal(dYdX, np.zeros((3, 3)))\n\n mlp = MultilayerPerceptron(use_sin_cos_for_input=[True, False],\n remaining_layers=[3, 2],\n activation_types=[\n PerceptronActivationType.kReLU,\n PerceptronActivationType.kTanh\n ])\n self.assertEqual(mlp.get_input_port().size(), 2)\n np.testing.assert_array_equal(mlp.layers(), [3, 3, 2])\n\n def test_random_source(self):\n source = RandomSource(distribution=RandomDistribution.kUniform,\n num_outputs=2, sampling_interval_sec=0.01)\n self.assertEqual(source.get_output_port(0).size(), 2)\n\n builder = DiagramBuilder()\n # Note: There are no random inputs to add to the empty diagram, but it\n # confirms the API works.\n AddRandomInputs(sampling_interval_sec=0.01, builder=builder)\n\n builder_ad = DiagramBuilder_[AutoDiffXd]()\n AddRandomInputs(sampling_interval_sec=0.01, builder=builder_ad)\n\n def test_constant_vector_source(self):\n source = ConstantVectorSource(source_value=[1., 2.])\n context = source.CreateDefaultContext()\n source.get_source_value(context)\n source.get_mutable_source_value(context)\n\n def 
test_ctor_api(self):\n \"\"\"Tests construction of systems for systems whose executions semantics\n are not tested above.\n \"\"\"\n ConstantValueSource(Value(\"Hello world\"))\n DiscreteTimeDelay(update_sec=0.1, delay_time_steps=5, vector_size=2)\n DiscreteTimeDelay(\n update_sec=0.1, delay_time_steps=5,\n abstract_model_value=Value(\"Hello world\"))\n with catch_drake_warnings(expected_count=2) as w:\n DiscreteTimeDelay(update_sec=0.1, delay_timesteps=5, vector_size=2)\n DiscreteTimeDelay(\n update_sec=0.1, delay_timesteps=5,\n abstract_model_value=Value(\"Hello world\"))\n\n ZeroOrderHold(period_sec=0.1, offset_sec=0.0, vector_size=2)\n dut = ZeroOrderHold(period_sec=1.0, offset_sec=0.25,\n abstract_model_value=Value(\"Hello world\"))\n self.assertEqual(dut.period(), 1.0)\n self.assertEqual(dut.offset(), 0.25)\n\n def test_shared_pointer_system_ctor(self):\n dut = SharedPointerSystem(value_to_hold=[1, 2, 3])\n readback = dut.get()\n self.assertListEqual(readback, [1, 2, 3])\n del dut\n self.assertListEqual(readback, [1, 2, 3])\n\n def test_shared_pointer_system_builder(self):\n builder = DiagramBuilder()\n self.assertListEqual(\n SharedPointerSystem.AddToBuilder(\n builder=builder, value_to_hold=[1, 2, 3]),\n [1, 2, 3])\n diagram = builder.Build()\n del builder\n readback = diagram.GetSystems()[0].get()\n self.assertListEqual(readback, [1, 2, 3])\n del diagram\n self.assertListEqual(readback, [1, 2, 3])\n\n def test_sine(self):\n # Test scalar output.\n sine_source = Sine(amplitude=1, frequency=2, phase=3,\n size=1, is_time_based=True)\n self.assertEqual(sine_source.get_output_port(0).size(), 1)\n self.assertEqual(sine_source.get_output_port(1).size(), 1)\n self.assertEqual(sine_source.get_output_port(2).size(), 1)\n\n # Test vector output.\n sine_source = Sine(amplitude=1, frequency=2, phase=3,\n size=3, is_time_based=True)\n self.assertEqual(sine_source.get_output_port(0).size(), 3)\n self.assertEqual(sine_source.get_output_port(1).size(), 3)\n self.assertEqual(sine_source.get_output_port(2).size(), 3)\n\n sine_source = Sine(amplitudes=np.ones(2), frequencies=np.ones(2),\n phases=np.ones(2), is_time_based=True)\n self.assertEqual(sine_source.get_output_port(0).size(), 2)\n self.assertEqual(sine_source.get_output_port(1).size(), 2)\n self.assertEqual(sine_source.get_output_port(2).size(), 2)\n\n def test_discrete_derivative(self):\n discrete_derivative = DiscreteDerivative(num_inputs=5, time_step=0.5)\n self.assertEqual(discrete_derivative.get_input_port(0).size(), 5)\n self.assertEqual(discrete_derivative.get_output_port(0).size(), 5)\n self.assertEqual(discrete_derivative.time_step(), 0.5)\n self.assertTrue(discrete_derivative.suppress_initial_transient())\n\n discrete_derivative = DiscreteDerivative(\n num_inputs=5, time_step=0.5, suppress_initial_transient=False)\n self.assertFalse(discrete_derivative.suppress_initial_transient())\n\n def test_state_interpolator_with_discrete_derivative(self):\n state_interpolator = StateInterpolatorWithDiscreteDerivative(\n num_positions=5, time_step=0.4)\n self.assertEqual(state_interpolator.get_input_port(0).size(), 5)\n self.assertEqual(state_interpolator.get_output_port(0).size(), 10)\n self.assertTrue(state_interpolator.suppress_initial_transient())\n\n # test set_initial_position using context\n context = state_interpolator.CreateDefaultContext()\n state_interpolator.set_initial_position(\n context=context, position=5*[1.1])\n np.testing.assert_array_equal(\n context.get_discrete_state(0).CopyToVector(),\n np.array(5*[1.1]))\n 
np.testing.assert_array_equal(\n context.get_discrete_state(1).CopyToVector(),\n np.array(5*[1.1]))\n\n # test set_initial_position using state\n context = state_interpolator.CreateDefaultContext()\n state_interpolator.set_initial_position(\n state=context.get_state(), position=5*[1.3])\n np.testing.assert_array_equal(\n context.get_discrete_state(0).CopyToVector(),\n np.array(5*[1.3]))\n np.testing.assert_array_equal(\n context.get_discrete_state(1).CopyToVector(),\n np.array(5*[1.3]))\n\n state_interpolator = StateInterpolatorWithDiscreteDerivative(\n num_positions=5, time_step=0.4, suppress_initial_transient=True)\n self.assertTrue(state_interpolator.suppress_initial_transient())\n\n @numpy_compare.check_nonsymbolic_types\n def test_log_vector_output(self, T):\n # Add various redundant loggers to a system, to exercise the\n # LogVectorOutput bindings.\n builder = DiagramBuilder_[T]()\n kSize = 1\n integrator = builder.AddSystem(Integrator_[T](kSize))\n port = integrator.get_output_port(0)\n loggers = []\n loggers.append(LogVectorOutput(port, builder))\n loggers.append(LogVectorOutput(src=port, builder=builder))\n loggers.append(LogVectorOutput(port, builder, 0.125))\n loggers.append(LogVectorOutput(\n src=port, builder=builder, publish_period=0.125))\n\n loggers.append(LogVectorOutput(port, builder, {TriggerType.kForced}))\n loggers.append(LogVectorOutput(\n src=port, builder=builder, publish_triggers={TriggerType.kForced}))\n loggers.append(LogVectorOutput(\n port, builder, {TriggerType.kPeriodic}, 0.125))\n loggers.append(LogVectorOutput(\n src=port, builder=builder,\n publish_triggers={TriggerType.kPeriodic}, publish_period=0.125))\n\n # Check the returned loggers by calling some trivial methods.\n diagram = builder.Build()\n context = diagram.CreateDefaultContext()\n self.assertTrue(all(logger.FindLog(context).num_samples() == 0\n for logger in loggers))\n\n @numpy_compare.check_nonsymbolic_types\n def test_vector_log(self, T):\n kSize = 1\n dut = VectorLog(kSize)\n self.assertEqual(dut.get_input_size(), kSize)\n dut.AddData(0.1, [22.22])\n self.assertEqual(dut.num_samples(), 1)\n self.assertEqual(dut.sample_times(), [0.1])\n self.assertEqual(dut.data(), [22.22])\n dut.Clear()\n self.assertEqual(dut.num_samples(), 0)\n # There is no good way from python to test the semantics of Reserve(),\n # but test the binding anyway.\n dut.Reserve(VectorLog.kDefaultCapacity * 3)\n\n @numpy_compare.check_nonsymbolic_types\n def test_vector_log_sink(self, T):\n # Add various redundant loggers to a system, to exercise the\n # VectorLog constructor bindings.\n builder = DiagramBuilder_[T]()\n kSize = 1\n constructors = [VectorLogSink_[T]]\n loggers = []\n if T == float:\n constructors.append(VectorLogSink)\n for constructor in constructors:\n loggers.append(builder.AddSystem(constructor(kSize)))\n loggers.append(builder.AddSystem(constructor(input_size=kSize)))\n loggers.append(builder.AddSystem(constructor(kSize, 0.125)))\n loggers.append(builder.AddSystem(\n constructor(input_size=kSize, publish_period=0.125)))\n loggers.append(builder.AddSystem(\n constructor(kSize, {TriggerType.kForced})))\n loggers.append(builder.AddSystem(\n constructor(input_size=kSize,\n publish_triggers={TriggerType.kForced})))\n loggers.append(builder.AddSystem(\n constructor(kSize, {TriggerType.kPeriodic}, 0.125)))\n loggers.append(builder.AddSystem(\n constructor(input_size=kSize,\n publish_triggers={TriggerType.kPeriodic},\n publish_period=0.125)))\n\n # Exercise all of the log access methods.\n diagram = 
builder.Build()\n context = diagram.CreateDefaultContext()\n # FindLog and FindMutableLog find the same object.\n self.assertTrue(\n all(logger.FindLog(context) == logger.FindMutableLog(context)\n for logger in loggers))\n # Build a list of pairs of loggers and their local contexts.\n loggers_and_contexts = [(x, x.GetMyContextFromRoot(context))\n for x in loggers]\n # GetLog and GetMutableLog find the same object.\n self.assertTrue(\n all(logger.GetLog(logger_context)\n == logger.GetMutableLog(logger_context)\n for logger, logger_context in loggers_and_contexts))\n # GetLog and FindLog find the same object, given the proper contexts.\n self.assertTrue(\n all(logger.GetLog(logger_context) == logger.FindLog(context)\n for logger, logger_context in loggers_and_contexts))\n",
"step-ids": [
25,
26,
28,
30,
35
]
}
|
[
25,
26,
28,
30,
35
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
hp.mollview(fu, title='Full map +50 GLAT', sub=311)
hp.mollview(se, title='Above threshold (4.0) +50 GLAT', sub=312)
hp.mollview(ma, title='Diff +50 GLAT', sub=313)
plt.savefig('figs/diff4a.pdf', bbox_inches='tight', pad_inches=0.106)
plt.show()
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
start = time.time()
path = '/Users/trygveleithesvalheim/datafiles/'
NN = 338
NS = 438
nside = 512
npix = 12 * nside ** 2
z = np.zeros((1680, 4320))
<|reserved_special_token_0|>
c1 = 420
c2 = 475
hdulist = pf.open(path + 'data/cubesCombinedN.fits')
Nfull = hdulist[0].data
hdulist = pf.open(path + 'data/cubesCombinedS.fits')
Sfull = hdulist[0].data
fullLOSN = Nfull[c1:c2].sum(axis=0)
fullLOSS = Sfull[c1:c2].sum(axis=0)
fullLOSS = np.concatenate((fullLOSS, z), axis=0)
fullLOSN = np.concatenate((z, fullLOSN), axis=0)
full = fullLOSN + fullLOSS
<|reserved_special_token_0|>
hdulist = pf.open(path + 'data/LOScloudsN.fits')
LOScloudsN = hdulist[0].data
hdulist = pf.open(path + 'data/LOScloudsS.fits')
LOScloudsS = hdulist[0].data
LOScloudsN = LOScloudsN.sum(axis=0)
LOScloudsS = LOScloudsS.sum(axis=0)
LOScloudsS = np.concatenate((LOScloudsS, z), axis=0)
LOScloudsN = np.concatenate((z, LOScloudsN), axis=0)
image_array = LOScloudsN + LOScloudsS
<|reserved_special_token_0|>
theta = np.linspace(0, np.pi, num=image_array.shape[0])[:, None]
phi = np.linspace(-np.pi, np.pi, num=image_array.shape[1])
pix = hp.ang2pix(nside, theta, phi)
<|reserved_special_token_0|>
healpix_map = np.zeros(hp.nside2npix(nside), dtype=np.double)
healpix_map[pix] = image_array
<|reserved_special_token_0|>
full_map = np.zeros(hp.nside2npix(nside), dtype=np.double)
full_map[pix] = full
<|reserved_special_token_0|>
le = full_map - healpix_map
ma = le[::-1]
ma[np.where(ma == 0)] = hp.UNSEEN
full_map[np.where(full_map == 0)] = hp.UNSEEN
fu = full_map[::-1]
healpix_map[np.where(healpix_map == 0)] = hp.UNSEEN
se = healpix_map[::-1]
<|reserved_special_token_0|>
hp.mollview(fu, title='Full map +50 GLAT', sub=311)
hp.mollview(se, title='Above threshold (4.0) +50 GLAT', sub=312)
hp.mollview(ma, title='Diff +50 GLAT', sub=313)
plt.savefig('figs/diff4a.pdf', bbox_inches='tight', pad_inches=0.106)
plt.show()
<|reserved_special_token_0|>
<|reserved_special_token_1|>
from skimage import data, filters, measure, exposure
from skimage.filters import threshold_mean
from skimage.transform import resize
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import pyfits as pf
import time
import numpy as np
import healpy as hp
from healpy.projector import CartesianProj
from healpy.projector import MollweideProj
start = time.time()
path = '/Users/trygveleithesvalheim/datafiles/'
NN = 338
NS = 438
nside = 512
npix = 12 * nside ** 2
z = np.zeros((1680, 4320))
<|reserved_special_token_0|>
c1 = 420
c2 = 475
hdulist = pf.open(path + 'data/cubesCombinedN.fits')
Nfull = hdulist[0].data
hdulist = pf.open(path + 'data/cubesCombinedS.fits')
Sfull = hdulist[0].data
fullLOSN = Nfull[c1:c2].sum(axis=0)
fullLOSS = Sfull[c1:c2].sum(axis=0)
fullLOSS = np.concatenate((fullLOSS, z), axis=0)
fullLOSN = np.concatenate((z, fullLOSN), axis=0)
full = fullLOSN + fullLOSS
<|reserved_special_token_0|>
hdulist = pf.open(path + 'data/LOScloudsN.fits')
LOScloudsN = hdulist[0].data
hdulist = pf.open(path + 'data/LOScloudsS.fits')
LOScloudsS = hdulist[0].data
LOScloudsN = LOScloudsN.sum(axis=0)
LOScloudsS = LOScloudsS.sum(axis=0)
LOScloudsS = np.concatenate((LOScloudsS, z), axis=0)
LOScloudsN = np.concatenate((z, LOScloudsN), axis=0)
image_array = LOScloudsN + LOScloudsS
<|reserved_special_token_0|>
theta = np.linspace(0, np.pi, num=image_array.shape[0])[:, None]
phi = np.linspace(-np.pi, np.pi, num=image_array.shape[1])
pix = hp.ang2pix(nside, theta, phi)
<|reserved_special_token_0|>
healpix_map = np.zeros(hp.nside2npix(nside), dtype=np.double)
healpix_map[pix] = image_array
<|reserved_special_token_0|>
full_map = np.zeros(hp.nside2npix(nside), dtype=np.double)
full_map[pix] = full
<|reserved_special_token_0|>
le = full_map - healpix_map
ma = le[::-1]
ma[np.where(ma == 0)] = hp.UNSEEN
full_map[np.where(full_map == 0)] = hp.UNSEEN
fu = full_map[::-1]
healpix_map[np.where(healpix_map == 0)] = hp.UNSEEN
se = healpix_map[::-1]
<|reserved_special_token_0|>
hp.mollview(fu, title='Full map +50 GLAT', sub=311)
hp.mollview(se, title='Above threshold (4.0) +50 GLAT', sub=312)
hp.mollview(ma, title='Diff +50 GLAT', sub=313)
plt.savefig('figs/diff4a.pdf', bbox_inches='tight', pad_inches=0.106)
plt.show()
<|reserved_special_token_0|>
<|reserved_special_token_1|>
from skimage import data, filters, measure, exposure
from skimage.filters import threshold_mean
from skimage.transform import resize
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import pyfits as pf
import time
import numpy as np
import healpy as hp
from healpy.projector import CartesianProj
from healpy.projector import MollweideProj
# [email protected]
start = time.time()
path = '/Users/trygveleithesvalheim/datafiles/'
# MAX SLIDES ON N: 134 (ID:135) and 136 (ID:137)
# MAX SLIDES ON S: 6 (ID:7)
# Data: 360 degrees longitude, 50-90 latitude
# Dimensions: (480,4320)
# Full map: (2160,4320) need to add 1680
NN = 338 # Identified clouds in N. hemisphere
NS = 438 # Identified clouds in S. hemisphere
nside = 512
npix = 12*nside**2
z = np.zeros((1680, 4320)) # Extra array for full sky array
"""
full
"""
c1 = 420
c2 = 475
hdulist = pf.open(path+'data/cubesCombinedN.fits')
Nfull = hdulist[0].data
hdulist = pf.open(path+'data/cubesCombinedS.fits')
Sfull = hdulist[0].data
fullLOSN = Nfull[c1:c2].sum(axis=(0))
fullLOSS = Sfull[c1:c2].sum(axis=(0))
# Add empty array for converting to full sky
fullLOSS = np.concatenate((fullLOSS, z), axis=0)
fullLOSN = np.concatenate((z,fullLOSN), axis=0)
full = fullLOSN + fullLOSS
"""
Add full first
"""
hdulist = pf.open(path+'data/LOScloudsN.fits')
LOScloudsN = hdulist[0].data
hdulist = pf.open(path+'data/LOScloudsS.fits')
LOScloudsS = hdulist[0].data
# LOS of all clouds
LOScloudsN = LOScloudsN.sum(axis=(0))
LOScloudsS = LOScloudsS.sum(axis=(0))
# Add empty array for converting to full sky
LOScloudsS = np.concatenate((LOScloudsS, z), axis=0)
LOScloudsN = np.concatenate((z,LOScloudsN), axis=0)
# Add N and S hemisphere
image_array = LOScloudsN+LOScloudsS
"""
GENERAL
"""
# Find theta and phi coordinates of image
theta = np.linspace(0, np.pi, num=image_array.shape[0])[:, None]
phi = np.linspace(-np.pi, np.pi, num=image_array.shape[1])
# Get pixel positions of full picture
pix = hp.ang2pix(nside, theta, phi)
"""
GENERAL END
"""
# Make healpix map array
healpix_map = np.zeros(hp.nside2npix(nside), dtype=np.double)
# put image in healpy map array
healpix_map[pix] = image_array # Magic
#healpix_map[np.where(healpix_map == 0)] = hp.UNSEEN # Set empty pixels to UNSEEN
"""
For full
"""
full_map = np.zeros(hp.nside2npix(nside), dtype=np.double)
full_map[pix] = full # Magic
#full_map[np.where(healpix_map == 0)] = hp.UNSEEN # Set empty pixels to UNSEEN
"""
Full end
"""
le = full_map - healpix_map
ma = le[::-1]
ma[np.where(ma == 0)] = hp.UNSEEN
full_map[np.where(full_map == 0)] = hp.UNSEEN
fu = full_map[::-1]
healpix_map[np.where(healpix_map == 0)] = hp.UNSEEN
se = healpix_map[::-1]
"""
hp.write_map(path+'data/fullHI50.fits',fu, partial=True)
hp.write_map(path+'data/segmentedHI50.fits',se, partial=True)
hp.write_map(path+'data/cloudsFITS/subtractedHI50fits',ma, partial=True)
"""
#min = 4.
#max = 350.
hp.mollview(fu,title="Full map +50 GLAT",sub=311)
hp.mollview(se,title="Above threshold (4.0) +50 GLAT", sub = 312)
hp.mollview(ma,title="Diff +50 GLAT",sub=313)
plt.savefig('figs/diff4a.pdf', bbox_inches='tight',pad_inches=0.106)
plt.show()
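The crux above is mapping a regular (theta, phi) Cartesian grid onto HEALPix pixel indices and then writing the image through that index array. A minimal sketch of the same trick on a tiny made-up image, reusing the numpy/healpy imports at the top of this script (nside_demo and demo_image are illustrative values only, not part of the original data):
nside_demo = 16
demo_image = np.arange(12.0).reshape(3, 4)                    # tiny made-up "sky" image
demo_theta = np.linspace(0, np.pi, num=demo_image.shape[0])[:, None]
demo_phi = np.linspace(-np.pi, np.pi, num=demo_image.shape[1])
demo_pix = hp.ang2pix(nside_demo, demo_theta, demo_phi)       # broadcasts to a 3x4 grid of pixel indices
demo_map = np.full(hp.nside2npix(nside_demo), hp.UNSEEN)
demo_map[demo_pix] = demo_image                               # duplicate pixel indices keep only one value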
"""
NX = 4320
NY = 2160
#image_array = resize(LOScloudsN,(NY,NX)) # Resizing image
"""
|
flexible
|
{
"blob_id": "d86fd2e6ef5dab4444772192471538842112b3fd",
"index": 2675,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nhp.mollview(fu, title='Full map +50 GLAT', sub=311)\nhp.mollview(se, title='Above threshold (4.0) +50 GLAT', sub=312)\nhp.mollview(ma, title='Diff +50 GLAT', sub=313)\nplt.savefig('figs/diff4a.pdf', bbox_inches='tight', pad_inches=0.106)\nplt.show()\n<mask token>\n",
"step-3": "<mask token>\nstart = time.time()\npath = '/Users/trygveleithesvalheim/datafiles/'\nNN = 338\nNS = 438\nnside = 512\nnpix = 12 * nside ** 2\nz = np.zeros((1680, 4320))\n<mask token>\nc1 = 420\nc2 = 475\nhdulist = pf.open(path + 'data/cubesCombinedN.fits')\nNfull = hdulist[0].data\nhdulist = pf.open(path + 'data/cubesCombinedS.fits')\nSfull = hdulist[0].data\nfullLOSN = Nfull[c1:c2].sum(axis=0)\nfullLOSS = Sfull[c1:c2].sum(axis=0)\nfullLOSS = np.concatenate((fullLOSS, z), axis=0)\nfullLOSN = np.concatenate((z, fullLOSN), axis=0)\nfull = fullLOSN + fullLOSS\n<mask token>\nhdulist = pf.open(path + 'data/LOScloudsN.fits')\nLOScloudsN = hdulist[0].data\nhdulist = pf.open(path + 'data/LOScloudsS.fits')\nLOScloudsS = hdulist[0].data\nLOScloudsN = LOScloudsN.sum(axis=0)\nLOScloudsS = LOScloudsS.sum(axis=0)\nLOScloudsS = np.concatenate((LOScloudsS, z), axis=0)\nLOScloudsN = np.concatenate((z, LOScloudsN), axis=0)\nimage_array = LOScloudsN + LOScloudsS\n<mask token>\ntheta = np.linspace(0, np.pi, num=image_array.shape[0])[:, None]\nphi = np.linspace(-np.pi, np.pi, num=image_array.shape[1])\npix = hp.ang2pix(nside, theta, phi)\n<mask token>\nhealpix_map = np.zeros(hp.nside2npix(nside), dtype=np.double)\nhealpix_map[pix] = image_array\n<mask token>\nfull_map = np.zeros(hp.nside2npix(nside), dtype=np.double)\nfull_map[pix] = full\n<mask token>\nle = full_map - healpix_map\nma = le[::-1]\nma[np.where(ma == 0)] = hp.UNSEEN\nfull_map[np.where(full_map == 0)] = hp.UNSEEN\nfu = full_map[::-1]\nhealpix_map[np.where(healpix_map == 0)] = hp.UNSEEN\nse = healpix_map[::-1]\n<mask token>\nhp.mollview(fu, title='Full map +50 GLAT', sub=311)\nhp.mollview(se, title='Above threshold (4.0) +50 GLAT', sub=312)\nhp.mollview(ma, title='Diff +50 GLAT', sub=313)\nplt.savefig('figs/diff4a.pdf', bbox_inches='tight', pad_inches=0.106)\nplt.show()\n<mask token>\n",
"step-4": "from skimage import data, filters, measure, exposure\nfrom skimage.filters import threshold_mean\nfrom skimage.transform import resize\nimport matplotlib.pyplot as plt\nfrom mpl_toolkits.mplot3d import Axes3D\nimport pyfits as pf\nimport time\nimport numpy as np\nimport healpy as hp\nfrom healpy.projector import CartesianProj\nfrom healpy.projector import MollweideProj\nstart = time.time()\npath = '/Users/trygveleithesvalheim/datafiles/'\nNN = 338\nNS = 438\nnside = 512\nnpix = 12 * nside ** 2\nz = np.zeros((1680, 4320))\n<mask token>\nc1 = 420\nc2 = 475\nhdulist = pf.open(path + 'data/cubesCombinedN.fits')\nNfull = hdulist[0].data\nhdulist = pf.open(path + 'data/cubesCombinedS.fits')\nSfull = hdulist[0].data\nfullLOSN = Nfull[c1:c2].sum(axis=0)\nfullLOSS = Sfull[c1:c2].sum(axis=0)\nfullLOSS = np.concatenate((fullLOSS, z), axis=0)\nfullLOSN = np.concatenate((z, fullLOSN), axis=0)\nfull = fullLOSN + fullLOSS\n<mask token>\nhdulist = pf.open(path + 'data/LOScloudsN.fits')\nLOScloudsN = hdulist[0].data\nhdulist = pf.open(path + 'data/LOScloudsS.fits')\nLOScloudsS = hdulist[0].data\nLOScloudsN = LOScloudsN.sum(axis=0)\nLOScloudsS = LOScloudsS.sum(axis=0)\nLOScloudsS = np.concatenate((LOScloudsS, z), axis=0)\nLOScloudsN = np.concatenate((z, LOScloudsN), axis=0)\nimage_array = LOScloudsN + LOScloudsS\n<mask token>\ntheta = np.linspace(0, np.pi, num=image_array.shape[0])[:, None]\nphi = np.linspace(-np.pi, np.pi, num=image_array.shape[1])\npix = hp.ang2pix(nside, theta, phi)\n<mask token>\nhealpix_map = np.zeros(hp.nside2npix(nside), dtype=np.double)\nhealpix_map[pix] = image_array\n<mask token>\nfull_map = np.zeros(hp.nside2npix(nside), dtype=np.double)\nfull_map[pix] = full\n<mask token>\nle = full_map - healpix_map\nma = le[::-1]\nma[np.where(ma == 0)] = hp.UNSEEN\nfull_map[np.where(full_map == 0)] = hp.UNSEEN\nfu = full_map[::-1]\nhealpix_map[np.where(healpix_map == 0)] = hp.UNSEEN\nse = healpix_map[::-1]\n<mask token>\nhp.mollview(fu, title='Full map +50 GLAT', sub=311)\nhp.mollview(se, title='Above threshold (4.0) +50 GLAT', sub=312)\nhp.mollview(ma, title='Diff +50 GLAT', sub=313)\nplt.savefig('figs/diff4a.pdf', bbox_inches='tight', pad_inches=0.106)\nplt.show()\n<mask token>\n",
"step-5": "from skimage import data, filters, measure, exposure\nfrom skimage.filters import threshold_mean\nfrom skimage.transform import resize\nimport matplotlib.pyplot as plt\nfrom mpl_toolkits.mplot3d import Axes3D\nimport pyfits as pf\nimport time\nimport numpy as np\nimport healpy as hp\nfrom healpy.projector import CartesianProj\nfrom healpy.projector import MollweideProj\n# [email protected]\nstart = time.time()\npath = '/Users/trygveleithesvalheim/datafiles/'\n\n\n# MAX SLIDES ON N: 134 (ID:135) and 136 (ID:137)\n# MAX SLIDES ON S: 6 (ID:7)\n# Data: 360 degrees longitude, 50-90 latitude\n# Dimensions: (480,4320)\n# Full map: (2160,4320) need to add 1680\n\nNN = 338 # Identified clouds in N. hemisphere\nNS = 438 # Identified clouds in S. hemisphere\nnside = 512\nnpix = 12*nside**2\nz = np.zeros((1680, 4320)) # Extra array for full sky array\n\n\"\"\"\nfull\n\"\"\"\nc1 = 420\nc2 = 475\nhdulist = pf.open(path+'data/cubesCombinedN.fits')\nNfull = hdulist[0].data\nhdulist = pf.open(path+'data/cubesCombinedS.fits')\nSfull = hdulist[0].data\n\nfullLOSN = Nfull[c1:c2].sum(axis=(0))\nfullLOSS = Sfull[c1:c2].sum(axis=(0))\n\n# Add empty array for converting to full sky\nfullLOSS = np.concatenate((fullLOSS, z), axis=0)\nfullLOSN = np.concatenate((z,fullLOSN), axis=0)\n\nfull = fullLOSN + fullLOSS\n\"\"\"\nAdd full first\n\"\"\"\nhdulist = pf.open(path+'data/LOScloudsN.fits')\nLOScloudsN = hdulist[0].data\nhdulist = pf.open(path+'data/LOScloudsS.fits')\nLOScloudsS = hdulist[0].data\n\n# LOS of all clouds\nLOScloudsN = LOScloudsN.sum(axis=(0))\nLOScloudsS = LOScloudsS.sum(axis=(0))\n\n# Add empty array for converting to full sky\nLOScloudsS = np.concatenate((LOScloudsS, z), axis=0)\nLOScloudsN = np.concatenate((z,LOScloudsN), axis=0)\n\n# Add N and S hemisphere\nimage_array = LOScloudsN+LOScloudsS\n\n\"\"\"\nGENERAL\n\"\"\"\n# Find theta and phi coordinates of image\ntheta = np.linspace(0, np.pi, num=image_array.shape[0])[:, None]\nphi = np.linspace(-np.pi, np.pi, num=image_array.shape[1])\n\n# Get pixel positions of full picture\npix = hp.ang2pix(nside, theta, phi)\n\n\"\"\"\nGENERAL END\n\"\"\"\n# Make healpix map array\nhealpix_map = np.zeros(hp.nside2npix(nside), dtype=np.double)\n\n# put image in healpy map array\nhealpix_map[pix] = image_array # Magic\n#healpix_map[np.where(healpix_map == 0)] = hp.UNSEEN # Set empty pixels to UNSEEN\n\n\"\"\"\nFor full\n\"\"\"\nfull_map = np.zeros(hp.nside2npix(nside), dtype=np.double)\n\nfull_map[pix] = full # Magic\n#full_map[np.where(healpix_map == 0)] = hp.UNSEEN # Set empty pixels to UNSEEN\n\n\"\"\"\nFull end\n\"\"\"\nle = full_map - healpix_map\nma = le[::-1]\nma[np.where(ma == 0)] = hp.UNSEEN\n\nfull_map[np.where(full_map == 0)] = hp.UNSEEN\nfu = full_map[::-1]\nhealpix_map[np.where(healpix_map == 0)] = hp.UNSEEN\nse = healpix_map[::-1]\n\n\"\"\"\nhp.write_map(path+'data/fullHI50.fits',fu, partial=True)\nhp.write_map(path+'data/segmentedHI50.fits',se, partial=True)\nhp.write_map(path+'data/cloudsFITS/subtractedHI50fits',ma, partial=True)\n\"\"\"\n#min = 4.\n#max = 350.\nhp.mollview(fu,title=\"Full map +50 GLAT\",sub=311)\nhp.mollview(se,title=\"Above threshold (4.0) +50 GLAT\", sub = 312)\nhp.mollview(ma,title=\"Diff +50 GLAT\",sub=313)\nplt.savefig('figs/diff4a.pdf', bbox_inches='tight',pad_inches=0.106)\nplt.show()\n\n\"\"\"\nNX = 4320\nNY = 2160\n#image_array = resize(LOScloudsN,(NY,NX)) # Resizing image\n\"\"\"\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
import sys
import utils
#import random
def findNearestPoint(points,no_used , src):
# If no nearest point found, return max.
dest = src
minDist = sys.float_info.max
for i in range(len(points)):
if no_used[i] and i!=src:
dist = utils.length(points[src], points[i])
if dist < minDist:
dest =i
minDist = dist
return dest, minDist
def solve(points):
#get an initial tour by NearestPoint method
tour = [0 for i in range(len(points))]
no_used = [True for i in range(len(points))]
totalDist = 0.0
# src =int( random.random()*(len(points)-1))
# no_used[src] = False
# tour[0]=src
src =0
no_used[0] = False
for i in range(1, len(points)):
dest, minDist = findNearestPoint(points, no_used, src) #find Nearest Point
tour[i] = dest
no_used[dest] = False #have been used
src = dest
totalDist += minDist
#plus distance between last point and initial point
return totalDist + utils.length(points[tour[-1]], points[tour[0]]), tour
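A minimal, self-contained sketch of the same greedy nearest-neighbour heuristic, assuming utils.length is a plain 2-D Euclidean distance (the real utils module is not shown, so euclidean() below is a labeled stand-in and the points are made-up data):
import math

def euclidean(p1, p2):
    # stand-in for utils.length; assumed to be plain 2-D Euclidean distance
    return math.hypot(p1[0] - p2[0], p1[1] - p2[1])

points = [(0.0, 0.0), (0.0, 1.0), (1.0, 1.0), (1.0, 0.0)]   # unit square (illustrative data)
tour, unused, total = [0], list(range(1, len(points))), 0.0
while unused:
    nxt = min(unused, key=lambda i: euclidean(points[tour[-1]], points[i]))
    total += euclidean(points[tour[-1]], points[nxt])
    tour.append(nxt)
    unused.remove(nxt)
total += euclidean(points[tour[-1]], points[tour[0]])
# tour is now [0, 1, 2, 3] and total is 4.0, which is also what solve(points)
# would return here if utils.length is indeed Euclidean distance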
|
normal
|
{
"blob_id": "943db90aa7721ddad3d7f5103c4d398fbf4e143b",
"index": 2768,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef findNearestPoint(points, no_used, src):\n dest = src\n minDist = sys.float_info.max\n for i in range(len(points)):\n if no_used[i] and i != src:\n dist = utils.length(points[src], points[i])\n if dist < minDist:\n dest = i\n minDist = dist\n return dest, minDist\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef findNearestPoint(points, no_used, src):\n dest = src\n minDist = sys.float_info.max\n for i in range(len(points)):\n if no_used[i] and i != src:\n dist = utils.length(points[src], points[i])\n if dist < minDist:\n dest = i\n minDist = dist\n return dest, minDist\n\n\ndef solve(points):\n tour = [(0) for i in range(len(points))]\n no_used = [(True) for i in range(len(points))]\n totalDist = 0.0\n src = 0\n no_used[0] = False\n for i in range(1, len(points)):\n dest, minDist = findNearestPoint(points, no_used, src)\n tour[i] = dest\n no_used[dest] = False\n src = dest\n totalDist += minDist\n return totalDist + utils.length(points[tour[-1]], points[tour[0]]), tour\n",
"step-4": "import sys\nimport utils\n\n\ndef findNearestPoint(points, no_used, src):\n dest = src\n minDist = sys.float_info.max\n for i in range(len(points)):\n if no_used[i] and i != src:\n dist = utils.length(points[src], points[i])\n if dist < minDist:\n dest = i\n minDist = dist\n return dest, minDist\n\n\ndef solve(points):\n tour = [(0) for i in range(len(points))]\n no_used = [(True) for i in range(len(points))]\n totalDist = 0.0\n src = 0\n no_used[0] = False\n for i in range(1, len(points)):\n dest, minDist = findNearestPoint(points, no_used, src)\n tour[i] = dest\n no_used[dest] = False\n src = dest\n totalDist += minDist\n return totalDist + utils.length(points[tour[-1]], points[tour[0]]), tour\n",
"step-5": "import sys\nimport utils\n#import random\n\ndef findNearestPoint(points,no_used , src):\n # If no nearest point found, return max.\n \n dest = src\n minDist = sys.float_info.max\n \n for i in range(len(points)):\n if no_used[i] and i!=src:\n\n \n dist = utils.length(points[src], points[i]) \n if dist < minDist:\n dest =i\n minDist = dist \n \n\n return dest, minDist\n \ndef solve(points):\n #get an initial tour by NearestPoint method\n tour = [0 for i in range(len(points))]\n no_used = [True for i in range(len(points))]\n totalDist = 0.0\n \n# src =int( random.random()*(len(points)-1))\n# no_used[src] = False\n# tour[0]=src\n src =0\n no_used[0] = False\n \n for i in range(1, len(points)):\n dest, minDist = findNearestPoint(points, no_used, src) #find Nearest Point\n tour[i] = dest\n no_used[dest] = False #have been used\n src = dest\n totalDist += minDist\n #plus distance between last point and initial point\n return totalDist + utils.length(points[tour[-1]], points[tour[0]]), tour\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
with open('report.csv') as csvfile:
data = csv.reader(csvfile, delimiter=',')
for row in data:
p = tf.add_paragraph()
p.text = row[0]
p.level = 1
p = tf.add_paragraph()
p.text = row[1]
p.level = 2
prs.save('raport.pptx')
<|reserved_special_token_1|>
<|reserved_special_token_0|>
prs = Presentation()
slide_layout = prs.slide_layouts[1]
slide = prs.slides.add_slide(slide_layout)
shapes = slide.shapes
title_shape = shapes.title
body_shape = shapes.placeholders[1]
title_shape.text = 'Tekst'
tf = body_shape.text_frame
tf.text = 'Zawartość tekst frame'
with open('report.csv') as csvfile:
data = csv.reader(csvfile, delimiter=',')
for row in data:
p = tf.add_paragraph()
p.text = row[0]
p.level = 1
p = tf.add_paragraph()
p.text = row[1]
p.level = 2
prs.save('raport.pptx')
<|reserved_special_token_1|>
from pptx import Presentation
import csv
prs = Presentation()
slide_layout = prs.slide_layouts[1]
slide = prs.slides.add_slide(slide_layout)
shapes = slide.shapes
title_shape = shapes.title
body_shape = shapes.placeholders[1]
title_shape.text = 'Tekst'
tf = body_shape.text_frame
tf.text = 'Zawartość tekst frame'
with open('report.csv') as csvfile:
data = csv.reader(csvfile, delimiter=',')
for row in data:
p = tf.add_paragraph()
p.text = row[0]
p.level = 1
p = tf.add_paragraph()
p.text = row[1]
p.level = 2
prs.save('raport.pptx')
<|reserved_special_token_1|>
from pptx import Presentation
import csv
prs = Presentation()
slide_layout = prs.slide_layouts[1]
slide = prs.slides.add_slide(slide_layout)
shapes = slide.shapes
title_shape = shapes.title
body_shape = shapes.placeholders[1]
title_shape.text = "Tekst"
tf = body_shape.text_frame
tf.text = "Zawartość tekst frame"
with open("report.csv") as csvfile:
data = csv.reader(csvfile, delimiter=',')
for row in data:
p = tf.add_paragraph()
p.text = row[0]
p.level = 1
p = tf.add_paragraph()
p.text = row[1]
p.level = 2
prs.save("raport.pptx")
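The script expects a two-column report.csv next to it; a hedged sketch of one way such a file could be produced, with purely illustrative placeholder rows:
import csv

sample_rows = [
    ("Section A", "First bullet for section A"),   # made-up placeholder content
    ("Section B", "First bullet for section B"),
]
with open("report.csv", "w", newline="", encoding="utf-8") as csvfile:
    csv.writer(csvfile, delimiter=",").writerows(sample_rows)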
|
flexible
|
{
"blob_id": "e1f003b6a687e5654a1ee6c595e789ced02cd6c3",
"index": 7086,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nwith open('report.csv') as csvfile:\n data = csv.reader(csvfile, delimiter=',')\n for row in data:\n p = tf.add_paragraph()\n p.text = row[0]\n p.level = 1\n p = tf.add_paragraph()\n p.text = row[1]\n p.level = 2\nprs.save('raport.pptx')\n",
"step-3": "<mask token>\nprs = Presentation()\nslide_layout = prs.slide_layouts[1]\nslide = prs.slides.add_slide(slide_layout)\nshapes = slide.shapes\ntitle_shape = shapes.title\nbody_shape = shapes.placeholders[1]\ntitle_shape.text = 'Tekst'\ntf = body_shape.text_frame\ntf.text = 'Zawartość tekst frame'\nwith open('report.csv') as csvfile:\n data = csv.reader(csvfile, delimiter=',')\n for row in data:\n p = tf.add_paragraph()\n p.text = row[0]\n p.level = 1\n p = tf.add_paragraph()\n p.text = row[1]\n p.level = 2\nprs.save('raport.pptx')\n",
"step-4": "from pptx import Presentation\nimport csv\nprs = Presentation()\nslide_layout = prs.slide_layouts[1]\nslide = prs.slides.add_slide(slide_layout)\nshapes = slide.shapes\ntitle_shape = shapes.title\nbody_shape = shapes.placeholders[1]\ntitle_shape.text = 'Tekst'\ntf = body_shape.text_frame\ntf.text = 'Zawartość tekst frame'\nwith open('report.csv') as csvfile:\n data = csv.reader(csvfile, delimiter=',')\n for row in data:\n p = tf.add_paragraph()\n p.text = row[0]\n p.level = 1\n p = tf.add_paragraph()\n p.text = row[1]\n p.level = 2\nprs.save('raport.pptx')\n",
"step-5": "from pptx import Presentation\nimport csv\n\nprs = Presentation()\nslide_layout = prs.slide_layouts[1]\nslide = prs.slides.add_slide(slide_layout)\nshapes = slide.shapes\n\ntitle_shape = shapes.title\n\nbody_shape = shapes.placeholders[1]\ntitle_shape.text = \"Tekst\"\n\ntf = body_shape.text_frame\ntf.text = \"Zawartość tekst frame\"\nwith open(\"report.csv\") as csvfile:\n data = csv.reader(csvfile, delimiter=',')\n for row in data:\n p = tf.add_paragraph()\n p.text = row[0]\n p.level = 1\n\n p = tf.add_paragraph()\n p.text = row[1]\n p.level = 2\n\nprs.save(\"raport.pptx\")\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
class conv2D:
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def forward(self, input_feature_maps):
output = np.zeros(self.output_shape)
input_feature_maps = self.apply_zero_padding(input_feature_maps)
for i in range(0, self.kernel_shape[0]):
kernel_stack = self.weights[i]
for j in range(0, self.kernel_shape[1]):
kernel = kernel_stack[j]
array = input_feature_maps[j]
stride_x_pointer = 0
conv_counter = 1
if self.debug:
print('**** NEW CONVOLUTION ****')
while stride_x_pointer + kernel.shape[0] - 1 <= array.shape[0
] - 1:
stride_y_pointer = 0
while stride_y_pointer + kernel.shape[1
] - 1 <= array.shape[1] - 1:
array_snip = array[stride_x_pointer:
stride_x_pointer + kernel.shape[0],
stride_y_pointer:stride_y_pointer + kernel.shape[1]
]
result = np.sum(np.multiply(array_snip, kernel))
conv_output_coordinate = (i, stride_x_pointer //
self.strides[0], stride_y_pointer // self.
strides[1])
output[conv_output_coordinate] += result
"""#cache all the results, touched weights and input for each kernel (output or Coordinates??)
for row in range(kernel.shape[0]):
for column in range(kernel.shape[1]):
# Cache coordinate only: (weight, input) --> output
#format: key ((kernel_stack_number, 2D_kernel_number, weight_x_pos, weight_y_pos), (input_channel, input_x_pos, input_y_pos)) ---> (feature_map_number, output_x_pos, output_y_pos)
self.cached_calculation[((i, j, row, column), (j, row + stride_x_pointer , column + stride_y_pointer))] = conv_output_coordinate
#Cache weight coordinate and input/output values
#ALTERNATIVE
# format: key ((kernel_stack_number, 2D_kernel_number, weight_x_pos, weight_y_pos), input_val) ---> output_val
#self.cached_calculation[((i, j, row, column), array_snip[row, column])] = result"""
if self.debug:
print('convolution nr ', conv_counter)
print('\narray_snip: \n', array_snip)
print('\nkernel: \n', kernel)
print('\nelementwise multiplication: \n', np.
multiply(array_snip, kernel))
print('\nresult: ', result)
stride_y_pointer += self.strides[1]
conv_counter += 1
stride_x_pointer += self.strides[0]
if self.debug:
print('\n----REVIEW----\n')
print('Total convolutions: ', conv_counter)
print('\ninput_feature_map:\n ', array)
print('\napplied kernel:\n ', kernel)
print('\nconvolution result:\n ', output[i])
print('***********************************')
self.cached_output = output
self.cached_input = input_feature_maps
output = self.activation(self, output)
return output
def backward(self, jacobian_L_Z):
jacobian_L_Z = jacobian_L_Z.reshape(self.output_shape)
jacobian_L_Z = jacobian_L_Z * self.d_activation(self, self.
cached_output)
jacobian_L_W = self.compute_gradients(jacobian_L_Z)
self.weights_grads += jacobian_L_W
jacobian_L_Y = self.compute_J_LY(jacobian_L_Z)
return jacobian_L_Y
def update_gradients(self, learning_rate):
self.weights -= learning_rate * self.weights_grads
self.weights_grads = np.zeros(self.weights.shape)
def compute_gradients(self, jacobian_L_Z):
grads = np.zeros(self.weights.shape)
for i in range(self.weights.shape[0]):
for j in range(self.weights.shape[1]):
for k in range(self.weights.shape[2]):
for l in range(self.weights.shape[3]):
for key in self.cached_calculation.keys():
if key[0] == (k, l):
grads[i, j, k, l] += self.cached_input[j][key
[1]] * jacobian_L_Z[i][self.
cached_calculation[key]]
return grads
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def __str__(self):
return 'Conv 2D Layer type with ' + str(self.kernel_shape[0]
) + ' kernels of shape = ' + str(self.kernel_shape[1:]
) + 'input/output of shape' + str(self.input_shape) + '/' + str(
            self.output_shape) + ' strides= ' + str(self.strides
) + ' modes= ' + str(self.modes
) + ' with activation = ' + self.activation_name
class conv1D:
def __init__(self, input_shape, n_kernels, kernel_shape, stride, mode,
weight_init_range, activation, debug):
self.type = 'conv1D'
self.input_shape = input_shape
self.activation_name = activation
self.kernel_shape = n_kernels, input_shape[0], kernel_shape
self.activation = activations.get_activation_function(activation)
self.d_activation = activations.get_activation_derivative(activation)
self.stride = stride
self.mode = mode
self.weights = np.random.uniform(low=weight_init_range[0], high=
weight_init_range[1], size=self.kernel_shape)
self.weights_grads = np.zeros(self.weights.shape)
self.p_x_start, self.p_x_stop = self.calculate_padding()
self.output_shape = self.calculate_output_shape()
self.cached_calculation = {}
self.cache_weights_input_output_triplet_locations()
self.cached_output = None
self.debug = debug
def cache_weights_input_output_triplet_locations(self):
placeholder_input = np.zeros(self.input_shape)
array = placeholder_input[0]
kernel = self.weights[0][0]
stride_x_pointer = 0
while stride_x_pointer + kernel.shape[0] - 1 <= array.shape[0] - 1:
for column in range(kernel.shape[0]):
conv_output_coordinate = stride_x_pointer // self.stride
self.cached_calculation[column, column + stride_x_pointer
] = conv_output_coordinate
stride_x_pointer += self.stride
def forward(self, input_feature_maps):
output = np.zeros(self.output_shape)
input_feature_maps = self.apply_zero_padding(input_feature_maps)
for i in range(0, self.kernel_shape[0]):
kernel_stack = self.weights[i]
for j in range(0, self.kernel_shape[1]):
kernel = kernel_stack[j]
array = input_feature_maps[j]
stride_x_pointer = 0
conv_counter = 1
if self.debug:
print('**** NEW CONVOLUTION ****')
while stride_x_pointer + kernel.shape[0] - 1 <= array.shape[0
] - 1:
array_snip = array[stride_x_pointer:stride_x_pointer +
kernel.shape[0]]
result = np.sum(np.multiply(array_snip, kernel))
conv_output_coordinate = i, stride_x_pointer // self.stride
output[conv_output_coordinate] += result
if self.debug:
print('convolution nr ', conv_counter)
print('\narray_snip: \n', array_snip)
print('\nkernel: \n', kernel)
print('\nelementwise multiplication: \n', np.
multiply(array_snip, kernel))
print('\nresult: ', result)
conv_counter += 1
stride_x_pointer += self.stride
if self.debug:
print('\n----REVIEW----\n')
print('Total convolutions: ', conv_counter)
print('\ninput_feature_map:\n ', array)
print('\napplied kernel:\n ', kernel)
print('\nconvolution result:\n ', output[i])
print('***********************************')
self.cached_output = output
self.cached_input = input_feature_maps
output = self.activation(self, output)
return output
def backward(self, jacobian_L_Z):
jacobian_L_Z = jacobian_L_Z.reshape(self.output_shape)
jacobian_L_Z = jacobian_L_Z * self.d_activation(self, self.
cached_output)
jacobian_L_W = self.compute_gradients(jacobian_L_Z)
self.weights_grads += jacobian_L_W
jacobian_L_Y = self.compute_J_LY(jacobian_L_Z)
return jacobian_L_Y
def update_gradients(self, learning_rate):
self.weights -= learning_rate * self.weights_grads
self.weights_grads = np.zeros(self.weights.shape)
def compute_gradients(self, jacobian_L_Z):
grads = np.zeros(self.weights.shape)
for i in range(self.weights.shape[0]):
for j in range(self.weights.shape[1]):
for k in range(self.weights.shape[2]):
for key in self.cached_calculation.keys():
if key[0] == k:
grads[i, j, k] += self.cached_input[j][key[1]
] * jacobian_L_Z[i][self.cached_calculation
[key]]
return grads
def compute_J_LY(self, jacobian_L_Z):
jacobian_L_Y = np.zeros(self.input_shape)
for i in range(self.input_shape[0]):
for j in range(self.input_shape[1]):
for key in self.cached_calculation.keys():
if key[1] == j:
for l in range(self.weights.shape[0]):
jacobian_L_Y[i, j] += self.weights[l][i][key[0]
] * jacobian_L_Z[l][self.cached_calculation
[key]]
return jacobian_L_Y
def calculate_output_shape(self):
width = math.floor((self.input_shape[1] - self.kernel_shape[2] +
self.p_x_start + self.p_x_stop) / self.stride + 1)
return self.kernel_shape[0], width
def calculate_padding(self):
s = self.stride
f = self.kernel_shape[2]
i = self.input_shape[1]
if self.mode == 'full':
p_x_start = f - 1
p_x_stop = f - 1
elif self.mode == 'same':
p_x_start = math.floor((s * math.ceil(i / s) - i + f - s) / 2)
p_x_stop = math.ceil((s * math.ceil(i / s) - i + f - s) / 2)
else:
p_x_start = 0
p_x_stop = 0
return p_x_start, p_x_stop
def apply_zero_padding(self, input_feature_maps):
padded_input_feature_maps = np.zeros((input_feature_maps.shape[0],
input_feature_maps.shape[1] + self.p_x_start + self.p_x_stop))
for channel in range(input_feature_maps.shape[0]):
array = input_feature_maps[channel]
padded_array = np.zeros(array.shape[0] + self.p_x_start + self.
p_x_stop)
padded_array[self.p_x_start:array.shape[0] + self.p_x_start
] = array
padded_input_feature_maps[channel] = padded_array
return padded_input_feature_maps
def __str__(self):
return 'Conv 1D Layer type with ' + str(self.kernel_shape[0]
) + ' kernels of shape = ' + str(self.kernel_shape[1:]
) + 'input/output of shape' + str(self.input_shape) + '/' + str(
self.output_shape) + ' stride= ' + str(self.stride
) + ' mode= ' + str(self.mode
) + ' with activation = ' + self.activation_name
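A quick standalone check of the 'same'-mode padding and output-width arithmetic used by calculate_padding and calculate_output_shape above, for one assumed configuration (input length 10, kernel width 3, stride 2); it only mirrors the formulas and does not touch the class:
import math

i, f, s = 10, 3, 2                                           # assumed demo configuration
pad = s * math.ceil(i / s) - i + f - s                       # total padding targeted by 'same' mode
p_start, p_stop = math.floor(pad / 2), math.ceil(pad / 2)    # 0 and 1 here
width = math.floor((i - f + p_start + p_stop) / s + 1)       # 5, i.e. ceil(i / s)
assert (p_start, p_stop, width) == (0, 1, 5)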
class softmax:
def __init__(self, size):
self.size = size
self.shape = 1, size
self.type = 'softmax'
self.activation_function = activations.softmax
def forward(self, input_data):
return self.activation_function(self, input_data)
def backward(self, jacobian_L_S, softmaxed_network_output):
jacobian_soft = self.compute_j_soft(softmaxed_network_output)
jacobian_L_Z = np.dot(jacobian_L_S, jacobian_soft)
return jacobian_L_Z
def compute_j_soft(self, S):
S = np.squeeze(S)
n = len(S)
j_soft = np.zeros((n, n))
for i in range(n):
for j in range(n):
if i == j:
j_soft[i][j] = S[i] - S[i] ** 2
else:
j_soft[i][j] = -S[i] * S[j]
return j_soft
def __str__(self):
return 'Softmax Layer of size = ' + str(self.size)
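compute_j_soft above fills in the softmax Jacobian, which for a softmaxed vector S equals diag(S) - S S^T; a short standalone restatement with an arbitrary example vector (S here is an assumed illustration, not data from this project):
import numpy as np

S = np.array([0.2, 0.3, 0.5])            # arbitrary example probabilities (assumption)
j_soft = np.diag(S) - np.outer(S, S)     # same entries the nested loops above produce
# diagonal entries: S[i] - S[i]**2, off-diagonal entries: -S[i] * S[j]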
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class conv2D:
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def forward(self, input_feature_maps):
output = np.zeros(self.output_shape)
input_feature_maps = self.apply_zero_padding(input_feature_maps)
for i in range(0, self.kernel_shape[0]):
kernel_stack = self.weights[i]
for j in range(0, self.kernel_shape[1]):
kernel = kernel_stack[j]
array = input_feature_maps[j]
stride_x_pointer = 0
conv_counter = 1
if self.debug:
print('**** NEW CONVOLUTION ****')
while stride_x_pointer + kernel.shape[0] - 1 <= array.shape[0
] - 1:
stride_y_pointer = 0
while stride_y_pointer + kernel.shape[1
] - 1 <= array.shape[1] - 1:
array_snip = array[stride_x_pointer:
stride_x_pointer + kernel.shape[0],
stride_y_pointer:stride_y_pointer + kernel.shape[1]
]
result = np.sum(np.multiply(array_snip, kernel))
conv_output_coordinate = (i, stride_x_pointer //
self.strides[0], stride_y_pointer // self.
strides[1])
output[conv_output_coordinate] += result
"""#cache all the results, touched weights and input for each kernel (output or Coordinates??)
for row in range(kernel.shape[0]):
for column in range(kernel.shape[1]):
# Cache coordinate only: (weight, input) --> output
#format: key ((kernel_stack_number, 2D_kernel_number, weight_x_pos, weight_y_pos), (input_channel, input_x_pos, input_y_pos)) ---> (feature_map_number, output_x_pos, output_y_pos)
self.cached_calculation[((i, j, row, column), (j, row + stride_x_pointer , column + stride_y_pointer))] = conv_output_coordinate
#Cache weight coordinate and input/output values
#ALTERNATIVE
# format: key ((kernel_stack_number, 2D_kernel_number, weight_x_pos, weight_y_pos), input_val) ---> output_val
#self.cached_calculation[((i, j, row, column), array_snip[row, column])] = result"""
if self.debug:
print('convolution nr ', conv_counter)
print('\narray_snip: \n', array_snip)
print('\nkernel: \n', kernel)
print('\nelementwise multiplication: \n', np.
multiply(array_snip, kernel))
print('\nresult: ', result)
stride_y_pointer += self.strides[1]
conv_counter += 1
stride_x_pointer += self.strides[0]
if self.debug:
print('\n----REVIEW----\n')
print('Total convolutions: ', conv_counter)
print('\ninput_feature_map:\n ', array)
print('\napplied kernel:\n ', kernel)
print('\nconvolution result:\n ', output[i])
print('***********************************')
self.cached_output = output
self.cached_input = input_feature_maps
output = self.activation(self, output)
return output
def backward(self, jacobian_L_Z):
jacobian_L_Z = jacobian_L_Z.reshape(self.output_shape)
jacobian_L_Z = jacobian_L_Z * self.d_activation(self, self.
cached_output)
jacobian_L_W = self.compute_gradients(jacobian_L_Z)
self.weights_grads += jacobian_L_W
jacobian_L_Y = self.compute_J_LY(jacobian_L_Z)
return jacobian_L_Y
def update_gradients(self, learning_rate):
self.weights -= learning_rate * self.weights_grads
self.weights_grads = np.zeros(self.weights.shape)
def compute_gradients(self, jacobian_L_Z):
grads = np.zeros(self.weights.shape)
for i in range(self.weights.shape[0]):
for j in range(self.weights.shape[1]):
for k in range(self.weights.shape[2]):
for l in range(self.weights.shape[3]):
for key in self.cached_calculation.keys():
if key[0] == (k, l):
grads[i, j, k, l] += self.cached_input[j][key
[1]] * jacobian_L_Z[i][self.
cached_calculation[key]]
return grads
def compute_J_LY(self, jacobian_L_Z):
jacobian_L_Y = np.zeros(self.input_shape)
for i in range(self.input_shape[0]):
for j in range(self.input_shape[1]):
for k in range(self.input_shape[2]):
for key in self.cached_calculation.keys():
if key[1] == (j, k):
for l in range(self.weights.shape[0]):
jacobian_L_Y[i, j, k] += self.weights[l][i][key
[0]] * jacobian_L_Z[l][self.
cached_calculation[key]]
return jacobian_L_Y
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def __str__(self):
return 'Conv 2D Layer type with ' + str(self.kernel_shape[0]
) + ' kernels of shape = ' + str(self.kernel_shape[1:]
) + 'input/output of shape' + str(self.input_shape) + '/' + str(
            self.output_shape) + ' strides= ' + str(self.strides
) + ' modes= ' + str(self.modes
) + ' with activation = ' + self.activation_name
class conv1D:
def __init__(self, input_shape, n_kernels, kernel_shape, stride, mode,
weight_init_range, activation, debug):
self.type = 'conv1D'
self.input_shape = input_shape
self.activation_name = activation
self.kernel_shape = n_kernels, input_shape[0], kernel_shape
self.activation = activations.get_activation_function(activation)
self.d_activation = activations.get_activation_derivative(activation)
self.stride = stride
self.mode = mode
self.weights = np.random.uniform(low=weight_init_range[0], high=
weight_init_range[1], size=self.kernel_shape)
self.weights_grads = np.zeros(self.weights.shape)
self.p_x_start, self.p_x_stop = self.calculate_padding()
self.output_shape = self.calculate_output_shape()
self.cached_calculation = {}
self.cache_weights_input_output_triplet_locations()
self.cached_output = None
self.debug = debug
def cache_weights_input_output_triplet_locations(self):
placeholder_input = np.zeros(self.input_shape)
array = placeholder_input[0]
kernel = self.weights[0][0]
stride_x_pointer = 0
while stride_x_pointer + kernel.shape[0] - 1 <= array.shape[0] - 1:
for column in range(kernel.shape[0]):
conv_output_coordinate = stride_x_pointer // self.stride
self.cached_calculation[column, column + stride_x_pointer
] = conv_output_coordinate
stride_x_pointer += self.stride
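    # Illustrative example: for an input row of length 4, a kernel of length 2
    # and stride 1, the cached entries are (weight_pos, input_pos) -> output_pos:
    #   (0, 0) -> 0, (1, 1) -> 0, (0, 1) -> 1, (1, 2) -> 1, (0, 2) -> 2, (1, 3) -> 2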
def forward(self, input_feature_maps):
output = np.zeros(self.output_shape)
input_feature_maps = self.apply_zero_padding(input_feature_maps)
for i in range(0, self.kernel_shape[0]):
kernel_stack = self.weights[i]
for j in range(0, self.kernel_shape[1]):
kernel = kernel_stack[j]
array = input_feature_maps[j]
stride_x_pointer = 0
conv_counter = 1
if self.debug:
print('**** NEW CONVOLUTION ****')
while stride_x_pointer + kernel.shape[0] - 1 <= array.shape[0
] - 1:
array_snip = array[stride_x_pointer:stride_x_pointer +
kernel.shape[0]]
result = np.sum(np.multiply(array_snip, kernel))
conv_output_coordinate = i, stride_x_pointer // self.stride
output[conv_output_coordinate] += result
if self.debug:
print('convolution nr ', conv_counter)
print('\narray_snip: \n', array_snip)
print('\nkernel: \n', kernel)
print('\nelementwise multiplication: \n', np.
multiply(array_snip, kernel))
print('\nresult: ', result)
conv_counter += 1
stride_x_pointer += self.stride
if self.debug:
print('\n----REVIEW----\n')
print('Total convolutions: ', conv_counter)
print('\ninput_feature_map:\n ', array)
print('\napplied kernel:\n ', kernel)
print('\nconvolution result:\n ', output[i])
print('***********************************')
self.cached_output = output
self.cached_input = input_feature_maps
output = self.activation(self, output)
return output
def backward(self, jacobian_L_Z):
jacobian_L_Z = jacobian_L_Z.reshape(self.output_shape)
jacobian_L_Z = jacobian_L_Z * self.d_activation(self, self.
cached_output)
jacobian_L_W = self.compute_gradients(jacobian_L_Z)
self.weights_grads += jacobian_L_W
jacobian_L_Y = self.compute_J_LY(jacobian_L_Z)
return jacobian_L_Y
def update_gradients(self, learning_rate):
self.weights -= learning_rate * self.weights_grads
self.weights_grads = np.zeros(self.weights.shape)
def compute_gradients(self, jacobian_L_Z):
grads = np.zeros(self.weights.shape)
for i in range(self.weights.shape[0]):
for j in range(self.weights.shape[1]):
for k in range(self.weights.shape[2]):
for key in self.cached_calculation.keys():
if key[0] == k:
grads[i, j, k] += self.cached_input[j][key[1]
] * jacobian_L_Z[i][self.cached_calculation
[key]]
return grads
def compute_J_LY(self, jacobian_L_Z):
jacobian_L_Y = np.zeros(self.input_shape)
for i in range(self.input_shape[0]):
for j in range(self.input_shape[1]):
for key in self.cached_calculation.keys():
if key[1] == j:
for l in range(self.weights.shape[0]):
jacobian_L_Y[i, j] += self.weights[l][i][key[0]
] * jacobian_L_Z[l][self.cached_calculation
[key]]
return jacobian_L_Y
def calculate_output_shape(self):
width = math.floor((self.input_shape[1] - self.kernel_shape[2] +
self.p_x_start + self.p_x_stop) / self.stride + 1)
return self.kernel_shape[0], width
def calculate_padding(self):
s = self.stride
f = self.kernel_shape[2]
i = self.input_shape[1]
if self.mode == 'full':
p_x_start = f - 1
p_x_stop = f - 1
elif self.mode == 'same':
p_x_start = math.floor((s * math.ceil(i / s) - i + f - s) / 2)
p_x_stop = math.ceil((s * math.ceil(i / s) - i + f - s) / 2)
else:
p_x_start = 0
p_x_stop = 0
return p_x_start, p_x_stop
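    # Worked example for 'same' padding: with i = 6, f = 3, s = 1 the shared
    # term (s*ceil(i/s) - i + f - s) equals 2, so p_x_start = 1 and p_x_stop = 1,
    # giving an output width of floor((6 - 3 + 2)/1 + 1) = 6 (input width preserved).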
def apply_zero_padding(self, input_feature_maps):
padded_input_feature_maps = np.zeros((input_feature_maps.shape[0],
input_feature_maps.shape[1] + self.p_x_start + self.p_x_stop))
for channel in range(input_feature_maps.shape[0]):
array = input_feature_maps[channel]
padded_array = np.zeros(array.shape[0] + self.p_x_start + self.
p_x_stop)
padded_array[self.p_x_start:array.shape[0] + self.p_x_start
] = array
padded_input_feature_maps[channel] = padded_array
return padded_input_feature_maps
def __str__(self):
        return 'Conv 1D Layer type with ' + str(self.kernel_shape[0]
            ) + ' kernels of shape = ' + str(self.kernel_shape[1:]
            ) + ', input/output of shape ' + str(self.input_shape) + '/' + str(
            self.output_shape) + ', stride = ' + str(self.stride
            ) + ', mode = ' + str(self.mode
            ) + ', with activation = ' + self.activation_name
class softmax:
def __init__(self, size):
self.size = size
self.shape = 1, size
self.type = 'softmax'
self.activation_function = activations.softmax
def forward(self, input_data):
return self.activation_function(self, input_data)
def backward(self, jacobian_L_S, softmaxed_network_output):
jacobian_soft = self.compute_j_soft(softmaxed_network_output)
jacobian_L_Z = np.dot(jacobian_L_S, jacobian_soft)
return jacobian_L_Z
def compute_j_soft(self, S):
S = np.squeeze(S)
n = len(S)
j_soft = np.zeros((n, n))
for i in range(n):
for j in range(n):
if i == j:
j_soft[i][j] = S[i] - S[i] ** 2
else:
j_soft[i][j] = -S[i] * S[j]
return j_soft
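    # Equivalent closed form: j_soft[i][j] = S[i] * (delta_ij - S[j]), i.e. the
    # standard Jacobian of the softmax output with respect to its inputs.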
def __str__(self):
return 'Softmax Layer of size = ' + str(self.size)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class conv2D:
def __init__(self, input_shape, n_kernels, kernel_shape, strides, modes,
weight_init_range, activation, debug):
self.type = 'conv2D'
self.input_shape = input_shape
self.activation_name = activation
self.kernel_shape = n_kernels, input_shape[0], kernel_shape[0
], kernel_shape[1]
self.activation = activations.get_activation_function(activation)
self.d_activation = activations.get_activation_derivative(activation)
self.strides = strides
self.modes = modes
self.weights = np.random.uniform(low=weight_init_range[0], high=
weight_init_range[1], size=self.kernel_shape)
self.weights_grads = np.zeros(self.weights.shape)
self.p_x_start, self.p_x_stop, self.p_y_start, self.p_y_stop = (self
.calculate_padding())
self.output_shape = self.calculate_output_shape()
self.cached_calculation = {}
self.cache_weights_input_output_triplet_locations()
self.cached_output = None
self.debug = debug
"""print("###########################")
a = np.random.randint(1,4,(6,6))
print(a)
padded_a = self.apply_zero_padding(a)
print(padded_a)
print("kernel shape", (self.kernel_shape[2], self.kernel_shape[3]))
print("input shape", a.shape)
print("padded shape", padded_a.shape)
print("###########################")"""
def cache_weights_input_output_triplet_locations(self):
placeholder_input = np.zeros(self.input_shape)
array = placeholder_input[0]
kernel = self.weights[0][0]
stride_x_pointer = 0
while stride_x_pointer + kernel.shape[0] - 1 <= array.shape[0] - 1:
stride_y_pointer = 0
while stride_y_pointer + kernel.shape[1] - 1 <= array.shape[1] - 1:
for row in range(kernel.shape[0]):
for column in range(kernel.shape[1]):
conv_output_coordinate = (stride_x_pointer // self.
strides[0], stride_y_pointer // self.strides[1])
self.cached_calculation[(row, column), (row +
stride_x_pointer, column + stride_y_pointer)
] = conv_output_coordinate
stride_y_pointer += self.strides[1]
stride_x_pointer += self.strides[0]
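    # The coordinate cache is built once from a zero placeholder because the
    # mapping (weight position, input position) -> output position depends only
    # on shapes and strides, never on the actual input values, so the same
    # cache can be reused by every kernel stack and every forward/backward pass.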
def forward(self, input_feature_maps):
output = np.zeros(self.output_shape)
input_feature_maps = self.apply_zero_padding(input_feature_maps)
for i in range(0, self.kernel_shape[0]):
kernel_stack = self.weights[i]
for j in range(0, self.kernel_shape[1]):
kernel = kernel_stack[j]
array = input_feature_maps[j]
stride_x_pointer = 0
conv_counter = 1
if self.debug:
print('**** NEW CONVOLUTION ****')
while stride_x_pointer + kernel.shape[0] - 1 <= array.shape[0
] - 1:
stride_y_pointer = 0
while stride_y_pointer + kernel.shape[1
] - 1 <= array.shape[1] - 1:
array_snip = array[stride_x_pointer:
stride_x_pointer + kernel.shape[0],
stride_y_pointer:stride_y_pointer + kernel.shape[1]
]
result = np.sum(np.multiply(array_snip, kernel))
conv_output_coordinate = (i, stride_x_pointer //
self.strides[0], stride_y_pointer // self.
strides[1])
output[conv_output_coordinate] += result
"""#cache all the results, touched weights and input for each kernel (output or Coordinates??)
for row in range(kernel.shape[0]):
for column in range(kernel.shape[1]):
# Cache coordinate only: (weight, input) --> output
#format: key ((kernel_stack_number, 2D_kernel_number, weight_x_pos, weight_y_pos), (input_channel, input_x_pos, input_y_pos)) ---> (feature_map_number, output_x_pos, output_y_pos)
self.cached_calculation[((i, j, row, column), (j, row + stride_x_pointer , column + stride_y_pointer))] = conv_output_coordinate
#Cache weight coordinate and input/output values
#ALTERNATIVE
# format: key ((kernel_stack_number, 2D_kernel_number, weight_x_pos, weight_y_pos), input_val) ---> output_val
#self.cached_calculation[((i, j, row, column), array_snip[row, column])] = result"""
if self.debug:
print('convolution nr ', conv_counter)
print('\narray_snip: \n', array_snip)
print('\nkernel: \n', kernel)
print('\nelementwise multiplication: \n', np.
multiply(array_snip, kernel))
print('\nresult: ', result)
stride_y_pointer += self.strides[1]
conv_counter += 1
stride_x_pointer += self.strides[0]
if self.debug:
print('\n----REVIEW----\n')
print('Total convolutions: ', conv_counter)
print('\ninput_feature_map:\n ', array)
print('\napplied kernel:\n ', kernel)
print('\nconvolution result:\n ', output[i])
print('***********************************')
self.cached_output = output
self.cached_input = input_feature_maps
output = self.activation(self, output)
return output
def backward(self, jacobian_L_Z):
jacobian_L_Z = jacobian_L_Z.reshape(self.output_shape)
jacobian_L_Z = jacobian_L_Z * self.d_activation(self, self.
cached_output)
jacobian_L_W = self.compute_gradients(jacobian_L_Z)
self.weights_grads += jacobian_L_W
jacobian_L_Y = self.compute_J_LY(jacobian_L_Z)
return jacobian_L_Y
def update_gradients(self, learning_rate):
self.weights -= learning_rate * self.weights_grads
self.weights_grads = np.zeros(self.weights.shape)
def compute_gradients(self, jacobian_L_Z):
grads = np.zeros(self.weights.shape)
for i in range(self.weights.shape[0]):
for j in range(self.weights.shape[1]):
for k in range(self.weights.shape[2]):
for l in range(self.weights.shape[3]):
for key in self.cached_calculation.keys():
if key[0] == (k, l):
grads[i, j, k, l] += self.cached_input[j][key
[1]] * jacobian_L_Z[i][self.
cached_calculation[key]]
return grads
def compute_J_LY(self, jacobian_L_Z):
jacobian_L_Y = np.zeros(self.input_shape)
for i in range(self.input_shape[0]):
for j in range(self.input_shape[1]):
for k in range(self.input_shape[2]):
for key in self.cached_calculation.keys():
if key[1] == (j, k):
for l in range(self.weights.shape[0]):
jacobian_L_Y[i, j, k] += self.weights[l][i][key
[0]] * jacobian_L_Z[l][self.
cached_calculation[key]]
return jacobian_L_Y
def calculate_output_shape(self):
width = math.floor((self.input_shape[1] - self.kernel_shape[2] +
self.p_x_start + self.p_x_stop) / self.strides[0] + 1)
height = math.floor((self.input_shape[2] - self.kernel_shape[3] +
self.p_y_start + self.p_y_stop) / self.strides[1] + 1)
return self.kernel_shape[0], width, height
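    # Example: an input of shape (C, 6, 6), a 3x3 kernel, strides (1, 1) and no
    # padding give width = height = floor((6 - 3 + 0 + 0)/1 + 1) = 4, i.e. an
    # output of shape (n_kernels, 4, 4).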
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def __str__(self):
return 'Conv 2D Layer type with ' + str(self.kernel_shape[0]
) + ' kernels of shape = ' + str(self.kernel_shape[1:]
) + 'input/output of shape' + str(self.input_shape) + '/' + str(
self.output_shape) + ' strides= s' + str(self.strides
) + ' modes= ' + str(self.modes
) + ' with activation = ' + self.activation_name
class conv1D:
def __init__(self, input_shape, n_kernels, kernel_shape, stride, mode,
weight_init_range, activation, debug):
self.type = 'conv1D'
self.input_shape = input_shape
self.activation_name = activation
self.kernel_shape = n_kernels, input_shape[0], kernel_shape
self.activation = activations.get_activation_function(activation)
self.d_activation = activations.get_activation_derivative(activation)
self.stride = stride
self.mode = mode
self.weights = np.random.uniform(low=weight_init_range[0], high=
weight_init_range[1], size=self.kernel_shape)
self.weights_grads = np.zeros(self.weights.shape)
self.p_x_start, self.p_x_stop = self.calculate_padding()
self.output_shape = self.calculate_output_shape()
self.cached_calculation = {}
self.cache_weights_input_output_triplet_locations()
self.cached_output = None
self.debug = debug
def cache_weights_input_output_triplet_locations(self):
placeholder_input = np.zeros(self.input_shape)
array = placeholder_input[0]
kernel = self.weights[0][0]
stride_x_pointer = 0
while stride_x_pointer + kernel.shape[0] - 1 <= array.shape[0] - 1:
for column in range(kernel.shape[0]):
conv_output_coordinate = stride_x_pointer // self.stride
self.cached_calculation[column, column + stride_x_pointer
] = conv_output_coordinate
stride_x_pointer += self.stride
def forward(self, input_feature_maps):
output = np.zeros(self.output_shape)
input_feature_maps = self.apply_zero_padding(input_feature_maps)
for i in range(0, self.kernel_shape[0]):
kernel_stack = self.weights[i]
for j in range(0, self.kernel_shape[1]):
kernel = kernel_stack[j]
array = input_feature_maps[j]
stride_x_pointer = 0
conv_counter = 1
if self.debug:
print('**** NEW CONVOLUTION ****')
while stride_x_pointer + kernel.shape[0] - 1 <= array.shape[0
] - 1:
array_snip = array[stride_x_pointer:stride_x_pointer +
kernel.shape[0]]
result = np.sum(np.multiply(array_snip, kernel))
conv_output_coordinate = i, stride_x_pointer // self.stride
output[conv_output_coordinate] += result
if self.debug:
print('convolution nr ', conv_counter)
print('\narray_snip: \n', array_snip)
print('\nkernel: \n', kernel)
print('\nelementwise multiplication: \n', np.
multiply(array_snip, kernel))
print('\nresult: ', result)
conv_counter += 1
stride_x_pointer += self.stride
if self.debug:
print('\n----REVIEW----\n')
print('Total convolutions: ', conv_counter)
print('\ninput_feature_map:\n ', array)
print('\napplied kernel:\n ', kernel)
print('\nconvolution result:\n ', output[i])
print('***********************************')
self.cached_output = output
self.cached_input = input_feature_maps
output = self.activation(self, output)
return output
def backward(self, jacobian_L_Z):
jacobian_L_Z = jacobian_L_Z.reshape(self.output_shape)
jacobian_L_Z = jacobian_L_Z * self.d_activation(self, self.
cached_output)
jacobian_L_W = self.compute_gradients(jacobian_L_Z)
self.weights_grads += jacobian_L_W
jacobian_L_Y = self.compute_J_LY(jacobian_L_Z)
return jacobian_L_Y
def update_gradients(self, learning_rate):
self.weights -= learning_rate * self.weights_grads
self.weights_grads = np.zeros(self.weights.shape)
def compute_gradients(self, jacobian_L_Z):
grads = np.zeros(self.weights.shape)
for i in range(self.weights.shape[0]):
for j in range(self.weights.shape[1]):
for k in range(self.weights.shape[2]):
for key in self.cached_calculation.keys():
if key[0] == k:
grads[i, j, k] += self.cached_input[j][key[1]
] * jacobian_L_Z[i][self.cached_calculation
[key]]
return grads
def compute_J_LY(self, jacobian_L_Z):
jacobian_L_Y = np.zeros(self.input_shape)
for i in range(self.input_shape[0]):
for j in range(self.input_shape[1]):
for key in self.cached_calculation.keys():
if key[1] == j:
for l in range(self.weights.shape[0]):
jacobian_L_Y[i, j] += self.weights[l][i][key[0]
] * jacobian_L_Z[l][self.cached_calculation
[key]]
return jacobian_L_Y
def calculate_output_shape(self):
width = math.floor((self.input_shape[1] - self.kernel_shape[2] +
self.p_x_start + self.p_x_stop) / self.stride + 1)
return self.kernel_shape[0], width
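    # Example: input width 10, kernel width 3, stride 2 and no padding give
    # width = floor((10 - 3 + 0 + 0)/2 + 1) = 4, i.e. an output of shape
    # (n_kernels, 4).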
def calculate_padding(self):
s = self.stride
f = self.kernel_shape[2]
i = self.input_shape[1]
if self.mode == 'full':
p_x_start = f - 1
p_x_stop = f - 1
elif self.mode == 'same':
p_x_start = math.floor((s * math.ceil(i / s) - i + f - s) / 2)
p_x_stop = math.ceil((s * math.ceil(i / s) - i + f - s) / 2)
else:
p_x_start = 0
p_x_stop = 0
return p_x_start, p_x_stop
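    # In 'full' mode the input is padded with f - 1 zeros on both ends, so with
    # stride 1 every input element meets every kernel weight and the output
    # width grows to i + f - 1 (e.g. i = 5, f = 3 gives width 7).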
def apply_zero_padding(self, input_feature_maps):
padded_input_feature_maps = np.zeros((input_feature_maps.shape[0],
input_feature_maps.shape[1] + self.p_x_start + self.p_x_stop))
for channel in range(input_feature_maps.shape[0]):
array = input_feature_maps[channel]
padded_array = np.zeros(array.shape[0] + self.p_x_start + self.
p_x_stop)
padded_array[self.p_x_start:array.shape[0] + self.p_x_start
] = array
padded_input_feature_maps[channel] = padded_array
return padded_input_feature_maps
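    # Example: padding the row [1, 2, 3] with p_x_start = 1 and p_x_stop = 1
    # produces [0, 1, 2, 3, 0]; the original values are copied into the middle
    # of a zero background.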
def __str__(self):
return 'Conv 1D Layer type with ' + str(self.kernel_shape[0]
) + ' kernels of shape = ' + str(self.kernel_shape[1:]
) + 'input/output of shape' + str(self.input_shape) + '/' + str(
self.output_shape) + ' stride= ' + str(self.stride
) + ' mode= ' + str(self.mode
) + ' with activation = ' + self.activation_name
class softmax:
def __init__(self, size):
self.size = size
self.shape = 1, size
self.type = 'softmax'
self.activation_function = activations.softmax
def forward(self, input_data):
return self.activation_function(self, input_data)
def backward(self, jacobian_L_S, softmaxed_network_output):
jacobian_soft = self.compute_j_soft(softmaxed_network_output)
jacobian_L_Z = np.dot(jacobian_L_S, jacobian_soft)
return jacobian_L_Z
def compute_j_soft(self, S):
S = np.squeeze(S)
n = len(S)
j_soft = np.zeros((n, n))
for i in range(n):
for j in range(n):
if i == j:
j_soft[i][j] = S[i] - S[i] ** 2
else:
j_soft[i][j] = -S[i] * S[j]
return j_soft
def __str__(self):
return 'Softmax Layer of size = ' + str(self.size)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class FC_layer:
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def backward(self, jacobian_L_Z):
Y = self.input
jacobian_Z_sum = self.create_jacobian_Z_sum()
simp_jacobian_Z_W = np.outer(Y, jacobian_Z_sum.diagonal())
jacobian_L_W = jacobian_L_Z * simp_jacobian_Z_W
jacobian_Z_Y = np.dot(jacobian_Z_sum, self.weights.T)
jacobian_L_Y = np.dot(jacobian_L_Z, jacobian_Z_Y)
jacobian_L_B = jacobian_L_Z
self.weights_grads = self.weights_grads + jacobian_L_W
self.bias_grads = self.bias_grads + jacobian_L_B
return jacobian_L_Y
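    # Shape check (illustrative): with a (1, M) input and (M, N) weights,
    # jacobian_L_Z is (1, N), jacobian_Z_sum is (N, N), simp_jacobian_Z_W and
    # jacobian_L_W are (M, N), and the returned jacobian_L_Y is (1, M), ready to
    # be passed upstream as the previous layer's output loss.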
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def __str__(self):
return 'FC Layer type size = ' + str(self.weights.shape
) + ' with activation = ' + self.activation_name
class conv2D:
def __init__(self, input_shape, n_kernels, kernel_shape, strides, modes,
weight_init_range, activation, debug):
self.type = 'conv2D'
self.input_shape = input_shape
self.activation_name = activation
self.kernel_shape = n_kernels, input_shape[0], kernel_shape[0
], kernel_shape[1]
self.activation = activations.get_activation_function(activation)
self.d_activation = activations.get_activation_derivative(activation)
self.strides = strides
self.modes = modes
self.weights = np.random.uniform(low=weight_init_range[0], high=
weight_init_range[1], size=self.kernel_shape)
self.weights_grads = np.zeros(self.weights.shape)
self.p_x_start, self.p_x_stop, self.p_y_start, self.p_y_stop = (self
.calculate_padding())
self.output_shape = self.calculate_output_shape()
self.cached_calculation = {}
self.cache_weights_input_output_triplet_locations()
self.cached_output = None
self.debug = debug
"""print("###########################")
a = np.random.randint(1,4,(6,6))
print(a)
padded_a = self.apply_zero_padding(a)
print(padded_a)
print("kernel shape", (self.kernel_shape[2], self.kernel_shape[3]))
print("input shape", a.shape)
print("padded shape", padded_a.shape)
print("###########################")"""
def cache_weights_input_output_triplet_locations(self):
placeholder_input = np.zeros(self.input_shape)
array = placeholder_input[0]
kernel = self.weights[0][0]
stride_x_pointer = 0
while stride_x_pointer + kernel.shape[0] - 1 <= array.shape[0] - 1:
stride_y_pointer = 0
while stride_y_pointer + kernel.shape[1] - 1 <= array.shape[1] - 1:
for row in range(kernel.shape[0]):
for column in range(kernel.shape[1]):
conv_output_coordinate = (stride_x_pointer // self.
strides[0], stride_y_pointer // self.strides[1])
self.cached_calculation[(row, column), (row +
stride_x_pointer, column + stride_y_pointer)
] = conv_output_coordinate
stride_y_pointer += self.strides[1]
stride_x_pointer += self.strides[0]
def forward(self, input_feature_maps):
output = np.zeros(self.output_shape)
input_feature_maps = self.apply_zero_padding(input_feature_maps)
for i in range(0, self.kernel_shape[0]):
kernel_stack = self.weights[i]
for j in range(0, self.kernel_shape[1]):
kernel = kernel_stack[j]
array = input_feature_maps[j]
stride_x_pointer = 0
conv_counter = 1
if self.debug:
print('**** NEW CONVOLUTION ****')
while stride_x_pointer + kernel.shape[0] - 1 <= array.shape[0
] - 1:
stride_y_pointer = 0
while stride_y_pointer + kernel.shape[1
] - 1 <= array.shape[1] - 1:
array_snip = array[stride_x_pointer:
stride_x_pointer + kernel.shape[0],
stride_y_pointer:stride_y_pointer + kernel.shape[1]
]
result = np.sum(np.multiply(array_snip, kernel))
conv_output_coordinate = (i, stride_x_pointer //
self.strides[0], stride_y_pointer // self.
strides[1])
output[conv_output_coordinate] += result
"""#cache all the results, touched weights and input for each kernel (output or Coordinates??)
for row in range(kernel.shape[0]):
for column in range(kernel.shape[1]):
# Cache coordinate only: (weight, input) --> output
#format: key ((kernel_stack_number, 2D_kernel_number, weight_x_pos, weight_y_pos), (input_channel, input_x_pos, input_y_pos)) ---> (feature_map_number, output_x_pos, output_y_pos)
self.cached_calculation[((i, j, row, column), (j, row + stride_x_pointer , column + stride_y_pointer))] = conv_output_coordinate
#Cache weight coordinate and input/output values
#ALTERNATIVE
# format: key ((kernel_stack_number, 2D_kernel_number, weight_x_pos, weight_y_pos), input_val) ---> output_val
#self.cached_calculation[((i, j, row, column), array_snip[row, column])] = result"""
if self.debug:
print('convolution nr ', conv_counter)
print('\narray_snip: \n', array_snip)
print('\nkernel: \n', kernel)
print('\nelementwise multiplication: \n', np.
multiply(array_snip, kernel))
print('\nresult: ', result)
stride_y_pointer += self.strides[1]
conv_counter += 1
stride_x_pointer += self.strides[0]
if self.debug:
print('\n----REVIEW----\n')
print('Total convolutions: ', conv_counter)
print('\ninput_feature_map:\n ', array)
print('\napplied kernel:\n ', kernel)
print('\nconvolution result:\n ', output[i])
print('***********************************')
self.cached_output = output
self.cached_input = input_feature_maps
output = self.activation(self, output)
return output
def backward(self, jacobian_L_Z):
jacobian_L_Z = jacobian_L_Z.reshape(self.output_shape)
jacobian_L_Z = jacobian_L_Z * self.d_activation(self, self.
cached_output)
jacobian_L_W = self.compute_gradients(jacobian_L_Z)
self.weights_grads += jacobian_L_W
jacobian_L_Y = self.compute_J_LY(jacobian_L_Z)
return jacobian_L_Y
def update_gradients(self, learning_rate):
self.weights -= learning_rate * self.weights_grads
self.weights_grads = np.zeros(self.weights.shape)
def compute_gradients(self, jacobian_L_Z):
grads = np.zeros(self.weights.shape)
for i in range(self.weights.shape[0]):
for j in range(self.weights.shape[1]):
for k in range(self.weights.shape[2]):
for l in range(self.weights.shape[3]):
for key in self.cached_calculation.keys():
if key[0] == (k, l):
grads[i, j, k, l] += self.cached_input[j][key
[1]] * jacobian_L_Z[i][self.
cached_calculation[key]]
return grads
def compute_J_LY(self, jacobian_L_Z):
jacobian_L_Y = np.zeros(self.input_shape)
for i in range(self.input_shape[0]):
for j in range(self.input_shape[1]):
for k in range(self.input_shape[2]):
for key in self.cached_calculation.keys():
if key[1] == (j, k):
for l in range(self.weights.shape[0]):
jacobian_L_Y[i, j, k] += self.weights[l][i][key
[0]] * jacobian_L_Z[l][self.
cached_calculation[key]]
return jacobian_L_Y
def calculate_output_shape(self):
width = math.floor((self.input_shape[1] - self.kernel_shape[2] +
self.p_x_start + self.p_x_stop) / self.strides[0] + 1)
height = math.floor((self.input_shape[2] - self.kernel_shape[3] +
self.p_y_start + self.p_y_stop) / self.strides[1] + 1)
return self.kernel_shape[0], width, height
def calculate_padding(self):
s = self.strides[0]
f = self.kernel_shape[2]
i = self.input_shape[1]
if self.modes[0] == 'full':
p_x_start = f - 1
p_x_stop = f - 1
elif self.modes[0] == 'same':
p_x_start = math.floor((s * math.ceil(i / s) - i + f - s) / 2)
p_x_stop = math.ceil((s * math.ceil(i / s) - i + f - s) / 2)
else:
p_x_start = 0
p_x_stop = 0
s = self.strides[1]
f = self.kernel_shape[3]
i = self.input_shape[2]
if self.modes[1] == 'full':
p_y_start = f - 1
p_y_stop = f - 1
elif self.modes[1] == 'same':
p_y_start = math.floor((s * math.ceil(i / s) - i + f - s) / 2)
p_y_stop = math.ceil((s * math.ceil(i / s) - i + f - s) / 2)
else:
p_y_start = 0
p_y_stop = 0
return p_x_start, p_x_stop, p_y_start, p_y_stop
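    # Worked example: with modes ('same', 'same'), strides (1, 1) and a 3x3
    # kernel on a 5x5 input, (s*ceil(i/s) - i + f - s) = 2 on both axes, so one
    # row/column of zeros is added on every side and the spatial size stays 5x5.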
def apply_zero_padding(self, input_feature_maps):
padded_input_feature_maps = np.zeros((input_feature_maps.shape[0],
input_feature_maps.shape[1] + self.p_x_start + self.p_x_stop,
input_feature_maps.shape[2] + self.p_y_start + self.p_y_stop))
for channel in range(input_feature_maps.shape[0]):
array = input_feature_maps[channel]
padded_array = np.zeros((array.shape[0] + self.p_x_start + self
.p_x_stop, array.shape[1] + self.p_y_start + self.p_y_stop))
padded_array[self.p_x_start:array.shape[0] + self.p_x_start,
self.p_y_start:array.shape[1] + self.p_y_start] = array
padded_input_feature_maps[channel] = padded_array
return padded_input_feature_maps
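    # Example: a 2x2 channel padded with one zero on every side becomes a 4x4
    # array with the original values occupying the central 2x2 block.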
def __str__(self):
return 'Conv 2D Layer type with ' + str(self.kernel_shape[0]
) + ' kernels of shape = ' + str(self.kernel_shape[1:]
) + 'input/output of shape' + str(self.input_shape) + '/' + str(
self.output_shape) + ' strides= s' + str(self.strides
) + ' modes= ' + str(self.modes
) + ' with activation = ' + self.activation_name
class conv1D:
def __init__(self, input_shape, n_kernels, kernel_shape, stride, mode,
weight_init_range, activation, debug):
self.type = 'conv1D'
self.input_shape = input_shape
self.activation_name = activation
self.kernel_shape = n_kernels, input_shape[0], kernel_shape
self.activation = activations.get_activation_function(activation)
self.d_activation = activations.get_activation_derivative(activation)
self.stride = stride
self.mode = mode
self.weights = np.random.uniform(low=weight_init_range[0], high=
weight_init_range[1], size=self.kernel_shape)
self.weights_grads = np.zeros(self.weights.shape)
self.p_x_start, self.p_x_stop = self.calculate_padding()
self.output_shape = self.calculate_output_shape()
self.cached_calculation = {}
self.cache_weights_input_output_triplet_locations()
self.cached_output = None
self.debug = debug
def cache_weights_input_output_triplet_locations(self):
placeholder_input = np.zeros(self.input_shape)
array = placeholder_input[0]
kernel = self.weights[0][0]
stride_x_pointer = 0
while stride_x_pointer + kernel.shape[0] - 1 <= array.shape[0] - 1:
for column in range(kernel.shape[0]):
conv_output_coordinate = stride_x_pointer // self.stride
self.cached_calculation[column, column + stride_x_pointer
] = conv_output_coordinate
stride_x_pointer += self.stride
def forward(self, input_feature_maps):
output = np.zeros(self.output_shape)
input_feature_maps = self.apply_zero_padding(input_feature_maps)
for i in range(0, self.kernel_shape[0]):
kernel_stack = self.weights[i]
for j in range(0, self.kernel_shape[1]):
kernel = kernel_stack[j]
array = input_feature_maps[j]
stride_x_pointer = 0
conv_counter = 1
if self.debug:
print('**** NEW CONVOLUTION ****')
while stride_x_pointer + kernel.shape[0] - 1 <= array.shape[0
] - 1:
array_snip = array[stride_x_pointer:stride_x_pointer +
kernel.shape[0]]
result = np.sum(np.multiply(array_snip, kernel))
conv_output_coordinate = i, stride_x_pointer // self.stride
output[conv_output_coordinate] += result
if self.debug:
print('convolution nr ', conv_counter)
print('\narray_snip: \n', array_snip)
print('\nkernel: \n', kernel)
print('\nelementwise multiplication: \n', np.
multiply(array_snip, kernel))
print('\nresult: ', result)
conv_counter += 1
stride_x_pointer += self.stride
if self.debug:
print('\n----REVIEW----\n')
print('Total convolutions: ', conv_counter)
print('\ninput_feature_map:\n ', array)
print('\napplied kernel:\n ', kernel)
print('\nconvolution result:\n ', output[i])
print('***********************************')
self.cached_output = output
self.cached_input = input_feature_maps
output = self.activation(self, output)
return output
def backward(self, jacobian_L_Z):
jacobian_L_Z = jacobian_L_Z.reshape(self.output_shape)
jacobian_L_Z = jacobian_L_Z * self.d_activation(self, self.
cached_output)
jacobian_L_W = self.compute_gradients(jacobian_L_Z)
self.weights_grads += jacobian_L_W
jacobian_L_Y = self.compute_J_LY(jacobian_L_Z)
return jacobian_L_Y
def update_gradients(self, learning_rate):
self.weights -= learning_rate * self.weights_grads
self.weights_grads = np.zeros(self.weights.shape)
def compute_gradients(self, jacobian_L_Z):
grads = np.zeros(self.weights.shape)
for i in range(self.weights.shape[0]):
for j in range(self.weights.shape[1]):
for k in range(self.weights.shape[2]):
for key in self.cached_calculation.keys():
if key[0] == k:
grads[i, j, k] += self.cached_input[j][key[1]
] * jacobian_L_Z[i][self.cached_calculation
[key]]
return grads
def compute_J_LY(self, jacobian_L_Z):
jacobian_L_Y = np.zeros(self.input_shape)
for i in range(self.input_shape[0]):
for j in range(self.input_shape[1]):
for key in self.cached_calculation.keys():
if key[1] == j:
for l in range(self.weights.shape[0]):
jacobian_L_Y[i, j] += self.weights[l][i][key[0]
] * jacobian_L_Z[l][self.cached_calculation
[key]]
return jacobian_L_Y
def calculate_output_shape(self):
width = math.floor((self.input_shape[1] - self.kernel_shape[2] +
self.p_x_start + self.p_x_stop) / self.stride + 1)
return self.kernel_shape[0], width
def calculate_padding(self):
s = self.stride
f = self.kernel_shape[2]
i = self.input_shape[1]
if self.mode == 'full':
p_x_start = f - 1
p_x_stop = f - 1
elif self.mode == 'same':
p_x_start = math.floor((s * math.ceil(i / s) - i + f - s) / 2)
p_x_stop = math.ceil((s * math.ceil(i / s) - i + f - s) / 2)
else:
p_x_start = 0
p_x_stop = 0
return p_x_start, p_x_stop
def apply_zero_padding(self, input_feature_maps):
padded_input_feature_maps = np.zeros((input_feature_maps.shape[0],
input_feature_maps.shape[1] + self.p_x_start + self.p_x_stop))
for channel in range(input_feature_maps.shape[0]):
array = input_feature_maps[channel]
padded_array = np.zeros(array.shape[0] + self.p_x_start + self.
p_x_stop)
padded_array[self.p_x_start:array.shape[0] + self.p_x_start
] = array
padded_input_feature_maps[channel] = padded_array
return padded_input_feature_maps
def __str__(self):
return 'Conv 1D Layer type with ' + str(self.kernel_shape[0]
) + ' kernels of shape = ' + str(self.kernel_shape[1:]
) + 'input/output of shape' + str(self.input_shape) + '/' + str(
self.output_shape) + ' stride= ' + str(self.stride
) + ' mode= ' + str(self.mode
) + ' with activation = ' + self.activation_name
class softmax:
def __init__(self, size):
self.size = size
self.shape = 1, size
self.type = 'softmax'
self.activation_function = activations.softmax
def forward(self, input_data):
return self.activation_function(self, input_data)
def backward(self, jacobian_L_S, softmaxed_network_output):
jacobian_soft = self.compute_j_soft(softmaxed_network_output)
jacobian_L_Z = np.dot(jacobian_L_S, jacobian_soft)
return jacobian_L_Z
def compute_j_soft(self, S):
S = np.squeeze(S)
n = len(S)
j_soft = np.zeros((n, n))
for i in range(n):
for j in range(n):
if i == j:
j_soft[i][j] = S[i] - S[i] ** 2
else:
j_soft[i][j] = -S[i] * S[j]
return j_soft
def __str__(self):
return 'Softmax Layer of size = ' + str(self.size)
<|reserved_special_token_1|>
import numpy as np
import math
import activations
class FC_layer():
def __init__(self, input_size, output_size, weight_init_range, activation, debug):
self.type = "FC"
self.activation_name = activation
self.shape = (input_size, output_size)
self.activation = activations.get_activation_function(activation)
self.d_activation = activations.get_activation_derivative(activation)
self.input = None
self.output = None
self.weights = np.random.uniform(low=weight_init_range[0], high= weight_init_range[1], size=(input_size, output_size))
self.bias = np.random.rand(1,output_size)
self.weights_grads = np.zeros(self.weights.shape)
self.bias_grads = np.zeros(self.bias.shape)
self.debug = debug
def forward(self, input_activations):
# Dot product of input with W plus bias. Cache, activate and return
output = np.dot(input_activations, self.weights) + self.bias
# Cache the weighted outputs and inputs
#self.output = output
self.input = input_activations
        # Pass the output through the activation function
output = self.activation(self, output)
self.output = output
return output
def backward(self, jacobian_L_Z):
        # Receive the jacobian of the loss with respect to this layer's output from the downstream layer.
        # PURPOSE: calculate the weight gradients, the bias gradients and the input loss
        # that will be passed back to the previous activation layer, and so on, up to this layer's input
        Y = self.input
        # Create the jacobian J_Z_sum from the layer's cached outputs and the derivative of the activation function
jacobian_Z_sum = self.create_jacobian_Z_sum()
# Find the Weights gradients jacobian_L_W
# Compute the simple jacobian linking the outputs and the weights
simp_jacobian_Z_W = np.outer(Y, jacobian_Z_sum.diagonal())
# Then compute the jacobian linking the loss to the weights
jacobian_L_W = jacobian_L_Z * simp_jacobian_Z_W
        # Calculate the input loss jacobian_L_Y
        # by taking the dot product of the output loss with the transposed weight matrix (mapping the (1, N) output loss back onto the (1, M) input space)
        jacobian_Z_Y = np.dot(jacobian_Z_sum, self.weights.T)
        jacobian_L_Y = np.dot(jacobian_L_Z, jacobian_Z_Y)
        # Bias loss is the same as the output loss --> the bias influence on the loss == the layer output's influence on the loss
        jacobian_L_B = jacobian_L_Z
        # Now accumulate the bias loss and weight loss (the calculated gradients).
        # These are applied at the end of the batch, or per sample for SGD
        self.weights_grads = self.weights_grads + jacobian_L_W
        self.bias_grads = self.bias_grads + jacobian_L_B
        # Finally return the calculated input loss --> this becomes the output loss of the upstream (previous) layer
return jacobian_L_Y
def create_jacobian_Z_sum(self):
return np.identity(self.output[0].size) * self.d_activation(self, self.output)
def update_gradients(self, learning_rate, gradient_avg_factor = 1):
        #Update gradients, useful when doing batch learning
# Get the avg of the gradients (for SGD divide by 1, else divide by batchsize)
## UPDATE: removed the division by batchsize: Implemented this factor in the learning rate
#self.weights_grads = self.weights_grads / gradient_avg_factor
#self.bias_grads = self.bias_grads / gradient_avg_factor
# Update weights and biases
self.weights -= learning_rate * self.weights_grads
self.bias -= learning_rate * self.bias_grads
self.weights_grads = np.zeros(self.weights.shape)
self.bias_grads = np.zeros(self.bias.shape)
def __str__(self):
return "FC Layer type size = " + str(self.weights.shape) + " with activation = " + self.activation_name
class conv2D():
def __init__(self, input_shape, n_kernels, kernel_shape, strides, modes, weight_init_range, activation, debug):
self.type = "conv2D"
self.input_shape = input_shape
self.activation_name = activation
#Kernel stack shape for the layer (N, I, K_x, K_y)
self.kernel_shape = (n_kernels, input_shape[0], kernel_shape[0], kernel_shape[1])
self.activation = activations.get_activation_function(activation)
self.d_activation = activations.get_activation_derivative(activation)
self.strides = strides
self.modes = modes
self.weights = np.random.uniform(low=weight_init_range[0], high= weight_init_range[1], size= self.kernel_shape)
self.weights_grads = np.zeros(self.weights.shape)
self.p_x_start, self.p_x_stop, self.p_y_start, self.p_y_stop = self.calculate_padding()
self.output_shape = self.calculate_output_shape()
self.cached_calculation = {}
self.cache_weights_input_output_triplet_locations()
self.cached_output = None
self.debug = debug
'''print("###########################")
a = np.random.randint(1,4,(6,6))
print(a)
padded_a = self.apply_zero_padding(a)
print(padded_a)
print("kernel shape", (self.kernel_shape[2], self.kernel_shape[3]))
print("input shape", a.shape)
print("padded shape", padded_a.shape)
print("###########################")'''
def cache_weights_input_output_triplet_locations(self):
placeholder_input = np.zeros(self.input_shape)
array = placeholder_input[0]
kernel = self.weights[0][0]
stride_x_pointer = 0
while(stride_x_pointer + kernel.shape[0] - 1 <= array.shape[0] - 1):
stride_y_pointer = 0
            #while the kernel does not go over the x-axis of the array
while(stride_y_pointer + kernel.shape[1] -1 <= array.shape[1] - 1):
                #while the kernel does not go over the y-axis of the array
#cache all touched weights and input for each kernel (output or Coordinates??)
for row in range(kernel.shape[0]):
for column in range(kernel.shape[1]):
# Cache coordinate only: (weight, input) --> output
#format: key ((weight_x_pos, weight_y_pos), (input_x_pos, input_y_pos)) ---> (output_x_pos, output_y_pos)
conv_output_coordinate = (stride_x_pointer // self.strides[0], stride_y_pointer // self.strides[1])
self.cached_calculation[((row, column), (row + stride_x_pointer , column + stride_y_pointer))] = conv_output_coordinate
#Cache weight coordinate and input/output values
                # Update the stride along the y-axis
stride_y_pointer += self.strides[1]
            #update the stride along the x-axis
stride_x_pointer += self.strides[0]
#End of convolution
def forward(self, input_feature_maps):
#reset the cached calculations from the previous forward pass
#self.cached_calculation = {}
output = np.zeros(self.output_shape)
#Apply padding
input_feature_maps = self.apply_zero_padding(input_feature_maps)
for i in range(0, self.kernel_shape[0]):
#for each kernel stack
kernel_stack = self.weights[i]
for j in range(0, self.kernel_shape[1]):
#for each kernel in the kernel stack (or input channel)
kernel = kernel_stack[j]
array = input_feature_maps[j]
stride_x_pointer = 0
conv_counter = 1
if self.debug:
print("**** NEW CONVOLUTION ****")
while(stride_x_pointer + kernel.shape[0] - 1 <= array.shape[0] - 1):
stride_y_pointer = 0
                    #while the kernel does not go over the x-axis of the array
while(stride_y_pointer + kernel.shape[1] -1 <= array.shape[1] - 1):
                        #while the kernel does not go over the y-axis of the array
#Get the snip of the array to apply convolution on
array_snip = array[stride_x_pointer: stride_x_pointer + kernel.shape[0], stride_y_pointer: stride_y_pointer + kernel.shape[1]]
#apply convolution and get the result
result = np.sum(np.multiply(array_snip, kernel))
#update the output tensor
conv_output_coordinate = (i, stride_x_pointer // self.strides[0], stride_y_pointer // self.strides[1])
output[conv_output_coordinate] += result
'''#cache all the results, touched weights and input for each kernel (output or Coordinates??)
for row in range(kernel.shape[0]):
for column in range(kernel.shape[1]):
# Cache coordinate only: (weight, input) --> output
#format: key ((kernel_stack_number, 2D_kernel_number, weight_x_pos, weight_y_pos), (input_channel, input_x_pos, input_y_pos)) ---> (feature_map_number, output_x_pos, output_y_pos)
self.cached_calculation[((i, j, row, column), (j, row + stride_x_pointer , column + stride_y_pointer))] = conv_output_coordinate
#Cache weight coordinate and input/output values
#ALTERNATIVE
# format: key ((kernel_stack_number, 2D_kernel_number, weight_x_pos, weight_y_pos), input_val) ---> output_val
#self.cached_calculation[((i, j, row, column), array_snip[row, column])] = result'''
if self.debug:
print("convolution nr ", conv_counter )
print("\narray_snip: \n", array_snip)
print("\nkernel: \n", kernel)
print("\nelementwise multiplication: \n", np.multiply(array_snip, kernel))
print("\nresult: ", result)
                        # Update the stride along the y-axis
stride_y_pointer += self.strides[1]
conv_counter+=1
                    #update the stride along the x-axis
stride_x_pointer += self.strides[0]
#End of convolution
if self.debug:
print("\n----REVIEW----\n")
print("Total convolutions: ", conv_counter)
print("\ninput_feature_map:\n ", array)
print("\napplied kernel:\n ", kernel)
print("\nconvolution result:\n ", output[i])
print("***********************************")
#Cache input and output
self.cached_output = output
self.cached_input = input_feature_maps
#Apply activation
output = self.activation(self, output)
return output
def backward(self, jacobian_L_Z):
#Reshape J_LZ from FC to Conv2D and pass through activation layer
jacobian_L_Z = jacobian_L_Z.reshape(self.output_shape)
#print("JLZ før relu\n", jacobian_L_Z)
#jacobian_L_Z = self.d_activation(self, jacobian_L_Z)
#print("cached out after activation\n", self.cached_output)
jacobian_L_Z = jacobian_L_Z * self.d_activation(self, self.cached_output)
#print("JLZ etter relu\n", jacobian_L_Z)
# J_L_Z * f'(cached_output)
#Calculate J_LW
jacobian_L_W = self.compute_gradients(jacobian_L_Z)
self.weights_grads += jacobian_L_W
#Calculate J_LX
jacobian_L_Y = self.compute_J_LY(jacobian_L_Z)
#Pass Jacobian L Y upstream
return jacobian_L_Y
def update_gradients(self, learning_rate):
self.weights -= learning_rate * self.weights_grads
self.weights_grads = np.zeros(self.weights.shape)
def compute_gradients(self, jacobian_L_Z):
grads = np.zeros(self.weights.shape)
        #Iterate through all the weights (4 dimensions)
#Iterate through the kernel stacks
for i in range(self.weights.shape[0]):
            #Iterate through each kernel/input channel
for j in range(self.weights.shape[1]):
#iterate through the x-axis of the kernel
for k in range(self.weights.shape[2]):
#iterate through the y-axis of the kernel
for l in range(self.weights.shape[3]):
#cached_data = {k: v for k,v in self.cached_calculation.items() if k[0] == (i,j,k,l)}
for key in self.cached_calculation.keys():
if key[0] == (k,l):
grads[(i,j,k,l)] += self.cached_input[j][key[1]] * jacobian_L_Z[i][self.cached_calculation[key]]
return grads
def compute_J_LY(self, jacobian_L_Z):
jacobian_L_Y = np.zeros(self.input_shape)
        #Iterate through all the inputs (3 dimensions)
#iterate through all channels/kernel of a kernel stack
for i in range(self.input_shape[0]):
            #iterate through the x-axis of the 2D input
for j in range(self.input_shape[1]):
                #iterate through the y-axis of the 2D input
for k in range(self.input_shape[2]):
#cached_data = {k: v for k,v in self.cached_calculation.items() if k[0] == (i,j,k,l)}
for key in self.cached_calculation.keys():
if key[1] == (j,k):
#for each kernel-stack
for l in range(self.weights.shape[0]):
jacobian_L_Y[(i,j,k)] += self.weights[l][i][key[0]] * jacobian_L_Z[l][self.cached_calculation[key]]
return jacobian_L_Y
def calculate_output_shape(self):
width = math.floor((self.input_shape[1] - self.kernel_shape[2] + self.p_x_start + self.p_x_stop)/self.strides[0] + 1)
height = math.floor((self.input_shape[2] - self.kernel_shape[3] + self.p_y_start + self.p_y_stop)/self.strides[1] + 1 )
return (self.kernel_shape[0], width, height)
def calculate_padding(self):
        #Calculate padding along the x-axis
s = self.strides[0]
f = self.kernel_shape[2]
i = self.input_shape[1]
if self.modes[0] == "full":
#Every pixel must experience every weight of the kernel
p_x_start = f - 1
p_x_stop = f - 1
elif self.modes[0] == "same":
#Every pixel must experience the middle weight of the kernel
p_x_start = math.floor((s*math.ceil(i/s)-i+f-s)/2)
p_x_stop = math.ceil((s*math.ceil(i/s)-i+f-s)/2)
else:
p_x_start = 0
p_x_stop = 0
        #Calculate padding along the y-axis
s = self.strides[1]
f = self.kernel_shape[3]
i = self.input_shape[2]
if self.modes[1] == "full":
#Every pixel must experience every weight of the kernel
p_y_start = f - 1
p_y_stop = f - 1
elif self.modes[1] == "same":
#Every pixel must experience the middle weight of the kernel
p_y_start = math.floor((s*math.ceil(i/s)-i+f-s)/2)
p_y_stop = math.ceil((s*math.ceil(i/s)-i+f-s)/2)
else:
p_y_start = 0
p_y_stop = 0
return p_x_start, p_x_stop, p_y_start, p_y_stop
def apply_zero_padding(self, input_feature_maps):
# Apply zero padding to the input feature maps according to the modes, strides and kernel size
padded_input_feature_maps = np.zeros((input_feature_maps.shape[0], input_feature_maps.shape[1] + self.p_x_start + self.p_x_stop, input_feature_maps.shape[2] + self.p_y_start + self.p_y_stop ))
for channel in range(input_feature_maps.shape[0]):
array = input_feature_maps[channel]
#Create the background zero array
padded_array = np.zeros((array.shape[0] + self.p_x_start + self.p_x_stop, array.shape[1] + self.p_y_start + self.p_y_stop))
#Copy the array in the middle of the zero background
padded_array[self.p_x_start:array.shape[0]+ self.p_x_start, self.p_y_start:array.shape[1]+ self.p_y_start] = array
#Save the array
padded_input_feature_maps[channel] = padded_array
return padded_input_feature_maps
def __str__(self):
return "Conv 2D Layer type with "+ str(self.kernel_shape[0]) +" kernels of shape = " + str(self.kernel_shape[1:]) +"input/output of shape" + str(self.input_shape)+"/" + str(self.output_shape) + " strides= s" + str(self.strides) + " modes= " + str(self.modes) +" with activation = " + self.activation_name
class conv1D():
def __init__(self, input_shape, n_kernels, kernel_shape, stride, mode, weight_init_range, activation, debug):
self.type = "conv1D"
self.input_shape = input_shape
self.activation_name = activation
        #Kernel stack shape for the layer (Num_kernel_stacks, Channels, Kernel_x)
self.kernel_shape = (n_kernels, input_shape[0], kernel_shape)
self.activation = activations.get_activation_function(activation)
self.d_activation = activations.get_activation_derivative(activation)
self.stride = stride
self.mode = mode
self.weights = np.random.uniform(low=weight_init_range[0], high= weight_init_range[1], size= self.kernel_shape)
self.weights_grads = np.zeros(self.weights.shape)
self.p_x_start, self.p_x_stop = self.calculate_padding()
self.output_shape = self.calculate_output_shape()
self.cached_calculation = {}
self.cache_weights_input_output_triplet_locations()
self.cached_output = None
self.debug = debug
def cache_weights_input_output_triplet_locations(self):
        #Perform an empty convolution and cache the positions of every kernel/input/output triplet
placeholder_input = np.zeros(self.input_shape)
array = placeholder_input[0]
kernel = self.weights[0][0]
stride_x_pointer = 0
while(stride_x_pointer + kernel.shape[0] - 1 <= array.shape[0] - 1):
            #while the kernel does not go over the x-axis of the array
#cache all touched weights and input for each kernel
for column in range(kernel.shape[0]):
# Cache coordinate only: (weight, input) --> output
#format: key ((weight_x_pos), (input_x_pos)) ---> (output_x_pos)
conv_output_coordinate = (stride_x_pointer // self.stride)
self.cached_calculation[(column, column + stride_x_pointer)] = conv_output_coordinate
#Cache weight coordinate and input/output values
            #update the stride along the x-axis
stride_x_pointer += self.stride
#End of convolution
def forward(self, input_feature_maps):
output = np.zeros(self.output_shape)
#Apply padding
input_feature_maps = self.apply_zero_padding(input_feature_maps)
for i in range(0, self.kernel_shape[0]):
#for each kernel stack
kernel_stack = self.weights[i]
for j in range(0, self.kernel_shape[1]):
#for each kernel in the kernel stack (or input channel)
kernel = kernel_stack[j]
array = input_feature_maps[j]
stride_x_pointer = 0
conv_counter = 1
if self.debug:
print("**** NEW CONVOLUTION ****")
while(stride_x_pointer + kernel.shape[0] - 1 <= array.shape[0] - 1):
                    #while the kernel does not go over the x-axis of the array
#Get the snip of the array to apply convolution on
array_snip = array[stride_x_pointer: stride_x_pointer + kernel.shape[0]]
#apply convolution and get the result
result = np.sum(np.multiply(array_snip, kernel))
#update the output tensor
conv_output_coordinate = (i, stride_x_pointer // self.stride)
output[conv_output_coordinate] += result
if self.debug:
print("convolution nr ", conv_counter )
print("\narray_snip: \n", array_snip)
print("\nkernel: \n", kernel)
print("\nelementwise multiplication: \n", np.multiply(array_snip, kernel))
print("\nresult: ", result)
conv_counter+=1
                    #update the stride along the x-axis
stride_x_pointer += self.stride
#End of convolution
if self.debug:
print("\n----REVIEW----\n")
print("Total convolutions: ", conv_counter)
print("\ninput_feature_map:\n ", array)
print("\napplied kernel:\n ", kernel)
print("\nconvolution result:\n ", output[i])
print("***********************************")
#Cache input and output
self.cached_output = output
self.cached_input = input_feature_maps
#Apply activation
output = self.activation(self, output)
return output
def backward(self, jacobian_L_Z):
#Reshape J_LZ from FC to Conv2D and pass through activation layer
jacobian_L_Z = jacobian_L_Z.reshape(self.output_shape)
#print("JLZ før relu\n", jacobian_L_Z)
#jacobian_L_Z = self.d_activation(self, jacobian_L_Z)
#print("cached out after activation\n", self.cached_output)
jacobian_L_Z = jacobian_L_Z * self.d_activation(self, self.cached_output)
#print("JLZ etter relu\n", jacobian_L_Z)
# J_L_Z * f'(cached_output)
#Calculate J_LW
jacobian_L_W = self.compute_gradients(jacobian_L_Z)
self.weights_grads += jacobian_L_W
#Calculate J_LX
jacobian_L_Y = self.compute_J_LY(jacobian_L_Z)
#Pass Jacobian L Y upstream
return jacobian_L_Y
def update_gradients(self, learning_rate):
self.weights -= learning_rate * self.weights_grads
self.weights_grads = np.zeros(self.weights.shape)
def compute_gradients(self, jacobian_L_Z):
grads = np.zeros(self.weights.shape)
        #Iterate through all the weights (3 dimensions)
for i in range(self.weights.shape[0]):
for j in range(self.weights.shape[1]):
for k in range(self.weights.shape[2]):
for key in self.cached_calculation.keys():
if key[0] == k:
grads[(i,j,k)] += self.cached_input[j][key[1]] * jacobian_L_Z[i][self.cached_calculation[key]]
return grads
def compute_J_LY(self, jacobian_L_Z):
jacobian_L_Y = np.zeros(self.input_shape)
#Iterate through all the inputs (3 dimension)
#iterate through all channels/kernel of a kernel stack
for i in range(self.input_shape[0]):
            #iterate through the x-axis of the 1D input
for j in range(self.input_shape[1]):
for key in self.cached_calculation.keys():
if key[1] == j:
#for each kernel-stack
for l in range(self.weights.shape[0]):
jacobian_L_Y[(i,j)] += self.weights[l][i][key[0]] * jacobian_L_Z[l][self.cached_calculation[key]]
return jacobian_L_Y
def calculate_output_shape(self):
width = math.floor((self.input_shape[1] - self.kernel_shape[2] + self.p_x_start + self.p_x_stop)/self.stride + 1)
return (self.kernel_shape[0], width)
def calculate_padding(self):
        #Calculate padding along the x-axis
s = self.stride
f = self.kernel_shape[2]
i = self.input_shape[1]
if self.mode == "full":
#Every pixel must experience every weight of the kernel
p_x_start = f - 1
p_x_stop = f - 1
elif self.mode == "same":
#Every pixel must experience the middle weight of the kernel
p_x_start = math.floor((s*math.ceil(i/s)-i+f-s)/2)
p_x_stop = math.ceil((s*math.ceil(i/s)-i+f-s)/2)
else:
p_x_start = 0
p_x_stop = 0
return p_x_start, p_x_stop
def apply_zero_padding(self, input_feature_maps):
# Apply zero padding to the input feature maps according to the modes, strides and kernel size
#if self.p_x_start == 0 and self.p_x_stop == 0:
# return input_feature_maps
padded_input_feature_maps = np.zeros((input_feature_maps.shape[0], input_feature_maps.shape[1] + self.p_x_start + self.p_x_stop))
for channel in range(input_feature_maps.shape[0]):
array = input_feature_maps[channel]
#Create the background zero array
padded_array = np.zeros((array.shape[0] + self.p_x_start + self.p_x_stop))
#Copy the array in the middle of the zero background
padded_array[self.p_x_start:array.shape[0]+ self.p_x_start] = array
#Save the array
padded_input_feature_maps[channel] = padded_array
return padded_input_feature_maps
def __str__(self):
return "Conv 1D Layer type with "+ str(self.kernel_shape[0]) +" kernels of shape = " + str(self.kernel_shape[1:]) +"input/output of shape" + str(self.input_shape)+"/" + str(self.output_shape) + " stride= " + str(self.stride) + " mode= " + str(self.mode) +" with activation = " + self.activation_name
class softmax():
def __init__(self, size):
self.size = size
self.shape = (1, size)
self.type = "softmax"
self.activation_function = activations.softmax
def forward(self, input_data):
return self.activation_function(self, input_data)
def backward(self, jacobian_L_S, softmaxed_network_output):
# Create jacobian of derivate of softmax
jacobian_soft = self.compute_j_soft(softmaxed_network_output)
# Compute jacobian linking Loss to output
jacobian_L_Z = np.dot(jacobian_L_S, jacobian_soft)
return jacobian_L_Z
def compute_j_soft(self, S):
S = np.squeeze(S)
n = len(S)
j_soft = np.zeros((n,n))
for i in range(n):
for j in range(n):
if i == j:
j_soft[i][j] = S[i] - S[i]**2
else:
j_soft[i][j] = -S[i]*S[j]
return j_soft
def __str__(self):
return "Softmax Layer of size = " + str(self.size)
np.zeros(self.input_shape)\n for i in range(self.input_shape[0]):\n for j in range(self.input_shape[1]):\n for k in range(self.input_shape[2]):\n for key in self.cached_calculation.keys():\n if key[1] == (j, k):\n for l in range(self.weights.shape[0]):\n jacobian_L_Y[i, j, k] += self.weights[l][i][key\n [0]] * jacobian_L_Z[l][self.\n cached_calculation[key]]\n return jacobian_L_Y\n <mask token>\n <mask token>\n <mask token>\n\n def __str__(self):\n return 'Conv 2D Layer type with ' + str(self.kernel_shape[0]\n ) + ' kernels of shape = ' + str(self.kernel_shape[1:]\n ) + 'input/output of shape' + str(self.input_shape) + '/' + str(\n self.output_shape) + ' strides= s' + str(self.strides\n ) + ' modes= ' + str(self.modes\n ) + ' with activation = ' + self.activation_name\n\n\nclass conv1D:\n\n def __init__(self, input_shape, n_kernels, kernel_shape, stride, mode,\n weight_init_range, activation, debug):\n self.type = 'conv1D'\n self.input_shape = input_shape\n self.activation_name = activation\n self.kernel_shape = n_kernels, input_shape[0], kernel_shape\n self.activation = activations.get_activation_function(activation)\n self.d_activation = activations.get_activation_derivative(activation)\n self.stride = stride\n self.mode = mode\n self.weights = np.random.uniform(low=weight_init_range[0], high=\n weight_init_range[1], size=self.kernel_shape)\n self.weights_grads = np.zeros(self.weights.shape)\n self.p_x_start, self.p_x_stop = self.calculate_padding()\n self.output_shape = self.calculate_output_shape()\n self.cached_calculation = {}\n self.cache_weights_input_output_triplet_locations()\n self.cached_output = None\n self.debug = debug\n\n def cache_weights_input_output_triplet_locations(self):\n placeholder_input = np.zeros(self.input_shape)\n array = placeholder_input[0]\n kernel = self.weights[0][0]\n stride_x_pointer = 0\n while stride_x_pointer + kernel.shape[0] - 1 <= array.shape[0] - 1:\n for column in range(kernel.shape[0]):\n conv_output_coordinate = stride_x_pointer // self.stride\n self.cached_calculation[column, column + stride_x_pointer\n ] = conv_output_coordinate\n stride_x_pointer += self.stride\n\n def forward(self, input_feature_maps):\n output = np.zeros(self.output_shape)\n input_feature_maps = self.apply_zero_padding(input_feature_maps)\n for i in range(0, self.kernel_shape[0]):\n kernel_stack = self.weights[i]\n for j in range(0, self.kernel_shape[1]):\n kernel = kernel_stack[j]\n array = input_feature_maps[j]\n stride_x_pointer = 0\n conv_counter = 1\n if self.debug:\n print('**** NEW CONVOLUTION ****')\n while stride_x_pointer + kernel.shape[0] - 1 <= array.shape[0\n ] - 1:\n array_snip = array[stride_x_pointer:stride_x_pointer +\n kernel.shape[0]]\n result = np.sum(np.multiply(array_snip, kernel))\n conv_output_coordinate = i, stride_x_pointer // self.stride\n output[conv_output_coordinate] += result\n if self.debug:\n print('convolution nr ', conv_counter)\n print('\\narray_snip: \\n', array_snip)\n print('\\nkernel: \\n', kernel)\n print('\\nelementwise multiplication: \\n', np.\n multiply(array_snip, kernel))\n print('\\nresult: ', result)\n conv_counter += 1\n stride_x_pointer += self.stride\n if self.debug:\n print('\\n----REVIEW----\\n')\n print('Total convolutions: ', conv_counter)\n print('\\ninput_feature_map:\\n ', array)\n print('\\napplied kernel:\\n ', kernel)\n print('\\nconvolution result:\\n ', output[i])\n print('***********************************')\n self.cached_output = output\n self.cached_input = input_feature_maps\n output = 
self.activation(self, output)\n return output\n\n def backward(self, jacobian_L_Z):\n jacobian_L_Z = jacobian_L_Z.reshape(self.output_shape)\n jacobian_L_Z = jacobian_L_Z * self.d_activation(self, self.\n cached_output)\n jacobian_L_W = self.compute_gradients(jacobian_L_Z)\n self.weights_grads += jacobian_L_W\n jacobian_L_Y = self.compute_J_LY(jacobian_L_Z)\n return jacobian_L_Y\n\n def update_gradients(self, learning_rate):\n self.weights -= learning_rate * self.weights_grads\n self.weights_grads = np.zeros(self.weights.shape)\n\n def compute_gradients(self, jacobian_L_Z):\n grads = np.zeros(self.weights.shape)\n for i in range(self.weights.shape[0]):\n for j in range(self.weights.shape[1]):\n for k in range(self.weights.shape[2]):\n for key in self.cached_calculation.keys():\n if key[0] == k:\n grads[i, j, k] += self.cached_input[j][key[1]\n ] * jacobian_L_Z[i][self.cached_calculation\n [key]]\n return grads\n\n def compute_J_LY(self, jacobian_L_Z):\n jacobian_L_Y = np.zeros(self.input_shape)\n for i in range(self.input_shape[0]):\n for j in range(self.input_shape[1]):\n for key in self.cached_calculation.keys():\n if key[1] == j:\n for l in range(self.weights.shape[0]):\n jacobian_L_Y[i, j] += self.weights[l][i][key[0]\n ] * jacobian_L_Z[l][self.cached_calculation\n [key]]\n return jacobian_L_Y\n\n def calculate_output_shape(self):\n width = math.floor((self.input_shape[1] - self.kernel_shape[2] +\n self.p_x_start + self.p_x_stop) / self.stride + 1)\n return self.kernel_shape[0], width\n\n def calculate_padding(self):\n s = self.stride\n f = self.kernel_shape[2]\n i = self.input_shape[1]\n if self.mode == 'full':\n p_x_start = f - 1\n p_x_stop = f - 1\n elif self.mode == 'same':\n p_x_start = math.floor((s * math.ceil(i / s) - i + f - s) / 2)\n p_x_stop = math.ceil((s * math.ceil(i / s) - i + f - s) / 2)\n else:\n p_x_start = 0\n p_x_stop = 0\n return p_x_start, p_x_stop\n\n def apply_zero_padding(self, input_feature_maps):\n padded_input_feature_maps = np.zeros((input_feature_maps.shape[0], \n input_feature_maps.shape[1] + self.p_x_start + self.p_x_stop))\n for channel in range(input_feature_maps.shape[0]):\n array = input_feature_maps[channel]\n padded_array = np.zeros(array.shape[0] + self.p_x_start + self.\n p_x_stop)\n padded_array[self.p_x_start:array.shape[0] + self.p_x_start\n ] = array\n padded_input_feature_maps[channel] = padded_array\n return padded_input_feature_maps\n\n def __str__(self):\n return 'Conv 1D Layer type with ' + str(self.kernel_shape[0]\n ) + ' kernels of shape = ' + str(self.kernel_shape[1:]\n ) + 'input/output of shape' + str(self.input_shape) + '/' + str(\n self.output_shape) + ' stride= ' + str(self.stride\n ) + ' mode= ' + str(self.mode\n ) + ' with activation = ' + self.activation_name\n\n\nclass softmax:\n\n def __init__(self, size):\n self.size = size\n self.shape = 1, size\n self.type = 'softmax'\n self.activation_function = activations.softmax\n\n def forward(self, input_data):\n return self.activation_function(self, input_data)\n\n def backward(self, jacobian_L_S, softmaxed_network_output):\n jacobian_soft = self.compute_j_soft(softmaxed_network_output)\n jacobian_L_Z = np.dot(jacobian_L_S, jacobian_soft)\n return jacobian_L_Z\n\n def compute_j_soft(self, S):\n S = np.squeeze(S)\n n = len(S)\n j_soft = np.zeros((n, n))\n for i in range(n):\n for j in range(n):\n if i == j:\n j_soft[i][j] = S[i] - S[i] ** 2\n else:\n j_soft[i][j] = -S[i] * S[j]\n return j_soft\n\n def __str__(self):\n return 'Softmax Layer of size = ' + str(self.size)\n",
"step-3": "<mask token>\n\n\nclass conv2D:\n\n def __init__(self, input_shape, n_kernels, kernel_shape, strides, modes,\n weight_init_range, activation, debug):\n self.type = 'conv2D'\n self.input_shape = input_shape\n self.activation_name = activation\n self.kernel_shape = n_kernels, input_shape[0], kernel_shape[0\n ], kernel_shape[1]\n self.activation = activations.get_activation_function(activation)\n self.d_activation = activations.get_activation_derivative(activation)\n self.strides = strides\n self.modes = modes\n self.weights = np.random.uniform(low=weight_init_range[0], high=\n weight_init_range[1], size=self.kernel_shape)\n self.weights_grads = np.zeros(self.weights.shape)\n self.p_x_start, self.p_x_stop, self.p_y_start, self.p_y_stop = (self\n .calculate_padding())\n self.output_shape = self.calculate_output_shape()\n self.cached_calculation = {}\n self.cache_weights_input_output_triplet_locations()\n self.cached_output = None\n self.debug = debug\n \"\"\"print(\"###########################\")\n a = np.random.randint(1,4,(6,6))\n print(a)\n padded_a = self.apply_zero_padding(a)\n print(padded_a)\n print(\"kernel shape\", (self.kernel_shape[2], self.kernel_shape[3]))\n print(\"input shape\", a.shape)\n print(\"padded shape\", padded_a.shape)\n print(\"###########################\")\"\"\"\n\n def cache_weights_input_output_triplet_locations(self):\n placeholder_input = np.zeros(self.input_shape)\n array = placeholder_input[0]\n kernel = self.weights[0][0]\n stride_x_pointer = 0\n while stride_x_pointer + kernel.shape[0] - 1 <= array.shape[0] - 1:\n stride_y_pointer = 0\n while stride_y_pointer + kernel.shape[1] - 1 <= array.shape[1] - 1:\n for row in range(kernel.shape[0]):\n for column in range(kernel.shape[1]):\n conv_output_coordinate = (stride_x_pointer // self.\n strides[0], stride_y_pointer // self.strides[1])\n self.cached_calculation[(row, column), (row +\n stride_x_pointer, column + stride_y_pointer)\n ] = conv_output_coordinate\n stride_y_pointer += self.strides[1]\n stride_x_pointer += self.strides[0]\n\n def forward(self, input_feature_maps):\n output = np.zeros(self.output_shape)\n input_feature_maps = self.apply_zero_padding(input_feature_maps)\n for i in range(0, self.kernel_shape[0]):\n kernel_stack = self.weights[i]\n for j in range(0, self.kernel_shape[1]):\n kernel = kernel_stack[j]\n array = input_feature_maps[j]\n stride_x_pointer = 0\n conv_counter = 1\n if self.debug:\n print('**** NEW CONVOLUTION ****')\n while stride_x_pointer + kernel.shape[0] - 1 <= array.shape[0\n ] - 1:\n stride_y_pointer = 0\n while stride_y_pointer + kernel.shape[1\n ] - 1 <= array.shape[1] - 1:\n array_snip = array[stride_x_pointer:\n stride_x_pointer + kernel.shape[0],\n stride_y_pointer:stride_y_pointer + kernel.shape[1]\n ]\n result = np.sum(np.multiply(array_snip, kernel))\n conv_output_coordinate = (i, stride_x_pointer //\n self.strides[0], stride_y_pointer // self.\n strides[1])\n output[conv_output_coordinate] += result\n \"\"\"#cache all the results, touched weights and input for each kernel (output or Coordinates??)\n for row in range(kernel.shape[0]):\n for column in range(kernel.shape[1]):\n # Cache coordinate only: (weight, input) --> output\n #format: key ((kernel_stack_number, 2D_kernel_number, weight_x_pos, weight_y_pos), (input_channel, input_x_pos, input_y_pos)) ---> (feature_map_number, output_x_pos, output_y_pos)\n self.cached_calculation[((i, j, row, column), (j, row + stride_x_pointer , column + stride_y_pointer))] = conv_output_coordinate\n #Cache weight 
coordinate and input/output values\n #ALTERNATIVE\n # format: key ((kernel_stack_number, 2D_kernel_number, weight_x_pos, weight_y_pos), input_val) ---> output_val\n #self.cached_calculation[((i, j, row, column), array_snip[row, column])] = result\"\"\"\n if self.debug:\n print('convolution nr ', conv_counter)\n print('\\narray_snip: \\n', array_snip)\n print('\\nkernel: \\n', kernel)\n print('\\nelementwise multiplication: \\n', np.\n multiply(array_snip, kernel))\n print('\\nresult: ', result)\n stride_y_pointer += self.strides[1]\n conv_counter += 1\n stride_x_pointer += self.strides[0]\n if self.debug:\n print('\\n----REVIEW----\\n')\n print('Total convolutions: ', conv_counter)\n print('\\ninput_feature_map:\\n ', array)\n print('\\napplied kernel:\\n ', kernel)\n print('\\nconvolution result:\\n ', output[i])\n print('***********************************')\n self.cached_output = output\n self.cached_input = input_feature_maps\n output = self.activation(self, output)\n return output\n\n def backward(self, jacobian_L_Z):\n jacobian_L_Z = jacobian_L_Z.reshape(self.output_shape)\n jacobian_L_Z = jacobian_L_Z * self.d_activation(self, self.\n cached_output)\n jacobian_L_W = self.compute_gradients(jacobian_L_Z)\n self.weights_grads += jacobian_L_W\n jacobian_L_Y = self.compute_J_LY(jacobian_L_Z)\n return jacobian_L_Y\n\n def update_gradients(self, learning_rate):\n self.weights -= learning_rate * self.weights_grads\n self.weights_grads = np.zeros(self.weights.shape)\n\n def compute_gradients(self, jacobian_L_Z):\n grads = np.zeros(self.weights.shape)\n for i in range(self.weights.shape[0]):\n for j in range(self.weights.shape[1]):\n for k in range(self.weights.shape[2]):\n for l in range(self.weights.shape[3]):\n for key in self.cached_calculation.keys():\n if key[0] == (k, l):\n grads[i, j, k, l] += self.cached_input[j][key\n [1]] * jacobian_L_Z[i][self.\n cached_calculation[key]]\n return grads\n\n def compute_J_LY(self, jacobian_L_Z):\n jacobian_L_Y = np.zeros(self.input_shape)\n for i in range(self.input_shape[0]):\n for j in range(self.input_shape[1]):\n for k in range(self.input_shape[2]):\n for key in self.cached_calculation.keys():\n if key[1] == (j, k):\n for l in range(self.weights.shape[0]):\n jacobian_L_Y[i, j, k] += self.weights[l][i][key\n [0]] * jacobian_L_Z[l][self.\n cached_calculation[key]]\n return jacobian_L_Y\n\n def calculate_output_shape(self):\n width = math.floor((self.input_shape[1] - self.kernel_shape[2] +\n self.p_x_start + self.p_x_stop) / self.strides[0] + 1)\n height = math.floor((self.input_shape[2] - self.kernel_shape[3] +\n self.p_y_start + self.p_y_stop) / self.strides[1] + 1)\n return self.kernel_shape[0], width, height\n <mask token>\n <mask token>\n\n def __str__(self):\n return 'Conv 2D Layer type with ' + str(self.kernel_shape[0]\n ) + ' kernels of shape = ' + str(self.kernel_shape[1:]\n ) + 'input/output of shape' + str(self.input_shape) + '/' + str(\n self.output_shape) + ' strides= s' + str(self.strides\n ) + ' modes= ' + str(self.modes\n ) + ' with activation = ' + self.activation_name\n\n\nclass conv1D:\n\n def __init__(self, input_shape, n_kernels, kernel_shape, stride, mode,\n weight_init_range, activation, debug):\n self.type = 'conv1D'\n self.input_shape = input_shape\n self.activation_name = activation\n self.kernel_shape = n_kernels, input_shape[0], kernel_shape\n self.activation = activations.get_activation_function(activation)\n self.d_activation = activations.get_activation_derivative(activation)\n self.stride = stride\n self.mode = 
mode\n self.weights = np.random.uniform(low=weight_init_range[0], high=\n weight_init_range[1], size=self.kernel_shape)\n self.weights_grads = np.zeros(self.weights.shape)\n self.p_x_start, self.p_x_stop = self.calculate_padding()\n self.output_shape = self.calculate_output_shape()\n self.cached_calculation = {}\n self.cache_weights_input_output_triplet_locations()\n self.cached_output = None\n self.debug = debug\n\n def cache_weights_input_output_triplet_locations(self):\n placeholder_input = np.zeros(self.input_shape)\n array = placeholder_input[0]\n kernel = self.weights[0][0]\n stride_x_pointer = 0\n while stride_x_pointer + kernel.shape[0] - 1 <= array.shape[0] - 1:\n for column in range(kernel.shape[0]):\n conv_output_coordinate = stride_x_pointer // self.stride\n self.cached_calculation[column, column + stride_x_pointer\n ] = conv_output_coordinate\n stride_x_pointer += self.stride\n\n def forward(self, input_feature_maps):\n output = np.zeros(self.output_shape)\n input_feature_maps = self.apply_zero_padding(input_feature_maps)\n for i in range(0, self.kernel_shape[0]):\n kernel_stack = self.weights[i]\n for j in range(0, self.kernel_shape[1]):\n kernel = kernel_stack[j]\n array = input_feature_maps[j]\n stride_x_pointer = 0\n conv_counter = 1\n if self.debug:\n print('**** NEW CONVOLUTION ****')\n while stride_x_pointer + kernel.shape[0] - 1 <= array.shape[0\n ] - 1:\n array_snip = array[stride_x_pointer:stride_x_pointer +\n kernel.shape[0]]\n result = np.sum(np.multiply(array_snip, kernel))\n conv_output_coordinate = i, stride_x_pointer // self.stride\n output[conv_output_coordinate] += result\n if self.debug:\n print('convolution nr ', conv_counter)\n print('\\narray_snip: \\n', array_snip)\n print('\\nkernel: \\n', kernel)\n print('\\nelementwise multiplication: \\n', np.\n multiply(array_snip, kernel))\n print('\\nresult: ', result)\n conv_counter += 1\n stride_x_pointer += self.stride\n if self.debug:\n print('\\n----REVIEW----\\n')\n print('Total convolutions: ', conv_counter)\n print('\\ninput_feature_map:\\n ', array)\n print('\\napplied kernel:\\n ', kernel)\n print('\\nconvolution result:\\n ', output[i])\n print('***********************************')\n self.cached_output = output\n self.cached_input = input_feature_maps\n output = self.activation(self, output)\n return output\n\n def backward(self, jacobian_L_Z):\n jacobian_L_Z = jacobian_L_Z.reshape(self.output_shape)\n jacobian_L_Z = jacobian_L_Z * self.d_activation(self, self.\n cached_output)\n jacobian_L_W = self.compute_gradients(jacobian_L_Z)\n self.weights_grads += jacobian_L_W\n jacobian_L_Y = self.compute_J_LY(jacobian_L_Z)\n return jacobian_L_Y\n\n def update_gradients(self, learning_rate):\n self.weights -= learning_rate * self.weights_grads\n self.weights_grads = np.zeros(self.weights.shape)\n\n def compute_gradients(self, jacobian_L_Z):\n grads = np.zeros(self.weights.shape)\n for i in range(self.weights.shape[0]):\n for j in range(self.weights.shape[1]):\n for k in range(self.weights.shape[2]):\n for key in self.cached_calculation.keys():\n if key[0] == k:\n grads[i, j, k] += self.cached_input[j][key[1]\n ] * jacobian_L_Z[i][self.cached_calculation\n [key]]\n return grads\n\n def compute_J_LY(self, jacobian_L_Z):\n jacobian_L_Y = np.zeros(self.input_shape)\n for i in range(self.input_shape[0]):\n for j in range(self.input_shape[1]):\n for key in self.cached_calculation.keys():\n if key[1] == j:\n for l in range(self.weights.shape[0]):\n jacobian_L_Y[i, j] += self.weights[l][i][key[0]\n ] * 
jacobian_L_Z[l][self.cached_calculation\n [key]]\n return jacobian_L_Y\n\n def calculate_output_shape(self):\n width = math.floor((self.input_shape[1] - self.kernel_shape[2] +\n self.p_x_start + self.p_x_stop) / self.stride + 1)\n return self.kernel_shape[0], width\n\n def calculate_padding(self):\n s = self.stride\n f = self.kernel_shape[2]\n i = self.input_shape[1]\n if self.mode == 'full':\n p_x_start = f - 1\n p_x_stop = f - 1\n elif self.mode == 'same':\n p_x_start = math.floor((s * math.ceil(i / s) - i + f - s) / 2)\n p_x_stop = math.ceil((s * math.ceil(i / s) - i + f - s) / 2)\n else:\n p_x_start = 0\n p_x_stop = 0\n return p_x_start, p_x_stop\n\n def apply_zero_padding(self, input_feature_maps):\n padded_input_feature_maps = np.zeros((input_feature_maps.shape[0], \n input_feature_maps.shape[1] + self.p_x_start + self.p_x_stop))\n for channel in range(input_feature_maps.shape[0]):\n array = input_feature_maps[channel]\n padded_array = np.zeros(array.shape[0] + self.p_x_start + self.\n p_x_stop)\n padded_array[self.p_x_start:array.shape[0] + self.p_x_start\n ] = array\n padded_input_feature_maps[channel] = padded_array\n return padded_input_feature_maps\n\n def __str__(self):\n return 'Conv 1D Layer type with ' + str(self.kernel_shape[0]\n ) + ' kernels of shape = ' + str(self.kernel_shape[1:]\n ) + 'input/output of shape' + str(self.input_shape) + '/' + str(\n self.output_shape) + ' stride= ' + str(self.stride\n ) + ' mode= ' + str(self.mode\n ) + ' with activation = ' + self.activation_name\n\n\nclass softmax:\n\n def __init__(self, size):\n self.size = size\n self.shape = 1, size\n self.type = 'softmax'\n self.activation_function = activations.softmax\n\n def forward(self, input_data):\n return self.activation_function(self, input_data)\n\n def backward(self, jacobian_L_S, softmaxed_network_output):\n jacobian_soft = self.compute_j_soft(softmaxed_network_output)\n jacobian_L_Z = np.dot(jacobian_L_S, jacobian_soft)\n return jacobian_L_Z\n\n def compute_j_soft(self, S):\n S = np.squeeze(S)\n n = len(S)\n j_soft = np.zeros((n, n))\n for i in range(n):\n for j in range(n):\n if i == j:\n j_soft[i][j] = S[i] - S[i] ** 2\n else:\n j_soft[i][j] = -S[i] * S[j]\n return j_soft\n\n def __str__(self):\n return 'Softmax Layer of size = ' + str(self.size)\n",
"step-4": "<mask token>\n\n\nclass FC_layer:\n <mask token>\n <mask token>\n\n def backward(self, jacobian_L_Z):\n Y = self.input\n jacobian_Z_sum = self.create_jacobian_Z_sum()\n simp_jacobian_Z_W = np.outer(Y, jacobian_Z_sum.diagonal())\n jacobian_L_W = jacobian_L_Z * simp_jacobian_Z_W\n jacobian_Z_Y = np.dot(jacobian_Z_sum, self.weights.T)\n jacobian_L_Y = np.dot(jacobian_L_Z, jacobian_Z_Y)\n jacobian_L_B = jacobian_L_Z\n self.weights_grads = self.weights_grads + jacobian_L_W\n self.bias_grads = self.bias_grads + jacobian_L_B\n return jacobian_L_Y\n <mask token>\n <mask token>\n\n def __str__(self):\n return 'FC Layer type size = ' + str(self.weights.shape\n ) + ' with activation = ' + self.activation_name\n\n\nclass conv2D:\n\n def __init__(self, input_shape, n_kernels, kernel_shape, strides, modes,\n weight_init_range, activation, debug):\n self.type = 'conv2D'\n self.input_shape = input_shape\n self.activation_name = activation\n self.kernel_shape = n_kernels, input_shape[0], kernel_shape[0\n ], kernel_shape[1]\n self.activation = activations.get_activation_function(activation)\n self.d_activation = activations.get_activation_derivative(activation)\n self.strides = strides\n self.modes = modes\n self.weights = np.random.uniform(low=weight_init_range[0], high=\n weight_init_range[1], size=self.kernel_shape)\n self.weights_grads = np.zeros(self.weights.shape)\n self.p_x_start, self.p_x_stop, self.p_y_start, self.p_y_stop = (self\n .calculate_padding())\n self.output_shape = self.calculate_output_shape()\n self.cached_calculation = {}\n self.cache_weights_input_output_triplet_locations()\n self.cached_output = None\n self.debug = debug\n \"\"\"print(\"###########################\")\n a = np.random.randint(1,4,(6,6))\n print(a)\n padded_a = self.apply_zero_padding(a)\n print(padded_a)\n print(\"kernel shape\", (self.kernel_shape[2], self.kernel_shape[3]))\n print(\"input shape\", a.shape)\n print(\"padded shape\", padded_a.shape)\n print(\"###########################\")\"\"\"\n\n def cache_weights_input_output_triplet_locations(self):\n placeholder_input = np.zeros(self.input_shape)\n array = placeholder_input[0]\n kernel = self.weights[0][0]\n stride_x_pointer = 0\n while stride_x_pointer + kernel.shape[0] - 1 <= array.shape[0] - 1:\n stride_y_pointer = 0\n while stride_y_pointer + kernel.shape[1] - 1 <= array.shape[1] - 1:\n for row in range(kernel.shape[0]):\n for column in range(kernel.shape[1]):\n conv_output_coordinate = (stride_x_pointer // self.\n strides[0], stride_y_pointer // self.strides[1])\n self.cached_calculation[(row, column), (row +\n stride_x_pointer, column + stride_y_pointer)\n ] = conv_output_coordinate\n stride_y_pointer += self.strides[1]\n stride_x_pointer += self.strides[0]\n\n def forward(self, input_feature_maps):\n output = np.zeros(self.output_shape)\n input_feature_maps = self.apply_zero_padding(input_feature_maps)\n for i in range(0, self.kernel_shape[0]):\n kernel_stack = self.weights[i]\n for j in range(0, self.kernel_shape[1]):\n kernel = kernel_stack[j]\n array = input_feature_maps[j]\n stride_x_pointer = 0\n conv_counter = 1\n if self.debug:\n print('**** NEW CONVOLUTION ****')\n while stride_x_pointer + kernel.shape[0] - 1 <= array.shape[0\n ] - 1:\n stride_y_pointer = 0\n while stride_y_pointer + kernel.shape[1\n ] - 1 <= array.shape[1] - 1:\n array_snip = array[stride_x_pointer:\n stride_x_pointer + kernel.shape[0],\n stride_y_pointer:stride_y_pointer + kernel.shape[1]\n ]\n result = np.sum(np.multiply(array_snip, kernel))\n conv_output_coordinate 
= (i, stride_x_pointer //\n self.strides[0], stride_y_pointer // self.\n strides[1])\n output[conv_output_coordinate] += result\n \"\"\"#cache all the results, touched weights and input for each kernel (output or Coordinates??)\n for row in range(kernel.shape[0]):\n for column in range(kernel.shape[1]):\n # Cache coordinate only: (weight, input) --> output\n #format: key ((kernel_stack_number, 2D_kernel_number, weight_x_pos, weight_y_pos), (input_channel, input_x_pos, input_y_pos)) ---> (feature_map_number, output_x_pos, output_y_pos)\n self.cached_calculation[((i, j, row, column), (j, row + stride_x_pointer , column + stride_y_pointer))] = conv_output_coordinate\n #Cache weight coordinate and input/output values\n #ALTERNATIVE\n # format: key ((kernel_stack_number, 2D_kernel_number, weight_x_pos, weight_y_pos), input_val) ---> output_val\n #self.cached_calculation[((i, j, row, column), array_snip[row, column])] = result\"\"\"\n if self.debug:\n print('convolution nr ', conv_counter)\n print('\\narray_snip: \\n', array_snip)\n print('\\nkernel: \\n', kernel)\n print('\\nelementwise multiplication: \\n', np.\n multiply(array_snip, kernel))\n print('\\nresult: ', result)\n stride_y_pointer += self.strides[1]\n conv_counter += 1\n stride_x_pointer += self.strides[0]\n if self.debug:\n print('\\n----REVIEW----\\n')\n print('Total convolutions: ', conv_counter)\n print('\\ninput_feature_map:\\n ', array)\n print('\\napplied kernel:\\n ', kernel)\n print('\\nconvolution result:\\n ', output[i])\n print('***********************************')\n self.cached_output = output\n self.cached_input = input_feature_maps\n output = self.activation(self, output)\n return output\n\n def backward(self, jacobian_L_Z):\n jacobian_L_Z = jacobian_L_Z.reshape(self.output_shape)\n jacobian_L_Z = jacobian_L_Z * self.d_activation(self, self.\n cached_output)\n jacobian_L_W = self.compute_gradients(jacobian_L_Z)\n self.weights_grads += jacobian_L_W\n jacobian_L_Y = self.compute_J_LY(jacobian_L_Z)\n return jacobian_L_Y\n\n def update_gradients(self, learning_rate):\n self.weights -= learning_rate * self.weights_grads\n self.weights_grads = np.zeros(self.weights.shape)\n\n def compute_gradients(self, jacobian_L_Z):\n grads = np.zeros(self.weights.shape)\n for i in range(self.weights.shape[0]):\n for j in range(self.weights.shape[1]):\n for k in range(self.weights.shape[2]):\n for l in range(self.weights.shape[3]):\n for key in self.cached_calculation.keys():\n if key[0] == (k, l):\n grads[i, j, k, l] += self.cached_input[j][key\n [1]] * jacobian_L_Z[i][self.\n cached_calculation[key]]\n return grads\n\n def compute_J_LY(self, jacobian_L_Z):\n jacobian_L_Y = np.zeros(self.input_shape)\n for i in range(self.input_shape[0]):\n for j in range(self.input_shape[1]):\n for k in range(self.input_shape[2]):\n for key in self.cached_calculation.keys():\n if key[1] == (j, k):\n for l in range(self.weights.shape[0]):\n jacobian_L_Y[i, j, k] += self.weights[l][i][key\n [0]] * jacobian_L_Z[l][self.\n cached_calculation[key]]\n return jacobian_L_Y\n\n def calculate_output_shape(self):\n width = math.floor((self.input_shape[1] - self.kernel_shape[2] +\n self.p_x_start + self.p_x_stop) / self.strides[0] + 1)\n height = math.floor((self.input_shape[2] - self.kernel_shape[3] +\n self.p_y_start + self.p_y_stop) / self.strides[1] + 1)\n return self.kernel_shape[0], width, height\n\n def calculate_padding(self):\n s = self.strides[0]\n f = self.kernel_shape[2]\n i = self.input_shape[1]\n if self.modes[0] == 'full':\n p_x_start = f - 1\n 
p_x_stop = f - 1\n elif self.modes[0] == 'same':\n p_x_start = math.floor((s * math.ceil(i / s) - i + f - s) / 2)\n p_x_stop = math.ceil((s * math.ceil(i / s) - i + f - s) / 2)\n else:\n p_x_start = 0\n p_x_stop = 0\n s = self.strides[1]\n f = self.kernel_shape[3]\n i = self.input_shape[2]\n if self.modes[1] == 'full':\n p_y_start = f - 1\n p_y_stop = f - 1\n elif self.modes[1] == 'same':\n p_y_start = math.floor((s * math.ceil(i / s) - i + f - s) / 2)\n p_y_stop = math.ceil((s * math.ceil(i / s) - i + f - s) / 2)\n else:\n p_y_start = 0\n p_y_stop = 0\n return p_x_start, p_x_stop, p_y_start, p_y_stop\n\n def apply_zero_padding(self, input_feature_maps):\n padded_input_feature_maps = np.zeros((input_feature_maps.shape[0], \n input_feature_maps.shape[1] + self.p_x_start + self.p_x_stop, \n input_feature_maps.shape[2] + self.p_y_start + self.p_y_stop))\n for channel in range(input_feature_maps.shape[0]):\n array = input_feature_maps[channel]\n padded_array = np.zeros((array.shape[0] + self.p_x_start + self\n .p_x_stop, array.shape[1] + self.p_y_start + self.p_y_stop))\n padded_array[self.p_x_start:array.shape[0] + self.p_x_start,\n self.p_y_start:array.shape[1] + self.p_y_start] = array\n padded_input_feature_maps[channel] = padded_array\n return padded_input_feature_maps\n\n def __str__(self):\n return 'Conv 2D Layer type with ' + str(self.kernel_shape[0]\n ) + ' kernels of shape = ' + str(self.kernel_shape[1:]\n ) + 'input/output of shape' + str(self.input_shape) + '/' + str(\n self.output_shape) + ' strides= s' + str(self.strides\n ) + ' modes= ' + str(self.modes\n ) + ' with activation = ' + self.activation_name\n\n\nclass conv1D:\n\n def __init__(self, input_shape, n_kernels, kernel_shape, stride, mode,\n weight_init_range, activation, debug):\n self.type = 'conv1D'\n self.input_shape = input_shape\n self.activation_name = activation\n self.kernel_shape = n_kernels, input_shape[0], kernel_shape\n self.activation = activations.get_activation_function(activation)\n self.d_activation = activations.get_activation_derivative(activation)\n self.stride = stride\n self.mode = mode\n self.weights = np.random.uniform(low=weight_init_range[0], high=\n weight_init_range[1], size=self.kernel_shape)\n self.weights_grads = np.zeros(self.weights.shape)\n self.p_x_start, self.p_x_stop = self.calculate_padding()\n self.output_shape = self.calculate_output_shape()\n self.cached_calculation = {}\n self.cache_weights_input_output_triplet_locations()\n self.cached_output = None\n self.debug = debug\n\n def cache_weights_input_output_triplet_locations(self):\n placeholder_input = np.zeros(self.input_shape)\n array = placeholder_input[0]\n kernel = self.weights[0][0]\n stride_x_pointer = 0\n while stride_x_pointer + kernel.shape[0] - 1 <= array.shape[0] - 1:\n for column in range(kernel.shape[0]):\n conv_output_coordinate = stride_x_pointer // self.stride\n self.cached_calculation[column, column + stride_x_pointer\n ] = conv_output_coordinate\n stride_x_pointer += self.stride\n\n def forward(self, input_feature_maps):\n output = np.zeros(self.output_shape)\n input_feature_maps = self.apply_zero_padding(input_feature_maps)\n for i in range(0, self.kernel_shape[0]):\n kernel_stack = self.weights[i]\n for j in range(0, self.kernel_shape[1]):\n kernel = kernel_stack[j]\n array = input_feature_maps[j]\n stride_x_pointer = 0\n conv_counter = 1\n if self.debug:\n print('**** NEW CONVOLUTION ****')\n while stride_x_pointer + kernel.shape[0] - 1 <= array.shape[0\n ] - 1:\n array_snip = 
array[stride_x_pointer:stride_x_pointer +\n kernel.shape[0]]\n result = np.sum(np.multiply(array_snip, kernel))\n conv_output_coordinate = i, stride_x_pointer // self.stride\n output[conv_output_coordinate] += result\n if self.debug:\n print('convolution nr ', conv_counter)\n print('\\narray_snip: \\n', array_snip)\n print('\\nkernel: \\n', kernel)\n print('\\nelementwise multiplication: \\n', np.\n multiply(array_snip, kernel))\n print('\\nresult: ', result)\n conv_counter += 1\n stride_x_pointer += self.stride\n if self.debug:\n print('\\n----REVIEW----\\n')\n print('Total convolutions: ', conv_counter)\n print('\\ninput_feature_map:\\n ', array)\n print('\\napplied kernel:\\n ', kernel)\n print('\\nconvolution result:\\n ', output[i])\n print('***********************************')\n self.cached_output = output\n self.cached_input = input_feature_maps\n output = self.activation(self, output)\n return output\n\n def backward(self, jacobian_L_Z):\n jacobian_L_Z = jacobian_L_Z.reshape(self.output_shape)\n jacobian_L_Z = jacobian_L_Z * self.d_activation(self, self.\n cached_output)\n jacobian_L_W = self.compute_gradients(jacobian_L_Z)\n self.weights_grads += jacobian_L_W\n jacobian_L_Y = self.compute_J_LY(jacobian_L_Z)\n return jacobian_L_Y\n\n def update_gradients(self, learning_rate):\n self.weights -= learning_rate * self.weights_grads\n self.weights_grads = np.zeros(self.weights.shape)\n\n def compute_gradients(self, jacobian_L_Z):\n grads = np.zeros(self.weights.shape)\n for i in range(self.weights.shape[0]):\n for j in range(self.weights.shape[1]):\n for k in range(self.weights.shape[2]):\n for key in self.cached_calculation.keys():\n if key[0] == k:\n grads[i, j, k] += self.cached_input[j][key[1]\n ] * jacobian_L_Z[i][self.cached_calculation\n [key]]\n return grads\n\n def compute_J_LY(self, jacobian_L_Z):\n jacobian_L_Y = np.zeros(self.input_shape)\n for i in range(self.input_shape[0]):\n for j in range(self.input_shape[1]):\n for key in self.cached_calculation.keys():\n if key[1] == j:\n for l in range(self.weights.shape[0]):\n jacobian_L_Y[i, j] += self.weights[l][i][key[0]\n ] * jacobian_L_Z[l][self.cached_calculation\n [key]]\n return jacobian_L_Y\n\n def calculate_output_shape(self):\n width = math.floor((self.input_shape[1] - self.kernel_shape[2] +\n self.p_x_start + self.p_x_stop) / self.stride + 1)\n return self.kernel_shape[0], width\n\n def calculate_padding(self):\n s = self.stride\n f = self.kernel_shape[2]\n i = self.input_shape[1]\n if self.mode == 'full':\n p_x_start = f - 1\n p_x_stop = f - 1\n elif self.mode == 'same':\n p_x_start = math.floor((s * math.ceil(i / s) - i + f - s) / 2)\n p_x_stop = math.ceil((s * math.ceil(i / s) - i + f - s) / 2)\n else:\n p_x_start = 0\n p_x_stop = 0\n return p_x_start, p_x_stop\n\n def apply_zero_padding(self, input_feature_maps):\n padded_input_feature_maps = np.zeros((input_feature_maps.shape[0], \n input_feature_maps.shape[1] + self.p_x_start + self.p_x_stop))\n for channel in range(input_feature_maps.shape[0]):\n array = input_feature_maps[channel]\n padded_array = np.zeros(array.shape[0] + self.p_x_start + self.\n p_x_stop)\n padded_array[self.p_x_start:array.shape[0] + self.p_x_start\n ] = array\n padded_input_feature_maps[channel] = padded_array\n return padded_input_feature_maps\n\n def __str__(self):\n return 'Conv 1D Layer type with ' + str(self.kernel_shape[0]\n ) + ' kernels of shape = ' + str(self.kernel_shape[1:]\n ) + 'input/output of shape' + str(self.input_shape) + '/' + str(\n self.output_shape) + ' stride= ' + 
str(self.stride\n ) + ' mode= ' + str(self.mode\n ) + ' with activation = ' + self.activation_name\n\n\nclass softmax:\n\n def __init__(self, size):\n self.size = size\n self.shape = 1, size\n self.type = 'softmax'\n self.activation_function = activations.softmax\n\n def forward(self, input_data):\n return self.activation_function(self, input_data)\n\n def backward(self, jacobian_L_S, softmaxed_network_output):\n jacobian_soft = self.compute_j_soft(softmaxed_network_output)\n jacobian_L_Z = np.dot(jacobian_L_S, jacobian_soft)\n return jacobian_L_Z\n\n def compute_j_soft(self, S):\n S = np.squeeze(S)\n n = len(S)\n j_soft = np.zeros((n, n))\n for i in range(n):\n for j in range(n):\n if i == j:\n j_soft[i][j] = S[i] - S[i] ** 2\n else:\n j_soft[i][j] = -S[i] * S[j]\n return j_soft\n\n def __str__(self):\n return 'Softmax Layer of size = ' + str(self.size)\n",
"step-5": "import numpy as np\nimport math\nimport activations\n\nclass FC_layer():\n def __init__(self, input_size, output_size, weight_init_range, activation, debug):\n self.type = \"FC\"\n self.activation_name = activation\n self.shape = (input_size, output_size)\n self.activation = activations.get_activation_function(activation)\n self.d_activation = activations.get_activation_derivative(activation)\n self.input = None\n self.output = None\n self.weights = np.random.uniform(low=weight_init_range[0], high= weight_init_range[1], size=(input_size, output_size))\n self.bias = np.random.rand(1,output_size)\n self.weights_grads = np.zeros(self.weights.shape)\n self.bias_grads = np.zeros(self.bias.shape)\n self.debug = debug\n\n def forward(self, input_activations):\n # Dot product of input with W plus bias. Cache, activate and return\n output = np.dot(input_activations, self.weights) + self.bias\n # Cache the weighted outputs and inputs\n #self.output = output\n self.input = input_activations\n # Pass the output throug the activation function\n output = self.activation(self, output)\n self.output = output\n return output\n \n def backward(self, jacobian_L_Z):\n # Get the jacobian linking the loss with respect of this layer output from the previous layer.\n # PURPOSE: Calculate the weights gradients, the bias gradient and the input_loss\n # that will be passed to the previous activation layer and so on, up to layer previous input\n Y = self.input\n # Create the jacobian J_Z_sum with the layer cached outputs and the derivative of activation function\n jacobian_Z_sum = self.create_jacobian_Z_sum()\n\n # Find the Weights gradients jacobian_L_W\n # Compute the simple jacobian linking the outputs and the weights\n simp_jacobian_Z_W = np.outer(Y, jacobian_Z_sum.diagonal())\n # Then compute the jacobian linking the loss to the weights\n jacobian_L_W = jacobian_L_Z * simp_jacobian_Z_W\n\n # Calculate the input layer loss jacobian_L_Y\n # by doing dot product of output layer loss and the weigths matrix transposed (so to invert M N to N M, where M < N, we go the other way around)\n jacobian_Z_Y = np.dot(jacobian_Z_sum ,self.weights.T)\n jacobian_L_Y = np.dot( jacobian_L_Z, jacobian_Z_Y)\n \n\n # Bias loss is the as the output loss --> the bias influence on the loss == layer activation output influence on the loss\n jacobian_L_B = jacobian_L_Z\n\n # Now save the bias loss and weight loss (representing the calculated gradiants).\n # This will be updated at the end of the batch, or SGD\n self.weights_grads =self.weights_grads + jacobian_L_W\n self.bias_grads = self.bias_grads + jacobian_L_B\n \n #Finally return the calculated input loss --> this will be the output loss of the next layer\n return jacobian_L_Y\n\n def create_jacobian_Z_sum(self):\n return np.identity(self.output[0].size) * self.d_activation(self, self.output)\n\n def update_gradients(self, learning_rate, gradient_avg_factor = 1):\n #Update gradients, usefull when doing batch learning\n # Get the avg of the gradients (for SGD divide by 1, else divide by batchsize)\n ## UPDATE: removed the division by batchsize: Implemented this factor in the learning rate\n #self.weights_grads = self.weights_grads / gradient_avg_factor\n #self.bias_grads = self.bias_grads / gradient_avg_factor\n\n # Update weights and biases\n self.weights -= learning_rate * self.weights_grads\n self.bias -= learning_rate * self.bias_grads\n self.weights_grads = np.zeros(self.weights.shape)\n self.bias_grads = np.zeros(self.bias.shape)\n\n\n def __str__(self):\n return \"FC 
Layer type size = \" + str(self.weights.shape) + \" with activation = \" + self.activation_name\n\nclass conv2D():\n def __init__(self, input_shape, n_kernels, kernel_shape, strides, modes, weight_init_range, activation, debug):\n self.type = \"conv2D\"\n self.input_shape = input_shape\n self.activation_name = activation\n #Kernel stack shape for the layer (N, I, K_x, K_y)\n self.kernel_shape = (n_kernels, input_shape[0], kernel_shape[0], kernel_shape[1])\n self.activation = activations.get_activation_function(activation)\n self.d_activation = activations.get_activation_derivative(activation)\n self.strides = strides\n self.modes = modes\n self.weights = np.random.uniform(low=weight_init_range[0], high= weight_init_range[1], size= self.kernel_shape)\n self.weights_grads = np.zeros(self.weights.shape)\n self.p_x_start, self.p_x_stop, self.p_y_start, self.p_y_stop = self.calculate_padding()\n self.output_shape = self.calculate_output_shape()\n self.cached_calculation = {}\n self.cache_weights_input_output_triplet_locations()\n self.cached_output = None\n self.debug = debug\n\n \n \n '''print(\"###########################\")\n a = np.random.randint(1,4,(6,6))\n print(a)\n padded_a = self.apply_zero_padding(a)\n print(padded_a)\n print(\"kernel shape\", (self.kernel_shape[2], self.kernel_shape[3]))\n print(\"input shape\", a.shape)\n print(\"padded shape\", padded_a.shape)\n print(\"###########################\")'''\n\n def cache_weights_input_output_triplet_locations(self):\n placeholder_input = np.zeros(self.input_shape)\n array = placeholder_input[0]\n kernel = self.weights[0][0]\n stride_x_pointer = 0\n while(stride_x_pointer + kernel.shape[0] - 1 <= array.shape[0] - 1):\n stride_y_pointer = 0\n #while the kernel does not go over the x-akse of the array\n while(stride_y_pointer + kernel.shape[1] -1 <= array.shape[1] - 1):\n #while the kernel does not go over the x-akse of the array\n #cache all touched weights and input for each kernel (output or Coordinates??)\n for row in range(kernel.shape[0]):\n for column in range(kernel.shape[1]):\n # Cache coordinate only: (weight, input) --> output\n #format: key ((weight_x_pos, weight_y_pos), (input_x_pos, input_y_pos)) ---> (output_x_pos, output_y_pos)\n conv_output_coordinate = (stride_x_pointer // self.strides[0], stride_y_pointer // self.strides[1])\n self.cached_calculation[((row, column), (row + stride_x_pointer , column + stride_y_pointer))] = conv_output_coordinate\n #Cache weight coordinate and input/output values\n # Update the stride long the y-axis\n stride_y_pointer += self.strides[1]\n #update the stride long the x-axis\n stride_x_pointer += self.strides[0]\n #End of convolution\n \n\n def forward(self, input_feature_maps):\n #reset the cached calculations from the previous forward pass\n #self.cached_calculation = {}\n output = np.zeros(self.output_shape)\n #Apply padding\n input_feature_maps = self.apply_zero_padding(input_feature_maps)\n for i in range(0, self.kernel_shape[0]):\n #for each kernel stack\n kernel_stack = self.weights[i]\n for j in range(0, self.kernel_shape[1]):\n #for each kernel in the kernel stack (or input channel)\n kernel = kernel_stack[j]\n array = input_feature_maps[j]\n stride_x_pointer = 0\n conv_counter = 1\n if self.debug:\n print(\"**** NEW CONVOLUTION ****\")\n while(stride_x_pointer + kernel.shape[0] - 1 <= array.shape[0] - 1):\n stride_y_pointer = 0\n #while the kernel does not go over the x-akse of the array\n while(stride_y_pointer + kernel.shape[1] -1 <= array.shape[1] - 1):\n #while the kernel 
does not go over the x-akse of the array\n #Get the snip of the array to apply convolution on\n array_snip = array[stride_x_pointer: stride_x_pointer + kernel.shape[0], stride_y_pointer: stride_y_pointer + kernel.shape[1]]\n #apply convolution and get the result \n result = np.sum(np.multiply(array_snip, kernel)) \n #update the output tensor\n conv_output_coordinate = (i, stride_x_pointer // self.strides[0], stride_y_pointer // self.strides[1])\n output[conv_output_coordinate] += result\n '''#cache all the results, touched weights and input for each kernel (output or Coordinates??)\n for row in range(kernel.shape[0]):\n for column in range(kernel.shape[1]):\n # Cache coordinate only: (weight, input) --> output\n #format: key ((kernel_stack_number, 2D_kernel_number, weight_x_pos, weight_y_pos), (input_channel, input_x_pos, input_y_pos)) ---> (feature_map_number, output_x_pos, output_y_pos)\n self.cached_calculation[((i, j, row, column), (j, row + stride_x_pointer , column + stride_y_pointer))] = conv_output_coordinate\n #Cache weight coordinate and input/output values\n #ALTERNATIVE\n # format: key ((kernel_stack_number, 2D_kernel_number, weight_x_pos, weight_y_pos), input_val) ---> output_val\n #self.cached_calculation[((i, j, row, column), array_snip[row, column])] = result'''\n if self.debug:\n print(\"convolution nr \", conv_counter )\n print(\"\\narray_snip: \\n\", array_snip)\n print(\"\\nkernel: \\n\", kernel)\n print(\"\\nelementwise multiplication: \\n\", np.multiply(array_snip, kernel))\n print(\"\\nresult: \", result)\n # Update the stride long the y-axis\n stride_y_pointer += self.strides[1]\n conv_counter+=1\n #update the stride long the x-axis\n stride_x_pointer += self.strides[0]\n #End of convolution\n if self.debug:\n print(\"\\n----REVIEW----\\n\")\n print(\"Total convolutions: \", conv_counter)\n print(\"\\ninput_feature_map:\\n \", array)\n print(\"\\napplied kernel:\\n \", kernel)\n print(\"\\nconvolution result:\\n \", output[i])\n print(\"***********************************\")\n #Cache input and output\n self.cached_output = output\n self.cached_input = input_feature_maps\n #Apply activation\n output = self.activation(self, output)\n return output\n \n \n def backward(self, jacobian_L_Z):\n #Reshape J_LZ from FC to Conv2D and pass through activation layer\n jacobian_L_Z = jacobian_L_Z.reshape(self.output_shape)\n #print(\"JLZ før relu\\n\", jacobian_L_Z)\n #jacobian_L_Z = self.d_activation(self, jacobian_L_Z)\n #print(\"cached out after activation\\n\", self.cached_output)\n jacobian_L_Z = jacobian_L_Z * self.d_activation(self, self.cached_output)\n #print(\"JLZ etter relu\\n\", jacobian_L_Z)\n # J_L_Z * f'(cached_output)\n\n #Calculate J_LW\n jacobian_L_W = self.compute_gradients(jacobian_L_Z)\n self.weights_grads += jacobian_L_W\n\n #Calculate J_LX\n jacobian_L_Y = self.compute_J_LY(jacobian_L_Z)\n\n #Pass Jacobian L Y upstream\n return jacobian_L_Y\n \n def update_gradients(self, learning_rate):\n self.weights -= learning_rate * self.weights_grads\n self.weights_grads = np.zeros(self.weights.shape)\n\n def compute_gradients(self, jacobian_L_Z):\n grads = np.zeros(self.weights.shape)\n #Iterate through all the weights (4 dimension)\n #Iterate through the kernel stacks\n for i in range(self.weights.shape[0]):\n #Iterate throught each kernel/input channel\n for j in range(self.weights.shape[1]):\n #iterate through the x-axis of the kernel\n for k in range(self.weights.shape[2]):\n #iterate through the y-axis of the kernel\n for l in range(self.weights.shape[3]):\n 
#cached_data = {k: v for k,v in self.cached_calculation.items() if k[0] == (i,j,k,l)}\n for key in self.cached_calculation.keys():\n if key[0] == (k,l):\n grads[(i,j,k,l)] += self.cached_input[j][key[1]] * jacobian_L_Z[i][self.cached_calculation[key]]\n return grads\n\n def compute_J_LY(self, jacobian_L_Z):\n jacobian_L_Y = np.zeros(self.input_shape)\n #Iterate through all the inputs (3 dimension)\n #iterate through all channels/kernel of a kernel stack\n for i in range(self.input_shape[0]):\n #iterate through x-akses of 2d input\n for j in range(self.input_shape[1]):\n #iterate through y-axes of 2d input\n for k in range(self.input_shape[2]):\n #cached_data = {k: v for k,v in self.cached_calculation.items() if k[0] == (i,j,k,l)}\n for key in self.cached_calculation.keys():\n if key[1] == (j,k):\n #for each kernel-stack\n for l in range(self.weights.shape[0]):\n jacobian_L_Y[(i,j,k)] += self.weights[l][i][key[0]] * jacobian_L_Z[l][self.cached_calculation[key]]\n return jacobian_L_Y\n \n def calculate_output_shape(self):\n width = math.floor((self.input_shape[1] - self.kernel_shape[2] + self.p_x_start + self.p_x_stop)/self.strides[0] + 1)\n height = math.floor((self.input_shape[2] - self.kernel_shape[3] + self.p_y_start + self.p_y_stop)/self.strides[1] + 1 )\n return (self.kernel_shape[0], width, height)\n\n def calculate_padding(self):\n #Calculate padding long the x axis\n s = self.strides[0]\n f = self.kernel_shape[2]\n i = self.input_shape[1]\n if self.modes[0] == \"full\":\n #Every pixel must experience every weight of the kernel\n p_x_start = f - 1\n p_x_stop = f - 1\n elif self.modes[0] == \"same\":\n #Every pixel must experience the middle weight of the kernel\n p_x_start = math.floor((s*math.ceil(i/s)-i+f-s)/2)\n p_x_stop = math.ceil((s*math.ceil(i/s)-i+f-s)/2)\n else:\n p_x_start = 0\n p_x_stop = 0\n\n\n #Calculate padding long y axis\n s = self.strides[1]\n f = self.kernel_shape[3]\n i = self.input_shape[2]\n if self.modes[1] == \"full\":\n #Every pixel must experience every weight of the kernel\n p_y_start = f - 1\n p_y_stop = f - 1\n elif self.modes[1] == \"same\":\n #Every pixel must experience the middle weight of the kernel\n p_y_start = math.floor((s*math.ceil(i/s)-i+f-s)/2)\n p_y_stop = math.ceil((s*math.ceil(i/s)-i+f-s)/2)\n else:\n p_y_start = 0\n p_y_stop = 0\n\n\n return p_x_start, p_x_stop, p_y_start, p_y_stop\n \n def apply_zero_padding(self, input_feature_maps):\n # Apply zero padding to the input feature maps according to the modes, strides and kernel size\n padded_input_feature_maps = np.zeros((input_feature_maps.shape[0], input_feature_maps.shape[1] + self.p_x_start + self.p_x_stop, input_feature_maps.shape[2] + self.p_y_start + self.p_y_stop ))\n for channel in range(input_feature_maps.shape[0]):\n array = input_feature_maps[channel]\n #Create the background zero array\n padded_array = np.zeros((array.shape[0] + self.p_x_start + self.p_x_stop, array.shape[1] + self.p_y_start + self.p_y_stop))\n #Copy the array in the middle of the zero background\n padded_array[self.p_x_start:array.shape[0]+ self.p_x_start, self.p_y_start:array.shape[1]+ self.p_y_start] = array \n #Save the array\n padded_input_feature_maps[channel] = padded_array\n return padded_input_feature_maps\n\n def __str__(self):\n return \"Conv 2D Layer type with \"+ str(self.kernel_shape[0]) +\" kernels of shape = \" + str(self.kernel_shape[1:]) +\"input/output of shape\" + str(self.input_shape)+\"/\" + str(self.output_shape) + \" strides= s\" + str(self.strides) + \" modes= \" + str(self.modes) +\" 
with activation = \" + self.activation_name\n\nclass conv1D():\n def __init__(self, input_shape, n_kernels, kernel_shape, stride, mode, weight_init_range, activation, debug):\n self.type = \"conv1D\"\n self.input_shape = input_shape\n self.activation_name = activation\n #Kernel stack shape for the layer (Num_kernel_stacks, Channels, Kernel_x)'\n self.kernel_shape = (n_kernels, input_shape[0], kernel_shape)\n self.activation = activations.get_activation_function(activation)\n self.d_activation = activations.get_activation_derivative(activation)\n self.stride = stride\n self.mode = mode\n self.weights = np.random.uniform(low=weight_init_range[0], high= weight_init_range[1], size= self.kernel_shape)\n self.weights_grads = np.zeros(self.weights.shape)\n self.p_x_start, self.p_x_stop = self.calculate_padding()\n self.output_shape = self.calculate_output_shape()\n self.cached_calculation = {}\n self.cache_weights_input_output_triplet_locations()\n self.cached_output = None\n self.debug = debug\n\n def cache_weights_input_output_triplet_locations(self):\n #Performe an empty convolution and cache all the position of the kernel, input and output triplet\n placeholder_input = np.zeros(self.input_shape)\n array = placeholder_input[0]\n kernel = self.weights[0][0]\n stride_x_pointer = 0\n while(stride_x_pointer + kernel.shape[0] - 1 <= array.shape[0] - 1):\n #while the kernel does not go over the x-akse of the array\n #cache all touched weights and input for each kernel\n for column in range(kernel.shape[0]):\n # Cache coordinate only: (weight, input) --> output\n #format: key ((weight_x_pos), (input_x_pos)) ---> (output_x_pos)\n conv_output_coordinate = (stride_x_pointer // self.stride)\n self.cached_calculation[(column, column + stride_x_pointer)] = conv_output_coordinate\n #Cache weight coordinate and input/output values\n #update the stride long the x-axis\n stride_x_pointer += self.stride\n #End of convolution\n \n\n def forward(self, input_feature_maps):\n output = np.zeros(self.output_shape)\n #Apply padding\n input_feature_maps = self.apply_zero_padding(input_feature_maps)\n for i in range(0, self.kernel_shape[0]):\n #for each kernel stack\n kernel_stack = self.weights[i]\n for j in range(0, self.kernel_shape[1]):\n #for each kernel in the kernel stack (or input channel)\n kernel = kernel_stack[j]\n array = input_feature_maps[j]\n stride_x_pointer = 0\n conv_counter = 1\n if self.debug:\n print(\"**** NEW CONVOLUTION ****\")\n while(stride_x_pointer + kernel.shape[0] - 1 <= array.shape[0] - 1):\n #while the kernel does not go over the x-akse of the array\n #Get the snip of the array to apply convolution on\n array_snip = array[stride_x_pointer: stride_x_pointer + kernel.shape[0]]\n #apply convolution and get the result \n result = np.sum(np.multiply(array_snip, kernel)) \n #update the output tensor\n conv_output_coordinate = (i, stride_x_pointer // self.stride)\n output[conv_output_coordinate] += result\n if self.debug:\n print(\"convolution nr \", conv_counter )\n print(\"\\narray_snip: \\n\", array_snip)\n print(\"\\nkernel: \\n\", kernel)\n print(\"\\nelementwise multiplication: \\n\", np.multiply(array_snip, kernel))\n print(\"\\nresult: \", result)\n conv_counter+=1\n #update the stride long the x-axis\n stride_x_pointer += self.stride\n #End of convolution\n if self.debug:\n print(\"\\n----REVIEW----\\n\")\n print(\"Total convolutions: \", conv_counter)\n print(\"\\ninput_feature_map:\\n \", array)\n print(\"\\napplied kernel:\\n \", kernel)\n print(\"\\nconvolution result:\\n \", 
output[i])\n print(\"***********************************\")\n #Cache input and output\n self.cached_output = output\n self.cached_input = input_feature_maps\n #Apply activation\n output = self.activation(self, output)\n return output\n \n \n def backward(self, jacobian_L_Z):\n #Reshape J_LZ from FC to Conv2D and pass through activation layer\n jacobian_L_Z = jacobian_L_Z.reshape(self.output_shape)\n #print(\"JLZ før relu\\n\", jacobian_L_Z)\n #jacobian_L_Z = self.d_activation(self, jacobian_L_Z)\n #print(\"cached out after activation\\n\", self.cached_output)\n jacobian_L_Z = jacobian_L_Z * self.d_activation(self, self.cached_output)\n #print(\"JLZ etter relu\\n\", jacobian_L_Z)\n # J_L_Z * f'(cached_output)\n\n #Calculate J_LW\n jacobian_L_W = self.compute_gradients(jacobian_L_Z)\n self.weights_grads += jacobian_L_W\n\n #Calculate J_LX\n jacobian_L_Y = self.compute_J_LY(jacobian_L_Z)\n\n #Pass Jacobian L Y upstream\n return jacobian_L_Y\n \n def update_gradients(self, learning_rate):\n self.weights -= learning_rate * self.weights_grads\n self.weights_grads = np.zeros(self.weights.shape)\n\n def compute_gradients(self, jacobian_L_Z):\n grads = np.zeros(self.weights.shape)\n #Iterate through all the weights (3 dimension)\n for i in range(self.weights.shape[0]):\n for j in range(self.weights.shape[1]):\n for k in range(self.weights.shape[2]):\n for key in self.cached_calculation.keys():\n if key[0] == k:\n grads[(i,j,k)] += self.cached_input[j][key[1]] * jacobian_L_Z[i][self.cached_calculation[key]]\n return grads\n\n def compute_J_LY(self, jacobian_L_Z):\n jacobian_L_Y = np.zeros(self.input_shape)\n #Iterate through all the inputs (3 dimension)\n #iterate through all channels/kernel of a kernel stack\n for i in range(self.input_shape[0]):\n #iterate through x-akses of 1d input\n for j in range(self.input_shape[1]):\n for key in self.cached_calculation.keys():\n if key[1] == j:\n #for each kernel-stack\n for l in range(self.weights.shape[0]):\n jacobian_L_Y[(i,j)] += self.weights[l][i][key[0]] * jacobian_L_Z[l][self.cached_calculation[key]]\n return jacobian_L_Y\n\n def calculate_output_shape(self):\n width = math.floor((self.input_shape[1] - self.kernel_shape[2] + self.p_x_start + self.p_x_stop)/self.stride + 1)\n return (self.kernel_shape[0], width)\n\n def calculate_padding(self):\n #Calculate padding long the x axis\n s = self.stride\n f = self.kernel_shape[2]\n i = self.input_shape[1]\n if self.mode == \"full\":\n #Every pixel must experience every weight of the kernel\n p_x_start = f - 1\n p_x_stop = f - 1\n elif self.mode == \"same\":\n\n #Every pixel must experience the middle weight of the kernel\n p_x_start = math.floor((s*math.ceil(i/s)-i+f-s)/2)\n p_x_stop = math.ceil((s*math.ceil(i/s)-i+f-s)/2)\n else:\n p_x_start = 0\n p_x_stop = 0\n return p_x_start, p_x_stop\n \n def apply_zero_padding(self, input_feature_maps):\n # Apply zero padding to the input feature maps according to the modes, strides and kernel size\n #if self.p_x_start == 0 and self.p_x_stop == 0:\n # return input_feature_maps\n padded_input_feature_maps = np.zeros((input_feature_maps.shape[0], input_feature_maps.shape[1] + self.p_x_start + self.p_x_stop))\n for channel in range(input_feature_maps.shape[0]):\n array = input_feature_maps[channel]\n #Create the background zero array\n padded_array = np.zeros((array.shape[0] + self.p_x_start + self.p_x_stop))\n #Copy the array in the middle of the zero background\n padded_array[self.p_x_start:array.shape[0]+ self.p_x_start] = array \n #Save the array\n 
padded_input_feature_maps[channel] = padded_array\n return padded_input_feature_maps\n\n def __str__(self):\n return \"Conv 1D Layer type with \"+ str(self.kernel_shape[0]) +\" kernels of shape = \" + str(self.kernel_shape[1:]) +\"input/output of shape\" + str(self.input_shape)+\"/\" + str(self.output_shape) + \" stride= \" + str(self.stride) + \" mode= \" + str(self.mode) +\" with activation = \" + self.activation_name\n\nclass softmax():\n def __init__(self, size):\n self.size = size\n self.shape = (1, size)\n self.type = \"softmax\"\n self.activation_function = activations.softmax\n\n def forward(self, input_data):\n return self.activation_function(self, input_data)\n\n def backward(self, jacobian_L_S, softmaxed_network_output):\n # Create jacobian of derivate of softmax\n jacobian_soft = self.compute_j_soft(softmaxed_network_output) \n # Compute jacobian linking Loss to output \n jacobian_L_Z = np.dot(jacobian_L_S, jacobian_soft)\n return jacobian_L_Z\n\n def compute_j_soft(self, S):\n S = np.squeeze(S)\n n = len(S)\n j_soft = np.zeros((n,n))\n for i in range(n):\n for j in range(n):\n if i == j:\n j_soft[i][j] = S[i] - S[i]**2\n else:\n j_soft[i][j] = -S[i]*S[j]\n return j_soft\n\n def __str__(self):\n return \"Softmax Layer of size = \" + str(self.size)\n\n",
"step-ids": [
24,
25,
28,
33,
39
]
}
|
[
24,
25,
28,
33,
39
] |
import random
import time
import unittest
from old import dict_groupby
class TestDictGroupBy(unittest.TestCase):
    def setUp(self):
        random.seed(0)
        self.sut = dict_groupby

    def generate_transaction(self):
        return {
            'transaction_type': random.choice(['a', 'b', 'c']),
            'outstanding': random.randint(0, 100)
        }

    def generate_facility(self):
        num_transactions = random.randint(1, 3)
        transactions = {}
        outstanding = 0
        for i in range(num_transactions):
            transactions[i] = self.generate_transaction()
            outstanding += transactions[i]['outstanding']

        return {
            'facility_type': random.choice(['a', 'b', 'c']),
            'outstanding': outstanding,
            'transactions': transactions
        }

    def generate_facilities(self, num):
        out = {}
        for i in range(num):
            out[i] = self.generate_facility()
        return out

    def generate_record(self):
        return {
            'gcol1': random.choice(['a', 'b', 'c']), 'gcol2': random.choice(['a', 'b', 'c']),
            'gcol3': random.choice(['a', 'b', 'c']), 'vcol1': random.randint(0, 100), 'vcol2': random.random(),
            'vcol3': random.randint(0, 2)
        }

    def test_hierarchical_groupby(self):
        input_set = self.generate_facilities(4)
        group_columns = ['facility_type', {'transactions': 'transaction_type'}]
        print(input_set)
        self.sut.DictGroupBy(input_set, group_columns)

    def test_groupby_and_sum_speed(self):
        data = {}
        for i in range(100000):
            data[i] = self.generate_record()
        print('Generated data.')
        group_columns = ['gcol1', 'gcol2', 'gcol3']

        t0 = time.time()
        gb = dict_groupby.GroupByObj(data, group_columns)
        t1 = time.time()
        out = gb.sum()
        tf = time.time()
        # print(out)
        print(t1 - t0, tf - t1, tf - t0)

    # df = pd.DataFrame(data).T
    # t0 = time.time()
    # df.groupby(group_columns).sum()
    # tf = time.time()
    # # print(out)
    # print(tf - t0)
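
# Editor's sketch (not in the original file): a standard entry point, assuming the
# tests are meant to be runnable directly with `python <this file>`.
if __name__ == '__main__':
    unittest.main()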
|
normal
|
{
"blob_id": "f8e6f6e1be6c4ea306b7770c918b97808a0765b2",
"index": 6580,
"step-1": "<mask token>\n\n\nclass TestDictGroupBy(unittest.TestCase):\n\n def setUp(self):\n random.seed(0)\n self.sut = dict_groupby\n <mask token>\n\n def generate_facility(self):\n num_transactions = random.randint(1, 3)\n transactions = {}\n outstanding = 0\n for i in range(num_transactions):\n transactions[i] = self.generate_transaction()\n outstanding += transactions[i]['outstanding']\n return {'facility_type': random.choice(['a', 'b', 'c']),\n 'outstanding': outstanding, 'transactions': transactions}\n <mask token>\n\n def generate_record(self):\n return {'gcol1': random.choice(['a', 'b', 'c']), 'gcol2': random.\n choice(['a', 'b', 'c']), 'gcol3': random.choice(['a', 'b', 'c']\n ), 'vcol1': random.randint(0, 100), 'vcol2': random.random(),\n 'vcol3': random.randint(0, 2)}\n <mask token>\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass TestDictGroupBy(unittest.TestCase):\n\n def setUp(self):\n random.seed(0)\n self.sut = dict_groupby\n <mask token>\n\n def generate_facility(self):\n num_transactions = random.randint(1, 3)\n transactions = {}\n outstanding = 0\n for i in range(num_transactions):\n transactions[i] = self.generate_transaction()\n outstanding += transactions[i]['outstanding']\n return {'facility_type': random.choice(['a', 'b', 'c']),\n 'outstanding': outstanding, 'transactions': transactions}\n\n def generate_facilities(self, num):\n out = {}\n for i in range(num):\n out[i] = self.generate_facility()\n return out\n\n def generate_record(self):\n return {'gcol1': random.choice(['a', 'b', 'c']), 'gcol2': random.\n choice(['a', 'b', 'c']), 'gcol3': random.choice(['a', 'b', 'c']\n ), 'vcol1': random.randint(0, 100), 'vcol2': random.random(),\n 'vcol3': random.randint(0, 2)}\n\n def test_hierarchical_groupby(self):\n input_set = self.generate_facilities(4)\n group_columns = ['facility_type', {'transactions': 'transaction_type'}]\n print(input_set)\n self.sut.DictGroupBy(input_set, group_columns)\n\n def test_groupby_and_sum_speed(self):\n data = {}\n for i in range(100000):\n data[i] = self.generate_record()\n print('Generated data.')\n group_columns = ['gcol1', 'gcol2', 'gcol3']\n t0 = time.time()\n gb = dict_groupby.GroupByObj(data, group_columns)\n t1 = time.time()\n out = gb.sum()\n tf = time.time()\n print(t1 - t0, tf - t1, tf - t0)\n",
"step-3": "<mask token>\n\n\nclass TestDictGroupBy(unittest.TestCase):\n\n def setUp(self):\n random.seed(0)\n self.sut = dict_groupby\n\n def generate_transaction(self):\n return {'transaction_type': random.choice(['a', 'b', 'c']),\n 'outstanding': random.randint(0, 100)}\n\n def generate_facility(self):\n num_transactions = random.randint(1, 3)\n transactions = {}\n outstanding = 0\n for i in range(num_transactions):\n transactions[i] = self.generate_transaction()\n outstanding += transactions[i]['outstanding']\n return {'facility_type': random.choice(['a', 'b', 'c']),\n 'outstanding': outstanding, 'transactions': transactions}\n\n def generate_facilities(self, num):\n out = {}\n for i in range(num):\n out[i] = self.generate_facility()\n return out\n\n def generate_record(self):\n return {'gcol1': random.choice(['a', 'b', 'c']), 'gcol2': random.\n choice(['a', 'b', 'c']), 'gcol3': random.choice(['a', 'b', 'c']\n ), 'vcol1': random.randint(0, 100), 'vcol2': random.random(),\n 'vcol3': random.randint(0, 2)}\n\n def test_hierarchical_groupby(self):\n input_set = self.generate_facilities(4)\n group_columns = ['facility_type', {'transactions': 'transaction_type'}]\n print(input_set)\n self.sut.DictGroupBy(input_set, group_columns)\n\n def test_groupby_and_sum_speed(self):\n data = {}\n for i in range(100000):\n data[i] = self.generate_record()\n print('Generated data.')\n group_columns = ['gcol1', 'gcol2', 'gcol3']\n t0 = time.time()\n gb = dict_groupby.GroupByObj(data, group_columns)\n t1 = time.time()\n out = gb.sum()\n tf = time.time()\n print(t1 - t0, tf - t1, tf - t0)\n",
"step-4": "import random\nimport time\nimport unittest\nfrom old import dict_groupby\n\n\nclass TestDictGroupBy(unittest.TestCase):\n\n def setUp(self):\n random.seed(0)\n self.sut = dict_groupby\n\n def generate_transaction(self):\n return {'transaction_type': random.choice(['a', 'b', 'c']),\n 'outstanding': random.randint(0, 100)}\n\n def generate_facility(self):\n num_transactions = random.randint(1, 3)\n transactions = {}\n outstanding = 0\n for i in range(num_transactions):\n transactions[i] = self.generate_transaction()\n outstanding += transactions[i]['outstanding']\n return {'facility_type': random.choice(['a', 'b', 'c']),\n 'outstanding': outstanding, 'transactions': transactions}\n\n def generate_facilities(self, num):\n out = {}\n for i in range(num):\n out[i] = self.generate_facility()\n return out\n\n def generate_record(self):\n return {'gcol1': random.choice(['a', 'b', 'c']), 'gcol2': random.\n choice(['a', 'b', 'c']), 'gcol3': random.choice(['a', 'b', 'c']\n ), 'vcol1': random.randint(0, 100), 'vcol2': random.random(),\n 'vcol3': random.randint(0, 2)}\n\n def test_hierarchical_groupby(self):\n input_set = self.generate_facilities(4)\n group_columns = ['facility_type', {'transactions': 'transaction_type'}]\n print(input_set)\n self.sut.DictGroupBy(input_set, group_columns)\n\n def test_groupby_and_sum_speed(self):\n data = {}\n for i in range(100000):\n data[i] = self.generate_record()\n print('Generated data.')\n group_columns = ['gcol1', 'gcol2', 'gcol3']\n t0 = time.time()\n gb = dict_groupby.GroupByObj(data, group_columns)\n t1 = time.time()\n out = gb.sum()\n tf = time.time()\n print(t1 - t0, tf - t1, tf - t0)\n",
"step-5": "import random\nimport time\nimport unittest\n\nfrom old import dict_groupby\n\n\nclass TestDictGroupBy(unittest.TestCase):\n\n def setUp(self):\n random.seed(0)\n self.sut = dict_groupby\n\n def generate_transaction(self):\n return {\n 'transaction_type': random.choice(['a', 'b', 'c']),\n 'outstanding': random.randint(0, 100)\n }\n\n def generate_facility(self):\n num_transactions = random.randint(1, 3)\n transactions = {}\n outstanding = 0\n for i in range(num_transactions):\n transactions[i] = self.generate_transaction()\n outstanding += transactions[i]['outstanding']\n\n return {\n 'facility_type': random.choice(['a', 'b', 'c']),\n 'outstanding': outstanding,\n 'transactions': transactions\n }\n\n def generate_facilities(self, num):\n out = {}\n for i in range(num):\n out[i] = self.generate_facility()\n return out\n\n def generate_record(self):\n return {\n 'gcol1': random.choice(['a', 'b', 'c']), 'gcol2': random.choice(['a', 'b', 'c']),\n 'gcol3': random.choice(['a', 'b', 'c']), 'vcol1': random.randint(0, 100), 'vcol2': random.random(),\n 'vcol3': random.randint(0, 2)\n }\n\n def test_hierarchical_groupby(self):\n input_set = self.generate_facilities(4)\n group_columns = ['facility_type', {'transactions': 'transaction_type'}]\n print(input_set)\n self.sut.DictGroupBy(input_set, group_columns)\n\n def test_groupby_and_sum_speed(self):\n data = {}\n for i in range(100000):\n data[i] = self.generate_record()\n print('Generated data.')\n group_columns = ['gcol1', 'gcol2', 'gcol3']\n\n t0 = time.time()\n gb = dict_groupby.GroupByObj(data, group_columns)\n t1 = time.time()\n out = gb.sum()\n tf = time.time()\n # print(out)\n print(t1 - t0, tf - t1, tf - t0)\n\n # df = pd.DataFrame(data).T\n # t0 = time.time()\n # df.groupby(group_columns).sum()\n # tf = time.time()\n # # print(out)\n # print(tf - t0)",
"step-ids": [
4,
7,
8,
9,
10
]
}
|
[
4,
7,
8,
9,
10
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
for sequence_file in sequences:
    f_in = open(current_dir + '/sample_genomes/' + sequence_file, 'r')
    f_out.write(f_in.read())
    f_in.close()
    data = []
    fa_file = current_dir + '/sample_genomes/' + sequence_file
    seqs = SeqIO.parse(fa_file, 'fasta')
    for record in seqs:
        data.append(record.seq.upper())
    seq = data[0]
    temp_fos = []
    temp_glcm = []
    temp_lbp = []
    temp_mlbp = []
    for mapping_type in range(mapping_function_size):
        skewness, my_kurtosis, energy, entropy = get_features(seq, mapping_type)
        temp_fos.append([skewness, my_kurtosis, energy, entropy])
        entropy, contrast, energy, correlation, homogeneity = get_features_glcm(seq, mapping_type)
        temp_glcm.append([entropy, contrast, energy, correlation, homogeneity])
        hist_lbp = get_features_lbp(seq, mapping_type)
        temp_lbp.append(hist_lbp)
        hist_mlbp = get_features_mlbp(seq, mapping_type)
        temp_mlbp.append(hist_mlbp)
    data_features_fos.append(temp_fos)
    data_features_glcm.append(temp_glcm)
    data_features_lbp.append(temp_lbp)
    data_features_mlbp.append(temp_mlbp)
f_out.close()
<|reserved_special_token_0|>
for mapping_type in range(mapping_function_size):
    DIST_fos = np.zeros((data_features_fos.shape[0], data_features_fos.shape[0]))
    for i in range(data_features_fos.shape[0]):
        row = np.zeros(data_features_fos.shape[0])
        for j in range(i, data_features_fos.shape[0]):
            dist = np.sqrt(np.sum((data_features_fos[i][mapping_type] - data_features_fos[j][mapping_type]) ** 2))
            row[j] = dist
        DIST_fos[i] = row
    DIST_fos = DIST_fos + DIST_fos.T - np.diag(np.diag(DIST_fos))
    DIST_fos = (DIST_fos - np.min(DIST_fos)) / (np.max(DIST_fos) - np.min(DIST_fos))
    full_distances_fos.append(DIST_fos[0, 1:DIST_fos.shape[0]])
<|reserved_special_token_0|>
print('full_distances_fos', full_distances_fos.shape)
<|reserved_special_token_0|>
for mapping_type in range(mapping_function_size):
    DIST_glcm = np.zeros((data_features_glcm.shape[0], data_features_glcm.shape[0]))
    for i in range(data_features_glcm.shape[0]):
        row = np.zeros(data_features_glcm.shape[0])
        for j in range(i, data_features_glcm.shape[0]):
            dist = np.sqrt(np.sum((data_features_glcm[i][mapping_type] - data_features_glcm[j][mapping_type]) ** 2))
            row[j] = dist
        DIST_glcm[i] = row
    DIST_glcm = DIST_glcm + DIST_glcm.T - np.diag(np.diag(DIST_glcm))
    DIST_glcm = (DIST_glcm - np.min(DIST_glcm)) / (np.max(DIST_glcm) - np.min(DIST_glcm))
    full_distances_glcm.append(DIST_glcm[0, 1:DIST_glcm.shape[0]])
<|reserved_special_token_0|>
print('full_distances_glcm', full_distances_glcm.shape)
<|reserved_special_token_0|>
for mapping_type in range(mapping_function_size):
    DIST_lbp = np.zeros((data_features_lbp.shape[0], data_features_lbp.shape[0]))
    for i in range(data_features_lbp.shape[0]):
        row = np.zeros(data_features_lbp.shape[0])
        for j in range(i, data_features_lbp.shape[0]):
            dist = np.sqrt(np.sum((data_features_lbp[i][mapping_type] - data_features_lbp[j][mapping_type]) ** 2))
            row[j] = dist
        DIST_lbp[i] = row
    DIST_lbp = DIST_lbp + DIST_lbp.T - np.diag(np.diag(DIST_lbp))
    DIST_lbp = (DIST_lbp - np.min(DIST_lbp)) / (np.max(DIST_lbp) - np.min(DIST_lbp))
    full_distances_lbp.append(DIST_lbp[0, 1:DIST_lbp.shape[0]])
<|reserved_special_token_0|>
print('full_distances_lbp', full_distances_lbp.shape)
<|reserved_special_token_0|>
for mapping_type in range(mapping_function_size):
    DIST_mlbp = np.zeros((data_features_mlbp.shape[0], data_features_mlbp.shape[0]))
    for i in range(data_features_mlbp.shape[0]):
        row = np.zeros(data_features_mlbp.shape[0])
        for j in range(i, data_features_mlbp.shape[0]):
            dist = np.sqrt(np.sum((data_features_mlbp[i][mapping_type] - data_features_mlbp[j][mapping_type]) ** 2))
            row[j] = dist
        DIST_mlbp[i] = row
    DIST_mlbp = DIST_mlbp + DIST_mlbp.T - np.diag(np.diag(DIST_mlbp))
    DIST_mlbp = (DIST_mlbp - np.min(DIST_mlbp)) / (np.max(DIST_mlbp) - np.min(DIST_mlbp))
    full_distances_mlbp.append(DIST_mlbp[0, 1:DIST_mlbp.shape[0]])
<|reserved_special_token_0|>
print('full_distances_mlbp', full_distances_mlbp.shape)
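# Note (added for clarity): each full_distances_* array keeps only row 0 of the
# normalized distance matrix, i.e. the distances from the first entry of
# `sequences` to every other sequence, with one such vector per mapping function.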
<|reserved_special_token_0|>
plt.clf()
<|reserved_special_token_0|>
axs[0, 0].plot(names_temp, full_distances_fos[0], 'b--', label='FOS-MAP0')
axs[0, 0].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')
axs[0, 0].legend(loc='upper right', fontsize=6)
axs[0, 1].plot(names_temp, full_distances_fos[1], 'b--', label='FOS-MAP1')
axs[0, 1].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')
axs[0, 1].legend(loc='upper right', fontsize=6)
axs[1, 0].plot(names_temp, full_distances_fos[2], 'b--', label='FOS-MAP2')
axs[1, 0].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')
axs[1, 0].legend(loc='upper right', fontsize=6)
axs[1, 1].plot(names_temp, full_distances_fos[3], 'b--', label='FOS-MAP3')
axs[1, 1].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')
axs[1, 1].legend(loc='upper right', fontsize=6)
axs[2, 0].plot(names_temp, full_distances_fos[4], 'b--', label='FOS-MAP4')
axs[2, 0].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')
axs[2, 0].legend(loc='upper right', fontsize=6)
axs[2, 1].plot(names_temp, full_distances_fos[5], 'b--', label='FOS-MAP5')
axs[2, 1].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')
axs[2, 1].legend(loc='upper right', fontsize=6)
for ax in axs.flat:
    ax.label_outer()
    ax.yaxis.set_tick_params(labelsize=6)
    plt.sca(ax)
    plt.xticks(rotation=45, horizontalalignment='right', fontweight='light', fontsize=6)
plt.xlabel('Sequences', fontsize=6)
fig.text(0.04, 0.5, 'Distances', va='center', rotation='vertical', fontsize=6)
plt.savefig(results_file + '_fos.png', dpi=200, bbox_inches='tight')
plt.clf()
<|reserved_special_token_0|>
axs[0, 0].plot(names_temp, full_distances_glcm[0], 'b--', label='GLCM-MAP0')
axs[0, 0].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')
axs[0, 0].legend(loc='upper right', fontsize=6)
axs[0, 1].plot(names_temp, full_distances_glcm[1], 'b--', label='GLCM-MAP1')
axs[0, 1].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')
axs[0, 1].legend(loc='upper right', fontsize=6)
axs[1, 0].plot(names_temp, full_distances_glcm[2], 'b--', label='GLCM-MAP2')
axs[1, 0].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')
axs[1, 0].legend(loc='upper right', fontsize=6)
axs[1, 1].plot(names_temp, full_distances_glcm[3], 'b--', label='GLCM-MAP3')
axs[1, 1].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')
axs[1, 1].legend(loc='upper right', fontsize=6)
axs[2, 0].plot(names_temp, full_distances_glcm[4], 'b--', label='GLCM-MAP4')
axs[2, 0].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')
axs[2, 0].legend(loc='upper right', fontsize=6)
axs[2, 1].plot(names_temp, full_distances_glcm[5], 'b--', label='GLCM-MAP5')
axs[2, 1].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')
axs[2, 1].legend(loc='upper right', fontsize=6)
for ax in axs.flat:
    ax.label_outer()
    ax.yaxis.set_tick_params(labelsize=6)
    plt.sca(ax)
    plt.xticks(rotation=45, horizontalalignment='right', fontweight='light', fontsize=6)
plt.xlabel('Sequences', fontsize=6)
fig.text(0.04, 0.5, 'Distances', va='center', rotation='vertical', fontsize=6)
plt.savefig(results_file + '_glcm.png', dpi=200, bbox_inches='tight')
plt.clf()
<|reserved_special_token_0|>
axs[0, 0].plot(names_temp, full_distances_lbp[0], 'b--', label='LBP-MAP0')
axs[0, 0].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')
axs[0, 0].legend(loc='upper right', fontsize=6)
axs[0, 1].plot(names_temp, full_distances_lbp[1], 'b--', label='LBP-MAP1')
axs[0, 1].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')
axs[0, 1].legend(loc='upper right', fontsize=6)
axs[1, 0].plot(names_temp, full_distances_lbp[2], 'b--', label='LBP-MAP2')
axs[1, 0].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')
axs[1, 0].legend(loc='upper right', fontsize=6)
axs[1, 1].plot(names_temp, full_distances_lbp[3], 'b--', label='LBP-MAP3')
axs[1, 1].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')
axs[1, 1].legend(loc='upper right', fontsize=6)
axs[2, 0].plot(names_temp, full_distances_lbp[4], 'b--', label='LBP-MAP4')
axs[2, 0].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')
axs[2, 0].legend(loc='upper right', fontsize=6)
axs[2, 1].plot(names_temp, full_distances_lbp[5], 'b--', label='LBP-MAP5')
axs[2, 1].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')
axs[2, 1].legend(loc='upper right', fontsize=6)
for ax in axs.flat:
    ax.label_outer()
    ax.yaxis.set_tick_params(labelsize=6)
    plt.sca(ax)
    plt.xticks(rotation=45, horizontalalignment='right', fontweight='light', fontsize=6)
plt.xlabel('Sequences', fontsize=6)
fig.text(0.04, 0.5, 'Distances', va='center', rotation='vertical', fontsize=6)
plt.savefig(results_file + '_lbp.png', dpi=200, bbox_inches='tight')
plt.clf()
<|reserved_special_token_0|>
axs[0, 0].plot(names_temp, full_distances_mlbp[0], 'b--', label='MLBP-MAP0')
axs[0, 0].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')
axs[0, 0].legend(loc='upper right', fontsize=6)
axs[0, 1].plot(names_temp, full_distances_mlbp[1], 'b--', label='MLBP-MAP1')
axs[0, 1].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')
axs[0, 1].legend(loc='upper right', fontsize=6)
axs[1, 0].plot(names_temp, full_distances_mlbp[2], 'b--', label='MLBP-MAP2')
axs[1, 0].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')
axs[1, 0].legend(loc='upper right', fontsize=6)
axs[1, 1].plot(names_temp, full_distances_mlbp[3], 'b--', label='MLBP-MAP3')
axs[1, 1].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')
axs[1, 1].legend(loc='upper right', fontsize=6)
axs[2, 0].plot(names_temp, full_distances_mlbp[4], 'b--', label='MLBP-MAP4')
axs[2, 0].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')
axs[2, 0].legend(loc='upper right', fontsize=6)
axs[2, 1].plot(names_temp, full_distances_mlbp[5], 'b--', label='MLBP-MAP5')
axs[2, 1].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')
axs[2, 1].legend(loc='upper right', fontsize=6)
for ax in axs.flat:
    ax.label_outer()
    ax.yaxis.set_tick_params(labelsize=6)
    plt.sca(ax)
    plt.xticks(rotation=45, horizontalalignment='right', fontweight='light', fontsize=6)
plt.xlabel('Sequences', fontsize=6)
fig.text(0.04, 0.5, 'Distances', va='center', rotation='vertical', fontsize=6)
plt.savefig(results_file + '_mlbp.png', dpi=200, bbox_inches='tight')
<|reserved_special_token_0|>
for mapping_type in range(mapping_function_size):
    error_fos.append(np.sum((full_distances_fos[mapping_type] - distances_mega) ** 2) / distances_mega.shape[0])
    error_glcm.append(np.sum((full_distances_glcm[mapping_type] - distances_mega) ** 2) / distances_mega.shape[0])
    error_lbp.append(np.sum((full_distances_lbp[mapping_type] - distances_mega) ** 2) / distances_mega.shape[0])
    error_mlbp.append(np.sum((full_distances_mlbp[mapping_type] - distances_mega) ** 2) / distances_mega.shape[0])
data_csv.append(error_fos)
data_csv.append(error_glcm)
data_csv.append(error_lbp)
data_csv.append(error_mlbp)
<|reserved_special_token_0|>
print(df)
df.to_csv(results_file + '.csv', index=True)
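# Note (added for clarity): every cell written to the CSV above is a mean squared
# error between a feature-based distance vector and the CLUSTALW/MEGA reference,
#   error = sum((d_feature - d_clustalw) ** 2) / len(d_clustalw)
# so lower values indicate that the descriptor/mapping combination orders the
# sequences more like the alignment-based distances.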
plt.clf()
<|reserved_special_token_0|>
axs[0, 0].plot(names_temp, full_distances_fos[0], 'b--', label='FOS-MAP0')
axs[0, 0].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')
axs[0, 0].legend(loc='upper right', fontsize=6)
axs[0, 1].plot(names_temp, full_distances_glcm[0], 'b--', label='GLCM-MAP0')
axs[0, 1].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')
axs[0, 1].legend(loc='upper right', fontsize=6)
axs[1, 0].plot(names_temp, full_distances_lbp[0], 'b--', label='LBP-MAP0')
axs[1, 0].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')
axs[1, 0].legend(loc='upper right', fontsize=6)
axs[1, 1].plot(names_temp, full_distances_mlbp[0], 'b--', label='MLBP-MAP0')
axs[1, 1].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')
axs[1, 1].legend(loc='upper right', fontsize=6)
for ax in axs.flat:
    ax.label_outer()
    ax.yaxis.set_tick_params(labelsize=6)
    plt.sca(ax)
    plt.xticks(rotation=45, horizontalalignment='right', fontweight='light', fontsize=6)
plt.xlabel('Sequences', fontsize=6)
fig.text(0.04, 0.5, 'Distances', va='center', rotation='vertical', fontsize=6)
plt.savefig(results_file + '_0map.png', dpi=200, bbox_inches='tight')
plt.clf()
<|reserved_special_token_0|>
axs[0, 0].plot(names_temp, full_distances_fos[1], 'b--', label='FOS-MAP1')
axs[0, 0].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')
axs[0, 0].legend(loc='upper right', fontsize=6)
axs[0, 1].plot(names_temp, full_distances_glcm[1], 'b--', label='GLCM-MAP1')
axs[0, 1].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')
axs[0, 1].legend(loc='upper right', fontsize=6)
axs[1, 0].plot(names_temp, full_distances_lbp[1], 'b--', label='LBP-MAP1')
axs[1, 0].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')
axs[1, 0].legend(loc='upper right', fontsize=6)
axs[1, 1].plot(names_temp, full_distances_mlbp[1], 'b--', label='MLBP-MAP1')
axs[1, 1].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')
axs[1, 1].legend(loc='upper right', fontsize=6)
for ax in axs.flat:
    ax.label_outer()
    ax.yaxis.set_tick_params(labelsize=6)
    plt.sca(ax)
    plt.xticks(rotation=45, horizontalalignment='right', fontweight='light', fontsize=6)
plt.xlabel('Sequences', fontsize=6)
fig.text(0.04, 0.5, 'Distances', va='center', rotation='vertical', fontsize=6)
plt.savefig(results_file + '_1map.png', dpi=200, bbox_inches='tight')
plt.clf()
<|reserved_special_token_0|>
axs[0, 0].plot(names_temp, full_distances_fos[2], 'b--', label='FOS-MAP2')
axs[0, 0].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')
axs[0, 0].legend(loc='upper right', fontsize=6)
axs[0, 1].plot(names_temp, full_distances_glcm[2], 'b--', label='GLCM-MAP2')
axs[0, 1].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')
axs[0, 1].legend(loc='upper right', fontsize=6)
axs[1, 0].plot(names_temp, full_distances_lbp[2], 'b--', label='LBP-MAP2')
axs[1, 0].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')
axs[1, 0].legend(loc='upper right', fontsize=6)
axs[1, 1].plot(names_temp, full_distances_mlbp[2], 'b--', label='MLBP-MAP2')
axs[1, 1].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')
axs[1, 1].legend(loc='upper right', fontsize=6)
for ax in axs.flat:
    ax.label_outer()
    ax.yaxis.set_tick_params(labelsize=6)
    plt.sca(ax)
    plt.xticks(rotation=45, horizontalalignment='right', fontweight='light', fontsize=6)
plt.xlabel('Sequences', fontsize=6)
fig.text(0.04, 0.5, 'Distances', va='center', rotation='vertical', fontsize=6)
plt.savefig(results_file + '_2map.png', dpi=200, bbox_inches='tight')
plt.clf()
<|reserved_special_token_0|>
axs[0, 0].plot(names_temp, full_distances_fos[3], 'b--', label='FOS-MAP3')
axs[0, 0].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')
axs[0, 0].legend(loc='upper right', fontsize=6)
axs[0, 1].plot(names_temp, full_distances_glcm[3], 'b--', label='GLCM-MAP3')
axs[0, 1].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')
axs[0, 1].legend(loc='upper right', fontsize=6)
axs[1, 0].plot(names_temp, full_distances_lbp[3], 'b--', label='LBP-MAP3')
axs[1, 0].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')
axs[1, 0].legend(loc='upper right', fontsize=6)
axs[1, 1].plot(names_temp, full_distances_mlbp[3], 'b--', label='MLBP-MAP3')
axs[1, 1].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')
axs[1, 1].legend(loc='upper right', fontsize=6)
for ax in axs.flat:
    ax.label_outer()
    ax.yaxis.set_tick_params(labelsize=6)
    plt.sca(ax)
    plt.xticks(rotation=45, horizontalalignment='right', fontweight='light', fontsize=6)
plt.xlabel('Sequences', fontsize=6)
fig.text(0.04, 0.5, 'Distances', va='center', rotation='vertical', fontsize=6)
plt.savefig(results_file + '_3map.png', dpi=200, bbox_inches='tight')
plt.clf()
<|reserved_special_token_0|>
axs[0, 0].plot(names_temp, full_distances_fos[4], 'b--', label='FOS-MAP4')
axs[0, 0].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')
axs[0, 0].legend(loc='upper right', fontsize=6)
axs[0, 1].plot(names_temp, full_distances_glcm[4], 'b--', label='GLCM-MAP4')
axs[0, 1].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')
axs[0, 1].legend(loc='upper right', fontsize=6)
axs[1, 0].plot(names_temp, full_distances_lbp[4], 'b--', label='LBP-MAP4')
axs[1, 0].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')
axs[1, 0].legend(loc='upper right', fontsize=6)
axs[1, 1].plot(names_temp, full_distances_mlbp[4], 'b--', label='MLBP-MAP4')
axs[1, 1].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')
axs[1, 1].legend(loc='upper right', fontsize=6)
for ax in axs.flat:
    ax.label_outer()
    ax.yaxis.set_tick_params(labelsize=6)
    plt.sca(ax)
    plt.xticks(rotation=45, horizontalalignment='right', fontweight='light', fontsize=6)
plt.xlabel('Sequences', fontsize=6)
fig.text(0.04, 0.5, 'Distances', va='center', rotation='vertical', fontsize=6)
plt.savefig(results_file + '_4map.png', dpi=200, bbox_inches='tight')
plt.clf()
<|reserved_special_token_0|>
axs[0, 0].plot(names_temp, full_distances_fos[5], 'b--', label='FOS-MAP5')
axs[0, 0].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')
axs[0, 0].legend(loc='upper right', fontsize=6)
axs[0, 1].plot(names_temp, full_distances_glcm[5], 'b--', label='GLCM-MAP5')
axs[0, 1].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')
axs[0, 1].legend(loc='upper right', fontsize=6)
axs[1, 0].plot(names_temp, full_distances_lbp[5], 'b--', label='LBP-MAP5')
axs[1, 0].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')
axs[1, 0].legend(loc='upper right', fontsize=6)
axs[1, 1].plot(names_temp, full_distances_mlbp[5], 'b--', label='MLBP-MAP5')
axs[1, 1].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')
axs[1, 1].legend(loc='upper right', fontsize=6)
for ax in axs.flat:
    ax.label_outer()
    ax.yaxis.set_tick_params(labelsize=6)
    plt.sca(ax)
    plt.xticks(rotation=45, horizontalalignment='right', fontweight='light', fontsize=6)
plt.xlabel('Sequences', fontsize=6)
fig.text(0.04, 0.5, 'Distances', va='center', rotation='vertical', fontsize=6)
plt.savefig(results_file + '_5map.png', dpi=200, bbox_inches='tight')
<|reserved_special_token_1|>
<|reserved_special_token_0|>
current_dir = os.path.dirname(os.path.abspath(__file__))
sequences = ['J01859.fna', 'NR_037066.fna', 'NR_040849.fna',
'NR_117152.fna', 'NR_132306.fna', 'NR_134817.fna', 'NR_134818.fna',
'NR_136784.fna', 'NR_148244.fna', 'NR_148787.fna', 'NR_152063.fna',
'KP317497.fna', 'NR_156072.fna']
names = ['Escherichia coli', 'T.Thermophilus', 'B.Wakoensis',
'T.Filiformis', 'T.Tengchongensis', 'S.Cameli', 'S.Tangierensis',
'T.amyloliquefaciens', 'B.Xiamenensis', 'B.Australimaris',
'S.Halotolerans', 'B.Maritimus', 'S.Himalayensis']
csv_mega = current_dir + '/sample_genomes/seqs_db1_distances.csv'
seq_file_full = current_dir + '/sample_genomes/seqs_db1.fasta'
results_file = current_dir + '/results/compare_features/db1'
sequences = ['L00016.fna', 'M22650.fna', 'M22651.fna', 'M22653.fna',
'M22654.fna', 'M22655.fna', 'M22656.fna', 'M22657.fna', 'V00658.fna',
'V00659.fna', 'V00672.fna', 'V00675.fna']
names = ['Human', 'Macaca mulatta', 'Macaca fuscata', 'Macaca fascicularis',
'Macaca sylvanus', 'Saimiri sciureus', 'Tarsius syrichta',
'Lemur catta', 'Gorilla', 'Hylobates', 'Chimpanzee', 'Sumatran Orangutan']
csv_mega = current_dir + '/sample_genomes/seqs_db2_distances.csv'
seq_file_full = current_dir + '/sample_genomes/seqs_db2.fasta'
results_file = current_dir + '/results/compare_features/db2'
sequences = ['V00662.fna', 'D38116.fna', 'D38113.fna', 'D38114.fna',
'D38115.fna', 'X99256.fna', 'Y18001.fna', 'X79547.fna', 'Y07726.fna',
'X63726.fna', 'X72004.fna', 'U20753.fna', 'X61145.fna', 'X72204.fna',
'V00654.fna', 'X14848.fna', 'V00711.fna', 'X83427.fna']
names = ['Human', 'Pygmy chimpanzee', 'Common chimpanzee', 'Gorilla',
'Orangutan', 'Gibbon', 'Baboon', 'Horse', 'White rhinoceros',
'Harbor seal', 'Gray seal', 'Cat', 'Fin whale', 'Blue whale', 'Cow',
'Rat', 'Mouse', 'Platypus']
csv_mega = current_dir + '/sample_genomes/seqs_db3_distances.csv'
seq_file_full = current_dir + '/sample_genomes/seqs_db3.fasta'
results_file = current_dir + '/results/compare_features/db3'
data_features_fos = []
data_features_glcm = []
data_features_lbp = []
data_features_mlbp = []
mapping_function_size = 6
f_out = open(seq_file_full, 'w')
for sequence_file in sequences:
    f_in = open(current_dir + '/sample_genomes/' + sequence_file, 'r')
    f_out.write(f_in.read())
    f_in.close()
    data = []
    fa_file = current_dir + '/sample_genomes/' + sequence_file
    seqs = SeqIO.parse(fa_file, 'fasta')
    for record in seqs:
        data.append(record.seq.upper())
    seq = data[0]
    temp_fos = []
    temp_glcm = []
    temp_lbp = []
    temp_mlbp = []
    for mapping_type in range(mapping_function_size):
        skewness, my_kurtosis, energy, entropy = get_features(seq, mapping_type)
        temp_fos.append([skewness, my_kurtosis, energy, entropy])
        entropy, contrast, energy, correlation, homogeneity = get_features_glcm(seq, mapping_type)
        temp_glcm.append([entropy, contrast, energy, correlation, homogeneity])
        hist_lbp = get_features_lbp(seq, mapping_type)
        temp_lbp.append(hist_lbp)
        hist_mlbp = get_features_mlbp(seq, mapping_type)
        temp_mlbp.append(hist_mlbp)
    data_features_fos.append(temp_fos)
    data_features_glcm.append(temp_glcm)
    data_features_lbp.append(temp_lbp)
    data_features_mlbp.append(temp_mlbp)
f_out.close()
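# Editor's sketch (not part of the original script): the four distance blocks below
# differ only in which feature array they read. A helper such as the following,
# assuming Euclidean distance and min-max normalization exactly as used here, could
# compute any of those normalized distance matrices in one place.
def pairwise_normalized_distances(features, mapping_type):
    # features is expected to have shape (n_sequences, n_mappings, feature_length)
    n = features.shape[0]
    dist = np.zeros((n, n))
    for a in range(n):
        for b in range(a, n):
            d = np.sqrt(np.sum((features[a][mapping_type] - features[b][mapping_type]) ** 2))
            dist[a][b] = d
            dist[b][a] = d
    return (dist - np.min(dist)) / (np.max(dist) - np.min(dist))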
data_features_fos = np.array(data_features_fos)
data_features_glcm = np.array(data_features_glcm)
data_features_lbp = np.array(data_features_lbp)
data_features_mlbp = np.array(data_features_mlbp)
full_distances_fos = []
for mapping_type in range(mapping_function_size):
    DIST_fos = np.zeros((data_features_fos.shape[0], data_features_fos.shape[0]))
    for i in range(data_features_fos.shape[0]):
        row = np.zeros(data_features_fos.shape[0])
        for j in range(i, data_features_fos.shape[0]):
            dist = np.sqrt(np.sum((data_features_fos[i][mapping_type] - data_features_fos[j][mapping_type]) ** 2))
            row[j] = dist
        DIST_fos[i] = row
    DIST_fos = DIST_fos + DIST_fos.T - np.diag(np.diag(DIST_fos))
    DIST_fos = (DIST_fos - np.min(DIST_fos)) / (np.max(DIST_fos) - np.min(DIST_fos))
    full_distances_fos.append(DIST_fos[0, 1:DIST_fos.shape[0]])
full_distances_fos = np.array(full_distances_fos)
print('full_distances_fos', full_distances_fos.shape)
full_distances_glcm = []
for mapping_type in range(mapping_function_size):
    DIST_glcm = np.zeros((data_features_glcm.shape[0], data_features_glcm.shape[0]))
    for i in range(data_features_glcm.shape[0]):
        row = np.zeros(data_features_glcm.shape[0])
        for j in range(i, data_features_glcm.shape[0]):
            dist = np.sqrt(np.sum((data_features_glcm[i][mapping_type] - data_features_glcm[j][mapping_type]) ** 2))
            row[j] = dist
        DIST_glcm[i] = row
    DIST_glcm = DIST_glcm + DIST_glcm.T - np.diag(np.diag(DIST_glcm))
    DIST_glcm = (DIST_glcm - np.min(DIST_glcm)) / (np.max(DIST_glcm) - np.min(DIST_glcm))
    full_distances_glcm.append(DIST_glcm[0, 1:DIST_glcm.shape[0]])
full_distances_glcm = np.array(full_distances_glcm)
print('full_distances_glcm', full_distances_glcm.shape)
full_distances_lbp = []
for mapping_type in range(mapping_function_size):
    DIST_lbp = np.zeros((data_features_lbp.shape[0], data_features_lbp.shape[0]))
    for i in range(data_features_lbp.shape[0]):
        row = np.zeros(data_features_lbp.shape[0])
        for j in range(i, data_features_lbp.shape[0]):
            dist = np.sqrt(np.sum((data_features_lbp[i][mapping_type] - data_features_lbp[j][mapping_type]) ** 2))
            row[j] = dist
        DIST_lbp[i] = row
    DIST_lbp = DIST_lbp + DIST_lbp.T - np.diag(np.diag(DIST_lbp))
    DIST_lbp = (DIST_lbp - np.min(DIST_lbp)) / (np.max(DIST_lbp) - np.min(DIST_lbp))
    full_distances_lbp.append(DIST_lbp[0, 1:DIST_lbp.shape[0]])
full_distances_lbp = np.array(full_distances_lbp)
print('full_distances_lbp', full_distances_lbp.shape)
full_distances_mlbp = []
for mapping_type in range(mapping_function_size):
    DIST_mlbp = np.zeros((data_features_mlbp.shape[0], data_features_mlbp.shape[0]))
    for i in range(data_features_mlbp.shape[0]):
        row = np.zeros(data_features_mlbp.shape[0])
        for j in range(i, data_features_mlbp.shape[0]):
            dist = np.sqrt(np.sum((data_features_mlbp[i][mapping_type] - data_features_mlbp[j][mapping_type]) ** 2))
            row[j] = dist
        DIST_mlbp[i] = row
    DIST_mlbp = DIST_mlbp + DIST_mlbp.T - np.diag(np.diag(DIST_mlbp))
    DIST_mlbp = (DIST_mlbp - np.min(DIST_mlbp)) / (np.max(DIST_mlbp) - np.min(DIST_mlbp))
    full_distances_mlbp.append(DIST_mlbp[0, 1:DIST_mlbp.shape[0]])
full_distances_mlbp = np.array(full_distances_mlbp)
print('full_distances_mlbp', full_distances_mlbp.shape)
mega_dist_csv = pd.read_csv(csv_mega)
mega_dist_csv = mega_dist_csv.set_index(mega_dist_csv.columns[0])
DIST_mega = mega_dist_csv.values
DIST_mega[np.isnan(DIST_mega)] = 0
DIST_mega = DIST_mega + DIST_mega.T
distances_mega = DIST_mega[0, 1:DIST_mega.shape[0]]
distances_mega = (distances_mega - np.min(distances_mega)) / (np.max(
distances_mega) - np.min(distances_mega))
names_temp = np.array(sequences)
names_temp = names_temp[1:names_temp.shape[0]]
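# Note (added for clarity, assumption about the exported file): the MEGA/CLUSTALW
# CSV appears to contain only the lower triangle of the distance matrix, which is
# why the NaN upper triangle is zero-filled and the matrix is symmetrized with its
# transpose before row 0 is extracted and min-max normalized above.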
plt.clf()
fig, axs = plt.subplots(3, 2)
axs[0, 0].plot(names_temp, full_distances_fos[0], 'b--', label='FOS-MAP0')
axs[0, 0].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')
axs[0, 0].legend(loc='upper right', fontsize=6)
axs[0, 1].plot(names_temp, full_distances_fos[1], 'b--', label='FOS-MAP1')
axs[0, 1].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')
axs[0, 1].legend(loc='upper right', fontsize=6)
axs[1, 0].plot(names_temp, full_distances_fos[2], 'b--', label='FOS-MAP2')
axs[1, 0].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')
axs[1, 0].legend(loc='upper right', fontsize=6)
axs[1, 1].plot(names_temp, full_distances_fos[3], 'b--', label='FOS-MAP3')
axs[1, 1].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')
axs[1, 1].legend(loc='upper right', fontsize=6)
axs[2, 0].plot(names_temp, full_distances_fos[4], 'b--', label='FOS-MAP4')
axs[2, 0].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')
axs[2, 0].legend(loc='upper right', fontsize=6)
axs[2, 1].plot(names_temp, full_distances_fos[5], 'b--', label='FOS-MAP5')
axs[2, 1].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')
axs[2, 1].legend(loc='upper right', fontsize=6)
for ax in axs.flat:
    ax.label_outer()
    ax.yaxis.set_tick_params(labelsize=6)
    plt.sca(ax)
    plt.xticks(rotation=45, horizontalalignment='right', fontweight='light', fontsize=6)
plt.xlabel('Sequences', fontsize=6)
fig.text(0.04, 0.5, 'Distances', va='center', rotation='vertical', fontsize=6)
plt.savefig(results_file + '_fos.png', dpi=200, bbox_inches='tight')
plt.clf()
fig, axs = plt.subplots(3, 2)
axs[0, 0].plot(names_temp, full_distances_glcm[0], 'b--', label='GLCM-MAP0')
axs[0, 0].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')
axs[0, 0].legend(loc='upper right', fontsize=6)
axs[0, 1].plot(names_temp, full_distances_glcm[1], 'b--', label='GLCM-MAP1')
axs[0, 1].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')
axs[0, 1].legend(loc='upper right', fontsize=6)
axs[1, 0].plot(names_temp, full_distances_glcm[2], 'b--', label='GLCM-MAP2')
axs[1, 0].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')
axs[1, 0].legend(loc='upper right', fontsize=6)
axs[1, 1].plot(names_temp, full_distances_glcm[3], 'b--', label='GLCM-MAP3')
axs[1, 1].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')
axs[1, 1].legend(loc='upper right', fontsize=6)
axs[2, 0].plot(names_temp, full_distances_glcm[4], 'b--', label='GLCM-MAP4')
axs[2, 0].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')
axs[2, 0].legend(loc='upper right', fontsize=6)
axs[2, 1].plot(names_temp, full_distances_glcm[5], 'b--', label='GLCM-MAP5')
axs[2, 1].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')
axs[2, 1].legend(loc='upper right', fontsize=6)
for ax in axs.flat:
    ax.label_outer()
    ax.yaxis.set_tick_params(labelsize=6)
    plt.sca(ax)
    plt.xticks(rotation=45, horizontalalignment='right', fontweight='light', fontsize=6)
plt.xlabel('Sequences', fontsize=6)
fig.text(0.04, 0.5, 'Distances', va='center', rotation='vertical', fontsize=6)
plt.savefig(results_file + '_glcm.png', dpi=200, bbox_inches='tight')
plt.clf()
fig, axs = plt.subplots(3, 2)
axs[0, 0].plot(names_temp, full_distances_lbp[0], 'b--', label='LBP-MAP0')
axs[0, 0].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')
axs[0, 0].legend(loc='upper right', fontsize=6)
axs[0, 1].plot(names_temp, full_distances_lbp[1], 'b--', label='LBP-MAP1')
axs[0, 1].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')
axs[0, 1].legend(loc='upper right', fontsize=6)
axs[1, 0].plot(names_temp, full_distances_lbp[2], 'b--', label='LBP-MAP2')
axs[1, 0].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')
axs[1, 0].legend(loc='upper right', fontsize=6)
axs[1, 1].plot(names_temp, full_distances_lbp[3], 'b--', label='LBP-MAP3')
axs[1, 1].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')
axs[1, 1].legend(loc='upper right', fontsize=6)
axs[2, 0].plot(names_temp, full_distances_lbp[4], 'b--', label='LBP-MAP4')
axs[2, 0].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')
axs[2, 0].legend(loc='upper right', fontsize=6)
axs[2, 1].plot(names_temp, full_distances_lbp[5], 'b--', label='LBP-MAP5')
axs[2, 1].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')
axs[2, 1].legend(loc='upper right', fontsize=6)
for ax in axs.flat:
    ax.label_outer()
    ax.yaxis.set_tick_params(labelsize=6)
    plt.sca(ax)
    plt.xticks(rotation=45, horizontalalignment='right', fontweight='light', fontsize=6)
plt.xlabel('Sequences', fontsize=6)
fig.text(0.04, 0.5, 'Distances', va='center', rotation='vertical', fontsize=6)
plt.savefig(results_file + '_lbp.png', dpi=200, bbox_inches='tight')
plt.clf()
fig, axs = plt.subplots(3, 2)
axs[0, 0].plot(names_temp, full_distances_mlbp[0], 'b--', label='MLBP-MAP0')
axs[0, 0].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')
axs[0, 0].legend(loc='upper right', fontsize=6)
axs[0, 1].plot(names_temp, full_distances_mlbp[1], 'b--', label='MLBP-MAP1')
axs[0, 1].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')
axs[0, 1].legend(loc='upper right', fontsize=6)
axs[1, 0].plot(names_temp, full_distances_mlbp[2], 'b--', label='MLBP-MAP2')
axs[1, 0].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')
axs[1, 0].legend(loc='upper right', fontsize=6)
axs[1, 1].plot(names_temp, full_distances_mlbp[3], 'b--', label='MLBP-MAP3')
axs[1, 1].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')
axs[1, 1].legend(loc='upper right', fontsize=6)
axs[2, 0].plot(names_temp, full_distances_mlbp[4], 'b--', label='MLBP-MAP4')
axs[2, 0].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')
axs[2, 0].legend(loc='upper right', fontsize=6)
axs[2, 1].plot(names_temp, full_distances_mlbp[5], 'b--', label='MLBP-MAP5')
axs[2, 1].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')
axs[2, 1].legend(loc='upper right', fontsize=6)
for ax in axs.flat:
    ax.label_outer()
    ax.yaxis.set_tick_params(labelsize=6)
    plt.sca(ax)
    plt.xticks(rotation=45, horizontalalignment='right', fontweight='light', fontsize=6)
plt.xlabel('Sequences', fontsize=6)
fig.text(0.04, 0.5, 'Distances', va='center', rotation='vertical', fontsize=6)
plt.savefig(results_file + '_mlbp.png', dpi=200, bbox_inches='tight')
data_csv = []
error_fos = []
error_glcm = []
error_lbp = []
error_mlbp = []
for mapping_type in range(mapping_function_size):
    error_fos.append(np.sum((full_distances_fos[mapping_type] - distances_mega) ** 2) / distances_mega.shape[0])
    error_glcm.append(np.sum((full_distances_glcm[mapping_type] - distances_mega) ** 2) / distances_mega.shape[0])
    error_lbp.append(np.sum((full_distances_lbp[mapping_type] - distances_mega) ** 2) / distances_mega.shape[0])
    error_mlbp.append(np.sum((full_distances_mlbp[mapping_type] - distances_mega) ** 2) / distances_mega.shape[0])
data_csv.append(error_fos)
data_csv.append(error_glcm)
data_csv.append(error_lbp)
data_csv.append(error_mlbp)
data_csv = np.array(data_csv)
df = pd.DataFrame(data=data_csv.T, index=['map0', 'map1', 'map2', 'map3',
'map4', 'map5'], columns=['FOS', 'GLCM', 'LBP', 'MLBP'])
print(df)
df.to_csv(results_file + '.csv', index=True)
plt.clf()
fig, axs = plt.subplots(2, 2)
axs[0, 0].plot(names_temp, full_distances_fos[0], 'b--', label='FOS-MAP0')
axs[0, 0].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')
axs[0, 0].legend(loc='upper right', fontsize=6)
axs[0, 1].plot(names_temp, full_distances_glcm[0], 'b--', label='GLCM-MAP0')
axs[0, 1].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')
axs[0, 1].legend(loc='upper right', fontsize=6)
axs[1, 0].plot(names_temp, full_distances_lbp[0], 'b--', label='LBP-MAP0')
axs[1, 0].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')
axs[1, 0].legend(loc='upper right', fontsize=6)
axs[1, 1].plot(names_temp, full_distances_mlbp[0], 'b--', label='MLBP-MAP0')
axs[1, 1].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')
axs[1, 1].legend(loc='upper right', fontsize=6)
for ax in axs.flat:
    ax.label_outer()
    ax.yaxis.set_tick_params(labelsize=6)
    plt.sca(ax)
    plt.xticks(rotation=45, horizontalalignment='right', fontweight='light', fontsize=6)
plt.xlabel('Sequences', fontsize=6)
fig.text(0.04, 0.5, 'Distances', va='center', rotation='vertical', fontsize=6)
plt.savefig(results_file + '_0map.png', dpi=200, bbox_inches='tight')
plt.clf()
fig, axs = plt.subplots(2, 2)
axs[0, 0].plot(names_temp, full_distances_fos[1], 'b--', label='FOS-MAP1')
axs[0, 0].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')
axs[0, 0].legend(loc='upper right', fontsize=6)
axs[0, 1].plot(names_temp, full_distances_glcm[1], 'b--', label='GLCM-MAP1')
axs[0, 1].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')
axs[0, 1].legend(loc='upper right', fontsize=6)
axs[1, 0].plot(names_temp, full_distances_lbp[1], 'b--', label='LBP-MAP1')
axs[1, 0].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')
axs[1, 0].legend(loc='upper right', fontsize=6)
axs[1, 1].plot(names_temp, full_distances_mlbp[1], 'b--', label='MLBP-MAP1')
axs[1, 1].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')
axs[1, 1].legend(loc='upper right', fontsize=6)
for ax in axs.flat:
    ax.label_outer()
    ax.yaxis.set_tick_params(labelsize=6)
    plt.sca(ax)
    plt.xticks(rotation=45, horizontalalignment='right', fontweight='light', fontsize=6)
plt.xlabel('Sequences', fontsize=6)
fig.text(0.04, 0.5, 'Distances', va='center', rotation='vertical', fontsize=6)
plt.savefig(results_file + '_1map.png', dpi=200, bbox_inches='tight')
plt.clf()
fig, axs = plt.subplots(2, 2)
axs[0, 0].plot(names_temp, full_distances_fos[2], 'b--', label='FOS-MAP2')
axs[0, 0].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')
axs[0, 0].legend(loc='upper right', fontsize=6)
axs[0, 1].plot(names_temp, full_distances_glcm[2], 'b--', label='GLCM-MAP2')
axs[0, 1].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')
axs[0, 1].legend(loc='upper right', fontsize=6)
axs[1, 0].plot(names_temp, full_distances_lbp[2], 'b--', label='LBP-MAP2')
axs[1, 0].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')
axs[1, 0].legend(loc='upper right', fontsize=6)
axs[1, 1].plot(names_temp, full_distances_mlbp[2], 'b--', label='MLBP-MAP2')
axs[1, 1].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')
axs[1, 1].legend(loc='upper right', fontsize=6)
for ax in axs.flat:
    ax.label_outer()
    ax.yaxis.set_tick_params(labelsize=6)
    plt.sca(ax)
    plt.xticks(rotation=45, horizontalalignment='right', fontweight='light', fontsize=6)
plt.xlabel('Sequences', fontsize=6)
fig.text(0.04, 0.5, 'Distances', va='center', rotation='vertical', fontsize=6)
plt.savefig(results_file + '_2map.png', dpi=200, bbox_inches='tight')
plt.clf()
fig, axs = plt.subplots(2, 2)
axs[0, 0].plot(names_temp, full_distances_fos[3], 'b--', label='FOS-MAP3')
axs[0, 0].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')
axs[0, 0].legend(loc='upper right', fontsize=6)
axs[0, 1].plot(names_temp, full_distances_glcm[3], 'b--', label='GLCM-MAP3')
axs[0, 1].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')
axs[0, 1].legend(loc='upper right', fontsize=6)
axs[1, 0].plot(names_temp, full_distances_lbp[3], 'b--', label='LBP-MAP3')
axs[1, 0].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')
axs[1, 0].legend(loc='upper right', fontsize=6)
axs[1, 1].plot(names_temp, full_distances_mlbp[3], 'b--', label='MLBP-MAP3')
axs[1, 1].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')
axs[1, 1].legend(loc='upper right', fontsize=6)
for ax in axs.flat:
    ax.label_outer()
    ax.yaxis.set_tick_params(labelsize=6)
    plt.sca(ax)
    plt.xticks(rotation=45, horizontalalignment='right', fontweight='light', fontsize=6)
plt.xlabel('Sequences', fontsize=6)
fig.text(0.04, 0.5, 'Distances', va='center', rotation='vertical', fontsize=6)
plt.savefig(results_file + '_3map.png', dpi=200, bbox_inches='tight')
plt.clf()
fig, axs = plt.subplots(2, 2)
axs[0, 0].plot(names_temp, full_distances_fos[4], 'b--', label='FOS-MAP4')
axs[0, 0].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')
axs[0, 0].legend(loc='upper right', fontsize=6)
axs[0, 1].plot(names_temp, full_distances_glcm[4], 'b--', label='GLCM-MAP4')
axs[0, 1].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')
axs[0, 1].legend(loc='upper right', fontsize=6)
axs[1, 0].plot(names_temp, full_distances_lbp[4], 'b--', label='LBP-MAP4')
axs[1, 0].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')
axs[1, 0].legend(loc='upper right', fontsize=6)
axs[1, 1].plot(names_temp, full_distances_mlbp[4], 'b--', label='MLBP-MAP4')
axs[1, 1].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')
axs[1, 1].legend(loc='upper right', fontsize=6)
for ax in axs.flat:
    ax.label_outer()
    ax.yaxis.set_tick_params(labelsize=6)
    plt.sca(ax)
    plt.xticks(rotation=45, horizontalalignment='right', fontweight='light', fontsize=6)
plt.xlabel('Sequences', fontsize=6)
fig.text(0.04, 0.5, 'Distances', va='center', rotation='vertical', fontsize=6)
plt.savefig(results_file + '_4map.png', dpi=200, bbox_inches='tight')
plt.clf()
fig, axs = plt.subplots(2, 2)
axs[0, 0].plot(names_temp, full_distances_fos[5], 'b--', label='FOS-MAP5')
axs[0, 0].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')
axs[0, 0].legend(loc='upper right', fontsize=6)
axs[0, 1].plot(names_temp, full_distances_glcm[5], 'b--', label='GLCM-MAP5')
axs[0, 1].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')
axs[0, 1].legend(loc='upper right', fontsize=6)
axs[1, 0].plot(names_temp, full_distances_lbp[5], 'b--', label='LBP-MAP5')
axs[1, 0].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')
axs[1, 0].legend(loc='upper right', fontsize=6)
axs[1, 1].plot(names_temp, full_distances_mlbp[5], 'b--', label='MLBP-MAP5')
axs[1, 1].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')
axs[1, 1].legend(loc='upper right', fontsize=6)
for ax in axs.flat:
    ax.label_outer()
    ax.yaxis.set_tick_params(labelsize=6)
    plt.sca(ax)
    plt.xticks(rotation=45, horizontalalignment='right', fontweight='light', fontsize=6)
plt.xlabel('Sequences', fontsize=6)
fig.text(0.04, 0.5, 'Distances', va='center', rotation='vertical', fontsize=6)
plt.savefig(results_file + '_5map.png', dpi=200, bbox_inches='tight')
<|reserved_special_token_1|>
from sklearn.model_selection import KFold
from sklearn.model_selection import train_test_split
import matplotlib.pyplot as plt
from matplotlib import pyplot
import math
import os
import sys
import cv2
import numpy as np
import math
from scipy.stats import kurtosis, skew
from Bio import SeqIO
import pandas as pd
import seaborn as sns
from descriptor import get_features
from descriptor import get_features_glcm
from descriptor import get_features_lbp
from descriptor import get_features_mlbp
from ete3 import PhyloTree, TreeStyle
from ete3 import Tree
from skbio import DistanceMatrix
from skbio.tree import nj
current_dir = os.path.dirname(os.path.abspath(__file__))
sequences = ['J01859.fna', 'NR_037066.fna', 'NR_040849.fna',
'NR_117152.fna', 'NR_132306.fna', 'NR_134817.fna', 'NR_134818.fna',
'NR_136784.fna', 'NR_148244.fna', 'NR_148787.fna', 'NR_152063.fna',
'KP317497.fna', 'NR_156072.fna']
names = ['Escherichia coli', 'T.Thermophilus', 'B.Wakoensis',
'T.Filiformis', 'T.Tengchongensis', 'S.Cameli', 'S.Tangierensis',
'T.amyloliquefaciens', 'B.Xiamenensis', 'B.Australimaris',
'S.Halotolerans', 'B.Maritimus', 'S.Himalayensis']
csv_mega = current_dir + '/sample_genomes/seqs_db1_distances.csv'
seq_file_full = current_dir + '/sample_genomes/seqs_db1.fasta'
results_file = current_dir + '/results/compare_features/db1'
sequences = ['L00016.fna', 'M22650.fna', 'M22651.fna', 'M22653.fna',
'M22654.fna', 'M22655.fna', 'M22656.fna', 'M22657.fna', 'V00658.fna',
'V00659.fna', 'V00672.fna', 'V00675.fna']
names = ['Human', 'Macaca mulatta', 'Macaca fuscata', 'Macaca fascicularis',
'Macaca sylvanus', 'Saimiri sciureus', 'Tarsius syrichta',
'Lemur catta', 'Gorilla', 'Hylobates', 'Chimpanzee', 'Sumatran Orangutan']
csv_mega = current_dir + '/sample_genomes/seqs_db2_distances.csv'
seq_file_full = current_dir + '/sample_genomes/seqs_db2.fasta'
results_file = current_dir + '/results/compare_features/db2'
sequences = ['V00662.fna', 'D38116.fna', 'D38113.fna', 'D38114.fna',
'D38115.fna', 'X99256.fna', 'Y18001.fna', 'X79547.fna', 'Y07726.fna',
'X63726.fna', 'X72004.fna', 'U20753.fna', 'X61145.fna', 'X72204.fna',
'V00654.fna', 'X14848.fna', 'V00711.fna', 'X83427.fna']
names = ['Human', 'Pygmy chimpanzee', 'Common chimpanzee', 'Gorilla',
'Orangutan', 'Gibbon', 'Baboon', 'Horse', 'White rhinoceros',
'Harbor seal', 'Gray seal', 'Cat', 'Fin whale', 'Blue whale', 'Cow',
'Rat', 'Mouse', 'Platypus']
csv_mega = current_dir + '/sample_genomes/seqs_db3_distances.csv'
seq_file_full = current_dir + '/sample_genomes/seqs_db3.fasta'
results_file = current_dir + '/results/compare_features/db3'
data_features_fos = []
data_features_glcm = []
data_features_lbp = []
data_features_mlbp = []
mapping_function_size = 6
f_out = open(seq_file_full, 'w')
for sequence_file in sequences:
    f_in = open(current_dir + '/sample_genomes/' + sequence_file, 'r')
    f_out.write(f_in.read())
    f_in.close()
    data = []
    fa_file = current_dir + '/sample_genomes/' + sequence_file
    seqs = SeqIO.parse(fa_file, 'fasta')
    for record in seqs:
        data.append(record.seq.upper())
    seq = data[0]
    temp_fos = []
    temp_glcm = []
    temp_lbp = []
    temp_mlbp = []
    for mapping_type in range(mapping_function_size):
        skewness, my_kurtosis, energy, entropy = get_features(seq, mapping_type)
        temp_fos.append([skewness, my_kurtosis, energy, entropy])
        entropy, contrast, energy, correlation, homogeneity = get_features_glcm(seq, mapping_type)
        temp_glcm.append([entropy, contrast, energy, correlation, homogeneity])
        hist_lbp = get_features_lbp(seq, mapping_type)
        temp_lbp.append(hist_lbp)
        hist_mlbp = get_features_mlbp(seq, mapping_type)
        temp_mlbp.append(hist_mlbp)
    data_features_fos.append(temp_fos)
    data_features_glcm.append(temp_glcm)
    data_features_lbp.append(temp_lbp)
    data_features_mlbp.append(temp_mlbp)
f_out.close()
data_features_fos = np.array(data_features_fos)
data_features_glcm = np.array(data_features_glcm)
data_features_lbp = np.array(data_features_lbp)
data_features_mlbp = np.array(data_features_mlbp)
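# For every descriptor and mapping function, a pairwise Euclidean distance
# matrix is built, normalized to [0, 1], and only its first row (distances
# from the first sequence to all the others) is kept for plotting.
# A more compact equivalent (just a sketch, not what runs here) could use
# scipy:
#   from scipy.spatial.distance import pdist, squareform
#   DIST = squareform(pdist(data_features_fos[:, mapping_type, :]))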
full_distances_fos = []
for mapping_type in range(mapping_function_size):
DIST_fos = np.zeros((data_features_fos.shape[0], data_features_fos.
shape[0]))
for i in range(data_features_fos.shape[0]):
row = np.zeros(data_features_fos.shape[0])
for j in range(i, data_features_fos.shape[0]):
dist = np.sqrt(np.sum((data_features_fos[i][mapping_type] -
data_features_fos[j][mapping_type]) ** 2))
row[j] = dist
DIST_fos[i] = row
DIST_fos = DIST_fos + DIST_fos.T - np.diag(np.diag(DIST_fos))
DIST_fos = (DIST_fos - np.min(DIST_fos)) / (np.max(DIST_fos) - np.min(
DIST_fos))
full_distances_fos.append(DIST_fos[0, 1:DIST_fos.shape[0]])
full_distances_fos = np.array(full_distances_fos)
print('full_distances_fos', full_distances_fos.shape)
full_distances_glcm = []
for mapping_type in range(mapping_function_size):
DIST_glcm = np.zeros((data_features_glcm.shape[0], data_features_glcm.
shape[0]))
for i in range(data_features_glcm.shape[0]):
row = np.zeros(data_features_glcm.shape[0])
for j in range(i, data_features_glcm.shape[0]):
dist = np.sqrt(np.sum((data_features_glcm[i][mapping_type] -
data_features_glcm[j][mapping_type]) ** 2))
row[j] = dist
DIST_glcm[i] = row
DIST_glcm = DIST_glcm + DIST_glcm.T - np.diag(np.diag(DIST_glcm))
DIST_glcm = (DIST_glcm - np.min(DIST_glcm)) / (np.max(DIST_glcm) - np.
min(DIST_glcm))
full_distances_glcm.append(DIST_glcm[0, 1:DIST_glcm.shape[0]])
full_distances_glcm = np.array(full_distances_glcm)
print('full_distances_glcm', full_distances_glcm.shape)
full_distances_lbp = []
for mapping_type in range(mapping_function_size):
DIST_lbp = np.zeros((data_features_lbp.shape[0], data_features_lbp.
shape[0]))
for i in range(data_features_lbp.shape[0]):
row = np.zeros(data_features_lbp.shape[0])
for j in range(i, data_features_lbp.shape[0]):
dist = np.sqrt(np.sum((data_features_lbp[i][mapping_type] -
data_features_lbp[j][mapping_type]) ** 2))
row[j] = dist
DIST_lbp[i] = row
DIST_lbp = DIST_lbp + DIST_lbp.T - np.diag(np.diag(DIST_lbp))
DIST_lbp = (DIST_lbp - np.min(DIST_lbp)) / (np.max(DIST_lbp) - np.min(
DIST_lbp))
full_distances_lbp.append(DIST_lbp[0, 1:DIST_lbp.shape[0]])
full_distances_lbp = np.array(full_distances_lbp)
print('full_distances_lbp', full_distances_lbp.shape)
full_distances_mlbp = []
for mapping_type in range(mapping_function_size):
DIST_mlbp = np.zeros((data_features_mlbp.shape[0], data_features_mlbp.
shape[0]))
for i in range(data_features_mlbp.shape[0]):
row = np.zeros(data_features_mlbp.shape[0])
for j in range(i, data_features_mlbp.shape[0]):
dist = np.sqrt(np.sum((data_features_mlbp[i][mapping_type] -
data_features_mlbp[j][mapping_type]) ** 2))
row[j] = dist
DIST_mlbp[i] = row
DIST_mlbp = DIST_mlbp + DIST_mlbp.T - np.diag(np.diag(DIST_mlbp))
DIST_mlbp = (DIST_mlbp - np.min(DIST_mlbp)) / (np.max(DIST_mlbp) - np.
min(DIST_mlbp))
full_distances_mlbp.append(DIST_mlbp[0, 1:DIST_mlbp.shape[0]])
full_distances_mlbp = np.array(full_distances_mlbp)
print('full_distances_mlbp', full_distances_mlbp.shape)
mega_dist_csv = pd.read_csv(csv_mega)
mega_dist_csv = mega_dist_csv.set_index(mega_dist_csv.columns[0])
DIST_mega = mega_dist_csv.values
DIST_mega[np.isnan(DIST_mega)] = 0
DIST_mega = DIST_mega + DIST_mega.T
distances_mega = DIST_mega[0, 1:DIST_mega.shape[0]]
distances_mega = (distances_mega - np.min(distances_mega)) / (np.max(
distances_mega) - np.min(distances_mega))
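# reference distances: first row of the symmetrized matrix exported from MEGA,
# normalized like the descriptor distances (the plot labels suggest it comes
# from a CLUSTALW alignment)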
names_temp = np.array(sequences)
names_temp = names_temp[1:names_temp.shape[0]]
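# the first sequence is the reference point, so it is excluded from the x-axis labels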
plt.clf()
fig, axs = plt.subplots(3, 2)
axs[0, 0].plot(names_temp, full_distances_fos[0], 'b--', label='FOS-MAP0')
axs[0, 0].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')
axs[0, 0].legend(loc='upper right', fontsize=6)
axs[0, 1].plot(names_temp, full_distances_fos[1], 'b--', label='FOS-MAP1')
axs[0, 1].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')
axs[0, 1].legend(loc='upper right', fontsize=6)
axs[1, 0].plot(names_temp, full_distances_fos[2], 'b--', label='FOS-MAP2')
axs[1, 0].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')
axs[1, 0].legend(loc='upper right', fontsize=6)
axs[1, 1].plot(names_temp, full_distances_fos[3], 'b--', label='FOS-MAP3')
axs[1, 1].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')
axs[1, 1].legend(loc='upper right', fontsize=6)
axs[2, 0].plot(names_temp, full_distances_fos[4], 'b--', label='FOS-MAP4')
axs[2, 0].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')
axs[2, 0].legend(loc='upper right', fontsize=6)
axs[2, 1].plot(names_temp, full_distances_fos[5], 'b--', label='FOS-MAP5')
axs[2, 1].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')
axs[2, 1].legend(loc='upper right', fontsize=6)
for ax in axs.flat:
ax.label_outer()
ax.yaxis.set_tick_params(labelsize=6)
plt.sca(ax)
plt.xticks(rotation=45, horizontalalignment='right', fontweight='light',
fontsize=6)
plt.xlabel('Sequences', fontsize=6)
fig.text(0.04, 0.5, 'Distances', va='center', rotation='vertical', fontsize=6)
plt.savefig(results_file + '_fos.png', dpi=200, bbox_inches='tight')
plt.clf()
fig, axs = plt.subplots(3, 2)
axs[0, 0].plot(names_temp, full_distances_glcm[0], 'b--', label='GLCM-MAP0')
axs[0, 0].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')
axs[0, 0].legend(loc='upper right', fontsize=6)
axs[0, 1].plot(names_temp, full_distances_glcm[1], 'b--', label='GLCM-MAP1')
axs[0, 1].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')
axs[0, 1].legend(loc='upper right', fontsize=6)
axs[1, 0].plot(names_temp, full_distances_glcm[2], 'b--', label='GLCM-MAP2')
axs[1, 0].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')
axs[1, 0].legend(loc='upper right', fontsize=6)
axs[1, 1].plot(names_temp, full_distances_glcm[3], 'b--', label='GLCM-MAP3')
axs[1, 1].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')
axs[1, 1].legend(loc='upper right', fontsize=6)
axs[2, 0].plot(names_temp, full_distances_glcm[4], 'b--', label='GLCM-MAP4')
axs[2, 0].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')
axs[2, 0].legend(loc='upper right', fontsize=6)
axs[2, 1].plot(names_temp, full_distances_glcm[5], 'b--', label='GLCM-MAP5')
axs[2, 1].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')
axs[2, 1].legend(loc='upper right', fontsize=6)
for ax in axs.flat:
ax.label_outer()
ax.yaxis.set_tick_params(labelsize=6)
plt.sca(ax)
plt.xticks(rotation=45, horizontalalignment='right', fontweight='light',
fontsize=6)
plt.xlabel('Sequences', fontsize=6)
fig.text(0.04, 0.5, 'Distances', va='center', rotation='vertical', fontsize=6)
plt.savefig(results_file + '_glcm.png', dpi=200, bbox_inches='tight')
plt.clf()
fig, axs = plt.subplots(3, 2)
axs[0, 0].plot(names_temp, full_distances_lbp[0], 'b--', label='LBP-MAP0')
axs[0, 0].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')
axs[0, 0].legend(loc='upper right', fontsize=6)
axs[0, 1].plot(names_temp, full_distances_lbp[1], 'b--', label='LBP-MAP1')
axs[0, 1].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')
axs[0, 1].legend(loc='upper right', fontsize=6)
axs[1, 0].plot(names_temp, full_distances_lbp[2], 'b--', label='LBP-MAP2')
axs[1, 0].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')
axs[1, 0].legend(loc='upper right', fontsize=6)
axs[1, 1].plot(names_temp, full_distances_lbp[3], 'b--', label='LBP-MAP3')
axs[1, 1].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')
axs[1, 1].legend(loc='upper right', fontsize=6)
axs[2, 0].plot(names_temp, full_distances_lbp[4], 'b--', label='LBP-MAP4')
axs[2, 0].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')
axs[2, 0].legend(loc='upper right', fontsize=6)
axs[2, 1].plot(names_temp, full_distances_lbp[5], 'b--', label='LBP-MAP5')
axs[2, 1].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')
axs[2, 1].legend(loc='upper right', fontsize=6)
for ax in axs.flat:
ax.label_outer()
ax.yaxis.set_tick_params(labelsize=6)
plt.sca(ax)
plt.xticks(rotation=45, horizontalalignment='right', fontweight='light',
fontsize=6)
plt.xlabel('Sequences', fontsize=6)
fig.text(0.04, 0.5, 'Distances', va='center', rotation='vertical', fontsize=6)
plt.savefig(results_file + '_lbp.png', dpi=200, bbox_inches='tight')
plt.clf()
fig, axs = plt.subplots(3, 2)
axs[0, 0].plot(names_temp, full_distances_mlbp[0], 'b--', label='MLBP-MAP0')
axs[0, 0].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')
axs[0, 0].legend(loc='upper right', fontsize=6)
axs[0, 1].plot(names_temp, full_distances_mlbp[1], 'b--', label='MLBP-MAP1')
axs[0, 1].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')
axs[0, 1].legend(loc='upper right', fontsize=6)
axs[1, 0].plot(names_temp, full_distances_mlbp[2], 'b--', label='MLBP-MAP2')
axs[1, 0].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')
axs[1, 0].legend(loc='upper right', fontsize=6)
axs[1, 1].plot(names_temp, full_distances_mlbp[3], 'b--', label='MLBP-MAP3')
axs[1, 1].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')
axs[1, 1].legend(loc='upper right', fontsize=6)
axs[2, 0].plot(names_temp, full_distances_mlbp[4], 'b--', label='MLBP-MAP4')
axs[2, 0].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')
axs[2, 0].legend(loc='upper right', fontsize=6)
axs[2, 1].plot(names_temp, full_distances_mlbp[5], 'b--', label='MLBP-MAP5')
axs[2, 1].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')
axs[2, 1].legend(loc='upper right', fontsize=6)
for ax in axs.flat:
ax.label_outer()
ax.yaxis.set_tick_params(labelsize=6)
plt.sca(ax)
plt.xticks(rotation=45, horizontalalignment='right', fontweight='light',
fontsize=6)
plt.xlabel('Sequences', fontsize=6)
fig.text(0.04, 0.5, 'Distances', va='center', rotation='vertical', fontsize=6)
plt.savefig(results_file + '_mlbp.png', dpi=200, bbox_inches='tight')
data_csv = []
error_fos = []
error_glcm = []
error_lbp = []
error_mlbp = []
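# mean squared error between each descriptor's normalized distances and the
# CLUSTALW reference, computed separately for every mapping function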
for mapping_type in range(mapping_function_size):
error_fos.append(np.sum((full_distances_fos[mapping_type] -
distances_mega) ** 2) / distances_mega.shape[0])
error_glcm.append(np.sum((full_distances_glcm[mapping_type] -
distances_mega) ** 2) / distances_mega.shape[0])
error_lbp.append(np.sum((full_distances_lbp[mapping_type] -
distances_mega) ** 2) / distances_mega.shape[0])
error_mlbp.append(np.sum((full_distances_mlbp[mapping_type] -
distances_mega) ** 2) / distances_mega.shape[0])
data_csv.append(error_fos)
data_csv.append(error_glcm)
data_csv.append(error_lbp)
data_csv.append(error_mlbp)
data_csv = np.array(data_csv)
df = pd.DataFrame(data=data_csv.T, index=['map0', 'map1', 'map2', 'map3',
'map4', 'map5'], columns=['FOS', 'GLCM', 'LBP', 'MLBP'])
print(df)
df.to_csv(results_file + '.csv', index=True)
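# the remaining figures compare the four descriptors (FOS, GLCM, LBP, MLBP)
# for one mapping function at a time (MAP0 to MAP5)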
plt.clf()
fig, axs = plt.subplots(2, 2)
axs[0, 0].plot(names_temp, full_distances_fos[0], 'b--', label='FOS-MAP0')
axs[0, 0].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')
axs[0, 0].legend(loc='upper right', fontsize=6)
axs[0, 1].plot(names_temp, full_distances_glcm[0], 'b--', label='GLCM-MAP0')
axs[0, 1].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')
axs[0, 1].legend(loc='upper right', fontsize=6)
axs[1, 0].plot(names_temp, full_distances_lbp[0], 'b--', label='LBP-MAP0')
axs[1, 0].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')
axs[1, 0].legend(loc='upper right', fontsize=6)
axs[1, 1].plot(names_temp, full_distances_mlbp[0], 'b--', label='MLBP-MAP0')
axs[1, 1].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')
axs[1, 1].legend(loc='upper right', fontsize=6)
for ax in axs.flat:
ax.label_outer()
ax.yaxis.set_tick_params(labelsize=6)
plt.sca(ax)
plt.xticks(rotation=45, horizontalalignment='right', fontweight='light',
fontsize=6)
plt.xlabel('Sequences', fontsize=6)
fig.text(0.04, 0.5, 'Distances', va='center', rotation='vertical', fontsize=6)
plt.savefig(results_file + '_0map.png', dpi=200, bbox_inches='tight')
plt.clf()
fig, axs = plt.subplots(2, 2)
axs[0, 0].plot(names_temp, full_distances_fos[1], 'b--', label='FOS-MAP1')
axs[0, 0].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')
axs[0, 0].legend(loc='upper right', fontsize=6)
axs[0, 1].plot(names_temp, full_distances_glcm[1], 'b--', label='GLCM-MAP1')
axs[0, 1].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')
axs[0, 1].legend(loc='upper right', fontsize=6)
axs[1, 0].plot(names_temp, full_distances_lbp[1], 'b--', label='LBP-MAP1')
axs[1, 0].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')
axs[1, 0].legend(loc='upper right', fontsize=6)
axs[1, 1].plot(names_temp, full_distances_mlbp[1], 'b--', label='MLBP-MAP1')
axs[1, 1].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')
axs[1, 1].legend(loc='upper right', fontsize=6)
for ax in axs.flat:
ax.label_outer()
ax.yaxis.set_tick_params(labelsize=6)
plt.sca(ax)
plt.xticks(rotation=45, horizontalalignment='right', fontweight='light',
fontsize=6)
plt.xlabel('Sequences', fontsize=6)
fig.text(0.04, 0.5, 'Distances', va='center', rotation='vertical', fontsize=6)
plt.savefig(results_file + '_1map.png', dpi=200, bbox_inches='tight')
plt.clf()
fig, axs = plt.subplots(2, 2)
axs[0, 0].plot(names_temp, full_distances_fos[2], 'b--', label='FOS-MAP2')
axs[0, 0].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')
axs[0, 0].legend(loc='upper right', fontsize=6)
axs[0, 1].plot(names_temp, full_distances_glcm[2], 'b--', label='GLCM-MAP2')
axs[0, 1].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')
axs[0, 1].legend(loc='upper right', fontsize=6)
axs[1, 0].plot(names_temp, full_distances_lbp[2], 'b--', label='LBP-MAP2')
axs[1, 0].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')
axs[1, 0].legend(loc='upper right', fontsize=6)
axs[1, 1].plot(names_temp, full_distances_mlbp[2], 'b--', label='MLBP-MAP2')
axs[1, 1].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')
axs[1, 1].legend(loc='upper right', fontsize=6)
for ax in axs.flat:
ax.label_outer()
ax.yaxis.set_tick_params(labelsize=6)
plt.sca(ax)
plt.xticks(rotation=45, horizontalalignment='right', fontweight='light',
fontsize=6)
plt.xlabel('Sequences', fontsize=6)
fig.text(0.04, 0.5, 'Distances', va='center', rotation='vertical', fontsize=6)
plt.savefig(results_file + '_2map.png', dpi=200, bbox_inches='tight')
plt.clf()
fig, axs = plt.subplots(2, 2)
axs[0, 0].plot(names_temp, full_distances_fos[3], 'b--', label='FOS-MAP3')
axs[0, 0].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')
axs[0, 0].legend(loc='upper right', fontsize=6)
axs[0, 1].plot(names_temp, full_distances_glcm[3], 'b--', label='GLCM-MAP3')
axs[0, 1].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')
axs[0, 1].legend(loc='upper right', fontsize=6)
axs[1, 0].plot(names_temp, full_distances_lbp[3], 'b--', label='LBP-MAP3')
axs[1, 0].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')
axs[1, 0].legend(loc='upper right', fontsize=6)
axs[1, 1].plot(names_temp, full_distances_mlbp[3], 'b--', label='MLBP-MAP3')
axs[1, 1].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')
axs[1, 1].legend(loc='upper right', fontsize=6)
for ax in axs.flat:
ax.label_outer()
ax.yaxis.set_tick_params(labelsize=6)
plt.sca(ax)
plt.xticks(rotation=45, horizontalalignment='right', fontweight='light',
fontsize=6)
plt.xlabel('Sequences', fontsize=6)
fig.text(0.04, 0.5, 'Distances', va='center', rotation='vertical', fontsize=6)
plt.savefig(results_file + '_3map.png', dpi=200, bbox_inches='tight')
plt.clf()
fig, axs = plt.subplots(2, 2)
axs[0, 0].plot(names_temp, full_distances_fos[4], 'b--', label='FOS-MAP4')
axs[0, 0].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')
axs[0, 0].legend(loc='upper right', fontsize=6)
axs[0, 1].plot(names_temp, full_distances_glcm[4], 'b--', label='GLCM-MAP4')
axs[0, 1].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')
axs[0, 1].legend(loc='upper right', fontsize=6)
axs[1, 0].plot(names_temp, full_distances_lbp[4], 'b--', label='LBP-MAP4')
axs[1, 0].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')
axs[1, 0].legend(loc='upper right', fontsize=6)
axs[1, 1].plot(names_temp, full_distances_mlbp[4], 'b--', label='MLBP-MAP4')
axs[1, 1].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')
axs[1, 1].legend(loc='upper right', fontsize=6)
for ax in axs.flat:
ax.label_outer()
ax.yaxis.set_tick_params(labelsize=6)
plt.sca(ax)
plt.xticks(rotation=45, horizontalalignment='right', fontweight='light',
fontsize=6)
plt.xlabel('Sequences', fontsize=6)
fig.text(0.04, 0.5, 'Distances', va='center', rotation='vertical', fontsize=6)
plt.savefig(results_file + '_4map.png', dpi=200, bbox_inches='tight')
plt.clf()
fig, axs = plt.subplots(2, 2)
axs[0, 0].plot(names_temp, full_distances_fos[5], 'b--', label='FOS-MAP5')
axs[0, 0].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')
axs[0, 0].legend(loc='upper right', fontsize=6)
axs[0, 1].plot(names_temp, full_distances_glcm[5], 'b--', label='GLCM-MAP5')
axs[0, 1].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')
axs[0, 1].legend(loc='upper right', fontsize=6)
axs[1, 0].plot(names_temp, full_distances_lbp[5], 'b--', label='LBP-MAP5')
axs[1, 0].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')
axs[1, 0].legend(loc='upper right', fontsize=6)
axs[1, 1].plot(names_temp, full_distances_mlbp[5], 'b--', label='MLBP-MAP5')
axs[1, 1].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')
axs[1, 1].legend(loc='upper right', fontsize=6)
for ax in axs.flat:
ax.label_outer()
ax.yaxis.set_tick_params(labelsize=6)
plt.sca(ax)
plt.xticks(rotation=45, horizontalalignment='right', fontweight='light',
fontsize=6)
plt.xlabel('Sequences', fontsize=6)
fig.text(0.04, 0.5, 'Distances', va='center', rotation='vertical', fontsize=6)
plt.savefig(results_file + '_5map.png', dpi=200, bbox_inches='tight')
<|reserved_special_token_1|>
# This script compares different base-to-number (base2number) mapping methods
from sklearn.model_selection import KFold
from sklearn.model_selection import train_test_split
#from matplotlib import pyplot as plt
#from matplotlib import cm
import matplotlib.pyplot as plt
import math
import os
import sys
import cv2
import numpy as np
from scipy.stats import kurtosis, skew
from Bio import SeqIO
import pandas as pd
import seaborn as sns
from descriptor import get_features
from descriptor import get_features_glcm
from descriptor import get_features_lbp
from descriptor import get_features_mlbp
from ete3 import PhyloTree, TreeStyle
from ete3 import Tree
from skbio import DistanceMatrix
from skbio.tree import nj
current_dir = os.path.dirname(os.path.abspath(__file__))
###################################################################################################################################
###################################################################################################################################
sequences = [ 'J01859.fna', 'NR_037066.fna', 'NR_040849.fna', 'NR_117152.fna', 'NR_132306.fna',
'NR_134817.fna', 'NR_134818.fna', 'NR_136784.fna', 'NR_148244.fna', 'NR_148787.fna',
'NR_152063.fna', 'KP317497.fna', 'NR_156072.fna' ]
names = [ 'Escherichia coli', 'T.Thermophilus', 'B.Wakoensis', 'T.Filiformis', 'T.Tengchongensis',
'S.Cameli', 'S.Tangierensis', 'T.amyloliquefaciens', 'B.Xiamenensis', 'B.Australimaris',
'S.Halotolerans', 'B.Maritimus', 'S.Himalayensis']
csv_mega = current_dir + "/sample_genomes/seqs_db1_distances.csv"
seq_file_full = current_dir + "/sample_genomes/seqs_db1.fasta"
results_file = current_dir + "/results/compare_features/db1"
###################################################################################################################################
###################################################################################################################################
sequences = [ 'L00016.fna', 'M22650.fna', 'M22651.fna', 'M22653.fna', 'M22654.fna',
'M22655.fna', 'M22656.fna', 'M22657.fna', 'V00658.fna', 'V00659.fna',
'V00672.fna', 'V00675.fna']
names = [ 'Human', 'Macaca mulatta', 'Macaca fuscata', 'Macaca fascicularis', 'Macaca sylvanus',
'Saimiri sciureus', 'Tarsius syrichta', 'Lemur catta', 'Gorilla', 'Hylobates',
'Chimpanzee', 'Sumatran Orangutan']
csv_mega = current_dir + "/sample_genomes/seqs_db2_distances.csv"
seq_file_full = current_dir + "/sample_genomes/seqs_db2.fasta"
results_file = current_dir + "/results/compare_features/db2"
###################################################################################################################################
###################################################################################################################################
sequences = [ 'V00662.fna', 'D38116.fna', 'D38113.fna', 'D38114.fna', 'D38115.fna',
'X99256.fna', 'Y18001.fna', 'X79547.fna', 'Y07726.fna', 'X63726.fna',
'X72004.fna', 'U20753.fna', 'X61145.fna', 'X72204.fna', 'V00654.fna',
'X14848.fna', 'V00711.fna', 'X83427.fna']
names = [ 'Human', 'Pygmy chimpanzee', 'Common chimpanzee', 'Gorilla', 'Orangutan',
'Gibbon', 'Baboon', 'Horse', 'White rhinoceros', 'Harbor seal',
'Gray seal', 'Cat', 'Fin whale', 'Blue whale', 'Cow',
'Rat', 'Mouse', 'Platypus']
csv_mega = current_dir + "/sample_genomes/seqs_db3_distances.csv"
seq_file_full = current_dir + "/sample_genomes/seqs_db3.fasta"
results_file = current_dir + "/results/compare_features/db3"
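# NOTE: the three dataset blocks above overwrite each other; only db3 is used in this run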
###################################################################################################################################
###################################################################################################################################
data_features_fos = []
data_features_glcm = []
data_features_lbp = []
data_features_mlbp = []
mapping_function_size = 6 # there are 6 types of mapping functions
f_out = open(seq_file_full, "w")
for sequence_file in sequences:
f_in = open(current_dir + "/sample_genomes/" + sequence_file, "r")
f_out.write(f_in.read())
f_in.close()
data = []
fa_file = current_dir + "/sample_genomes/" + sequence_file
seqs = SeqIO.parse(fa_file, "fasta")
for record in seqs:
data.append(record.seq.upper())
seq = data[0]
temp_fos = []
temp_glcm = []
temp_lbp = []
temp_mlbp = []
    # here we evaluate each mapping function
for mapping_type in range(mapping_function_size):
skewness, my_kurtosis, energy, entropy = get_features(seq, mapping_type)
temp_fos.append( [skewness, my_kurtosis, energy, entropy] )
        #print("fos mapping=", mapping_type, [skewness, my_kurtosis, energy, entropy])
entropy, contrast, energy, correlation, homogeneity = get_features_glcm(seq, mapping_type)
temp_glcm.append( [entropy, contrast, energy, correlation, homogeneity] )
#print("glcm mapping=",mapping_type, [entropy, contrast, energy, correlation, homogeneity])
hist_lbp = get_features_lbp(seq, mapping_type)
temp_lbp.append( hist_lbp )
#print("lbp mapping=",mapping_type, hist_lbp)
hist_mlbp = get_features_mlbp(seq, mapping_type)
temp_mlbp.append( hist_mlbp )
#print("mlbp mapping=",mapping_type, hist_mlbp)
data_features_fos.append(temp_fos)
data_features_glcm.append(temp_glcm)
data_features_lbp.append(temp_lbp)
data_features_mlbp.append(temp_mlbp)
f_out.close()
data_features_fos = np.array(data_features_fos)
data_features_glcm = np.array(data_features_glcm)
data_features_lbp = np.array(data_features_lbp)
data_features_mlbp = np.array(data_features_mlbp)
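# resulting shapes: (n_sequences, n_mapping_functions, n_features) for FOS and
# GLCM; LBP and MLBP store one histogram per sequence and mapping function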
###################################################################################################################
# compute the FOS distance matrices (one per mapping function)
###################################################################################################################
full_distances_fos = []
for mapping_type in range(mapping_function_size):
DIST_fos = np.zeros((data_features_fos.shape[0], data_features_fos.shape[0]))
for i in range(data_features_fos.shape[0]):
row = np.zeros(data_features_fos.shape[0])
for j in range(i, data_features_fos.shape[0]):
dist = np.sqrt(np.sum((data_features_fos[i][mapping_type] - data_features_fos[j][mapping_type])**2))
row[j] = dist
DIST_fos[i] = row
DIST_fos = DIST_fos + DIST_fos.T - np.diag(np.diag(DIST_fos))
DIST_fos = (DIST_fos - np.min(DIST_fos)) / (np.max(DIST_fos) - np.min(DIST_fos))
full_distances_fos.append( DIST_fos[0,1:DIST_fos.shape[0]] )
full_distances_fos = np.array(full_distances_fos)
print("full_distances_fos", full_distances_fos.shape)
###################################################################################################################
# compute the GLCM distance matrices (one per mapping function)
###################################################################################################################
full_distances_glcm = []
for mapping_type in range(mapping_function_size):
DIST_glcm = np.zeros((data_features_glcm.shape[0], data_features_glcm.shape[0]))
for i in range(data_features_glcm.shape[0]):
row = np.zeros(data_features_glcm.shape[0])
for j in range(i, data_features_glcm.shape[0]):
dist = np.sqrt(np.sum((data_features_glcm[i][mapping_type] - data_features_glcm[j][mapping_type])**2))
row[j] = dist
DIST_glcm[i] = row
DIST_glcm = DIST_glcm + DIST_glcm.T - np.diag(np.diag(DIST_glcm))
DIST_glcm = (DIST_glcm - np.min(DIST_glcm)) / (np.max(DIST_glcm) - np.min(DIST_glcm))
full_distances_glcm.append( DIST_glcm[0,1:DIST_glcm.shape[0]] )
full_distances_glcm = np.array(full_distances_glcm)
print("full_distances_glcm", full_distances_glcm.shape)
###################################################################################################################
# compute the LBP distance matrices (one per mapping function)
###################################################################################################################
full_distances_lbp = []
for mapping_type in range(mapping_function_size):
DIST_lbp = np.zeros((data_features_lbp.shape[0], data_features_lbp.shape[0]))
for i in range(data_features_lbp.shape[0]):
row = np.zeros(data_features_lbp.shape[0])
for j in range(i, data_features_lbp.shape[0]):
dist = np.sqrt(np.sum((data_features_lbp[i][mapping_type] - data_features_lbp[j][mapping_type])**2))
row[j] = dist
DIST_lbp[i] = row
DIST_lbp = DIST_lbp + DIST_lbp.T - np.diag(np.diag(DIST_lbp))
DIST_lbp = (DIST_lbp - np.min(DIST_lbp)) / (np.max(DIST_lbp) - np.min(DIST_lbp))
full_distances_lbp.append( DIST_lbp[0,1:DIST_lbp.shape[0]] )
full_distances_lbp = np.array(full_distances_lbp)
print("full_distances_lbp", full_distances_lbp.shape)
###################################################################################################################
# compute the MLBP distance matrices (one per mapping function)
###################################################################################################################
full_distances_mlbp = []
for mapping_type in range(mapping_function_size):
DIST_mlbp = np.zeros((data_features_mlbp.shape[0], data_features_mlbp.shape[0]))
for i in range(data_features_mlbp.shape[0]):
row = np.zeros(data_features_mlbp.shape[0])
for j in range(i, data_features_mlbp.shape[0]):
dist = np.sqrt(np.sum((data_features_mlbp[i][mapping_type] - data_features_mlbp[j][mapping_type])**2))
row[j] = dist
DIST_mlbp[i] = row
DIST_mlbp = DIST_mlbp + DIST_mlbp.T - np.diag(np.diag(DIST_mlbp))
DIST_mlbp = (DIST_mlbp - np.min(DIST_mlbp)) / (np.max(DIST_mlbp) - np.min(DIST_mlbp))
full_distances_mlbp.append( DIST_mlbp[0,1:DIST_mlbp.shape[0]] )
full_distances_mlbp = np.array(full_distances_mlbp)
print("full_distances_mlbp", full_distances_mlbp.shape)
###################################################################################################################
### distances from mega ###########################################################
###################################################################################################################
mega_dist_csv = pd.read_csv(csv_mega)
mega_dist_csv = mega_dist_csv.set_index(mega_dist_csv.columns[0])
DIST_mega = mega_dist_csv.values
DIST_mega[np.isnan(DIST_mega)] = 0 # fill NaN values with zeros
DIST_mega = DIST_mega + DIST_mega.T # mirror the lower triangle into the upper triangle of the matrix
distances_mega = DIST_mega[0,1:DIST_mega.shape[0]]
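# normalize to [0, 1] so the reference is comparable with the descriptor distances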
distances_mega = (distances_mega - np.min(distances_mega)) / (np.max(distances_mega) - np.min(distances_mega))
###################################################################################################################
###################################################################################################################
names_temp = np.array(sequences)
names_temp = names_temp[1:names_temp.shape[0]] # drop the first element (the reference sequence)
###################################################################################################################
# plot the FOS distances for each mapping function against the CLUSTALW reference
###################################################################################################################
plt.clf()
fig, axs = plt.subplots(3,2)
axs[0,0].plot(names_temp, full_distances_fos[0], 'b--', label='FOS-MAP0')
axs[0,0].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')
axs[0,0].legend(loc='upper right', fontsize=6)
axs[0,1].plot(names_temp, full_distances_fos[1], 'b--', label='FOS-MAP1')
axs[0,1].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')
axs[0,1].legend(loc='upper right', fontsize=6)
axs[1,0].plot(names_temp, full_distances_fos[2], 'b--', label='FOS-MAP2')
axs[1,0].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')
axs[1,0].legend(loc='upper right', fontsize=6)
axs[1,1].plot(names_temp, full_distances_fos[3], 'b--', label='FOS-MAP3')
axs[1,1].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')
axs[1,1].legend(loc='upper right', fontsize=6)
axs[2,0].plot(names_temp, full_distances_fos[4], 'b--', label='FOS-MAP4')
axs[2,0].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')
axs[2,0].legend(loc='upper right', fontsize=6)
axs[2,1].plot(names_temp, full_distances_fos[5], 'b--', label='FOS-MAP5')
axs[2,1].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')
axs[2,1].legend(loc='upper right', fontsize=6)
for ax in axs.flat:
ax.label_outer()
ax.yaxis.set_tick_params(labelsize=6)
plt.sca(ax)
plt.xticks(rotation=45, horizontalalignment='right', fontweight='light', fontsize=6 )
plt.xlabel('Sequences', fontsize=6)
fig.text(0.04, 0.5, 'Distances', va='center', rotation='vertical', fontsize=6 )
plt.savefig( results_file + "_fos.png", dpi = 200, bbox_inches='tight')
###################################################################################################################
# plot the GLCM distances for each mapping function against the CLUSTALW reference
###################################################################################################################
plt.clf()
fig, axs = plt.subplots(3,2)
axs[0,0].plot(names_temp, full_distances_glcm[0], 'b--', label='GLCM-MAP0')
axs[0,0].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')
axs[0,0].legend(loc='upper right', fontsize=6)
axs[0,1].plot(names_temp, full_distances_glcm[1], 'b--', label='GLCM-MAP1')
axs[0,1].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')
axs[0,1].legend(loc='upper right', fontsize=6)
axs[1,0].plot(names_temp, full_distances_glcm[2], 'b--', label='GLCM-MAP2')
axs[1,0].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')
axs[1,0].legend(loc='upper right', fontsize=6)
axs[1,1].plot(names_temp, full_distances_glcm[3], 'b--', label='GLCM-MAP3')
axs[1,1].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')
axs[1,1].legend(loc='upper right', fontsize=6)
axs[2,0].plot(names_temp, full_distances_glcm[4], 'b--', label='GLCM-MAP4')
axs[2,0].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')
axs[2,0].legend(loc='upper right', fontsize=6)
axs[2,1].plot(names_temp, full_distances_glcm[5], 'b--', label='GLCM-MAP5')
axs[2,1].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')
axs[2,1].legend(loc='upper right', fontsize=6)
for ax in axs.flat:
ax.label_outer()
ax.yaxis.set_tick_params(labelsize=6)
plt.sca(ax)
plt.xticks(rotation=45, horizontalalignment='right', fontweight='light', fontsize=6 )
plt.xlabel('Sequences', fontsize=6)
fig.text(0.04, 0.5, 'Distances', va='center', rotation='vertical', fontsize=6 )
plt.savefig( results_file + "_glcm.png", dpi = 200, bbox_inches='tight')
###################################################################################################################
# plot the LBP distances for each mapping function against the CLUSTALW reference
###################################################################################################################
plt.clf()
fig, axs = plt.subplots(3,2)
axs[0,0].plot(names_temp, full_distances_lbp[0], 'b--', label='LBP-MAP0')
axs[0,0].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')
axs[0,0].legend(loc='upper right', fontsize=6)
axs[0,1].plot(names_temp, full_distances_lbp[1], 'b--', label='LBP-MAP1')
axs[0,1].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')
axs[0,1].legend(loc='upper right', fontsize=6)
axs[1,0].plot(names_temp, full_distances_lbp[2], 'b--', label='LBP-MAP2')
axs[1,0].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')
axs[1,0].legend(loc='upper right', fontsize=6)
axs[1,1].plot(names_temp, full_distances_lbp[3], 'b--', label='LBP-MAP3')
axs[1,1].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')
axs[1,1].legend(loc='upper right', fontsize=6)
axs[2,0].plot(names_temp, full_distances_lbp[4], 'b--', label='LBP-MAP4')
axs[2,0].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')
axs[2,0].legend(loc='upper right', fontsize=6)
axs[2,1].plot(names_temp, full_distances_lbp[5], 'b--', label='LBP-MAP5')
axs[2,1].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')
axs[2,1].legend(loc='upper right', fontsize=6)
for ax in axs.flat:
ax.label_outer()
ax.yaxis.set_tick_params(labelsize=6)
plt.sca(ax)
plt.xticks(rotation=45, horizontalalignment='right', fontweight='light', fontsize=6 )
plt.xlabel('Sequences', fontsize=6)
fig.text(0.04, 0.5, 'Distances', va='center', rotation='vertical', fontsize=6 )
plt.savefig( results_file + "_lbp.png", dpi = 200, bbox_inches='tight')
###################################################################################################################
# plot the MLBP distances for each mapping function against the CLUSTALW reference
###################################################################################################################
plt.clf()
fig, axs = plt.subplots(3,2)
axs[0,0].plot(names_temp, full_distances_mlbp[0], 'b--', label='MLBP-MAP0')
axs[0,0].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')
axs[0,0].legend(loc='upper right', fontsize=6)
axs[0,1].plot(names_temp, full_distances_mlbp[1], 'b--', label='MLBP-MAP1')
axs[0,1].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')
axs[0,1].legend(loc='upper right', fontsize=6)
axs[1,0].plot(names_temp, full_distances_mlbp[2], 'b--', label='MLBP-MAP2')
axs[1,0].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')
axs[1,0].legend(loc='upper right', fontsize=6)
axs[1,1].plot(names_temp, full_distances_mlbp[3], 'b--', label='MLBP-MAP3')
axs[1,1].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')
axs[1,1].legend(loc='upper right', fontsize=6)
axs[2,0].plot(names_temp, full_distances_mlbp[4], 'b--', label='MLBP-MAP4')
axs[2,0].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')
axs[2,0].legend(loc='upper right', fontsize=6)
axs[2,1].plot(names_temp, full_distances_mlbp[5], 'b--', label='MLBP-MAP5')
axs[2,1].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')
axs[2,1].legend(loc='upper right', fontsize=6)
for ax in axs.flat:
ax.label_outer()
ax.yaxis.set_tick_params(labelsize=6)
plt.sca(ax)
plt.xticks(rotation=45, horizontalalignment='right', fontweight='light', fontsize=6 )
plt.xlabel('Sequences', fontsize=6)
fig.text(0.04, 0.5, 'Distances', va='center', rotation='vertical', fontsize=6 )
plt.savefig( results_file + "_mlbp.png", dpi = 200, bbox_inches='tight')
data_csv = []
error_fos = [] # stores the error for each mapping function with FOS
error_glcm = [] # stores the error for each mapping function with GLCM
error_lbp = [] # stores the error for each mapping function with LBP
error_mlbp = [] # stores the error for each mapping function with MLBP
for mapping_type in range(mapping_function_size):
error_fos.append((np.sum((full_distances_fos[mapping_type] - distances_mega)**2))/distances_mega.shape[0])
error_glcm.append((np.sum((full_distances_glcm[mapping_type] - distances_mega)**2))/distances_mega.shape[0])
error_lbp.append((np.sum((full_distances_lbp[mapping_type] - distances_mega)**2))/distances_mega.shape[0])
error_mlbp.append((np.sum((full_distances_mlbp[mapping_type] - distances_mega)**2))/distances_mega.shape[0])
data_csv.append(error_fos)
data_csv.append(error_glcm)
data_csv.append(error_lbp)
data_csv.append(error_mlbp)
data_csv = np.array(data_csv)
df = pd.DataFrame(data=data_csv.T, index=["map0", "map1", "map2", "map3", "map4", "map5"], columns=["FOS", "GLCM", "LBP", "MLBP"])
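# rows = mapping functions, columns = descriptors; each cell is the MSE against
# the CLUSTALW reference distances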
print(df)
df.to_csv(results_file + ".csv", index=True)
#print(error_fos)
#print(error_glcm)
#print(error_lbp)
#print(error_mlbp)
###################################################################################################################
# compare all descriptors (FOS, GLCM, LBP, MLBP) using mapping function 0
###################################################################################################################
plt.clf()
fig, axs = plt.subplots(2,2)
axs[0,0].plot(names_temp, full_distances_fos[0], 'b--', label='FOS-MAP0')
axs[0,0].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')
axs[0,0].legend(loc='upper right', fontsize=6)
axs[0,1].plot(names_temp, full_distances_glcm[0], 'b--', label='GLCM-MAP0')
axs[0,1].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')
axs[0,1].legend(loc='upper right', fontsize=6)
axs[1,0].plot(names_temp, full_distances_lbp[0], 'b--', label='LBP-MAP0')
axs[1,0].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')
axs[1,0].legend(loc='upper right', fontsize=6)
axs[1,1].plot(names_temp, full_distances_mlbp[0], 'b--', label='MLBP-MAP0')
axs[1,1].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')
axs[1,1].legend(loc='upper right', fontsize=6)
for ax in axs.flat:
ax.label_outer()
ax.yaxis.set_tick_params(labelsize=6)
plt.sca(ax)
plt.xticks(rotation=45, horizontalalignment='right', fontweight='light', fontsize=6 )
plt.xlabel('Sequences', fontsize=6)
fig.text(0.04, 0.5, 'Distances', va='center', rotation='vertical', fontsize=6 )
plt.savefig( results_file + "_0map.png", dpi = 200, bbox_inches='tight')
###################################################################################################################
# compare all descriptors (FOS, GLCM, LBP, MLBP) using mapping function 1
###################################################################################################################
plt.clf()
fig, axs = plt.subplots(2,2)
axs[0,0].plot(names_temp, full_distances_fos[1], 'b--', label='FOS-MAP1')
axs[0,0].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')
axs[0,0].legend(loc='upper right', fontsize=6)
axs[0,1].plot(names_temp, full_distances_glcm[1], 'b--', label='GLCM-MAP1')
axs[0,1].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')
axs[0,1].legend(loc='upper right', fontsize=6)
axs[1,0].plot(names_temp, full_distances_lbp[1], 'b--', label='LBP-MAP1')
axs[1,0].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')
axs[1,0].legend(loc='upper right', fontsize=6)
axs[1,1].plot(names_temp, full_distances_mlbp[1], 'b--', label='MLBP-MAP1')
axs[1,1].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')
axs[1,1].legend(loc='upper right', fontsize=6)
for ax in axs.flat:
ax.label_outer()
ax.yaxis.set_tick_params(labelsize=6)
plt.sca(ax)
plt.xticks(rotation=45, horizontalalignment='right', fontweight='light', fontsize=6 )
plt.xlabel('Sequences', fontsize=6)
fig.text(0.04, 0.5, 'Distances', va='center', rotation='vertical', fontsize=6 )
plt.savefig( results_file + "_1map.png", dpi = 200, bbox_inches='tight')
###################################################################################################################
# compare all descriptors (FOS, GLCM, LBP, MLBP) using mapping function 2
###################################################################################################################
plt.clf()
fig, axs = plt.subplots(2,2)
axs[0,0].plot(names_temp, full_distances_fos[2], 'b--', label='FOS-MAP2')
axs[0,0].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')
axs[0,0].legend(loc='upper right', fontsize=6)
axs[0,1].plot(names_temp, full_distances_glcm[2], 'b--', label='GLCM-MAP2')
axs[0,1].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')
axs[0,1].legend(loc='upper right', fontsize=6)
axs[1,0].plot(names_temp, full_distances_lbp[2], 'b--', label='LBP-MAP2')
axs[1,0].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')
axs[1,0].legend(loc='upper right', fontsize=6)
axs[1,1].plot(names_temp, full_distances_mlbp[2], 'b--', label='MLBP-MAP2')
axs[1,1].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')
axs[1,1].legend(loc='upper right', fontsize=6)
for ax in axs.flat:
ax.label_outer()
ax.yaxis.set_tick_params(labelsize=6)
plt.sca(ax)
plt.xticks(rotation=45, horizontalalignment='right', fontweight='light', fontsize=6 )
plt.xlabel('Sequences', fontsize=6)
fig.text(0.04, 0.5, 'Distances', va='center', rotation='vertical', fontsize=6 )
plt.savefig( results_file + "_2map.png", dpi = 200, bbox_inches='tight')
###################################################################################################################
# compare all descriptors (FOS, GLCM, LBP, MLBP) using mapping function 3
###################################################################################################################
plt.clf()
fig, axs = plt.subplots(2,2)
axs[0,0].plot(names_temp, full_distances_fos[3], 'b--', label='FOS-MAP3')
axs[0,0].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')
axs[0,0].legend(loc='upper right', fontsize=6)
axs[0,1].plot(names_temp, full_distances_glcm[3], 'b--', label='GLCM-MAP3')
axs[0,1].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')
axs[0,1].legend(loc='upper right', fontsize=6)
axs[1,0].plot(names_temp, full_distances_lbp[3], 'b--', label='LBP-MAP3')
axs[1,0].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')
axs[1,0].legend(loc='upper right', fontsize=6)
axs[1,1].plot(names_temp, full_distances_mlbp[3], 'b--', label='MLBP-MAP3')
axs[1,1].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')
axs[1,1].legend(loc='upper right', fontsize=6)
for ax in axs.flat:
ax.label_outer()
ax.yaxis.set_tick_params(labelsize=6)
plt.sca(ax)
plt.xticks(rotation=45, horizontalalignment='right', fontweight='light', fontsize=6 )
plt.xlabel('Sequences', fontsize=6)
fig.text(0.04, 0.5, 'Distances', va='center', rotation='vertical', fontsize=6 )
plt.savefig( results_file + "_3map.png", dpi = 200, bbox_inches='tight')
###################################################################################################################
# compare all descriptors (FOS, GLCM, LBP, MLBP) using mapping function 4
###################################################################################################################
plt.clf()
fig, axs = plt.subplots(2,2)
axs[0,0].plot(names_temp, full_distances_fos[4], 'b--', label='FOS-MAP4')
axs[0,0].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')
axs[0,0].legend(loc='upper right', fontsize=6)
axs[0,1].plot(names_temp, full_distances_glcm[4], 'b--', label='GLCM-MAP4')
axs[0,1].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')
axs[0,1].legend(loc='upper right', fontsize=6)
axs[1,0].plot(names_temp, full_distances_lbp[4], 'b--', label='LBP-MAP4')
axs[1,0].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')
axs[1,0].legend(loc='upper right', fontsize=6)
axs[1,1].plot(names_temp, full_distances_mlbp[4], 'b--', label='MLBP-MAP4')
axs[1,1].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')
axs[1,1].legend(loc='upper right', fontsize=6)
for ax in axs.flat:
ax.label_outer()
ax.yaxis.set_tick_params(labelsize=6)
plt.sca(ax)
plt.xticks(rotation=45, horizontalalignment='right', fontweight='light', fontsize=6 )
plt.xlabel('Sequences', fontsize=6)
fig.text(0.04, 0.5, 'Distances', va='center', rotation='vertical', fontsize=6 )
plt.savefig( results_file + "_4map.png", dpi = 200, bbox_inches='tight')
###################################################################################################################
# compare all descriptors (FOS, GLCM, LBP, MLBP) using mapping function 5
###################################################################################################################
plt.clf()
fig, axs = plt.subplots(2,2)
axs[0,0].plot(names_temp, full_distances_fos[5], 'b--', label='FOS-MAP5')
axs[0,0].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')
axs[0,0].legend(loc='upper right', fontsize=6)
axs[0,1].plot(names_temp, full_distances_glcm[5], 'b--', label='GLCM-MAP5')
axs[0,1].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')
axs[0,1].legend(loc='upper right', fontsize=6)
axs[1,0].plot(names_temp, full_distances_lbp[5], 'b--', label='LBP-MAP5')
axs[1,0].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')
axs[1,0].legend(loc='upper right', fontsize=6)
axs[1,1].plot(names_temp, full_distances_mlbp[5], 'b--', label='MLBP-MAP5')
axs[1,1].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')
axs[1,1].legend(loc='upper right', fontsize=6)
for ax in axs.flat:
ax.label_outer()
ax.yaxis.set_tick_params(labelsize=6)
plt.sca(ax)
plt.xticks(rotation=45, horizontalalignment='right', fontweight='light', fontsize=6 )
plt.xlabel('Sequences', fontsize=6)
fig.text(0.04, 0.5, 'Distances', va='center', rotation='vertical', fontsize=6 )
plt.savefig( results_file + "_5map.png", dpi = 200, bbox_inches='tight')
|
flexible
|
{
"blob_id": "9696e5799d46adb5b92c0900e2064b927addfd93",
"index": 2506,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nfor sequence_file in sequences:\n f_in = open(current_dir + '/sample_genomes/' + sequence_file, 'r')\n f_out.write(f_in.read())\n f_in.close()\n data = []\n fa_file = current_dir + '/sample_genomes/' + sequence_file\n seqs = SeqIO.parse(fa_file, 'fasta')\n for record in seqs:\n data.append(record.seq.upper())\n seq = data[0]\n temp_fos = []\n temp_glcm = []\n temp_lbp = []\n temp_mlbp = []\n for mapping_type in range(mapping_function_size):\n skewness, my_kurtosis, energy, entropy = get_features(seq, mapping_type\n )\n temp_fos.append([skewness, my_kurtosis, energy, entropy])\n entropy, contrast, energy, correlation, homogeneity = (\n get_features_glcm(seq, mapping_type))\n temp_glcm.append([entropy, contrast, energy, correlation, homogeneity])\n hist_lbp = get_features_lbp(seq, mapping_type)\n temp_lbp.append(hist_lbp)\n hist_mlbp = get_features_mlbp(seq, mapping_type)\n temp_mlbp.append(hist_mlbp)\n data_features_fos.append(temp_fos)\n data_features_glcm.append(temp_glcm)\n data_features_lbp.append(temp_lbp)\n data_features_mlbp.append(temp_mlbp)\nf_out.close()\n<mask token>\nfor mapping_type in range(mapping_function_size):\n DIST_fos = np.zeros((data_features_fos.shape[0], data_features_fos.\n shape[0]))\n for i in range(data_features_fos.shape[0]):\n row = np.zeros(data_features_fos.shape[0])\n for j in range(i, data_features_fos.shape[0]):\n dist = np.sqrt(np.sum((data_features_fos[i][mapping_type] -\n data_features_fos[j][mapping_type]) ** 2))\n row[j] = dist\n DIST_fos[i] = row\n DIST_fos = DIST_fos + DIST_fos.T - np.diag(np.diag(DIST_fos))\n DIST_fos = (DIST_fos - np.min(DIST_fos)) / (np.max(DIST_fos) - np.min(\n DIST_fos))\n full_distances_fos.append(DIST_fos[0, 1:DIST_fos.shape[0]])\n<mask token>\nprint('full_distances_fos', full_distances_fos.shape)\n<mask token>\nfor mapping_type in range(mapping_function_size):\n DIST_glcm = np.zeros((data_features_glcm.shape[0], data_features_glcm.\n shape[0]))\n for i in range(data_features_glcm.shape[0]):\n row = np.zeros(data_features_glcm.shape[0])\n for j in range(i, data_features_glcm.shape[0]):\n dist = np.sqrt(np.sum((data_features_glcm[i][mapping_type] -\n data_features_glcm[j][mapping_type]) ** 2))\n row[j] = dist\n DIST_glcm[i] = row\n DIST_glcm = DIST_glcm + DIST_glcm.T - np.diag(np.diag(DIST_glcm))\n DIST_glcm = (DIST_glcm - np.min(DIST_glcm)) / (np.max(DIST_glcm) - np.\n min(DIST_glcm))\n full_distances_glcm.append(DIST_glcm[0, 1:DIST_glcm.shape[0]])\n<mask token>\nprint('full_distances_glcm', full_distances_glcm.shape)\n<mask token>\nfor mapping_type in range(mapping_function_size):\n DIST_lbp = np.zeros((data_features_lbp.shape[0], data_features_lbp.\n shape[0]))\n for i in range(data_features_lbp.shape[0]):\n row = np.zeros(data_features_lbp.shape[0])\n for j in range(i, data_features_lbp.shape[0]):\n dist = np.sqrt(np.sum((data_features_lbp[i][mapping_type] -\n data_features_lbp[j][mapping_type]) ** 2))\n row[j] = dist\n DIST_lbp[i] = row\n DIST_lbp = DIST_lbp + DIST_lbp.T - np.diag(np.diag(DIST_lbp))\n DIST_lbp = (DIST_lbp - np.min(DIST_lbp)) / (np.max(DIST_lbp) - np.min(\n DIST_lbp))\n full_distances_lbp.append(DIST_lbp[0, 1:DIST_lbp.shape[0]])\n<mask token>\nprint('full_distances_lbp', full_distances_lbp.shape)\n<mask token>\nfor mapping_type in range(mapping_function_size):\n DIST_mlbp = np.zeros((data_features_mlbp.shape[0], data_features_mlbp.\n shape[0]))\n for i in range(data_features_mlbp.shape[0]):\n row = np.zeros(data_features_mlbp.shape[0])\n for j in range(i, 
data_features_mlbp.shape[0]):\n dist = np.sqrt(np.sum((data_features_mlbp[i][mapping_type] -\n data_features_mlbp[j][mapping_type]) ** 2))\n row[j] = dist\n DIST_mlbp[i] = row\n DIST_mlbp = DIST_mlbp + DIST_mlbp.T - np.diag(np.diag(DIST_mlbp))\n DIST_mlbp = (DIST_mlbp - np.min(DIST_mlbp)) / (np.max(DIST_mlbp) - np.\n min(DIST_mlbp))\n full_distances_mlbp.append(DIST_mlbp[0, 1:DIST_mlbp.shape[0]])\n<mask token>\nprint('full_distances_mlbp', full_distances_mlbp.shape)\n<mask token>\nplt.clf()\n<mask token>\naxs[0, 0].plot(names_temp, full_distances_fos[0], 'b--', label='FOS-MAP0')\naxs[0, 0].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[0, 0].legend(loc='upper right', fontsize=6)\naxs[0, 1].plot(names_temp, full_distances_fos[1], 'b--', label='FOS-MAP1')\naxs[0, 1].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[0, 1].legend(loc='upper right', fontsize=6)\naxs[1, 0].plot(names_temp, full_distances_fos[2], 'b--', label='FOS-MAP2')\naxs[1, 0].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[1, 0].legend(loc='upper right', fontsize=6)\naxs[1, 1].plot(names_temp, full_distances_fos[3], 'b--', label='FOS-MAP3')\naxs[1, 1].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[1, 1].legend(loc='upper right', fontsize=6)\naxs[2, 0].plot(names_temp, full_distances_fos[4], 'b--', label='FOS-MAP4')\naxs[2, 0].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[2, 0].legend(loc='upper right', fontsize=6)\naxs[2, 1].plot(names_temp, full_distances_fos[5], 'b--', label='FOS-MAP5')\naxs[2, 1].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[2, 1].legend(loc='upper right', fontsize=6)\nfor ax in axs.flat:\n ax.label_outer()\n ax.yaxis.set_tick_params(labelsize=6)\n plt.sca(ax)\n plt.xticks(rotation=45, horizontalalignment='right', fontweight='light',\n fontsize=6)\n plt.xlabel('Sequences', fontsize=6)\nfig.text(0.04, 0.5, 'Distances', va='center', rotation='vertical', fontsize=6)\nplt.savefig(results_file + '_fos.png', dpi=200, bbox_inches='tight')\nplt.clf()\n<mask token>\naxs[0, 0].plot(names_temp, full_distances_glcm[0], 'b--', label='GLCM-MAP0')\naxs[0, 0].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[0, 0].legend(loc='upper right', fontsize=6)\naxs[0, 1].plot(names_temp, full_distances_glcm[1], 'b--', label='GLCM-MAP1')\naxs[0, 1].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[0, 1].legend(loc='upper right', fontsize=6)\naxs[1, 0].plot(names_temp, full_distances_glcm[2], 'b--', label='GLCM-MAP2')\naxs[1, 0].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[1, 0].legend(loc='upper right', fontsize=6)\naxs[1, 1].plot(names_temp, full_distances_glcm[3], 'b--', label='GLCM-MAP3')\naxs[1, 1].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[1, 1].legend(loc='upper right', fontsize=6)\naxs[2, 0].plot(names_temp, full_distances_glcm[4], 'b--', label='GLCM-MAP4')\naxs[2, 0].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[2, 0].legend(loc='upper right', fontsize=6)\naxs[2, 1].plot(names_temp, full_distances_glcm[5], 'b--', label='GLCM-MAP5')\naxs[2, 1].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[2, 1].legend(loc='upper right', fontsize=6)\nfor ax in axs.flat:\n ax.label_outer()\n ax.yaxis.set_tick_params(labelsize=6)\n plt.sca(ax)\n plt.xticks(rotation=45, horizontalalignment='right', fontweight='light',\n fontsize=6)\n plt.xlabel('Sequences', fontsize=6)\nfig.text(0.04, 0.5, 'Distances', va='center', rotation='vertical', 
fontsize=6)\nplt.savefig(results_file + '_glcm.png', dpi=200, bbox_inches='tight')\nplt.clf()\n<mask token>\naxs[0, 0].plot(names_temp, full_distances_lbp[0], 'b--', label='LBP-MAP0')\naxs[0, 0].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[0, 0].legend(loc='upper right', fontsize=6)\naxs[0, 1].plot(names_temp, full_distances_lbp[1], 'b--', label='LBP-MAP1')\naxs[0, 1].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[0, 1].legend(loc='upper right', fontsize=6)\naxs[1, 0].plot(names_temp, full_distances_lbp[2], 'b--', label='LBP-MAP2')\naxs[1, 0].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[1, 0].legend(loc='upper right', fontsize=6)\naxs[1, 1].plot(names_temp, full_distances_lbp[3], 'b--', label='LBP-MAP3')\naxs[1, 1].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[1, 1].legend(loc='upper right', fontsize=6)\naxs[2, 0].plot(names_temp, full_distances_lbp[4], 'b--', label='LBP-MAP4')\naxs[2, 0].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[2, 0].legend(loc='upper right', fontsize=6)\naxs[2, 1].plot(names_temp, full_distances_lbp[5], 'b--', label='LBP-MAP5')\naxs[2, 1].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[2, 1].legend(loc='upper right', fontsize=6)\nfor ax in axs.flat:\n ax.label_outer()\n ax.yaxis.set_tick_params(labelsize=6)\n plt.sca(ax)\n plt.xticks(rotation=45, horizontalalignment='right', fontweight='light',\n fontsize=6)\n plt.xlabel('Sequences', fontsize=6)\nfig.text(0.04, 0.5, 'Distances', va='center', rotation='vertical', fontsize=6)\nplt.savefig(results_file + '_lbp.png', dpi=200, bbox_inches='tight')\nplt.clf()\n<mask token>\naxs[0, 0].plot(names_temp, full_distances_mlbp[0], 'b--', label='MLBP-MAP0')\naxs[0, 0].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[0, 0].legend(loc='upper right', fontsize=6)\naxs[0, 1].plot(names_temp, full_distances_mlbp[1], 'b--', label='MLBP-MAP1')\naxs[0, 1].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[0, 1].legend(loc='upper right', fontsize=6)\naxs[1, 0].plot(names_temp, full_distances_mlbp[2], 'b--', label='MLBP-MAP2')\naxs[1, 0].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[1, 0].legend(loc='upper right', fontsize=6)\naxs[1, 1].plot(names_temp, full_distances_mlbp[3], 'b--', label='MLBP-MAP3')\naxs[1, 1].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[1, 1].legend(loc='upper right', fontsize=6)\naxs[2, 0].plot(names_temp, full_distances_mlbp[4], 'b--', label='MLBP-MAP4')\naxs[2, 0].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[2, 0].legend(loc='upper right', fontsize=6)\naxs[2, 1].plot(names_temp, full_distances_mlbp[5], 'b--', label='MLBP-MAP5')\naxs[2, 1].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[2, 1].legend(loc='upper right', fontsize=6)\nfor ax in axs.flat:\n ax.label_outer()\n ax.yaxis.set_tick_params(labelsize=6)\n plt.sca(ax)\n plt.xticks(rotation=45, horizontalalignment='right', fontweight='light',\n fontsize=6)\n plt.xlabel('Sequences', fontsize=6)\nfig.text(0.04, 0.5, 'Distances', va='center', rotation='vertical', fontsize=6)\nplt.savefig(results_file + '_mlbp.png', dpi=200, bbox_inches='tight')\n<mask token>\nfor mapping_type in range(mapping_function_size):\n error_fos.append(np.sum((full_distances_fos[mapping_type] -\n distances_mega) ** 2) / distances_mega.shape[0])\n error_glcm.append(np.sum((full_distances_glcm[mapping_type] -\n distances_mega) ** 2) / distances_mega.shape[0])\n 
error_lbp.append(np.sum((full_distances_lbp[mapping_type] -\n distances_mega) ** 2) / distances_mega.shape[0])\n error_mlbp.append(np.sum((full_distances_mlbp[mapping_type] -\n distances_mega) ** 2) / distances_mega.shape[0])\ndata_csv.append(error_fos)\ndata_csv.append(error_glcm)\ndata_csv.append(error_lbp)\ndata_csv.append(error_mlbp)\n<mask token>\nprint(df)\ndf.to_csv(results_file + '.csv', index=True)\nplt.clf()\n<mask token>\naxs[0, 0].plot(names_temp, full_distances_fos[0], 'b--', label='FOS-MAP0')\naxs[0, 0].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[0, 0].legend(loc='upper right', fontsize=6)\naxs[0, 1].plot(names_temp, full_distances_glcm[0], 'b--', label='GLCM-MAP0')\naxs[0, 1].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[0, 1].legend(loc='upper right', fontsize=6)\naxs[1, 0].plot(names_temp, full_distances_lbp[0], 'b--', label='LBP-MAP0')\naxs[1, 0].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[1, 0].legend(loc='upper right', fontsize=6)\naxs[1, 1].plot(names_temp, full_distances_mlbp[0], 'b--', label='MLBP-MAP0')\naxs[1, 1].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[1, 1].legend(loc='upper right', fontsize=6)\nfor ax in axs.flat:\n ax.label_outer()\n ax.yaxis.set_tick_params(labelsize=6)\n plt.sca(ax)\n plt.xticks(rotation=45, horizontalalignment='right', fontweight='light',\n fontsize=6)\n plt.xlabel('Sequences', fontsize=6)\nfig.text(0.04, 0.5, 'Distances', va='center', rotation='vertical', fontsize=6)\nplt.savefig(results_file + '_0map.png', dpi=200, bbox_inches='tight')\nplt.clf()\n<mask token>\naxs[0, 0].plot(names_temp, full_distances_fos[1], 'b--', label='FOS-MAP1')\naxs[0, 0].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[0, 0].legend(loc='upper right', fontsize=6)\naxs[0, 1].plot(names_temp, full_distances_glcm[1], 'b--', label='GLCM-MAP1')\naxs[0, 1].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[0, 1].legend(loc='upper right', fontsize=6)\naxs[1, 0].plot(names_temp, full_distances_lbp[1], 'b--', label='LBP-MAP1')\naxs[1, 0].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[1, 0].legend(loc='upper right', fontsize=6)\naxs[1, 1].plot(names_temp, full_distances_mlbp[1], 'b--', label='MLBP-MAP1')\naxs[1, 1].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[1, 1].legend(loc='upper right', fontsize=6)\nfor ax in axs.flat:\n ax.label_outer()\n ax.yaxis.set_tick_params(labelsize=6)\n plt.sca(ax)\n plt.xticks(rotation=45, horizontalalignment='right', fontweight='light',\n fontsize=6)\n plt.xlabel('Sequences', fontsize=6)\nfig.text(0.04, 0.5, 'Distances', va='center', rotation='vertical', fontsize=6)\nplt.savefig(results_file + '_1map.png', dpi=200, bbox_inches='tight')\nplt.clf()\n<mask token>\naxs[0, 0].plot(names_temp, full_distances_fos[2], 'b--', label='FOS-MAP2')\naxs[0, 0].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[0, 0].legend(loc='upper right', fontsize=6)\naxs[0, 1].plot(names_temp, full_distances_glcm[2], 'b--', label='GLCM-MAP2')\naxs[0, 1].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[0, 1].legend(loc='upper right', fontsize=6)\naxs[1, 0].plot(names_temp, full_distances_lbp[2], 'b--', label='LBP-MAP2')\naxs[1, 0].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[1, 0].legend(loc='upper right', fontsize=6)\naxs[1, 1].plot(names_temp, full_distances_mlbp[2], 'b--', label='MLBP-MAP2')\naxs[1, 1].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[1, 1].legend(loc='upper 
right', fontsize=6)\nfor ax in axs.flat:\n ax.label_outer()\n ax.yaxis.set_tick_params(labelsize=6)\n plt.sca(ax)\n plt.xticks(rotation=45, horizontalalignment='right', fontweight='light',\n fontsize=6)\n plt.xlabel('Sequences', fontsize=6)\nfig.text(0.04, 0.5, 'Distances', va='center', rotation='vertical', fontsize=6)\nplt.savefig(results_file + '_2map.png', dpi=200, bbox_inches='tight')\nplt.clf()\n<mask token>\naxs[0, 0].plot(names_temp, full_distances_fos[3], 'b--', label='FOS-MAP3')\naxs[0, 0].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[0, 0].legend(loc='upper right', fontsize=6)\naxs[0, 1].plot(names_temp, full_distances_glcm[3], 'b--', label='GLCM-MAP3')\naxs[0, 1].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[0, 1].legend(loc='upper right', fontsize=6)\naxs[1, 0].plot(names_temp, full_distances_lbp[3], 'b--', label='LBP-MAP3')\naxs[1, 0].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[1, 0].legend(loc='upper right', fontsize=6)\naxs[1, 1].plot(names_temp, full_distances_mlbp[3], 'b--', label='MLBP-MAP3')\naxs[1, 1].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[1, 1].legend(loc='upper right', fontsize=6)\nfor ax in axs.flat:\n ax.label_outer()\n ax.yaxis.set_tick_params(labelsize=6)\n plt.sca(ax)\n plt.xticks(rotation=45, horizontalalignment='right', fontweight='light',\n fontsize=6)\n plt.xlabel('Sequences', fontsize=6)\nfig.text(0.04, 0.5, 'Distances', va='center', rotation='vertical', fontsize=6)\nplt.savefig(results_file + '_3map.png', dpi=200, bbox_inches='tight')\nplt.clf()\n<mask token>\naxs[0, 0].plot(names_temp, full_distances_fos[4], 'b--', label='FOS-MAP4')\naxs[0, 0].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[0, 0].legend(loc='upper right', fontsize=6)\naxs[0, 1].plot(names_temp, full_distances_glcm[4], 'b--', label='GLCM-MAP4')\naxs[0, 1].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[0, 1].legend(loc='upper right', fontsize=6)\naxs[1, 0].plot(names_temp, full_distances_lbp[4], 'b--', label='LBP-MAP4')\naxs[1, 0].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[1, 0].legend(loc='upper right', fontsize=6)\naxs[1, 1].plot(names_temp, full_distances_mlbp[4], 'b--', label='MLBP-MAP4')\naxs[1, 1].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[1, 1].legend(loc='upper right', fontsize=6)\nfor ax in axs.flat:\n ax.label_outer()\n ax.yaxis.set_tick_params(labelsize=6)\n plt.sca(ax)\n plt.xticks(rotation=45, horizontalalignment='right', fontweight='light',\n fontsize=6)\n plt.xlabel('Sequences', fontsize=6)\nfig.text(0.04, 0.5, 'Distances', va='center', rotation='vertical', fontsize=6)\nplt.savefig(results_file + '_4map.png', dpi=200, bbox_inches='tight')\nplt.clf()\n<mask token>\naxs[0, 0].plot(names_temp, full_distances_fos[5], 'b--', label='FOS-MAP5')\naxs[0, 0].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[0, 0].legend(loc='upper right', fontsize=6)\naxs[0, 1].plot(names_temp, full_distances_glcm[5], 'b--', label='GLCM-MAP5')\naxs[0, 1].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[0, 1].legend(loc='upper right', fontsize=6)\naxs[1, 0].plot(names_temp, full_distances_lbp[5], 'b--', label='LBP-MAP5')\naxs[1, 0].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[1, 0].legend(loc='upper right', fontsize=6)\naxs[1, 1].plot(names_temp, full_distances_mlbp[5], 'b--', label='MLBP-MAP5')\naxs[1, 1].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[1, 1].legend(loc='upper right', 
fontsize=6)\nfor ax in axs.flat:\n ax.label_outer()\n ax.yaxis.set_tick_params(labelsize=6)\n plt.sca(ax)\n plt.xticks(rotation=45, horizontalalignment='right', fontweight='light',\n fontsize=6)\n plt.xlabel('Sequences', fontsize=6)\nfig.text(0.04, 0.5, 'Distances', va='center', rotation='vertical', fontsize=6)\nplt.savefig(results_file + '_5map.png', dpi=200, bbox_inches='tight')\n",
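Editorial note: the six near-identical 3x2 plotting blocks in the record above differ only in which descriptor's distance rows they draw against the CLUSTALW reference. The standalone sketch below shows how such a grid could be produced by a single helper; it is not part of the original script, and every name in it (plot_descriptor_grid, descriptor_rows, reference, labels) is illustrative only.

# Hedged sketch: one helper for the repeated "descriptor vs CLUSTALW" subplot grids.
import numpy as np
import matplotlib.pyplot as plt

def plot_descriptor_grid(descriptor_rows, reference, labels, out_png,
                         nrows=3, ncols=2, prefix='MAP'):
    """Plot one subplot per mapping function against the reference curve."""
    fig, axs = plt.subplots(nrows, ncols)
    for idx, ax in enumerate(axs.flat):
        if idx >= len(descriptor_rows):
            break
        ax.plot(labels, descriptor_rows[idx], 'b--', label=f'{prefix}{idx}')
        ax.plot(labels, reference, 'r-.', label='CLUSTALW')
        ax.legend(loc='upper right', fontsize=6)
        ax.label_outer()
        ax.yaxis.set_tick_params(labelsize=6)
        plt.sca(ax)
        plt.xticks(rotation=45, horizontalalignment='right',
                   fontweight='light', fontsize=6)
        plt.xlabel('Sequences', fontsize=6)
    fig.text(0.04, 0.5, 'Distances', va='center', rotation='vertical', fontsize=6)
    plt.savefig(out_png, dpi=200, bbox_inches='tight')
    plt.close(fig)

# Tiny synthetic call so the sketch runs on its own (values are made up).
rng = np.random.default_rng(0)
labels = [f'seq{i}' for i in range(5)]
rows = rng.random((6, 5))      # 6 mapping functions x 5 pairwise distances
reference = rng.random(5)      # stands in for the normalized CLUSTALW row
plot_descriptor_grid(rows, reference, labels, 'grid_demo.png')

Collapsing the repetition this way keeps the axis styling (rotated labels, small fonts, shared y-label) in one place instead of six copies per descriptor.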
"step-3": "<mask token>\ncurrent_dir = os.path.dirname(os.path.abspath(__file__))\nsequences = ['J01859.fna', 'NR_037066.fna', 'NR_040849.fna',\n 'NR_117152.fna', 'NR_132306.fna', 'NR_134817.fna', 'NR_134818.fna',\n 'NR_136784.fna', 'NR_148244.fna', 'NR_148787.fna', 'NR_152063.fna',\n 'KP317497.fna', 'NR_156072.fna']\nnames = ['Escherichia coli', 'T.Thermophilus', 'B.Wakoensis',\n 'T.Filiformis', 'T.Tengchongensis', 'S.Cameli', 'S.Tangierensis',\n 'T.amyloliquefaciens', 'B.Xiamenensis', 'B.Australimaris',\n 'S.Halotolerans', 'B.Maritimus', 'S.Himalayensis']\ncsv_mega = current_dir + '/sample_genomes/seqs_db1_distances.csv'\nseq_file_full = current_dir + '/sample_genomes/seqs_db1.fasta'\nresults_file = current_dir + '/results/compare_features/db1'\nsequences = ['L00016.fna', 'M22650.fna', 'M22651.fna', 'M22653.fna',\n 'M22654.fna', 'M22655.fna', 'M22656.fna', 'M22657.fna', 'V00658.fna',\n 'V00659.fna', 'V00672.fna', 'V00675.fna']\nnames = ['Human', 'Macaca mulatta', 'Macaca fuscata', 'Macaca fascicularis',\n 'Macaca sylvanus', 'Saimiri sciureus', 'Tarsius syrichta',\n 'Lemur catta', 'Gorilla', 'Hylobates', 'Chimpanzee', 'Sumatran Orangutan']\ncsv_mega = current_dir + '/sample_genomes/seqs_db2_distances.csv'\nseq_file_full = current_dir + '/sample_genomes/seqs_db2.fasta'\nresults_file = current_dir + '/results/compare_features/db2'\nsequences = ['V00662.fna', 'D38116.fna', 'D38113.fna', 'D38114.fna',\n 'D38115.fna', 'X99256.fna', 'Y18001.fna', 'X79547.fna', 'Y07726.fna',\n 'X63726.fna', 'X72004.fna', 'U20753.fna', 'X61145.fna', 'X72204.fna',\n 'V00654.fna', 'X14848.fna', 'V00711.fna', 'X83427.fna']\nnames = ['Human', 'Pygmy chimpanzee', 'Common chimpanzee', 'Gorilla',\n 'Orangutan', 'Gibbon', 'Baboon', 'Horse', 'White rhinoceros',\n 'Harbor seal', 'Gray seal', 'Cat', 'Fin whale', 'Blue whale', 'Cow',\n 'Rat', 'Mouse', 'Platypus']\ncsv_mega = current_dir + '/sample_genomes/seqs_db3_distances.csv'\nseq_file_full = current_dir + '/sample_genomes/seqs_db3.fasta'\nresults_file = current_dir + '/results/compare_features/db3'\ndata_features_fos = []\ndata_features_glcm = []\ndata_features_lbp = []\ndata_features_mlbp = []\nmapping_function_size = 6\nf_out = open(seq_file_full, 'w')\nfor sequence_file in sequences:\n f_in = open(current_dir + '/sample_genomes/' + sequence_file, 'r')\n f_out.write(f_in.read())\n f_in.close()\n data = []\n fa_file = current_dir + '/sample_genomes/' + sequence_file\n seqs = SeqIO.parse(fa_file, 'fasta')\n for record in seqs:\n data.append(record.seq.upper())\n seq = data[0]\n temp_fos = []\n temp_glcm = []\n temp_lbp = []\n temp_mlbp = []\n for mapping_type in range(mapping_function_size):\n skewness, my_kurtosis, energy, entropy = get_features(seq, mapping_type\n )\n temp_fos.append([skewness, my_kurtosis, energy, entropy])\n entropy, contrast, energy, correlation, homogeneity = (\n get_features_glcm(seq, mapping_type))\n temp_glcm.append([entropy, contrast, energy, correlation, homogeneity])\n hist_lbp = get_features_lbp(seq, mapping_type)\n temp_lbp.append(hist_lbp)\n hist_mlbp = get_features_mlbp(seq, mapping_type)\n temp_mlbp.append(hist_mlbp)\n data_features_fos.append(temp_fos)\n data_features_glcm.append(temp_glcm)\n data_features_lbp.append(temp_lbp)\n data_features_mlbp.append(temp_mlbp)\nf_out.close()\ndata_features_fos = np.array(data_features_fos)\ndata_features_glcm = np.array(data_features_glcm)\ndata_features_lbp = np.array(data_features_lbp)\ndata_features_mlbp = np.array(data_features_mlbp)\nfull_distances_fos = []\nfor mapping_type in 
range(mapping_function_size):\n DIST_fos = np.zeros((data_features_fos.shape[0], data_features_fos.\n shape[0]))\n for i in range(data_features_fos.shape[0]):\n row = np.zeros(data_features_fos.shape[0])\n for j in range(i, data_features_fos.shape[0]):\n dist = np.sqrt(np.sum((data_features_fos[i][mapping_type] -\n data_features_fos[j][mapping_type]) ** 2))\n row[j] = dist\n DIST_fos[i] = row\n DIST_fos = DIST_fos + DIST_fos.T - np.diag(np.diag(DIST_fos))\n DIST_fos = (DIST_fos - np.min(DIST_fos)) / (np.max(DIST_fos) - np.min(\n DIST_fos))\n full_distances_fos.append(DIST_fos[0, 1:DIST_fos.shape[0]])\nfull_distances_fos = np.array(full_distances_fos)\nprint('full_distances_fos', full_distances_fos.shape)\nfull_distances_glcm = []\nfor mapping_type in range(mapping_function_size):\n DIST_glcm = np.zeros((data_features_glcm.shape[0], data_features_glcm.\n shape[0]))\n for i in range(data_features_glcm.shape[0]):\n row = np.zeros(data_features_glcm.shape[0])\n for j in range(i, data_features_glcm.shape[0]):\n dist = np.sqrt(np.sum((data_features_glcm[i][mapping_type] -\n data_features_glcm[j][mapping_type]) ** 2))\n row[j] = dist\n DIST_glcm[i] = row\n DIST_glcm = DIST_glcm + DIST_glcm.T - np.diag(np.diag(DIST_glcm))\n DIST_glcm = (DIST_glcm - np.min(DIST_glcm)) / (np.max(DIST_glcm) - np.\n min(DIST_glcm))\n full_distances_glcm.append(DIST_glcm[0, 1:DIST_glcm.shape[0]])\nfull_distances_glcm = np.array(full_distances_glcm)\nprint('full_distances_glcm', full_distances_glcm.shape)\nfull_distances_lbp = []\nfor mapping_type in range(mapping_function_size):\n DIST_lbp = np.zeros((data_features_lbp.shape[0], data_features_lbp.\n shape[0]))\n for i in range(data_features_lbp.shape[0]):\n row = np.zeros(data_features_lbp.shape[0])\n for j in range(i, data_features_lbp.shape[0]):\n dist = np.sqrt(np.sum((data_features_lbp[i][mapping_type] -\n data_features_lbp[j][mapping_type]) ** 2))\n row[j] = dist\n DIST_lbp[i] = row\n DIST_lbp = DIST_lbp + DIST_lbp.T - np.diag(np.diag(DIST_lbp))\n DIST_lbp = (DIST_lbp - np.min(DIST_lbp)) / (np.max(DIST_lbp) - np.min(\n DIST_lbp))\n full_distances_lbp.append(DIST_lbp[0, 1:DIST_lbp.shape[0]])\nfull_distances_lbp = np.array(full_distances_lbp)\nprint('full_distances_lbp', full_distances_lbp.shape)\nfull_distances_mlbp = []\nfor mapping_type in range(mapping_function_size):\n DIST_mlbp = np.zeros((data_features_mlbp.shape[0], data_features_mlbp.\n shape[0]))\n for i in range(data_features_mlbp.shape[0]):\n row = np.zeros(data_features_mlbp.shape[0])\n for j in range(i, data_features_mlbp.shape[0]):\n dist = np.sqrt(np.sum((data_features_mlbp[i][mapping_type] -\n data_features_mlbp[j][mapping_type]) ** 2))\n row[j] = dist\n DIST_mlbp[i] = row\n DIST_mlbp = DIST_mlbp + DIST_mlbp.T - np.diag(np.diag(DIST_mlbp))\n DIST_mlbp = (DIST_mlbp - np.min(DIST_mlbp)) / (np.max(DIST_mlbp) - np.\n min(DIST_mlbp))\n full_distances_mlbp.append(DIST_mlbp[0, 1:DIST_mlbp.shape[0]])\nfull_distances_mlbp = np.array(full_distances_mlbp)\nprint('full_distances_mlbp', full_distances_mlbp.shape)\nmega_dist_csv = pd.read_csv(csv_mega)\nmega_dist_csv = mega_dist_csv.set_index(mega_dist_csv.columns[0])\nDIST_mega = mega_dist_csv.values\nDIST_mega[np.isnan(DIST_mega)] = 0\nDIST_mega = DIST_mega + DIST_mega.T\ndistances_mega = DIST_mega[0, 1:DIST_mega.shape[0]]\ndistances_mega = (distances_mega - np.min(distances_mega)) / (np.max(\n distances_mega) - np.min(distances_mega))\nnames_temp = np.array(sequences)\nnames_temp = names_temp[1:names_temp.shape[0]]\nplt.clf()\nfig, axs = plt.subplots(3, 
2)\naxs[0, 0].plot(names_temp, full_distances_fos[0], 'b--', label='FOS-MAP0')\naxs[0, 0].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[0, 0].legend(loc='upper right', fontsize=6)\naxs[0, 1].plot(names_temp, full_distances_fos[1], 'b--', label='FOS-MAP1')\naxs[0, 1].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[0, 1].legend(loc='upper right', fontsize=6)\naxs[1, 0].plot(names_temp, full_distances_fos[2], 'b--', label='FOS-MAP2')\naxs[1, 0].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[1, 0].legend(loc='upper right', fontsize=6)\naxs[1, 1].plot(names_temp, full_distances_fos[3], 'b--', label='FOS-MAP3')\naxs[1, 1].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[1, 1].legend(loc='upper right', fontsize=6)\naxs[2, 0].plot(names_temp, full_distances_fos[4], 'b--', label='FOS-MAP4')\naxs[2, 0].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[2, 0].legend(loc='upper right', fontsize=6)\naxs[2, 1].plot(names_temp, full_distances_fos[5], 'b--', label='FOS-MAP5')\naxs[2, 1].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[2, 1].legend(loc='upper right', fontsize=6)\nfor ax in axs.flat:\n ax.label_outer()\n ax.yaxis.set_tick_params(labelsize=6)\n plt.sca(ax)\n plt.xticks(rotation=45, horizontalalignment='right', fontweight='light',\n fontsize=6)\n plt.xlabel('Sequences', fontsize=6)\nfig.text(0.04, 0.5, 'Distances', va='center', rotation='vertical', fontsize=6)\nplt.savefig(results_file + '_fos.png', dpi=200, bbox_inches='tight')\nplt.clf()\nfig, axs = plt.subplots(3, 2)\naxs[0, 0].plot(names_temp, full_distances_glcm[0], 'b--', label='GLCM-MAP0')\naxs[0, 0].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[0, 0].legend(loc='upper right', fontsize=6)\naxs[0, 1].plot(names_temp, full_distances_glcm[1], 'b--', label='GLCM-MAP1')\naxs[0, 1].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[0, 1].legend(loc='upper right', fontsize=6)\naxs[1, 0].plot(names_temp, full_distances_glcm[2], 'b--', label='GLCM-MAP2')\naxs[1, 0].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[1, 0].legend(loc='upper right', fontsize=6)\naxs[1, 1].plot(names_temp, full_distances_glcm[3], 'b--', label='GLCM-MAP3')\naxs[1, 1].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[1, 1].legend(loc='upper right', fontsize=6)\naxs[2, 0].plot(names_temp, full_distances_glcm[4], 'b--', label='GLCM-MAP4')\naxs[2, 0].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[2, 0].legend(loc='upper right', fontsize=6)\naxs[2, 1].plot(names_temp, full_distances_glcm[5], 'b--', label='GLCM-MAP5')\naxs[2, 1].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[2, 1].legend(loc='upper right', fontsize=6)\nfor ax in axs.flat:\n ax.label_outer()\n ax.yaxis.set_tick_params(labelsize=6)\n plt.sca(ax)\n plt.xticks(rotation=45, horizontalalignment='right', fontweight='light',\n fontsize=6)\n plt.xlabel('Sequences', fontsize=6)\nfig.text(0.04, 0.5, 'Distances', va='center', rotation='vertical', fontsize=6)\nplt.savefig(results_file + '_glcm.png', dpi=200, bbox_inches='tight')\nplt.clf()\nfig, axs = plt.subplots(3, 2)\naxs[0, 0].plot(names_temp, full_distances_lbp[0], 'b--', label='LBP-MAP0')\naxs[0, 0].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[0, 0].legend(loc='upper right', fontsize=6)\naxs[0, 1].plot(names_temp, full_distances_lbp[1], 'b--', label='LBP-MAP1')\naxs[0, 1].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[0, 1].legend(loc='upper right', 
fontsize=6)\naxs[1, 0].plot(names_temp, full_distances_lbp[2], 'b--', label='LBP-MAP2')\naxs[1, 0].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[1, 0].legend(loc='upper right', fontsize=6)\naxs[1, 1].plot(names_temp, full_distances_lbp[3], 'b--', label='LBP-MAP3')\naxs[1, 1].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[1, 1].legend(loc='upper right', fontsize=6)\naxs[2, 0].plot(names_temp, full_distances_lbp[4], 'b--', label='LBP-MAP4')\naxs[2, 0].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[2, 0].legend(loc='upper right', fontsize=6)\naxs[2, 1].plot(names_temp, full_distances_lbp[5], 'b--', label='LBP-MAP5')\naxs[2, 1].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[2, 1].legend(loc='upper right', fontsize=6)\nfor ax in axs.flat:\n ax.label_outer()\n ax.yaxis.set_tick_params(labelsize=6)\n plt.sca(ax)\n plt.xticks(rotation=45, horizontalalignment='right', fontweight='light',\n fontsize=6)\n plt.xlabel('Sequences', fontsize=6)\nfig.text(0.04, 0.5, 'Distances', va='center', rotation='vertical', fontsize=6)\nplt.savefig(results_file + '_lbp.png', dpi=200, bbox_inches='tight')\nplt.clf()\nfig, axs = plt.subplots(3, 2)\naxs[0, 0].plot(names_temp, full_distances_mlbp[0], 'b--', label='MLBP-MAP0')\naxs[0, 0].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[0, 0].legend(loc='upper right', fontsize=6)\naxs[0, 1].plot(names_temp, full_distances_mlbp[1], 'b--', label='MLBP-MAP1')\naxs[0, 1].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[0, 1].legend(loc='upper right', fontsize=6)\naxs[1, 0].plot(names_temp, full_distances_mlbp[2], 'b--', label='MLBP-MAP2')\naxs[1, 0].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[1, 0].legend(loc='upper right', fontsize=6)\naxs[1, 1].plot(names_temp, full_distances_mlbp[3], 'b--', label='MLBP-MAP3')\naxs[1, 1].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[1, 1].legend(loc='upper right', fontsize=6)\naxs[2, 0].plot(names_temp, full_distances_mlbp[4], 'b--', label='MLBP-MAP4')\naxs[2, 0].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[2, 0].legend(loc='upper right', fontsize=6)\naxs[2, 1].plot(names_temp, full_distances_mlbp[5], 'b--', label='MLBP-MAP5')\naxs[2, 1].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[2, 1].legend(loc='upper right', fontsize=6)\nfor ax in axs.flat:\n ax.label_outer()\n ax.yaxis.set_tick_params(labelsize=6)\n plt.sca(ax)\n plt.xticks(rotation=45, horizontalalignment='right', fontweight='light',\n fontsize=6)\n plt.xlabel('Sequences', fontsize=6)\nfig.text(0.04, 0.5, 'Distances', va='center', rotation='vertical', fontsize=6)\nplt.savefig(results_file + '_mlbp.png', dpi=200, bbox_inches='tight')\ndata_csv = []\nerror_fos = []\nerror_glcm = []\nerror_lbp = []\nerror_mlbp = []\nfor mapping_type in range(mapping_function_size):\n error_fos.append(np.sum((full_distances_fos[mapping_type] -\n distances_mega) ** 2) / distances_mega.shape[0])\n error_glcm.append(np.sum((full_distances_glcm[mapping_type] -\n distances_mega) ** 2) / distances_mega.shape[0])\n error_lbp.append(np.sum((full_distances_lbp[mapping_type] -\n distances_mega) ** 2) / distances_mega.shape[0])\n error_mlbp.append(np.sum((full_distances_mlbp[mapping_type] -\n distances_mega) ** 2) / distances_mega.shape[0])\ndata_csv.append(error_fos)\ndata_csv.append(error_glcm)\ndata_csv.append(error_lbp)\ndata_csv.append(error_mlbp)\ndata_csv = np.array(data_csv)\ndf = pd.DataFrame(data=data_csv.T, index=['map0', 'map1', 'map2', 
'map3',\n 'map4', 'map5'], columns=['FOS', 'GLCM', 'LBP', 'MLBP'])\nprint(df)\ndf.to_csv(results_file + '.csv', index=True)\nplt.clf()\nfig, axs = plt.subplots(2, 2)\naxs[0, 0].plot(names_temp, full_distances_fos[0], 'b--', label='FOS-MAP0')\naxs[0, 0].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[0, 0].legend(loc='upper right', fontsize=6)\naxs[0, 1].plot(names_temp, full_distances_glcm[0], 'b--', label='GLCM-MAP0')\naxs[0, 1].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[0, 1].legend(loc='upper right', fontsize=6)\naxs[1, 0].plot(names_temp, full_distances_lbp[0], 'b--', label='LBP-MAP0')\naxs[1, 0].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[1, 0].legend(loc='upper right', fontsize=6)\naxs[1, 1].plot(names_temp, full_distances_mlbp[0], 'b--', label='MLBP-MAP0')\naxs[1, 1].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[1, 1].legend(loc='upper right', fontsize=6)\nfor ax in axs.flat:\n ax.label_outer()\n ax.yaxis.set_tick_params(labelsize=6)\n plt.sca(ax)\n plt.xticks(rotation=45, horizontalalignment='right', fontweight='light',\n fontsize=6)\n plt.xlabel('Sequences', fontsize=6)\nfig.text(0.04, 0.5, 'Distances', va='center', rotation='vertical', fontsize=6)\nplt.savefig(results_file + '_0map.png', dpi=200, bbox_inches='tight')\nplt.clf()\nfig, axs = plt.subplots(2, 2)\naxs[0, 0].plot(names_temp, full_distances_fos[1], 'b--', label='FOS-MAP1')\naxs[0, 0].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[0, 0].legend(loc='upper right', fontsize=6)\naxs[0, 1].plot(names_temp, full_distances_glcm[1], 'b--', label='GLCM-MAP1')\naxs[0, 1].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[0, 1].legend(loc='upper right', fontsize=6)\naxs[1, 0].plot(names_temp, full_distances_lbp[1], 'b--', label='LBP-MAP1')\naxs[1, 0].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[1, 0].legend(loc='upper right', fontsize=6)\naxs[1, 1].plot(names_temp, full_distances_mlbp[1], 'b--', label='MLBP-MAP1')\naxs[1, 1].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[1, 1].legend(loc='upper right', fontsize=6)\nfor ax in axs.flat:\n ax.label_outer()\n ax.yaxis.set_tick_params(labelsize=6)\n plt.sca(ax)\n plt.xticks(rotation=45, horizontalalignment='right', fontweight='light',\n fontsize=6)\n plt.xlabel('Sequences', fontsize=6)\nfig.text(0.04, 0.5, 'Distances', va='center', rotation='vertical', fontsize=6)\nplt.savefig(results_file + '_1map.png', dpi=200, bbox_inches='tight')\nplt.clf()\nfig, axs = plt.subplots(2, 2)\naxs[0, 0].plot(names_temp, full_distances_fos[2], 'b--', label='FOS-MAP2')\naxs[0, 0].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[0, 0].legend(loc='upper right', fontsize=6)\naxs[0, 1].plot(names_temp, full_distances_glcm[2], 'b--', label='GLCM-MAP2')\naxs[0, 1].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[0, 1].legend(loc='upper right', fontsize=6)\naxs[1, 0].plot(names_temp, full_distances_lbp[2], 'b--', label='LBP-MAP2')\naxs[1, 0].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[1, 0].legend(loc='upper right', fontsize=6)\naxs[1, 1].plot(names_temp, full_distances_mlbp[2], 'b--', label='MLBP-MAP2')\naxs[1, 1].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[1, 1].legend(loc='upper right', fontsize=6)\nfor ax in axs.flat:\n ax.label_outer()\n ax.yaxis.set_tick_params(labelsize=6)\n plt.sca(ax)\n plt.xticks(rotation=45, horizontalalignment='right', fontweight='light',\n fontsize=6)\n plt.xlabel('Sequences', 
fontsize=6)\nfig.text(0.04, 0.5, 'Distances', va='center', rotation='vertical', fontsize=6)\nplt.savefig(results_file + '_2map.png', dpi=200, bbox_inches='tight')\nplt.clf()\nfig, axs = plt.subplots(2, 2)\naxs[0, 0].plot(names_temp, full_distances_fos[3], 'b--', label='FOS-MAP3')\naxs[0, 0].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[0, 0].legend(loc='upper right', fontsize=6)\naxs[0, 1].plot(names_temp, full_distances_glcm[3], 'b--', label='GLCM-MAP3')\naxs[0, 1].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[0, 1].legend(loc='upper right', fontsize=6)\naxs[1, 0].plot(names_temp, full_distances_lbp[3], 'b--', label='LBP-MAP3')\naxs[1, 0].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[1, 0].legend(loc='upper right', fontsize=6)\naxs[1, 1].plot(names_temp, full_distances_mlbp[3], 'b--', label='MLBP-MAP3')\naxs[1, 1].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[1, 1].legend(loc='upper right', fontsize=6)\nfor ax in axs.flat:\n ax.label_outer()\n ax.yaxis.set_tick_params(labelsize=6)\n plt.sca(ax)\n plt.xticks(rotation=45, horizontalalignment='right', fontweight='light',\n fontsize=6)\n plt.xlabel('Sequences', fontsize=6)\nfig.text(0.04, 0.5, 'Distances', va='center', rotation='vertical', fontsize=6)\nplt.savefig(results_file + '_3map.png', dpi=200, bbox_inches='tight')\nplt.clf()\nfig, axs = plt.subplots(2, 2)\naxs[0, 0].plot(names_temp, full_distances_fos[4], 'b--', label='FOS-MAP4')\naxs[0, 0].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[0, 0].legend(loc='upper right', fontsize=6)\naxs[0, 1].plot(names_temp, full_distances_glcm[4], 'b--', label='GLCM-MAP4')\naxs[0, 1].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[0, 1].legend(loc='upper right', fontsize=6)\naxs[1, 0].plot(names_temp, full_distances_lbp[4], 'b--', label='LBP-MAP4')\naxs[1, 0].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[1, 0].legend(loc='upper right', fontsize=6)\naxs[1, 1].plot(names_temp, full_distances_mlbp[4], 'b--', label='MLBP-MAP4')\naxs[1, 1].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[1, 1].legend(loc='upper right', fontsize=6)\nfor ax in axs.flat:\n ax.label_outer()\n ax.yaxis.set_tick_params(labelsize=6)\n plt.sca(ax)\n plt.xticks(rotation=45, horizontalalignment='right', fontweight='light',\n fontsize=6)\n plt.xlabel('Sequences', fontsize=6)\nfig.text(0.04, 0.5, 'Distances', va='center', rotation='vertical', fontsize=6)\nplt.savefig(results_file + '_4map.png', dpi=200, bbox_inches='tight')\nplt.clf()\nfig, axs = plt.subplots(2, 2)\naxs[0, 0].plot(names_temp, full_distances_fos[5], 'b--', label='FOS-MAP5')\naxs[0, 0].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[0, 0].legend(loc='upper right', fontsize=6)\naxs[0, 1].plot(names_temp, full_distances_glcm[5], 'b--', label='GLCM-MAP5')\naxs[0, 1].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[0, 1].legend(loc='upper right', fontsize=6)\naxs[1, 0].plot(names_temp, full_distances_lbp[5], 'b--', label='LBP-MAP5')\naxs[1, 0].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[1, 0].legend(loc='upper right', fontsize=6)\naxs[1, 1].plot(names_temp, full_distances_mlbp[5], 'b--', label='MLBP-MAP5')\naxs[1, 1].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[1, 1].legend(loc='upper right', fontsize=6)\nfor ax in axs.flat:\n ax.label_outer()\n ax.yaxis.set_tick_params(labelsize=6)\n plt.sca(ax)\n plt.xticks(rotation=45, horizontalalignment='right', fontweight='light',\n 
fontsize=6)\n plt.xlabel('Sequences', fontsize=6)\nfig.text(0.04, 0.5, 'Distances', va='center', rotation='vertical', fontsize=6)\nplt.savefig(results_file + '_5map.png', dpi=200, bbox_inches='tight')\n",
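Editorial note: the four distance loops in the record above (FOS, GLCM, LBP, MLBP) all follow the same pattern: fill the upper triangle of a Euclidean distance matrix, mirror it with D + D.T - diag(diag(D)), min-max normalize, and keep row 0, i.e. the distances from the first sequence to every other one. The following is a hedged, standalone sketch of that pattern with illustrative names only; it is not the script's own code.

# Sketch of the per-descriptor distance step, assuming a (n_sequences, n_features) array.
import numpy as np

def reference_row_distances(features):
    """Return normalized distances from sequence 0 to all other sequences."""
    n = features.shape[0]
    dist = np.zeros((n, n))
    for i in range(n):
        for j in range(i, n):
            dist[i, j] = np.sqrt(np.sum((features[i] - features[j]) ** 2))
    # Mirror the upper triangle into the lower one; the diagonal is zero,
    # so subtracting diag(diag(dist)) avoids double-counting it.
    dist = dist + dist.T - np.diag(np.diag(dist))
    # Min-max normalize so descriptors with different scales become comparable
    # (assumes at least two distinct feature vectors, so max > min).
    dist = (dist - dist.min()) / (dist.max() - dist.min())
    return dist[0, 1:]

features = np.random.rand(5, 4)   # e.g. 5 sequences, 4 FOS features each
print(reference_row_distances(features))

The min-max normalization is what makes the FOS, GLCM, LBP and MLBP rows directly comparable to the (also normalized) CLUSTALW row later in the script.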
"step-4": "from sklearn.model_selection import KFold\nfrom sklearn.model_selection import train_test_split\nimport matplotlib.pyplot as plt\nfrom matplotlib import pyplot\nimport math\nimport os\nimport sys\nimport cv2\nimport numpy as np\nimport math\nfrom scipy.stats import kurtosis, skew\nfrom Bio import SeqIO\nimport pandas as pd\nimport seaborn as sns\nfrom descriptor import get_features\nfrom descriptor import get_features_glcm\nfrom descriptor import get_features_lbp\nfrom descriptor import get_features_mlbp\nfrom ete3 import PhyloTree, TreeStyle\nfrom ete3 import Tree\nfrom skbio import DistanceMatrix\nfrom skbio.tree import nj\ncurrent_dir = os.path.dirname(os.path.abspath(__file__))\nsequences = ['J01859.fna', 'NR_037066.fna', 'NR_040849.fna',\n 'NR_117152.fna', 'NR_132306.fna', 'NR_134817.fna', 'NR_134818.fna',\n 'NR_136784.fna', 'NR_148244.fna', 'NR_148787.fna', 'NR_152063.fna',\n 'KP317497.fna', 'NR_156072.fna']\nnames = ['Escherichia coli', 'T.Thermophilus', 'B.Wakoensis',\n 'T.Filiformis', 'T.Tengchongensis', 'S.Cameli', 'S.Tangierensis',\n 'T.amyloliquefaciens', 'B.Xiamenensis', 'B.Australimaris',\n 'S.Halotolerans', 'B.Maritimus', 'S.Himalayensis']\ncsv_mega = current_dir + '/sample_genomes/seqs_db1_distances.csv'\nseq_file_full = current_dir + '/sample_genomes/seqs_db1.fasta'\nresults_file = current_dir + '/results/compare_features/db1'\nsequences = ['L00016.fna', 'M22650.fna', 'M22651.fna', 'M22653.fna',\n 'M22654.fna', 'M22655.fna', 'M22656.fna', 'M22657.fna', 'V00658.fna',\n 'V00659.fna', 'V00672.fna', 'V00675.fna']\nnames = ['Human', 'Macaca mulatta', 'Macaca fuscata', 'Macaca fascicularis',\n 'Macaca sylvanus', 'Saimiri sciureus', 'Tarsius syrichta',\n 'Lemur catta', 'Gorilla', 'Hylobates', 'Chimpanzee', 'Sumatran Orangutan']\ncsv_mega = current_dir + '/sample_genomes/seqs_db2_distances.csv'\nseq_file_full = current_dir + '/sample_genomes/seqs_db2.fasta'\nresults_file = current_dir + '/results/compare_features/db2'\nsequences = ['V00662.fna', 'D38116.fna', 'D38113.fna', 'D38114.fna',\n 'D38115.fna', 'X99256.fna', 'Y18001.fna', 'X79547.fna', 'Y07726.fna',\n 'X63726.fna', 'X72004.fna', 'U20753.fna', 'X61145.fna', 'X72204.fna',\n 'V00654.fna', 'X14848.fna', 'V00711.fna', 'X83427.fna']\nnames = ['Human', 'Pygmy chimpanzee', 'Common chimpanzee', 'Gorilla',\n 'Orangutan', 'Gibbon', 'Baboon', 'Horse', 'White rhinoceros',\n 'Harbor seal', 'Gray seal', 'Cat', 'Fin whale', 'Blue whale', 'Cow',\n 'Rat', 'Mouse', 'Platypus']\ncsv_mega = current_dir + '/sample_genomes/seqs_db3_distances.csv'\nseq_file_full = current_dir + '/sample_genomes/seqs_db3.fasta'\nresults_file = current_dir + '/results/compare_features/db3'\ndata_features_fos = []\ndata_features_glcm = []\ndata_features_lbp = []\ndata_features_mlbp = []\nmapping_function_size = 6\nf_out = open(seq_file_full, 'w')\nfor sequence_file in sequences:\n f_in = open(current_dir + '/sample_genomes/' + sequence_file, 'r')\n f_out.write(f_in.read())\n f_in.close()\n data = []\n fa_file = current_dir + '/sample_genomes/' + sequence_file\n seqs = SeqIO.parse(fa_file, 'fasta')\n for record in seqs:\n data.append(record.seq.upper())\n seq = data[0]\n temp_fos = []\n temp_glcm = []\n temp_lbp = []\n temp_mlbp = []\n for mapping_type in range(mapping_function_size):\n skewness, my_kurtosis, energy, entropy = get_features(seq, mapping_type\n )\n temp_fos.append([skewness, my_kurtosis, energy, entropy])\n entropy, contrast, energy, correlation, homogeneity = (\n get_features_glcm(seq, mapping_type))\n temp_glcm.append([entropy, contrast, 
energy, correlation, homogeneity])\n hist_lbp = get_features_lbp(seq, mapping_type)\n temp_lbp.append(hist_lbp)\n hist_mlbp = get_features_mlbp(seq, mapping_type)\n temp_mlbp.append(hist_mlbp)\n data_features_fos.append(temp_fos)\n data_features_glcm.append(temp_glcm)\n data_features_lbp.append(temp_lbp)\n data_features_mlbp.append(temp_mlbp)\nf_out.close()\ndata_features_fos = np.array(data_features_fos)\ndata_features_glcm = np.array(data_features_glcm)\ndata_features_lbp = np.array(data_features_lbp)\ndata_features_mlbp = np.array(data_features_mlbp)\nfull_distances_fos = []\nfor mapping_type in range(mapping_function_size):\n DIST_fos = np.zeros((data_features_fos.shape[0], data_features_fos.\n shape[0]))\n for i in range(data_features_fos.shape[0]):\n row = np.zeros(data_features_fos.shape[0])\n for j in range(i, data_features_fos.shape[0]):\n dist = np.sqrt(np.sum((data_features_fos[i][mapping_type] -\n data_features_fos[j][mapping_type]) ** 2))\n row[j] = dist\n DIST_fos[i] = row\n DIST_fos = DIST_fos + DIST_fos.T - np.diag(np.diag(DIST_fos))\n DIST_fos = (DIST_fos - np.min(DIST_fos)) / (np.max(DIST_fos) - np.min(\n DIST_fos))\n full_distances_fos.append(DIST_fos[0, 1:DIST_fos.shape[0]])\nfull_distances_fos = np.array(full_distances_fos)\nprint('full_distances_fos', full_distances_fos.shape)\nfull_distances_glcm = []\nfor mapping_type in range(mapping_function_size):\n DIST_glcm = np.zeros((data_features_glcm.shape[0], data_features_glcm.\n shape[0]))\n for i in range(data_features_glcm.shape[0]):\n row = np.zeros(data_features_glcm.shape[0])\n for j in range(i, data_features_glcm.shape[0]):\n dist = np.sqrt(np.sum((data_features_glcm[i][mapping_type] -\n data_features_glcm[j][mapping_type]) ** 2))\n row[j] = dist\n DIST_glcm[i] = row\n DIST_glcm = DIST_glcm + DIST_glcm.T - np.diag(np.diag(DIST_glcm))\n DIST_glcm = (DIST_glcm - np.min(DIST_glcm)) / (np.max(DIST_glcm) - np.\n min(DIST_glcm))\n full_distances_glcm.append(DIST_glcm[0, 1:DIST_glcm.shape[0]])\nfull_distances_glcm = np.array(full_distances_glcm)\nprint('full_distances_glcm', full_distances_glcm.shape)\nfull_distances_lbp = []\nfor mapping_type in range(mapping_function_size):\n DIST_lbp = np.zeros((data_features_lbp.shape[0], data_features_lbp.\n shape[0]))\n for i in range(data_features_lbp.shape[0]):\n row = np.zeros(data_features_lbp.shape[0])\n for j in range(i, data_features_lbp.shape[0]):\n dist = np.sqrt(np.sum((data_features_lbp[i][mapping_type] -\n data_features_lbp[j][mapping_type]) ** 2))\n row[j] = dist\n DIST_lbp[i] = row\n DIST_lbp = DIST_lbp + DIST_lbp.T - np.diag(np.diag(DIST_lbp))\n DIST_lbp = (DIST_lbp - np.min(DIST_lbp)) / (np.max(DIST_lbp) - np.min(\n DIST_lbp))\n full_distances_lbp.append(DIST_lbp[0, 1:DIST_lbp.shape[0]])\nfull_distances_lbp = np.array(full_distances_lbp)\nprint('full_distances_lbp', full_distances_lbp.shape)\nfull_distances_mlbp = []\nfor mapping_type in range(mapping_function_size):\n DIST_mlbp = np.zeros((data_features_mlbp.shape[0], data_features_mlbp.\n shape[0]))\n for i in range(data_features_mlbp.shape[0]):\n row = np.zeros(data_features_mlbp.shape[0])\n for j in range(i, data_features_mlbp.shape[0]):\n dist = np.sqrt(np.sum((data_features_mlbp[i][mapping_type] -\n data_features_mlbp[j][mapping_type]) ** 2))\n row[j] = dist\n DIST_mlbp[i] = row\n DIST_mlbp = DIST_mlbp + DIST_mlbp.T - np.diag(np.diag(DIST_mlbp))\n DIST_mlbp = (DIST_mlbp - np.min(DIST_mlbp)) / (np.max(DIST_mlbp) - np.\n min(DIST_mlbp))\n full_distances_mlbp.append(DIST_mlbp[0, 
1:DIST_mlbp.shape[0]])\nfull_distances_mlbp = np.array(full_distances_mlbp)\nprint('full_distances_mlbp', full_distances_mlbp.shape)\nmega_dist_csv = pd.read_csv(csv_mega)\nmega_dist_csv = mega_dist_csv.set_index(mega_dist_csv.columns[0])\nDIST_mega = mega_dist_csv.values\nDIST_mega[np.isnan(DIST_mega)] = 0\nDIST_mega = DIST_mega + DIST_mega.T\ndistances_mega = DIST_mega[0, 1:DIST_mega.shape[0]]\ndistances_mega = (distances_mega - np.min(distances_mega)) / (np.max(\n distances_mega) - np.min(distances_mega))\nnames_temp = np.array(sequences)\nnames_temp = names_temp[1:names_temp.shape[0]]\nplt.clf()\nfig, axs = plt.subplots(3, 2)\naxs[0, 0].plot(names_temp, full_distances_fos[0], 'b--', label='FOS-MAP0')\naxs[0, 0].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[0, 0].legend(loc='upper right', fontsize=6)\naxs[0, 1].plot(names_temp, full_distances_fos[1], 'b--', label='FOS-MAP1')\naxs[0, 1].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[0, 1].legend(loc='upper right', fontsize=6)\naxs[1, 0].plot(names_temp, full_distances_fos[2], 'b--', label='FOS-MAP2')\naxs[1, 0].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[1, 0].legend(loc='upper right', fontsize=6)\naxs[1, 1].plot(names_temp, full_distances_fos[3], 'b--', label='FOS-MAP3')\naxs[1, 1].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[1, 1].legend(loc='upper right', fontsize=6)\naxs[2, 0].plot(names_temp, full_distances_fos[4], 'b--', label='FOS-MAP4')\naxs[2, 0].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[2, 0].legend(loc='upper right', fontsize=6)\naxs[2, 1].plot(names_temp, full_distances_fos[5], 'b--', label='FOS-MAP5')\naxs[2, 1].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[2, 1].legend(loc='upper right', fontsize=6)\nfor ax in axs.flat:\n ax.label_outer()\n ax.yaxis.set_tick_params(labelsize=6)\n plt.sca(ax)\n plt.xticks(rotation=45, horizontalalignment='right', fontweight='light',\n fontsize=6)\n plt.xlabel('Sequences', fontsize=6)\nfig.text(0.04, 0.5, 'Distances', va='center', rotation='vertical', fontsize=6)\nplt.savefig(results_file + '_fos.png', dpi=200, bbox_inches='tight')\nplt.clf()\nfig, axs = plt.subplots(3, 2)\naxs[0, 0].plot(names_temp, full_distances_glcm[0], 'b--', label='GLCM-MAP0')\naxs[0, 0].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[0, 0].legend(loc='upper right', fontsize=6)\naxs[0, 1].plot(names_temp, full_distances_glcm[1], 'b--', label='GLCM-MAP1')\naxs[0, 1].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[0, 1].legend(loc='upper right', fontsize=6)\naxs[1, 0].plot(names_temp, full_distances_glcm[2], 'b--', label='GLCM-MAP2')\naxs[1, 0].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[1, 0].legend(loc='upper right', fontsize=6)\naxs[1, 1].plot(names_temp, full_distances_glcm[3], 'b--', label='GLCM-MAP3')\naxs[1, 1].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[1, 1].legend(loc='upper right', fontsize=6)\naxs[2, 0].plot(names_temp, full_distances_glcm[4], 'b--', label='GLCM-MAP4')\naxs[2, 0].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[2, 0].legend(loc='upper right', fontsize=6)\naxs[2, 1].plot(names_temp, full_distances_glcm[5], 'b--', label='GLCM-MAP5')\naxs[2, 1].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[2, 1].legend(loc='upper right', fontsize=6)\nfor ax in axs.flat:\n ax.label_outer()\n ax.yaxis.set_tick_params(labelsize=6)\n plt.sca(ax)\n plt.xticks(rotation=45, horizontalalignment='right', 
fontweight='light',\n fontsize=6)\n plt.xlabel('Sequences', fontsize=6)\nfig.text(0.04, 0.5, 'Distances', va='center', rotation='vertical', fontsize=6)\nplt.savefig(results_file + '_glcm.png', dpi=200, bbox_inches='tight')\nplt.clf()\nfig, axs = plt.subplots(3, 2)\naxs[0, 0].plot(names_temp, full_distances_lbp[0], 'b--', label='LBP-MAP0')\naxs[0, 0].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[0, 0].legend(loc='upper right', fontsize=6)\naxs[0, 1].plot(names_temp, full_distances_lbp[1], 'b--', label='LBP-MAP1')\naxs[0, 1].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[0, 1].legend(loc='upper right', fontsize=6)\naxs[1, 0].plot(names_temp, full_distances_lbp[2], 'b--', label='LBP-MAP2')\naxs[1, 0].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[1, 0].legend(loc='upper right', fontsize=6)\naxs[1, 1].plot(names_temp, full_distances_lbp[3], 'b--', label='LBP-MAP3')\naxs[1, 1].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[1, 1].legend(loc='upper right', fontsize=6)\naxs[2, 0].plot(names_temp, full_distances_lbp[4], 'b--', label='LBP-MAP4')\naxs[2, 0].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[2, 0].legend(loc='upper right', fontsize=6)\naxs[2, 1].plot(names_temp, full_distances_lbp[5], 'b--', label='LBP-MAP5')\naxs[2, 1].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[2, 1].legend(loc='upper right', fontsize=6)\nfor ax in axs.flat:\n ax.label_outer()\n ax.yaxis.set_tick_params(labelsize=6)\n plt.sca(ax)\n plt.xticks(rotation=45, horizontalalignment='right', fontweight='light',\n fontsize=6)\n plt.xlabel('Sequences', fontsize=6)\nfig.text(0.04, 0.5, 'Distances', va='center', rotation='vertical', fontsize=6)\nplt.savefig(results_file + '_lbp.png', dpi=200, bbox_inches='tight')\nplt.clf()\nfig, axs = plt.subplots(3, 2)\naxs[0, 0].plot(names_temp, full_distances_mlbp[0], 'b--', label='MLBP-MAP0')\naxs[0, 0].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[0, 0].legend(loc='upper right', fontsize=6)\naxs[0, 1].plot(names_temp, full_distances_mlbp[1], 'b--', label='MLBP-MAP1')\naxs[0, 1].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[0, 1].legend(loc='upper right', fontsize=6)\naxs[1, 0].plot(names_temp, full_distances_mlbp[2], 'b--', label='MLBP-MAP2')\naxs[1, 0].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[1, 0].legend(loc='upper right', fontsize=6)\naxs[1, 1].plot(names_temp, full_distances_mlbp[3], 'b--', label='MLBP-MAP3')\naxs[1, 1].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[1, 1].legend(loc='upper right', fontsize=6)\naxs[2, 0].plot(names_temp, full_distances_mlbp[4], 'b--', label='MLBP-MAP4')\naxs[2, 0].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[2, 0].legend(loc='upper right', fontsize=6)\naxs[2, 1].plot(names_temp, full_distances_mlbp[5], 'b--', label='MLBP-MAP5')\naxs[2, 1].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[2, 1].legend(loc='upper right', fontsize=6)\nfor ax in axs.flat:\n ax.label_outer()\n ax.yaxis.set_tick_params(labelsize=6)\n plt.sca(ax)\n plt.xticks(rotation=45, horizontalalignment='right', fontweight='light',\n fontsize=6)\n plt.xlabel('Sequences', fontsize=6)\nfig.text(0.04, 0.5, 'Distances', va='center', rotation='vertical', fontsize=6)\nplt.savefig(results_file + '_mlbp.png', dpi=200, bbox_inches='tight')\ndata_csv = []\nerror_fos = []\nerror_glcm = []\nerror_lbp = []\nerror_mlbp = []\nfor mapping_type in range(mapping_function_size):\n 
error_fos.append(np.sum((full_distances_fos[mapping_type] -\n distances_mega) ** 2) / distances_mega.shape[0])\n error_glcm.append(np.sum((full_distances_glcm[mapping_type] -\n distances_mega) ** 2) / distances_mega.shape[0])\n error_lbp.append(np.sum((full_distances_lbp[mapping_type] -\n distances_mega) ** 2) / distances_mega.shape[0])\n error_mlbp.append(np.sum((full_distances_mlbp[mapping_type] -\n distances_mega) ** 2) / distances_mega.shape[0])\ndata_csv.append(error_fos)\ndata_csv.append(error_glcm)\ndata_csv.append(error_lbp)\ndata_csv.append(error_mlbp)\ndata_csv = np.array(data_csv)\ndf = pd.DataFrame(data=data_csv.T, index=['map0', 'map1', 'map2', 'map3',\n 'map4', 'map5'], columns=['FOS', 'GLCM', 'LBP', 'MLBP'])\nprint(df)\ndf.to_csv(results_file + '.csv', index=True)\nplt.clf()\nfig, axs = plt.subplots(2, 2)\naxs[0, 0].plot(names_temp, full_distances_fos[0], 'b--', label='FOS-MAP0')\naxs[0, 0].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[0, 0].legend(loc='upper right', fontsize=6)\naxs[0, 1].plot(names_temp, full_distances_glcm[0], 'b--', label='GLCM-MAP0')\naxs[0, 1].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[0, 1].legend(loc='upper right', fontsize=6)\naxs[1, 0].plot(names_temp, full_distances_lbp[0], 'b--', label='LBP-MAP0')\naxs[1, 0].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[1, 0].legend(loc='upper right', fontsize=6)\naxs[1, 1].plot(names_temp, full_distances_mlbp[0], 'b--', label='MLBP-MAP0')\naxs[1, 1].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[1, 1].legend(loc='upper right', fontsize=6)\nfor ax in axs.flat:\n ax.label_outer()\n ax.yaxis.set_tick_params(labelsize=6)\n plt.sca(ax)\n plt.xticks(rotation=45, horizontalalignment='right', fontweight='light',\n fontsize=6)\n plt.xlabel('Sequences', fontsize=6)\nfig.text(0.04, 0.5, 'Distances', va='center', rotation='vertical', fontsize=6)\nplt.savefig(results_file + '_0map.png', dpi=200, bbox_inches='tight')\nplt.clf()\nfig, axs = plt.subplots(2, 2)\naxs[0, 0].plot(names_temp, full_distances_fos[1], 'b--', label='FOS-MAP1')\naxs[0, 0].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[0, 0].legend(loc='upper right', fontsize=6)\naxs[0, 1].plot(names_temp, full_distances_glcm[1], 'b--', label='GLCM-MAP1')\naxs[0, 1].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[0, 1].legend(loc='upper right', fontsize=6)\naxs[1, 0].plot(names_temp, full_distances_lbp[1], 'b--', label='LBP-MAP1')\naxs[1, 0].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[1, 0].legend(loc='upper right', fontsize=6)\naxs[1, 1].plot(names_temp, full_distances_mlbp[1], 'b--', label='MLBP-MAP1')\naxs[1, 1].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[1, 1].legend(loc='upper right', fontsize=6)\nfor ax in axs.flat:\n ax.label_outer()\n ax.yaxis.set_tick_params(labelsize=6)\n plt.sca(ax)\n plt.xticks(rotation=45, horizontalalignment='right', fontweight='light',\n fontsize=6)\n plt.xlabel('Sequences', fontsize=6)\nfig.text(0.04, 0.5, 'Distances', va='center', rotation='vertical', fontsize=6)\nplt.savefig(results_file + '_1map.png', dpi=200, bbox_inches='tight')\nplt.clf()\nfig, axs = plt.subplots(2, 2)\naxs[0, 0].plot(names_temp, full_distances_fos[2], 'b--', label='FOS-MAP2')\naxs[0, 0].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[0, 0].legend(loc='upper right', fontsize=6)\naxs[0, 1].plot(names_temp, full_distances_glcm[2], 'b--', label='GLCM-MAP2')\naxs[0, 1].plot(names_temp, distances_mega, 'r-.', 
label='CLUSTALW')\naxs[0, 1].legend(loc='upper right', fontsize=6)\naxs[1, 0].plot(names_temp, full_distances_lbp[2], 'b--', label='LBP-MAP2')\naxs[1, 0].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[1, 0].legend(loc='upper right', fontsize=6)\naxs[1, 1].plot(names_temp, full_distances_mlbp[2], 'b--', label='MLBP-MAP2')\naxs[1, 1].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[1, 1].legend(loc='upper right', fontsize=6)\nfor ax in axs.flat:\n ax.label_outer()\n ax.yaxis.set_tick_params(labelsize=6)\n plt.sca(ax)\n plt.xticks(rotation=45, horizontalalignment='right', fontweight='light',\n fontsize=6)\n plt.xlabel('Sequences', fontsize=6)\nfig.text(0.04, 0.5, 'Distances', va='center', rotation='vertical', fontsize=6)\nplt.savefig(results_file + '_2map.png', dpi=200, bbox_inches='tight')\nplt.clf()\nfig, axs = plt.subplots(2, 2)\naxs[0, 0].plot(names_temp, full_distances_fos[3], 'b--', label='FOS-MAP3')\naxs[0, 0].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[0, 0].legend(loc='upper right', fontsize=6)\naxs[0, 1].plot(names_temp, full_distances_glcm[3], 'b--', label='GLCM-MAP3')\naxs[0, 1].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[0, 1].legend(loc='upper right', fontsize=6)\naxs[1, 0].plot(names_temp, full_distances_lbp[3], 'b--', label='LBP-MAP3')\naxs[1, 0].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[1, 0].legend(loc='upper right', fontsize=6)\naxs[1, 1].plot(names_temp, full_distances_mlbp[3], 'b--', label='MLBP-MAP3')\naxs[1, 1].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[1, 1].legend(loc='upper right', fontsize=6)\nfor ax in axs.flat:\n ax.label_outer()\n ax.yaxis.set_tick_params(labelsize=6)\n plt.sca(ax)\n plt.xticks(rotation=45, horizontalalignment='right', fontweight='light',\n fontsize=6)\n plt.xlabel('Sequences', fontsize=6)\nfig.text(0.04, 0.5, 'Distances', va='center', rotation='vertical', fontsize=6)\nplt.savefig(results_file + '_3map.png', dpi=200, bbox_inches='tight')\nplt.clf()\nfig, axs = plt.subplots(2, 2)\naxs[0, 0].plot(names_temp, full_distances_fos[4], 'b--', label='FOS-MAP4')\naxs[0, 0].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[0, 0].legend(loc='upper right', fontsize=6)\naxs[0, 1].plot(names_temp, full_distances_glcm[4], 'b--', label='GLCM-MAP4')\naxs[0, 1].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[0, 1].legend(loc='upper right', fontsize=6)\naxs[1, 0].plot(names_temp, full_distances_lbp[4], 'b--', label='LBP-MAP4')\naxs[1, 0].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[1, 0].legend(loc='upper right', fontsize=6)\naxs[1, 1].plot(names_temp, full_distances_mlbp[4], 'b--', label='MLBP-MAP4')\naxs[1, 1].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[1, 1].legend(loc='upper right', fontsize=6)\nfor ax in axs.flat:\n ax.label_outer()\n ax.yaxis.set_tick_params(labelsize=6)\n plt.sca(ax)\n plt.xticks(rotation=45, horizontalalignment='right', fontweight='light',\n fontsize=6)\n plt.xlabel('Sequences', fontsize=6)\nfig.text(0.04, 0.5, 'Distances', va='center', rotation='vertical', fontsize=6)\nplt.savefig(results_file + '_4map.png', dpi=200, bbox_inches='tight')\nplt.clf()\nfig, axs = plt.subplots(2, 2)\naxs[0, 0].plot(names_temp, full_distances_fos[5], 'b--', label='FOS-MAP5')\naxs[0, 0].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[0, 0].legend(loc='upper right', fontsize=6)\naxs[0, 1].plot(names_temp, full_distances_glcm[5], 'b--', label='GLCM-MAP5')\naxs[0, 
1].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[0, 1].legend(loc='upper right', fontsize=6)\naxs[1, 0].plot(names_temp, full_distances_lbp[5], 'b--', label='LBP-MAP5')\naxs[1, 0].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[1, 0].legend(loc='upper right', fontsize=6)\naxs[1, 1].plot(names_temp, full_distances_mlbp[5], 'b--', label='MLBP-MAP5')\naxs[1, 1].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[1, 1].legend(loc='upper right', fontsize=6)\nfor ax in axs.flat:\n ax.label_outer()\n ax.yaxis.set_tick_params(labelsize=6)\n plt.sca(ax)\n plt.xticks(rotation=45, horizontalalignment='right', fontweight='light',\n fontsize=6)\n plt.xlabel('Sequences', fontsize=6)\nfig.text(0.04, 0.5, 'Distances', va='center', rotation='vertical', fontsize=6)\nplt.savefig(results_file + '_5map.png', dpi=200, bbox_inches='tight')\n",
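Editorial note: the error block in the record above scores each descriptor/mapping pair by the mean squared difference between its normalized distance row and the CLUSTALW reference, then writes the resulting 6x4 table (rows = mapping functions, columns = descriptors) to CSV. Below is a small self-contained sketch of that computation; all names and numbers are synthetic and for illustration only.

# Sketch of the MSE-vs-CLUSTALW scoring and CSV export, with made-up data.
import numpy as np
import pandas as pd

rng = np.random.default_rng(1)
reference = rng.random(17)                    # normalized CLUSTALW row
descriptors = {name: rng.random((6, 17))      # 6 mappings x 17 pairwise distances
               for name in ('FOS', 'GLCM', 'LBP', 'MLBP')}

errors = {name: [np.sum((rows[m] - reference) ** 2) / reference.shape[0]
                 for m in range(6)]
          for name, rows in descriptors.items()}

df = pd.DataFrame(errors, index=[f'map{m}' for m in range(6)])
print(df)
df.to_csv('errors_demo.csv', index=True)

The lowest value in each column then indicates which mapping function brings that descriptor's distances closest to the alignment-based reference.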
"step-5": "# este script comprar diferente metodos de base2number\n\nfrom sklearn.model_selection import KFold \nfrom sklearn.model_selection import train_test_split\n#from matplotlib import pyplot as plt\n#from matplotlib import cm\nimport matplotlib.pyplot as plt \nfrom matplotlib import pyplot\nimport math\nimport os\nimport sys\nimport cv2\nimport numpy as np\nimport math\nfrom scipy.stats import kurtosis, skew\nfrom Bio import SeqIO\nimport pandas as pd\nimport seaborn as sns\n\nfrom descriptor import get_features\nfrom descriptor import get_features_glcm\nfrom descriptor import get_features_lbp\nfrom descriptor import get_features_mlbp\n\nfrom ete3 import PhyloTree, TreeStyle\nfrom ete3 import Tree\n\nfrom skbio import DistanceMatrix\nfrom skbio.tree import nj\n\n\ncurrent_dir = os.path.dirname(os.path.abspath(__file__))\n\n###################################################################################################################################\n###################################################################################################################################\n\nsequences = [ 'J01859.fna', 'NR_037066.fna', 'NR_040849.fna', 'NR_117152.fna', 'NR_132306.fna', \n 'NR_134817.fna', 'NR_134818.fna', 'NR_136784.fna', 'NR_148244.fna', 'NR_148787.fna', \n 'NR_152063.fna', 'KP317497.fna', 'NR_156072.fna' ]\n\nnames = [ 'Escherichia coli', 'T.Thermophilus', 'B.Wakoensis', 'T.Filiformis', 'T.Tengchongensis', \n 'S.Cameli', 'S.Tangierensis', 'T.amyloliquefaciens', 'B.Xiamenensis', 'B.Australimaris', \n 'S.Halotolerans', 'B.Maritimus', 'S.Himalayensis']\n\ncsv_mega = current_dir + \"/sample_genomes/seqs_db1_distances.csv\"\nseq_file_full = current_dir + \"/sample_genomes/seqs_db1.fasta\"\nresults_file = current_dir + \"/results/compare_features/db1\"\n\n###################################################################################################################################\n###################################################################################################################################\n\nsequences = [ 'L00016.fna', 'M22650.fna', 'M22651.fna', 'M22653.fna', 'M22654.fna', \n 'M22655.fna', 'M22656.fna', 'M22657.fna', 'V00658.fna', 'V00659.fna', \n 'V00672.fna', 'V00675.fna']\n\nnames = [ 'Human', 'Macaca mulatta', 'Macaca fuscata', 'Macaca fascicularis', 'Macaca sylvanus', \n 'Saimiri sciureus', 'Tarsius syrichta', 'Lemur catta', 'Gorilla', 'Hylobates', \n 'Chimpanzee', 'Sumatran Orangutan']\n\ncsv_mega = current_dir + \"/sample_genomes/seqs_db2_distances.csv\"\nseq_file_full = current_dir + \"/sample_genomes/seqs_db2.fasta\"\nresults_file = current_dir + \"/results/compare_features/db2\"\n\n###################################################################################################################################\n###################################################################################################################################\n\nsequences = [ 'V00662.fna', 'D38116.fna', 'D38113.fna', 'D38114.fna', 'D38115.fna', \n 'X99256.fna', 'Y18001.fna', 'X79547.fna', 'Y07726.fna', 'X63726.fna', \n 'X72004.fna', 'U20753.fna', 'X61145.fna', 'X72204.fna', 'V00654.fna', \n 'X14848.fna', 'V00711.fna', 'X83427.fna']\n\nnames = [ 'Human', 'Pygmy chimpanzee', 'Common chimpanzee', 'Gorilla', 'Orangutan', \n 'Gibbon', 'Baboon', 'Horse', 'White rhinoceros', 'Harbor seal', \n 'Gray seal', 'Cat', 'Fin whale', 'Blue whale', 'Cow', \n 'Rat', 'Mouse', 'Platypus']\n\ncsv_mega = current_dir + \"/sample_genomes/seqs_db3_distances.csv\"\nseq_file_full = 
current_dir + \"/sample_genomes/seqs_db3.fasta\"\nresults_file = current_dir + \"/results/compare_features/db3\"\n\n###################################################################################################################################\n###################################################################################################################################\n\ndata_features_fos = []\ndata_features_glcm = []\ndata_features_lbp = []\ndata_features_mlbp = []\n\nmapping_function_size = 6 # trere is 6 types of mapping functions\n\nf_out = open(seq_file_full, \"w\")\n\nfor sequence_file in sequences:\n\n f_in = open(current_dir + \"/sample_genomes/\" + sequence_file, \"r\")\n f_out.write(f_in.read())\n f_in.close()\n\n data = [] \n fa_file = current_dir + \"/sample_genomes/\" + sequence_file\n seqs = SeqIO.parse(fa_file, \"fasta\")\n for record in seqs:\n data.append(record.seq.upper()) \n\n seq = data[0] \n\n temp_fos = []\n temp_glcm = []\n temp_lbp = []\n temp_mlbp = []\n # here we evaluate each mapping funciton\n for mapping_type in range(mapping_function_size):\n skewness, my_kurtosis, energy, entropy = get_features(seq, mapping_type)\n temp_fos.append( [skewness, my_kurtosis, energy, entropy] )\n #rint(\"fos mapping=\",mapping_type, [skewness, my_kurtosis, energy, entropy])\n\n entropy, contrast, energy, correlation, homogeneity = get_features_glcm(seq, mapping_type)\n temp_glcm.append( [entropy, contrast, energy, correlation, homogeneity] )\n #print(\"glcm mapping=\",mapping_type, [entropy, contrast, energy, correlation, homogeneity])\n\n hist_lbp = get_features_lbp(seq, mapping_type)\n temp_lbp.append( hist_lbp )\n #print(\"lbp mapping=\",mapping_type, hist_lbp)\n\n hist_mlbp = get_features_mlbp(seq, mapping_type)\n temp_mlbp.append( hist_mlbp )\n #print(\"mlbp mapping=\",mapping_type, hist_mlbp)\n\n data_features_fos.append(temp_fos)\n data_features_glcm.append(temp_glcm)\n data_features_lbp.append(temp_lbp)\n data_features_mlbp.append(temp_mlbp)\n\nf_out.close()\n\ndata_features_fos = np.array(data_features_fos)\ndata_features_glcm = np.array(data_features_glcm)\ndata_features_lbp = np.array(data_features_lbp)\ndata_features_mlbp = np.array(data_features_mlbp)\n\n###################################################################################################################3\n# procesamos las distancias con FOS\n###################################################################################################################\nfull_distances_fos = []\nfor mapping_type in range(mapping_function_size):\n\n DIST_fos = np.zeros((data_features_fos.shape[0], data_features_fos.shape[0]))\n for i in range(data_features_fos.shape[0]):\n row = np.zeros(data_features_fos.shape[0])\n for j in range(i, data_features_fos.shape[0]):\n dist = np.sqrt(np.sum((data_features_fos[i][mapping_type] - data_features_fos[j][mapping_type])**2))\n row[j] = dist \n DIST_fos[i] = row\n\n DIST_fos = DIST_fos + DIST_fos.T - np.diag(np.diag(DIST_fos))\n DIST_fos = (DIST_fos - np.min(DIST_fos)) / (np.max(DIST_fos) - np.min(DIST_fos))\n full_distances_fos.append( DIST_fos[0,1:DIST_fos.shape[0]] )\n\nfull_distances_fos = np.array(full_distances_fos)\nprint(\"full_distances_fos\", full_distances_fos.shape)\n\n###################################################################################################################3\n# procesamos las distancias con GLCM\n###################################################################################################################\nfull_distances_glcm 
= []\nfor mapping_type in range(mapping_function_size):\n\n DIST_glcm = np.zeros((data_features_glcm.shape[0], data_features_glcm.shape[0]))\n for i in range(data_features_glcm.shape[0]):\n row = np.zeros(data_features_glcm.shape[0])\n for j in range(i, data_features_glcm.shape[0]):\n dist = np.sqrt(np.sum((data_features_glcm[i][mapping_type] - data_features_glcm[j][mapping_type])**2))\n row[j] = dist \n DIST_glcm[i] = row\n\n DIST_glcm = DIST_glcm + DIST_glcm.T - np.diag(np.diag(DIST_glcm))\n DIST_glcm = (DIST_glcm - np.min(DIST_glcm)) / (np.max(DIST_glcm) - np.min(DIST_glcm))\n full_distances_glcm.append( DIST_glcm[0,1:DIST_glcm.shape[0]] )\n\nfull_distances_glcm = np.array(full_distances_glcm)\nprint(\"full_distances_glcm\", full_distances_glcm.shape)\n\n###################################################################################################################3\n# procesamos las distancias con LBP\n###################################################################################################################\nfull_distances_lbp = []\nfor mapping_type in range(mapping_function_size):\n\n DIST_lbp = np.zeros((data_features_lbp.shape[0], data_features_lbp.shape[0]))\n for i in range(data_features_lbp.shape[0]):\n row = np.zeros(data_features_lbp.shape[0])\n for j in range(i, data_features_lbp.shape[0]):\n dist = np.sqrt(np.sum((data_features_lbp[i][mapping_type] - data_features_lbp[j][mapping_type])**2))\n row[j] = dist \n DIST_lbp[i] = row\n\n DIST_lbp = DIST_lbp + DIST_lbp.T - np.diag(np.diag(DIST_lbp))\n DIST_lbp = (DIST_lbp - np.min(DIST_lbp)) / (np.max(DIST_lbp) - np.min(DIST_lbp))\n full_distances_lbp.append( DIST_lbp[0,1:DIST_lbp.shape[0]] )\n\nfull_distances_lbp = np.array(full_distances_lbp)\nprint(\"full_distances_lbp\", full_distances_lbp.shape)\n\n###################################################################################################################3\n# procesamos las distancias con MLBP\n###################################################################################################################\nfull_distances_mlbp = []\nfor mapping_type in range(mapping_function_size):\n\n DIST_mlbp = np.zeros((data_features_mlbp.shape[0], data_features_mlbp.shape[0]))\n for i in range(data_features_mlbp.shape[0]):\n row = np.zeros(data_features_mlbp.shape[0])\n for j in range(i, data_features_mlbp.shape[0]):\n dist = np.sqrt(np.sum((data_features_mlbp[i][mapping_type] - data_features_mlbp[j][mapping_type])**2))\n row[j] = dist \n DIST_mlbp[i] = row\n\n DIST_mlbp = DIST_mlbp + DIST_mlbp.T - np.diag(np.diag(DIST_mlbp))\n DIST_mlbp = (DIST_mlbp - np.min(DIST_mlbp)) / (np.max(DIST_mlbp) - np.min(DIST_mlbp))\n full_distances_mlbp.append( DIST_mlbp[0,1:DIST_mlbp.shape[0]] )\n\nfull_distances_mlbp = np.array(full_distances_mlbp)\nprint(\"full_distances_mlbp\", full_distances_mlbp.shape)\n\n###################################################################################################################\n### distances from mega ###########################################################\n###################################################################################################################\nmega_dist_csv = pd.read_csv(csv_mega) \nmega_dist_csv = mega_dist_csv.set_index(mega_dist_csv.columns[0])\nDIST_mega = mega_dist_csv.values\nDIST_mega[np.isnan(DIST_mega)] = 0 # lllenamos con ceros los valores nan\nDIST_mega = DIST_mega + DIST_mega.T #copiamos el triangulo inferior al superir en la matriz\ndistances_mega = 
DIST_mega[0,1:DIST_mega.shape[0]]\n\ndistances_mega = (distances_mega - np.min(distances_mega)) / (np.max(distances_mega) - np.min(distances_mega))\n###################################################################################################################\n###################################################################################################################\n\nnames_temp = np.array(sequences)\nnames_temp = names_temp[1:names_temp.shape[0]] # eliminamos el primer elemento\n\n###################################################################################################################3\n# procesamos las distancias con FOS\n###################################################################################################################\n\nplt.clf()\nfig, axs = plt.subplots(3,2)\naxs[0,0].plot(names_temp, full_distances_fos[0], 'b--', label='FOS-MAP0')\naxs[0,0].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[0,0].legend(loc='upper right', fontsize=6)\naxs[0,1].plot(names_temp, full_distances_fos[1], 'b--', label='FOS-MAP1')\naxs[0,1].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[0,1].legend(loc='upper right', fontsize=6)\naxs[1,0].plot(names_temp, full_distances_fos[2], 'b--', label='FOS-MAP2')\naxs[1,0].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[1,0].legend(loc='upper right', fontsize=6)\naxs[1,1].plot(names_temp, full_distances_fos[3], 'b--', label='FOS-MAP3')\naxs[1,1].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[1,1].legend(loc='upper right', fontsize=6)\naxs[2,0].plot(names_temp, full_distances_fos[4], 'b--', label='FOS-MAP4')\naxs[2,0].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[2,0].legend(loc='upper right', fontsize=6)\naxs[2,1].plot(names_temp, full_distances_fos[5], 'b--', label='FOS-MAP5')\naxs[2,1].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[2,1].legend(loc='upper right', fontsize=6)\n\nfor ax in axs.flat:\n ax.label_outer()\n ax.yaxis.set_tick_params(labelsize=6)\n plt.sca(ax)\n plt.xticks(rotation=45, horizontalalignment='right', fontweight='light', fontsize=6 )\n plt.xlabel('Sequences', fontsize=6)\n\nfig.text(0.04, 0.5, 'Distances', va='center', rotation='vertical', fontsize=6 )\nplt.savefig( results_file + \"_fos.png\", dpi = 200, bbox_inches='tight')\n\n###################################################################################################################\n# procesamos las distancias con GLCM\n###################################################################################################################\nplt.clf()\nfig, axs = plt.subplots(3,2)\naxs[0,0].plot(names_temp, full_distances_glcm[0], 'b--', label='GLCM-MAP0')\naxs[0,0].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[0,0].legend(loc='upper right', fontsize=6)\naxs[0,1].plot(names_temp, full_distances_glcm[1], 'b--', label='GLCM-MAP1')\naxs[0,1].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[0,1].legend(loc='upper right', fontsize=6)\naxs[1,0].plot(names_temp, full_distances_glcm[2], 'b--', label='GLCM-MAP2')\naxs[1,0].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[1,0].legend(loc='upper right', fontsize=6)\naxs[1,1].plot(names_temp, full_distances_glcm[3], 'b--', label='GLCM-MAP3')\naxs[1,1].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[1,1].legend(loc='upper right', fontsize=6)\naxs[2,0].plot(names_temp, full_distances_glcm[4], 'b--', label='GLCM-MAP4')\naxs[2,0].plot(names_temp, distances_mega, 'r-.', 
label='CLUSTALW')\naxs[2,0].legend(loc='upper right', fontsize=6)\naxs[2,1].plot(names_temp, full_distances_glcm[5], 'b--', label='GLCM-MAP5')\naxs[2,1].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[2,1].legend(loc='upper right', fontsize=6)\n\nfor ax in axs.flat:\n ax.label_outer()\n ax.yaxis.set_tick_params(labelsize=6)\n plt.sca(ax)\n plt.xticks(rotation=45, horizontalalignment='right', fontweight='light', fontsize=6 )\n plt.xlabel('Sequences', fontsize=6)\n\nfig.text(0.04, 0.5, 'Distances', va='center', rotation='vertical', fontsize=6 )\nplt.savefig( results_file + \"_glcm.png\", dpi = 200, bbox_inches='tight')\n\n###################################################################################################################\n# procesamos las distancias con LBP\n###################################################################################################################\nplt.clf()\nfig, axs = plt.subplots(3,2)\naxs[0,0].plot(names_temp, full_distances_lbp[0], 'b--', label='LBP-MAP0')\naxs[0,0].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[0,0].legend(loc='upper right', fontsize=6)\naxs[0,1].plot(names_temp, full_distances_lbp[1], 'b--', label='LBP-MAP1')\naxs[0,1].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[0,1].legend(loc='upper right', fontsize=6)\naxs[1,0].plot(names_temp, full_distances_lbp[2], 'b--', label='LBP-MAP2')\naxs[1,0].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[1,0].legend(loc='upper right', fontsize=6)\naxs[1,1].plot(names_temp, full_distances_lbp[3], 'b--', label='LBP-MAP3')\naxs[1,1].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[1,1].legend(loc='upper right', fontsize=6)\naxs[2,0].plot(names_temp, full_distances_lbp[4], 'b--', label='LBP-MAP4')\naxs[2,0].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[2,0].legend(loc='upper right', fontsize=6)\naxs[2,1].plot(names_temp, full_distances_lbp[5], 'b--', label='LBP-MAP5')\naxs[2,1].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[2,1].legend(loc='upper right', fontsize=6)\n\nfor ax in axs.flat:\n ax.label_outer()\n ax.yaxis.set_tick_params(labelsize=6)\n plt.sca(ax)\n plt.xticks(rotation=45, horizontalalignment='right', fontweight='light', fontsize=6 )\n plt.xlabel('Sequences', fontsize=6)\n\nfig.text(0.04, 0.5, 'Distances', va='center', rotation='vertical', fontsize=6 )\nplt.savefig( results_file + \"_lbp.png\", dpi = 200, bbox_inches='tight')\n\n###################################################################################################################\n# procesamos las distancias con MLBP\n###################################################################################################################\nplt.clf()\nfig, axs = plt.subplots(3,2)\naxs[0,0].plot(names_temp, full_distances_mlbp[0], 'b--', label='MLBP-MAP0')\naxs[0,0].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[0,0].legend(loc='upper right', fontsize=6)\naxs[0,1].plot(names_temp, full_distances_mlbp[1], 'b--', label='MLBP-MAP1')\naxs[0,1].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[0,1].legend(loc='upper right', fontsize=6)\naxs[1,0].plot(names_temp, full_distances_mlbp[2], 'b--', label='MLBP-MAP2')\naxs[1,0].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[1,0].legend(loc='upper right', fontsize=6)\naxs[1,1].plot(names_temp, full_distances_mlbp[3], 'b--', label='MLBP-MAP3')\naxs[1,1].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[1,1].legend(loc='upper right', 
fontsize=6)\naxs[2,0].plot(names_temp, full_distances_mlbp[4], 'b--', label='MLBP-MAP4')\naxs[2,0].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[2,0].legend(loc='upper right', fontsize=6)\naxs[2,1].plot(names_temp, full_distances_mlbp[5], 'b--', label='MLBP-MAP5')\naxs[2,1].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[2,1].legend(loc='upper right', fontsize=6)\n\nfor ax in axs.flat:\n ax.label_outer()\n ax.yaxis.set_tick_params(labelsize=6)\n plt.sca(ax)\n plt.xticks(rotation=45, horizontalalignment='right', fontweight='light', fontsize=6 )\n plt.xlabel('Sequences', fontsize=6)\n\nfig.text(0.04, 0.5, 'Distances', va='center', rotation='vertical', fontsize=6 )\nplt.savefig( results_file + \"_mlbp.png\", dpi = 200, bbox_inches='tight')\n\n\ndata_csv = []\nerror_fos = [] # save the error for each mappoing function with FOS\nerror_glcm = [] # save the error for each mappoing function with GLCM\nerror_lbp = [] # save the error for each mappoing function with LBP\nerror_mlbp = [] # save the error for each mappoing function with MLBP\nfor mapping_type in range(mapping_function_size):\n error_fos.append((np.sum((full_distances_fos[mapping_type] - distances_mega)**2))/distances_mega.shape[0])\n error_glcm.append((np.sum((full_distances_glcm[mapping_type] - distances_mega)**2))/distances_mega.shape[0])\n error_lbp.append((np.sum((full_distances_lbp[mapping_type] - distances_mega)**2))/distances_mega.shape[0])\n error_mlbp.append((np.sum((full_distances_mlbp[mapping_type] - distances_mega)**2))/distances_mega.shape[0])\n\ndata_csv.append(error_fos)\ndata_csv.append(error_glcm)\ndata_csv.append(error_lbp)\ndata_csv.append(error_mlbp)\n\ndata_csv = np.array(data_csv)\ndf = pd.DataFrame(data=data_csv.T, index=[\"map0\", \"map1\", \"map2\", \"map3\", \"map4\", \"map5\"], columns=[\"FOS\", \"GLCM\", \"LBP\", \"MLBP\"])\nprint(df)\ndf.to_csv(results_file + \".csv\", index=True)\n#print(error_fos)\n#print(error_glcm)\n#print(error_lbp)\n#print(error_mlbp)\n\n\n\n###################################################################################################################\n# proccesing a MAPPING 0 funciton with the all algorithms\n###################################################################################################################\n\nplt.clf()\nfig, axs = plt.subplots(2,2)\naxs[0,0].plot(names_temp, full_distances_fos[0], 'b--', label='FOS-MAP0')\naxs[0,0].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[0,0].legend(loc='upper right', fontsize=6)\naxs[0,1].plot(names_temp, full_distances_glcm[0], 'b--', label='GLCM-MAP0')\naxs[0,1].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[0,1].legend(loc='upper right', fontsize=6)\naxs[1,0].plot(names_temp, full_distances_lbp[0], 'b--', label='LBP-MAP0')\naxs[1,0].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[1,0].legend(loc='upper right', fontsize=6)\naxs[1,1].plot(names_temp, full_distances_mlbp[0], 'b--', label='MLBP-MAP0')\naxs[1,1].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[1,1].legend(loc='upper right', fontsize=6)\n\n\nfor ax in axs.flat:\n ax.label_outer()\n ax.yaxis.set_tick_params(labelsize=6)\n plt.sca(ax)\n plt.xticks(rotation=45, horizontalalignment='right', fontweight='light', fontsize=6 )\n plt.xlabel('Sequences', fontsize=6)\n\nfig.text(0.04, 0.5, 'Distances', va='center', rotation='vertical', fontsize=6 )\nplt.savefig( results_file + \"_0map.png\", dpi = 200, 
bbox_inches='tight')\n\n###################################################################################################################\n# proccesing a MAPPING 1 funciton with the all algorithms\n###################################################################################################################\n\nplt.clf()\nfig, axs = plt.subplots(2,2)\naxs[0,0].plot(names_temp, full_distances_fos[1], 'b--', label='FOS-MAP1')\naxs[0,0].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[0,0].legend(loc='upper right', fontsize=6)\naxs[0,1].plot(names_temp, full_distances_glcm[1], 'b--', label='GLCM-MAP1')\naxs[0,1].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[0,1].legend(loc='upper right', fontsize=6)\naxs[1,0].plot(names_temp, full_distances_lbp[1], 'b--', label='LBP-MAP1')\naxs[1,0].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[1,0].legend(loc='upper right', fontsize=6)\naxs[1,1].plot(names_temp, full_distances_mlbp[1], 'b--', label='MLBP-MAP1')\naxs[1,1].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[1,1].legend(loc='upper right', fontsize=6)\n\nfor ax in axs.flat:\n ax.label_outer()\n ax.yaxis.set_tick_params(labelsize=6)\n plt.sca(ax)\n plt.xticks(rotation=45, horizontalalignment='right', fontweight='light', fontsize=6 )\n plt.xlabel('Sequences', fontsize=6)\n\nfig.text(0.04, 0.5, 'Distances', va='center', rotation='vertical', fontsize=6 )\nplt.savefig( results_file + \"_1map.png\", dpi = 200, bbox_inches='tight')\n\n###################################################################################################################\n# proccesing a MAPPING 2 funciton with the all algorithms\n###################################################################################################################\n\nplt.clf()\nfig, axs = plt.subplots(2,2)\naxs[0,0].plot(names_temp, full_distances_fos[2], 'b--', label='FOS-MAP2')\naxs[0,0].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[0,0].legend(loc='upper right', fontsize=6)\naxs[0,1].plot(names_temp, full_distances_glcm[2], 'b--', label='GLCM-MAP2')\naxs[0,1].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[0,1].legend(loc='upper right', fontsize=6)\naxs[1,0].plot(names_temp, full_distances_lbp[2], 'b--', label='LBP-MAP2')\naxs[1,0].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[1,0].legend(loc='upper right', fontsize=6)\naxs[1,1].plot(names_temp, full_distances_mlbp[2], 'b--', label='MLBP-MAP2')\naxs[1,1].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[1,1].legend(loc='upper right', fontsize=6)\n\nfor ax in axs.flat:\n ax.label_outer()\n ax.yaxis.set_tick_params(labelsize=6)\n plt.sca(ax)\n plt.xticks(rotation=45, horizontalalignment='right', fontweight='light', fontsize=6 )\n plt.xlabel('Sequences', fontsize=6)\n\nfig.text(0.04, 0.5, 'Distances', va='center', rotation='vertical', fontsize=6 )\nplt.savefig( results_file + \"_2map.png\", dpi = 200, bbox_inches='tight')\n\n###################################################################################################################\n# proccesing a MAPPING 3 funciton with the all algorithms\n###################################################################################################################\n\nplt.clf()\nfig, axs = plt.subplots(2,2)\naxs[0,0].plot(names_temp, full_distances_fos[3], 'b--', label='FOS-MAP3')\naxs[0,0].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[0,0].legend(loc='upper right', fontsize=6)\naxs[0,1].plot(names_temp, 
full_distances_glcm[3], 'b--', label='GLCM-MAP3')\naxs[0,1].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[0,1].legend(loc='upper right', fontsize=6)\naxs[1,0].plot(names_temp, full_distances_lbp[3], 'b--', label='LBP-MAP3')\naxs[1,0].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[1,0].legend(loc='upper right', fontsize=6)\naxs[1,1].plot(names_temp, full_distances_mlbp[3], 'b--', label='MLBP-MAP3')\naxs[1,1].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[1,1].legend(loc='upper right', fontsize=6)\n\nfor ax in axs.flat:\n ax.label_outer()\n ax.yaxis.set_tick_params(labelsize=6)\n plt.sca(ax)\n plt.xticks(rotation=45, horizontalalignment='right', fontweight='light', fontsize=6 )\n plt.xlabel('Sequences', fontsize=6)\n\nfig.text(0.04, 0.5, 'Distances', va='center', rotation='vertical', fontsize=6 )\nplt.savefig( results_file + \"_3map.png\", dpi = 200, bbox_inches='tight')\n\n\n###################################################################################################################\n# proccesing a MAPPING 4 funciton with the all algorithms\n###################################################################################################################\n\nplt.clf()\nfig, axs = plt.subplots(2,2)\naxs[0,0].plot(names_temp, full_distances_fos[4], 'b--', label='FOS-MAP4')\naxs[0,0].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[0,0].legend(loc='upper right', fontsize=6)\naxs[0,1].plot(names_temp, full_distances_glcm[4], 'b--', label='GLCM-MAP4')\naxs[0,1].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[0,1].legend(loc='upper right', fontsize=6)\naxs[1,0].plot(names_temp, full_distances_lbp[4], 'b--', label='LBP-MAP4')\naxs[1,0].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[1,0].legend(loc='upper right', fontsize=6)\naxs[1,1].plot(names_temp, full_distances_mlbp[4], 'b--', label='MLBP-MAP4')\naxs[1,1].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[1,1].legend(loc='upper right', fontsize=6)\n\nfor ax in axs.flat:\n ax.label_outer()\n ax.yaxis.set_tick_params(labelsize=6)\n plt.sca(ax)\n plt.xticks(rotation=45, horizontalalignment='right', fontweight='light', fontsize=6 )\n plt.xlabel('Sequences', fontsize=6)\n\nfig.text(0.04, 0.5, 'Distances', va='center', rotation='vertical', fontsize=6 )\nplt.savefig( results_file + \"_4map.png\", dpi = 200, bbox_inches='tight')\n\n###################################################################################################################\n# proccesing a MAPPING 5 funciton with the all algorithms\n###################################################################################################################\n\nplt.clf()\nfig, axs = plt.subplots(2,2)\naxs[0,0].plot(names_temp, full_distances_fos[5], 'b--', label='FOS-MAP5')\naxs[0,0].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[0,0].legend(loc='upper right', fontsize=6)\naxs[0,1].plot(names_temp, full_distances_glcm[5], 'b--', label='GLCM-MAP5')\naxs[0,1].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[0,1].legend(loc='upper right', fontsize=6)\naxs[1,0].plot(names_temp, full_distances_lbp[5], 'b--', label='LBP-MAP5')\naxs[1,0].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[1,0].legend(loc='upper right', fontsize=6)\naxs[1,1].plot(names_temp, full_distances_mlbp[5], 'b--', label='MLBP-MAP5')\naxs[1,1].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[1,1].legend(loc='upper right', fontsize=6)\n\nfor ax in axs.flat:\n 
ax.label_outer()\n ax.yaxis.set_tick_params(labelsize=6)\n plt.sca(ax)\n plt.xticks(rotation=45, horizontalalignment='right', fontweight='light', fontsize=6 )\n plt.xlabel('Sequences', fontsize=6)\n\nfig.text(0.04, 0.5, 'Distances', va='center', rotation='vertical', fontsize=6 )\nplt.savefig( results_file + \"_5map.png\", dpi = 200, bbox_inches='tight')",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Migration(migrations.Migration):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Migration(migrations.Migration):
initial = True
dependencies = [('tracker', '0003_auto_20210626_0735')]
operations = [migrations.CreateModel(name='Result', fields=[('uuid',
models.UUIDField(default=uuid.uuid4, editable=False, primary_key=
True, serialize=False)), ('created_date', models.DateTimeField(
auto_now_add=True, db_index=True, null=True)), ('modified_date',
models.DateTimeField(auto_now=True, db_index=True)), ('value',
models.TextField(max_length=2000)), ('tracker', models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE, to='tracker.tracker'))
], options={'abstract': False})]
<|reserved_special_token_1|>
from django.db import migrations, models
import django.db.models.deletion
import uuid
class Migration(migrations.Migration):
initial = True
dependencies = [('tracker', '0003_auto_20210626_0735')]
operations = [migrations.CreateModel(name='Result', fields=[('uuid',
models.UUIDField(default=uuid.uuid4, editable=False, primary_key=
True, serialize=False)), ('created_date', models.DateTimeField(
auto_now_add=True, db_index=True, null=True)), ('modified_date',
models.DateTimeField(auto_now=True, db_index=True)), ('value',
models.TextField(max_length=2000)), ('tracker', models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE, to='tracker.tracker'))
], options={'abstract': False})]
<|reserved_special_token_1|>
# Generated by Django 3.2.4 on 2021-07-18 02:05
from django.db import migrations, models
import django.db.models.deletion
import uuid
class Migration(migrations.Migration):
initial = True
dependencies = [
('tracker', '0003_auto_20210626_0735'),
]
operations = [
migrations.CreateModel(
name='Result',
fields=[
('uuid', models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False)),
('created_date', models.DateTimeField(auto_now_add=True, db_index=True, null=True)),
('modified_date', models.DateTimeField(auto_now=True, db_index=True)),
('value', models.TextField(max_length=2000)),
('tracker', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='tracker.tracker')),
],
options={
'abstract': False,
},
),
]
|
flexible
|
{
"blob_id": "ead843f1edcfe798613effb049e3ca79dcd03b71",
"index": 7919,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass Migration(migrations.Migration):\n <mask token>\n <mask token>\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass Migration(migrations.Migration):\n initial = True\n dependencies = [('tracker', '0003_auto_20210626_0735')]\n operations = [migrations.CreateModel(name='Result', fields=[('uuid',\n models.UUIDField(default=uuid.uuid4, editable=False, primary_key=\n True, serialize=False)), ('created_date', models.DateTimeField(\n auto_now_add=True, db_index=True, null=True)), ('modified_date',\n models.DateTimeField(auto_now=True, db_index=True)), ('value',\n models.TextField(max_length=2000)), ('tracker', models.ForeignKey(\n on_delete=django.db.models.deletion.CASCADE, to='tracker.tracker'))\n ], options={'abstract': False})]\n",
"step-4": "from django.db import migrations, models\nimport django.db.models.deletion\nimport uuid\n\n\nclass Migration(migrations.Migration):\n initial = True\n dependencies = [('tracker', '0003_auto_20210626_0735')]\n operations = [migrations.CreateModel(name='Result', fields=[('uuid',\n models.UUIDField(default=uuid.uuid4, editable=False, primary_key=\n True, serialize=False)), ('created_date', models.DateTimeField(\n auto_now_add=True, db_index=True, null=True)), ('modified_date',\n models.DateTimeField(auto_now=True, db_index=True)), ('value',\n models.TextField(max_length=2000)), ('tracker', models.ForeignKey(\n on_delete=django.db.models.deletion.CASCADE, to='tracker.tracker'))\n ], options={'abstract': False})]\n",
"step-5": "# Generated by Django 3.2.4 on 2021-07-18 02:05\n\nfrom django.db import migrations, models\nimport django.db.models.deletion\nimport uuid\n\n\nclass Migration(migrations.Migration):\n\n initial = True\n\n dependencies = [\n ('tracker', '0003_auto_20210626_0735'),\n ]\n\n operations = [\n migrations.CreateModel(\n name='Result',\n fields=[\n ('uuid', models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False)),\n ('created_date', models.DateTimeField(auto_now_add=True, db_index=True, null=True)),\n ('modified_date', models.DateTimeField(auto_now=True, db_index=True)),\n ('value', models.TextField(max_length=2000)),\n ('tracker', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='tracker.tracker')),\n ],\n options={\n 'abstract': False,\n },\n ),\n ]\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
# this is the example code from the top-level README
from spatialmath import SE3
import roboticstoolbox as rtb
import swift
robot = rtb.models.DH.Panda()
print(robot)
T = robot.fkine(robot.qz)
print(T)
# IK
T = SE3(0.7, 0.2, 0.1) * SE3.OA([0, 1, 0], [0, 0, -1])
sol = robot.ikine_LMS(T) # solve IK, ignore additional outputs
print(sol.q) # display joint angles
# FK shows that desired end-effector pose was achieved
print(robot.fkine(sol.q))
qtraj = rtb.jtraj(robot.qz, sol.q, 50)
robot.plot(qtraj.q, movie="panda1.gif")
# URDF + Swift version
dt = 0.050 # simulation timestep in seconds
robot = rtb.models.URDF.Panda()
print(robot)
env = swift.Swift() # instantiate 3D browser-based visualizer
env.launch("chrome") # activate it
env.add(robot) # add robot to the 3D scene
env.start_recording("panda2", 1 / dt)
for qk in qtraj.q: # for each joint configuration on trajectory
robot.q = qk # update the robot state
env.step() # update visualization
env.stop_recording()
# ffmpeg -i panda2.webm -vf "scale=iw*.5:ih*.5" panda2.gif
|
normal
|
{
"blob_id": "cc1a1491ffbcf470705aeea079faac290dbaa25e",
"index": 5965,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nprint(robot)\n<mask token>\nprint(T)\n<mask token>\nprint(sol.q)\nprint(robot.fkine(sol.q))\n<mask token>\nrobot.plot(qtraj.q, movie='panda1.gif')\n<mask token>\nprint(robot)\n<mask token>\nenv.launch('chrome')\nenv.add(robot)\nenv.start_recording('panda2', 1 / dt)\nfor qk in qtraj.q:\n robot.q = qk\n env.step()\nenv.stop_recording()\n",
"step-3": "<mask token>\nrobot = rtb.models.DH.Panda()\nprint(robot)\nT = robot.fkine(robot.qz)\nprint(T)\nT = SE3(0.7, 0.2, 0.1) * SE3.OA([0, 1, 0], [0, 0, -1])\nsol = robot.ikine_LMS(T)\nprint(sol.q)\nprint(robot.fkine(sol.q))\nqtraj = rtb.jtraj(robot.qz, sol.q, 50)\nrobot.plot(qtraj.q, movie='panda1.gif')\ndt = 0.05\nrobot = rtb.models.URDF.Panda()\nprint(robot)\nenv = swift.Swift()\nenv.launch('chrome')\nenv.add(robot)\nenv.start_recording('panda2', 1 / dt)\nfor qk in qtraj.q:\n robot.q = qk\n env.step()\nenv.stop_recording()\n",
"step-4": "from spatialmath import SE3\nimport roboticstoolbox as rtb\nimport swift\nrobot = rtb.models.DH.Panda()\nprint(robot)\nT = robot.fkine(robot.qz)\nprint(T)\nT = SE3(0.7, 0.2, 0.1) * SE3.OA([0, 1, 0], [0, 0, -1])\nsol = robot.ikine_LMS(T)\nprint(sol.q)\nprint(robot.fkine(sol.q))\nqtraj = rtb.jtraj(robot.qz, sol.q, 50)\nrobot.plot(qtraj.q, movie='panda1.gif')\ndt = 0.05\nrobot = rtb.models.URDF.Panda()\nprint(robot)\nenv = swift.Swift()\nenv.launch('chrome')\nenv.add(robot)\nenv.start_recording('panda2', 1 / dt)\nfor qk in qtraj.q:\n robot.q = qk\n env.step()\nenv.stop_recording()\n",
"step-5": "# this is the example code from the t0p-level README..d\nfrom spatialmath import SE3\nimport roboticstoolbox as rtb\nimport swift\n\nrobot = rtb.models.DH.Panda()\nprint(robot)\nT = robot.fkine(robot.qz)\nprint(T)\n\n# IK\n\nT = SE3(0.7, 0.2, 0.1) * SE3.OA([0, 1, 0], [0, 0, -1])\nsol = robot.ikine_LMS(T) # solve IK, ignore additional outputs\nprint(sol.q) # display joint angles\n# FK shows that desired end-effector pose was achieved\nprint(robot.fkine(sol.q))\n\n\nqtraj = rtb.jtraj(robot.qz, sol.q, 50)\nrobot.plot(qtraj.q, movie=\"panda1.gif\")\n\n# URDF + Swift version\ndt = 0.050 # simulation timestep in seconds\nrobot = rtb.models.URDF.Panda()\nprint(robot)\n\nenv = swift.Swift() # instantiate 3D browser-based visualizer\nenv.launch(\"chrome\") # activate it\nenv.add(robot) # add robot to the 3D scene\nenv.start_recording(\"panda2\", 1 / dt)\nfor qk in qtraj.q: # for each joint configuration on trajectory\n robot.q = qk # update the robot state\n env.step() # update visualization\nenv.stop_recording()\n\n# ffmpeg -i panda2.webm -vf \"scale=iw*.5:ih*.5\" panda2.gif\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
from django.db import models
from django.core.validators import RegexValidator, MaxValueValidator
# from Delivery.models import Delivery
# from Customers.models import Customer, Address, Order, Item
# Create your models here.
class Restaurant(models.Model):
Restaurant_ID = models.AutoField(primary_key=True)
Restaurant_Name = models.CharField(max_length=250)
Restaurant_Logo = models.ImageField(upload_to='Restaurants/Pictures/Logo')
# + str(Restaurant_ID) + '/' + str(Restaurant_Name))
Restaurant_Area = models.CharField(max_length=250)
Restaurant_Pin = models.CharField(max_length=6, default=132658)
Restaurant_City = models.CharField(max_length=250)
Restaurant_State = models.CharField(max_length=250)
Restaurant_Regex = RegexValidator(regex=r'^\+?1?\d{9,15}$',
message="Phone number must be entered in the format:" +
" '+999999999'. Up to 15 digits allowed.")
Restaurant_Num = models.CharField(validators=[Restaurant_Regex], max_length=17)
Restaurant_Email = models.CharField(max_length=250)
Restaurant_Ratings_Count = models.IntegerField()
    Restaurant_Rating = models.IntegerField(validators=[MaxValueValidator(10)])
class FoodCategory(models.Model):
FoodCategory_ID = models.AutoField(primary_key=True)
FoodCategory_Name = models.CharField(max_length=250)
class Food(models.Model):
Food_ID = models.AutoField(primary_key=True)
Food_Name = models.CharField(max_length=250)
Food_Pic = models.ImageField(upload_to='Restaurants/Pictures/Food')
Food_Category_ID = models.ForeignKey(FoodCategory, on_delete=models.CASCADE)
Food_Price = models.IntegerField()
Food_Discount = models.IntegerField(default=0)
Food_Res_ID = models.ForeignKey(Restaurant, on_delete=models.CASCADE)
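
# A minimal usage sketch (hypothetical data, Django shell style, not part of the app):
#
# place = Restaurant.objects.create(
#     Restaurant_Name='Example Diner', Restaurant_Area='Downtown',
#     Restaurant_City='Springfield', Restaurant_State='IL',
#     Restaurant_Num='+15551234567', Restaurant_Email='[email protected]',
#     Restaurant_Ratings_Count=0, Restaurant_Rating=0,
#     Restaurant_Logo='Restaurants/Pictures/Logo/example.png')
# snacks = FoodCategory.objects.create(FoodCategory_Name='Snacks')
# fries = Food.objects.create(Food_Name='Fries',
#     Food_Pic='Restaurants/Pictures/Food/fries.png', Food_Category_ID=snacks,
#     Food_Price=120, Food_Discount=10, Food_Res_ID=place)
# Food.objects.filter(Food_Res_ID=place, Food_Discount__gt=0)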
|
normal
|
{
"blob_id": "7ea1ee7c55cd53f7137c933790c3a22957f0ffea",
"index": 4987,
"step-1": "<mask token>\n\n\nclass Food(models.Model):\n Food_ID = models.AutoField(primary_key=True)\n Food_Name = models.CharField(max_length=250)\n Food_Pic = models.ImageField(upload_to='Restaurants/Pictures/Food')\n Food_Category_ID = models.ForeignKey(FoodCategory, on_delete=models.CASCADE\n )\n Food_Price = models.IntegerField()\n Food_Discount = models.IntegerField(default=0)\n Food_Res_ID = models.ForeignKey(Restaurant, on_delete=models.CASCADE)\n",
"step-2": "<mask token>\n\n\nclass FoodCategory(models.Model):\n FoodCategory_ID = models.AutoField(primary_key=True)\n FoodCategory_Name = models.CharField(max_length=250)\n\n\nclass Food(models.Model):\n Food_ID = models.AutoField(primary_key=True)\n Food_Name = models.CharField(max_length=250)\n Food_Pic = models.ImageField(upload_to='Restaurants/Pictures/Food')\n Food_Category_ID = models.ForeignKey(FoodCategory, on_delete=models.CASCADE\n )\n Food_Price = models.IntegerField()\n Food_Discount = models.IntegerField(default=0)\n Food_Res_ID = models.ForeignKey(Restaurant, on_delete=models.CASCADE)\n",
"step-3": "<mask token>\n\n\nclass Restaurant(models.Model):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n\nclass FoodCategory(models.Model):\n FoodCategory_ID = models.AutoField(primary_key=True)\n FoodCategory_Name = models.CharField(max_length=250)\n\n\nclass Food(models.Model):\n Food_ID = models.AutoField(primary_key=True)\n Food_Name = models.CharField(max_length=250)\n Food_Pic = models.ImageField(upload_to='Restaurants/Pictures/Food')\n Food_Category_ID = models.ForeignKey(FoodCategory, on_delete=models.CASCADE\n )\n Food_Price = models.IntegerField()\n Food_Discount = models.IntegerField(default=0)\n Food_Res_ID = models.ForeignKey(Restaurant, on_delete=models.CASCADE)\n",
"step-4": "<mask token>\n\n\nclass Restaurant(models.Model):\n Restaurant_ID = models.AutoField(primary_key=True)\n Restaurant_Name = models.CharField(max_length=250)\n Restaurant_Logo = models.ImageField(upload_to='Restaurants/Pictures/Logo')\n Restaurant_Area = models.CharField(max_length=250)\n Restaurant_Pin = models.CharField(max_length=6, default=132658)\n Restaurant_City = models.CharField(max_length=250)\n Restaurant_State = models.CharField(max_length=250)\n Restaurant_Regex = RegexValidator(regex='^\\\\+?1?\\\\d{9,15}$', message=\n 'Phone number must be entered in the format:' +\n \" '+999999999'. Up to 15 digits allowed.\")\n Restaurant_Num = models.CharField(validators=[Restaurant_Regex],\n max_length=17)\n Restaurant_Email = models.CharField(max_length=250)\n Restaurant_Ratings_Count = models.IntegerField()\n Restaurant_Rating = models.IntegerField(MaxValueValidator(10))\n\n\nclass FoodCategory(models.Model):\n FoodCategory_ID = models.AutoField(primary_key=True)\n FoodCategory_Name = models.CharField(max_length=250)\n\n\nclass Food(models.Model):\n Food_ID = models.AutoField(primary_key=True)\n Food_Name = models.CharField(max_length=250)\n Food_Pic = models.ImageField(upload_to='Restaurants/Pictures/Food')\n Food_Category_ID = models.ForeignKey(FoodCategory, on_delete=models.CASCADE\n )\n Food_Price = models.IntegerField()\n Food_Discount = models.IntegerField(default=0)\n Food_Res_ID = models.ForeignKey(Restaurant, on_delete=models.CASCADE)\n",
"step-5": "from django.db import models\nfrom django.core.validators import RegexValidator, MaxValueValidator\n# from Delivery.models import Delivery\n# from Customers.models import Customer, Address, Order, Item\n\n# Create your models here.\n\n\nclass Restaurant(models.Model):\n Restaurant_ID = models.AutoField(primary_key=True)\n Restaurant_Name = models.CharField(max_length=250)\n Restaurant_Logo = models.ImageField(upload_to='Restaurants/Pictures/Logo')\n # + str(Restaurant_ID) + '/' + str(Restaurant_Name))\n Restaurant_Area = models.CharField(max_length=250)\n Restaurant_Pin = models.CharField(max_length=6, default=132658)\n Restaurant_City = models.CharField(max_length=250)\n Restaurant_State = models.CharField(max_length=250)\n Restaurant_Regex = RegexValidator(regex=r'^\\+?1?\\d{9,15}$',\n message=\"Phone number must be entered in the format:\" +\n \" '+999999999'. Up to 15 digits allowed.\")\n Restaurant_Num = models.CharField(validators=[Restaurant_Regex], max_length=17)\n Restaurant_Email = models.CharField(max_length=250)\n Restaurant_Ratings_Count = models.IntegerField()\n Restaurant_Rating = models.IntegerField(MaxValueValidator(10))\n\n\nclass FoodCategory(models.Model):\n FoodCategory_ID = models.AutoField(primary_key=True)\n FoodCategory_Name = models.CharField(max_length=250)\n\n\nclass Food(models.Model):\n Food_ID = models.AutoField(primary_key=True)\n Food_Name = models.CharField(max_length=250)\n Food_Pic = models.ImageField(upload_to='Restaurants/Pictures/Food')\n Food_Category_ID = models.ForeignKey(FoodCategory, on_delete=models.CASCADE)\n Food_Price = models.IntegerField()\n Food_Discount = models.IntegerField(default=0)\n Food_Res_ID = models.ForeignKey(Restaurant, on_delete=models.CASCADE)\n\n",
"step-ids": [
2,
4,
5,
6,
8
]
}
|
[
2,
4,
5,
6,
8
] |
#determines where the robot is located.
def sense(p, Z, colors, sensor_right):
#initialization
q = []
pHit = sensor_right;
pMiss = 1 - sensor_right;
#number of rows
m = len(colors)
#number of columns
n = len(colors[0])
#sum
s = 0
for i in range(m):
temp = []
for j in range(n):
hit = (Z == colors[i][j])
#product
temp.append(p[i][j] * (hit * pHit + (1-hit) * pMiss))
q.append(temp)
s = s + sum(temp)
#normalization
if(s != 0):
for i in range(m):
for j in range(n):
q[i][j] = q[i][j] / s
return q
#moves the robot by U units.
def move(p, U, p_move, m, n):
#initialization
q = []
pExact = p_move;
pUndershoot = 1 - p_move;#probability of staying at the same location
for i in range(m):
temp = []
for j in range(n):
s = pExact * p[(i - U[0])% m][(j - U[1])% n]
#convolution /addition
s = s + pUndershoot * p[i][j]
temp.append(s)
q.append(temp)
return q
#p_move probability that motion is correct
#sensor_right probability that the sensor is correct
def localize(colors, measurements, motions, sensor_right, p_move):
p = []
#start with uniform distribution
#number of rows
m = len(colors)
#number of columns
n = len(colors[0])
#size
size = m * n;
for i in range(m):
temp = [];
for j in range(n):
temp.append(1/size);
p.append(temp)
for k in range(len(measurements)):
p = move(p, motions[k], p_move, m, n)
p = sense(p, measurements[k], colors, sensor_right)
return p
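
# A minimal usage sketch (hypothetical grid and readings, added for illustration;
# none of these values come from the original):
#
# colors = [['G', 'G', 'G'],
#           ['G', 'R', 'G'],
#           ['G', 'G', 'G']]
# measurements = ['R']
# motions = [[0, 0]]
# p = localize(colors, measurements, motions, sensor_right=0.8, p_move=1.0)
# print(p)  # the largest probability should sit on the single red cell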
|
normal
|
{
"blob_id": "10937ee1e48d23b12b76a2abc44ee8bd0647aef5",
"index": 9248,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef localize(colors, measurements, motions, sensor_right, p_move):\n p = []\n m = len(colors)\n n = len(colors[0])\n size = m * n\n for i in range(m):\n temp = []\n for j in range(n):\n temp.append(1 / size)\n p.append(temp)\n for k in range(len(measurements)):\n p = move(p, motions[k], p_move, m, n)\n p = sense(p, measurements[k], colors, sensor_right)\n return p\n",
"step-3": "<mask token>\n\n\ndef move(p, U, p_move, m, n):\n q = []\n pExact = p_move\n pUndershoot = 1 - p_move\n for i in range(m):\n temp = []\n for j in range(n):\n s = pExact * p[(i - U[0]) % m][(j - U[1]) % n]\n s = s + pUndershoot * p[i][j]\n temp.append(s)\n q.append(temp)\n return q\n\n\ndef localize(colors, measurements, motions, sensor_right, p_move):\n p = []\n m = len(colors)\n n = len(colors[0])\n size = m * n\n for i in range(m):\n temp = []\n for j in range(n):\n temp.append(1 / size)\n p.append(temp)\n for k in range(len(measurements)):\n p = move(p, motions[k], p_move, m, n)\n p = sense(p, measurements[k], colors, sensor_right)\n return p\n",
"step-4": "def sense(p, Z, colors, sensor_right):\n q = []\n pHit = sensor_right\n pMiss = 1 - sensor_right\n m = len(colors)\n n = len(colors[0])\n s = 0\n for i in range(m):\n temp = []\n for j in range(n):\n hit = Z == colors[i][j]\n temp.append(p[i][j] * (hit * pHit + (1 - hit) * pMiss))\n q.append(temp)\n s = s + sum(temp)\n if s != 0:\n for i in range(m):\n for j in range(n):\n q[i][j] = q[i][j] / s\n return q\n\n\ndef move(p, U, p_move, m, n):\n q = []\n pExact = p_move\n pUndershoot = 1 - p_move\n for i in range(m):\n temp = []\n for j in range(n):\n s = pExact * p[(i - U[0]) % m][(j - U[1]) % n]\n s = s + pUndershoot * p[i][j]\n temp.append(s)\n q.append(temp)\n return q\n\n\ndef localize(colors, measurements, motions, sensor_right, p_move):\n p = []\n m = len(colors)\n n = len(colors[0])\n size = m * n\n for i in range(m):\n temp = []\n for j in range(n):\n temp.append(1 / size)\n p.append(temp)\n for k in range(len(measurements)):\n p = move(p, motions[k], p_move, m, n)\n p = sense(p, measurements[k], colors, sensor_right)\n return p\n",
"step-5": "#determines where the robot is located.\ndef sense(p, Z, colors, sensor_right):\n #initialization\n q = []\n pHit = sensor_right;\n pMiss = 1 - sensor_right;\n #number of rows\n m = len(colors) \n #number of columns\n n = len(colors[0])\n #sum \n s = 0\n for i in range(m):\n temp = []\n \n for j in range(n):\n hit = (Z == colors[i][j]) \n #product \n temp.append(p[i][j] * (hit * pHit + (1-hit) * pMiss))\n q.append(temp)\n s = s + sum(temp) \n \n #normalization\n if(s != 0):\n for i in range(m):\n for j in range(n):\n q[i][j] = q[i][j] / s\n return q\n\n#moves the robot by U units.\ndef move(p, U, p_move, m, n):\n #initialization\n q = []\n pExact = p_move;\n pUndershoot = 1 - p_move;#probability of staying at the same location\n \n for i in range(m):\n temp = []\n \n for j in range(n):\n s = pExact * p[(i - U[0])% m][(j - U[1])% n]\n #convolution /addition\n s = s + pUndershoot * p[i][j]\n temp.append(s)\n q.append(temp)\n\n return q\n\n#p_move probablity that motion is correct\n#sensor_right probability that the sensor is correct \ndef localize(colors, measurements, motions, sensor_right, p_move):\n p = []\n #start with uniform distribution\n #number of rows\n m = len(colors) \n #number of columns\n n = len(colors[0])\n #size \n size = m * n;\n \n for i in range(m):\n temp = [];\n for j in range(n):\n temp.append(1/size);\n p.append(temp)\n \n\n for k in range(len(measurements)):\n p = move(p, motions[k], p_move, m, n)\n p = sense(p, measurements[k], colors, sensor_right) \n\n return p",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
#!/usr/bin/env python
import mcvine.cli
from numpy import array
from mcvine_workflow.singlextal.resolution import use_res_comps as urc
beam_neutrons_path = '/SNS/users/p63/ORNL_public_research/MCViNE_Covmat_comparison/mcvine_resolution/beams/beam_125_1e9/out/neutrons'
instrument = urc.instrument('ARCS', '3.*meter', '13.6*meter', '-0.15*meter')
samplexmlpath = '/SNS/users/p63/ORNL_public_research/learning_from_mcvine/res_sims/Ei_125/E86.7120348337_hkl-7.62228386234,3.53360635791,-3.42342194523/sample/sampleassembly.xml'
psi = -0.011798841097534662
hkl2Q = array([[-0.64961065, 0.94207344, 0. ],
[ 0.66614652, 0.4593441 , -0.80916512],
[-0.66614652, -0.4593441 , -0.80916512]])
pp = array([-1.22433552, 2.73879582, 0.0612745 ])
pixel = urc.pixel('0.5*inch', 'meter/128', '10*atm', position=(pp[1], pp[2], pp[0]))
t_m2p = 0.0038753067573975117
Q = array([ 9.58591698, -3.98508133, -0.08915738])
E = 86.712034833655451
hkl_projection = array([-0.6235806 , -0.08226367, 0.30709024])
urc.run(
beam_neutrons_path, instrument, samplexmlpath, psi, hkl2Q, pixel, t_m2p,
Q, E, hkl_projection, Nbuffer=100000)
|
normal
|
{
"blob_id": "47c5fb03cb427d5c9f7703e1715e026b6f2c7a35",
"index": 4660,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nurc.run(beam_neutrons_path, instrument, samplexmlpath, psi, hkl2Q, pixel,\n t_m2p, Q, E, hkl_projection, Nbuffer=100000)\n",
"step-3": "<mask token>\nbeam_neutrons_path = (\n '/SNS/users/p63/ORNL_public_research/MCViNE_Covmat_comparison/mcvine_resolution/beams/beam_125_1e9/out/neutrons'\n )\ninstrument = urc.instrument('ARCS', '3.*meter', '13.6*meter', '-0.15*meter')\nsamplexmlpath = (\n '/SNS/users/p63/ORNL_public_research/learning_from_mcvine/res_sims/Ei_125/E86.7120348337_hkl-7.62228386234,3.53360635791,-3.42342194523/sample/sampleassembly.xml'\n )\npsi = -0.011798841097534662\nhkl2Q = array([[-0.64961065, 0.94207344, 0.0], [0.66614652, 0.4593441, -\n 0.80916512], [-0.66614652, -0.4593441, -0.80916512]])\npp = array([-1.22433552, 2.73879582, 0.0612745])\npixel = urc.pixel('0.5*inch', 'meter/128', '10*atm', position=(pp[1], pp[2],\n pp[0]))\nt_m2p = 0.0038753067573975117\nQ = array([9.58591698, -3.98508133, -0.08915738])\nE = 86.71203483365545\nhkl_projection = array([-0.6235806, -0.08226367, 0.30709024])\nurc.run(beam_neutrons_path, instrument, samplexmlpath, psi, hkl2Q, pixel,\n t_m2p, Q, E, hkl_projection, Nbuffer=100000)\n",
"step-4": "import mcvine.cli\nfrom numpy import array\nfrom mcvine_workflow.singlextal.resolution import use_res_comps as urc\nbeam_neutrons_path = (\n '/SNS/users/p63/ORNL_public_research/MCViNE_Covmat_comparison/mcvine_resolution/beams/beam_125_1e9/out/neutrons'\n )\ninstrument = urc.instrument('ARCS', '3.*meter', '13.6*meter', '-0.15*meter')\nsamplexmlpath = (\n '/SNS/users/p63/ORNL_public_research/learning_from_mcvine/res_sims/Ei_125/E86.7120348337_hkl-7.62228386234,3.53360635791,-3.42342194523/sample/sampleassembly.xml'\n )\npsi = -0.011798841097534662\nhkl2Q = array([[-0.64961065, 0.94207344, 0.0], [0.66614652, 0.4593441, -\n 0.80916512], [-0.66614652, -0.4593441, -0.80916512]])\npp = array([-1.22433552, 2.73879582, 0.0612745])\npixel = urc.pixel('0.5*inch', 'meter/128', '10*atm', position=(pp[1], pp[2],\n pp[0]))\nt_m2p = 0.0038753067573975117\nQ = array([9.58591698, -3.98508133, -0.08915738])\nE = 86.71203483365545\nhkl_projection = array([-0.6235806, -0.08226367, 0.30709024])\nurc.run(beam_neutrons_path, instrument, samplexmlpath, psi, hkl2Q, pixel,\n t_m2p, Q, E, hkl_projection, Nbuffer=100000)\n",
"step-5": "#!/usr/bin/env python\nimport mcvine.cli\nfrom numpy import array\nfrom mcvine_workflow.singlextal.resolution import use_res_comps as urc\nbeam_neutrons_path = '/SNS/users/p63/ORNL_public_research/MCViNE_Covmat_comparison/mcvine_resolution/beams/beam_125_1e9/out/neutrons'\ninstrument = urc.instrument('ARCS', '3.*meter', '13.6*meter', '-0.15*meter')\nsamplexmlpath = '/SNS/users/p63/ORNL_public_research/learning_from_mcvine/res_sims/Ei_125/E86.7120348337_hkl-7.62228386234,3.53360635791,-3.42342194523/sample/sampleassembly.xml'\npsi = -0.011798841097534662\nhkl2Q = array([[-0.64961065, 0.94207344, 0. ],\n [ 0.66614652, 0.4593441 , -0.80916512],\n [-0.66614652, -0.4593441 , -0.80916512]])\npp = array([-1.22433552, 2.73879582, 0.0612745 ])\npixel = urc.pixel('0.5*inch', 'meter/128', '10*atm', position=(pp[1], pp[2], pp[0]))\nt_m2p = 0.0038753067573975117\nQ = array([ 9.58591698, -3.98508133, -0.08915738])\nE = 86.712034833655451\nhkl_projection = array([-0.6235806 , -0.08226367, 0.30709024])\nurc.run(\n beam_neutrons_path, instrument, samplexmlpath, psi, hkl2Q, pixel, t_m2p,\n Q, E, hkl_projection, Nbuffer=100000)\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
@pytest.mark.usefixtures('driver')
class BaseClass:
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
@pytest.mark.usefixtures('driver')
class BaseClass:
"""BaseClass takes in driver fixture."""
<|reserved_special_token_1|>
import pytest
@pytest.mark.usefixtures('driver')
class BaseClass:
"""BaseClass takes in driver fixture."""
<|reserved_special_token_1|>
import pytest
@pytest.mark.usefixtures("driver")
class BaseClass:
"""BaseClass takes in driver fixture."""
|
flexible
|
{
"blob_id": "1b49cb59ebdb548cfc7567cd5cb4affe30f33aac",
"index": 5576,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\[email protected]('driver')\nclass BaseClass:\n <mask token>\n",
"step-3": "<mask token>\n\n\[email protected]('driver')\nclass BaseClass:\n \"\"\"BaseClass takes in driver fixture.\"\"\"\n",
"step-4": "import pytest\n\n\[email protected]('driver')\nclass BaseClass:\n \"\"\"BaseClass takes in driver fixture.\"\"\"\n",
"step-5": "import pytest\n\n\[email protected](\"driver\")\nclass BaseClass:\n \"\"\"BaseClass takes in driver fixture.\"\"\"\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
print('Добрый день,', name)
<|reserved_special_token_1|>
name = input('Введите ваше имя ')
print('Добрый день,', name)
<|reserved_special_token_1|>
name = input("Введите ваше имя ")
print("Добрый день,", name)
|
flexible
|
{
"blob_id": "e44c4b2c3b60d34d4540ec2d3a782c777c52fbc0",
"index": 8662,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nprint('Добрый день,', name)\n",
"step-3": "name = input('Введите ваше имя ')\nprint('Добрый день,', name)\n",
"step-4": "name = input(\"Введите ваше имя \")\nprint(\"Добрый день,\", name)\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
#Some people are standing in a queue. A selection process follows a rule where people standing on even positions are selected. Of the selected people a queue is formed, and again only the people on even positions are selected. This continues until we are left with one person. Find out the position of that person in the original queue.
#Input:
#The first line of input contains an integer T denoting the number of test cases. The first line of each test case is N, the number of people standing in the queue.
#Output:
#Print the position (in the original queue) of the person who is left.
#----------------------------------------------------------------------------------------------------------------------------------------------------------------------------
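# Worked example (added for clarity): for N = 10 the selected positions are
# 2,4,6,8,10 -> 4,8 -> 8, so the survivor originally stood at position 8,
# i.e. the largest power of two that does not exceed N.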
def even(n):
    if n == 0 or n == 1:
        return n
    elif n == 2:
        return 2
    else:
        # the survivor stands at the largest power of two not exceeding n,
        # so <= (not <) is needed when n itself is a power of two
        for i in reversed(range(n+1)):
            if 2**i <= n:
                return 2**i
t = int(input("Enter number of test cases:"))
arr = []
for i in range(t):
n = int(input())
ans = even(n)
arr.append(ans)
for i in range(len(arr)):
print(arr[i], end = ' ')
# --------------------------------------------------------------------------------------------------------------------
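# Alternative one-liner used below: the survivor's position is simply
# 2**floor(log2(n)), the largest power of two not exceeding n.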
import math
t = int(input())
for i in range(t):
n =int(input())
print(pow(2,int(math.log(n,2))))
|
normal
|
{
"blob_id": "358fd8efd5c3823255ab64d5f8b88b343415ed0e",
"index": 2708,
"step-1": "def even(n):\n if n == 0 or n == 1:\n return\n elif n == 2:\n return 2\n else:\n for i in reversed(range(n + 1)):\n if 2 ** i < n:\n return 2 ** i\n\n\n<mask token>\n",
"step-2": "def even(n):\n if n == 0 or n == 1:\n return\n elif n == 2:\n return 2\n else:\n for i in reversed(range(n + 1)):\n if 2 ** i < n:\n return 2 ** i\n\n\n<mask token>\nfor i in range(t):\n n = int(input())\n ans = even(n)\n arr.append(ans)\nfor i in range(len(arr)):\n print(arr[i], end=' ')\n<mask token>\nfor i in range(t):\n n = int(input())\n print(pow(2, int(math.log(n, 2))))\n",
"step-3": "def even(n):\n if n == 0 or n == 1:\n return\n elif n == 2:\n return 2\n else:\n for i in reversed(range(n + 1)):\n if 2 ** i < n:\n return 2 ** i\n\n\nt = int(input('Enter number of test cases:'))\narr = []\nfor i in range(t):\n n = int(input())\n ans = even(n)\n arr.append(ans)\nfor i in range(len(arr)):\n print(arr[i], end=' ')\n<mask token>\nt = int(input())\nfor i in range(t):\n n = int(input())\n print(pow(2, int(math.log(n, 2))))\n",
"step-4": "def even(n):\n if n == 0 or n == 1:\n return\n elif n == 2:\n return 2\n else:\n for i in reversed(range(n + 1)):\n if 2 ** i < n:\n return 2 ** i\n\n\nt = int(input('Enter number of test cases:'))\narr = []\nfor i in range(t):\n n = int(input())\n ans = even(n)\n arr.append(ans)\nfor i in range(len(arr)):\n print(arr[i], end=' ')\nimport math\nt = int(input())\nfor i in range(t):\n n = int(input())\n print(pow(2, int(math.log(n, 2))))\n",
"step-5": "#Some people are standing in a queue. A selection process follows a rule where people standing on even positions are selected. Of the selected people a queue is formed and again out of these only people on even position are selected. This continues until we are left with one person. Find out the position of that person in the original queue.\n\n#Input:\n#The first line of input contains an integer T denoting the number of test cases.The first line of each test case is N,number of people standing in a queue.\n\n#Output:\n#Print the position(original queue) of that person who is left.\n#----------------------------------------------------------------------------------------------------------------------------------------------------------------------------\ndef even(n):\n if n == 0 or n == 1:\n return \n elif n == 2:\n return 2\n else: \n for i in reversed(range(n+1)):\n if 2**i < n:\n return 2**i\nt = int(input(\"Enter number of test cases:\"))\narr = []\nfor i in range(t):\n n = int(input())\n ans = even(n)\n arr.append(ans)\nfor i in range(len(arr)): \n print(arr[i], end = ' ')\n# --------------------------------------------------------------------------------------------------------------------\n\nimport math\nt = int(input())\nfor i in range(t):\n n =int(input())\n print(pow(2,int(math.log(n,2))))\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
<|reserved_special_token_0|>
def get_amount():
"""Get valid donation amount from user"""
while True:
try:
amount = input('How much did they donate: ')
if str(amount).lower() == 'exit':
return amount
else:
return float(amount)
except ValueError:
print('you have made an invalid choice, try again.')
def get_key(donor_chart):
""" Return key for sorted function """
return sum(donor_chart[1])
def menu_page():
""" Return valid menu option from user """
while True:
try:
print(
"""Please choose one of the following options(1,2,3):
1. Send a Thank you.
2. Create a report
3. Send Letters to Everyone
4. Quit"""
)
option = int(input('--->'))
except ValueError:
print('You have made an invalid choice, try again.')
page_break()
return option
def send_thanks():
""" Send Thanks """
page_break()
while True:
list_names = [item[0] for item in donor_chart.items()]
try:
print(
"""To whom would you like to say thank you?
(type "list" for a full list of names or"exit" to return to the menu)"""
)
name = input('--->')
except ValueError:
print('you have made an invalid choice, try again.')
page_break()
continue
if name == 'list':
print(('{}\n' * len(list_names)).format(*list_names))
continue
elif name in list_names:
amount = get_amount()
new_donor = False
elif name.lower() == 'exit':
break
else:
addname = input(
'The name you selected is not in the list, would you like to add it(y/n)? '
)
if addname[0].lower() == 'y':
amount = get_amount()
new_donor = True
elif addname.lower() == 'exit':
break
else:
print('\nName was not added, try again\n')
continue
if amount == 'exit':
break
add_donation(name, amount, new_donor)
print(
"""
Dear {}
Thank you for your generous donation of ${:.2f}!!
Now all of the kittens will get to eat this year"""
.format(name, amount))
break
<|reserved_special_token_0|>
def send_letters():
""" Write letters to each donor in the donor chart and
save them in a user specified directory """
while True:
try:
dir_path = input(
'Please type the desired directory to save the letters: ')
letter_form = (
'Dear {},\n\n\tThank you for your very kind donation of ${:.2f}!'
)
letter_form += (
'\n\n\tNow all of the kittens will get to eat this year!')
letter_form += '\n\n\t\t\t\t Cheers! \n\t\t\t\t -The Team'
            if dir_path.lower() == 'exit':
break
if not os.path.exists(dir_path):
print('That is not a valid directory, using working directory')
dir_path = os.getcwd()
for name, donation in donor_chart.items():
file_name = '{}.txt'.format(name)
path_name = dir_path + '/' + file_name
with open(path_name, 'w') as file:
file.write(letter_form.format(name, sum(donation)))
break
except ValueError:
print('\nsomething went wrong please try again: ')
<|reserved_special_token_0|>
def menu_quit():
""" return quit for menus """
return 'Quit'
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def get_amount():
"""Get valid donation amount from user"""
while True:
try:
amount = input('How much did they donate: ')
if str(amount).lower() == 'exit':
return amount
else:
return float(amount)
except ValueError:
print('you have made an invalid choice, try again.')
def get_key(donor_chart):
""" Return key for sorted function """
return sum(donor_chart[1])
def menu_page():
""" Return valid menu option from user """
while True:
try:
print(
"""Please choose one of the following options(1,2,3):
1. Send a Thank you.
2. Create a report
3. Send Letters to Everyone
4. Quit"""
)
option = int(input('--->'))
except ValueError:
print('You have made an invalid choice, try again.')
page_break()
return option
def send_thanks():
""" Send Thanks """
page_break()
while True:
list_names = [item[0] for item in donor_chart.items()]
try:
print(
"""To whom would you like to say thank you?
(type "list" for a full list of names or"exit" to return to the menu)"""
)
name = input('--->')
except ValueError:
print('you have made an invalid choice, try again.')
page_break()
continue
if name == 'list':
print(('{}\n' * len(list_names)).format(*list_names))
continue
elif name in list_names:
amount = get_amount()
new_donor = False
elif name.lower() == 'exit':
break
else:
addname = input(
'The name you selected is not in the list, would you like to add it(y/n)? '
)
if addname[0].lower() == 'y':
amount = get_amount()
new_donor = True
elif addname.lower() == 'exit':
break
else:
print('\nName was not added, try again\n')
continue
if amount == 'exit':
break
add_donation(name, amount, new_donor)
print(
"""
Dear {}
Thank you for your generous donation of ${:.2f}!!
Now all of the kittens will get to eat this year"""
.format(name, amount))
break
<|reserved_special_token_0|>
def send_letters():
""" Write letters to each donor in the donor chart and
save them in a user specified directory """
while True:
try:
dir_path = input(
'Please type the desired directory to save the letters: ')
letter_form = (
'Dear {},\n\n\tThank you for your very kind donation of ${:.2f}!'
)
letter_form += (
'\n\n\tNow all of the kittens will get to eat this year!')
letter_form += '\n\n\t\t\t\t Cheers! \n\t\t\t\t -The Team'
if dir_path.lower() == 'Exit':
break
if not os.path.exists(dir_path):
print('That is not a valid directory, using working directory')
dir_path = os.getcwd()
for name, donation in donor_chart.items():
file_name = '{}.txt'.format(name)
path_name = dir_path + '/' + file_name
with open(path_name, 'w') as file:
file.write(letter_form.format(name, sum(donation)))
break
except ValueError:
print('\nsomething went wrong please try again: ')
def add_donation(name, amount, donor_bool):
""" add a donation for a new or existing donor """
if donor_bool is False:
        donor_chart[name].append(amount)
else:
donor_chart.update({name: [amount]})
return
def menu_quit():
""" return quit for menus """
return 'Quit'
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def page_break():
""" Print a separator to distinguish new 'pages'"""
print('_' * 75 + '\n')
def get_amount():
"""Get valid donation amount from user"""
while True:
try:
amount = input('How much did they donate: ')
if str(amount).lower() == 'exit':
return amount
else:
return float(amount)
except ValueError:
print('you have made an invalid choice, try again.')
def get_key(donor_chart):
""" Return key for sorted function """
return sum(donor_chart[1])
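
# Note: get_key is written to be passed as the key= argument to sorted(), e.g.
#   sorted(donor_chart.items(), key=get_key, reverse=True)
# which orders donors by their total donations, largest first
# (a usage sketch inferred from the docstring).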
def menu_page():
""" Return valid menu option from user """
while True:
try:
print(
"""Please choose one of the following options(1,2,3):
1. Send a Thank you.
2. Create a report
3. Send Letters to Everyone
4. Quit"""
)
option = int(input('--->'))
except ValueError:
print('You have made an invalid choice, try again.')
page_break()
return option
def send_thanks():
""" Send Thanks """
page_break()
while True:
list_names = [item[0] for item in donor_chart.items()]
try:
print(
"""To whom would you like to say thank you?
(type "list" for a full list of names or"exit" to return to the menu)"""
)
name = input('--->')
except ValueError:
print('you have made an invalid choice, try again.')
page_break()
continue
if name == 'list':
print(('{}\n' * len(list_names)).format(*list_names))
continue
elif name in list_names:
amount = get_amount()
new_donor = False
elif name.lower() == 'exit':
break
else:
addname = input(
'The name you selected is not in the list, would you like to add it(y/n)? '
)
if addname[0].lower() == 'y':
amount = get_amount()
new_donor = True
elif addname.lower() == 'exit':
break
else:
print('\nName was not added, try again\n')
continue
if amount == 'exit':
break
add_donation(name, amount, new_donor)
print(
"""
Dear {}
Thank you for your generous donation of ${:.2f}!!
Now all of the kittens will get to eat this year"""
.format(name, amount))
break
<|reserved_special_token_0|>
def send_letters():
""" Write letters to each donor in the donor chart and
save them in a user specified directory """
while True:
try:
dir_path = input(
'Please type the desired directory to save the letters: ')
letter_form = (
'Dear {},\n\n\tThank you for your very kind donation of ${:.2f}!'
)
letter_form += (
'\n\n\tNow all of the kittens will get to eat this year!')
letter_form += '\n\n\t\t\t\t Cheers! \n\t\t\t\t -The Team'
            if dir_path.lower() == 'exit':
break
if not os.path.exists(dir_path):
print('That is not a valid directory, using working directory')
dir_path = os.getcwd()
for name, donation in donor_chart.items():
file_name = '{}.txt'.format(name)
path_name = dir_path + '/' + file_name
with open(path_name, 'w') as file:
file.write(letter_form.format(name, sum(donation)))
break
except ValueError:
print('\nsomething went wrong please try again: ')
def add_donation(name, amount, donor_bool):
""" add a donation for a new or existing donor """
if donor_bool is False:
        donor_chart[name].append(amount)
else:
donor_chart.update({name: [amount]})
return
def menu_quit():
""" return quit for menus """
return 'Quit'
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def page_break():
""" Print a separator to distinguish new 'pages'"""
print('_' * 75 + '\n')
def get_amount():
"""Get valid donation amount from user"""
while True:
try:
amount = input('How much did they donate: ')
if str(amount).lower() == 'exit':
return amount
else:
return float(amount)
except ValueError:
print('you have made an invalid choice, try again.')
def get_key(donor_chart):
""" Return key for sorted function """
return sum(donor_chart[1])
def menu_page():
""" Return valid menu option from user """
while True:
try:
print(
"""Please choose one of the following options(1,2,3):
1. Send a Thank you.
2. Create a report
3. Send Letters to Everyone
4. Quit"""
)
option = int(input('--->'))
except ValueError:
print('You have made an invalid choice, try again.')
            page_break()
            continue
return option
def send_thanks():
""" Send Thanks """
page_break()
while True:
list_names = [item[0] for item in donor_chart.items()]
try:
print(
"""To whom would you like to say thank you?
(type "list" for a full list of names or"exit" to return to the menu)"""
)
name = input('--->')
except ValueError:
print('you have made an invalid choice, try again.')
page_break()
continue
if name == 'list':
print(('{}\n' * len(list_names)).format(*list_names))
continue
elif name in list_names:
amount = get_amount()
new_donor = False
elif name.lower() == 'exit':
break
else:
addname = input(
'The name you selected is not in the list, would you like to add it(y/n)? '
)
if addname[0].lower() == 'y':
amount = get_amount()
new_donor = True
elif addname.lower() == 'exit':
break
else:
print('\nName was not added, try again\n')
continue
if amount == 'exit':
break
add_donation(name, amount, new_donor)
print(
"""
Dear {}
Thank you for your generous donation of ${:.2f}!!
Now all of the kittens will get to eat this year"""
.format(name, amount))
break
def create_report():
""" Create Report """
page_break()
list_names = [item[0] for item in donor_chart.items()]
new_list = []
for donor in donor_chart.items():
sum_don = sum(donor[1])
new_list.append(sum_don)
col_lab = ['Donor Name', 'Total Given', 'Num Gifts', 'Average Gift']
max_name = max([len(x) for x in list_names])
max_don = []
for don in donor_chart.items():
max_don.append(max(don[1]))
max_donl = len(str(max(max_don)))
max_gift = len(col_lab[2])
if max_donl < len(col_lab[1]):
max_donl = len(col_lab[1])
format_col = '\n{:<' + '{}'.format(max_name + 5) + '}|{:^'
format_col += '{}'.format(max_donl + 5)
format_col += '}|{:^' + '{}'.format(max_gift + 5)
format_col += '}|{:>' + '{}'.format(max_donl + 5) + '}'
print(format_col.format(*col_lab))
print('-' * len(format_col.format(*col_lab)))
sorted_list = sorted(donor_chart.items(), key=get_key, reverse=True)
for donor in sorted_list:
num_gifts = len(donor[1])
avg_gift = sum(donor[1]) / num_gifts
format_item = '{:<' + '{}'.format(max_name + 5) + '}${:>'
format_item += '{}'.format(max_donl + 5) + '.2f}{:>'
format_item += '{}'.format(max_gift + 5) + 'd} ${:>'
format_item += '{}'.format(max_donl + 5) + '.2f}'
print(format_item.format(donor[0], sum(donor[1]), num_gifts, avg_gift))
def send_letters():
""" Write letters to each donor in the donor chart and
save them in a user specified directory """
while True:
try:
dir_path = input(
'Please type the desired directory to save the letters: ')
letter_form = (
'Dear {},\n\n\tThank you for your very kind donation of ${:.2f}!'
)
letter_form += (
'\n\n\tNow all of the kittens will get to eat this year!')
letter_form += '\n\n\t\t\t\t Cheers! \n\t\t\t\t -The Team'
            if dir_path.lower() == 'exit':
break
if not os.path.exists(dir_path):
print('That is not a valid directory, using working directory')
dir_path = os.getcwd()
for name, donation in donor_chart.items():
file_name = '{}.txt'.format(name)
path_name = dir_path + '/' + file_name
with open(path_name, 'w') as file:
file.write(letter_form.format(name, sum(donation)))
break
except ValueError:
print('\nsomething went wrong please try again: ')
def add_donation(name, amount, donor_bool):
""" add a donation for a new or existing donor """
if donor_bool is False:
        donor_chart[name].append(amount)
else:
donor_chart.update({name: [amount]})
return
def menu_quit():
""" return quit for menus """
return 'Quit'
<|reserved_special_token_0|>
<|reserved_special_token_1|>
#!/usr/bin/env python3
# Lesson_5 Activity 2 Mailroom Part 2
import os
def page_break():
""" Print a separator to distinguish new 'pages'"""
print("_"*75+"\n")
def get_amount():
"""Get valid donation amount from user"""
while True:
try:
amount = input("How much did they donate: ")
if str(amount).lower() == 'exit':
return amount
else:
return float(amount)
except ValueError:
print("you have made an invalid choice, try again.")
def get_key(donor_chart):
""" Return key for sorted function """
return(sum(donor_chart[1]))
def menu_page():
""" Return valid menu option from user """
while True:
try:
print("Please choose one of the following options(1,2,3):"
"\n1. Send a Thank you. \n2. Create a report"
"\n3. Send Letters to Everyone \n4. Quit")
option = int(input('--->'))
except ValueError:
print("You have made an invalid choice, try again.")
            page_break()
            continue  # ask again rather than returning an unset option
return option
def send_thanks():
""" Send Thanks """
page_break()
while True:
list_names = [item[0] for item in donor_chart.items()]
try:
print("To whom would you like to say thank you?\n"
"(type \"list\" for a full list of names or"
"\"exit\" to return to the menu)")
name = input("--->")
except ValueError:
print("you have made an invalid choice, try again.")
page_break()
continue
if name == 'list':
print(("{}\n"*len(list_names)).format(*list_names))
continue
elif name in list_names:
amount = get_amount()
new_donor = False
elif name.lower() == 'exit':
break
else:
addname = input("The name you selected is not in the list,"
" would you like to add it(y/n)? ")
if addname[0].lower() == 'y':
amount = get_amount()
new_donor = True
elif addname.lower() == 'exit':
break
else:
print("\nName was not added, try again\n")
continue
if amount == "exit":
break
add_donation(name, amount, new_donor)
print("\nDear {} \nThank you for your generous donation of ${:.2f}!!\n"
"Now all of the kittens will get "
"to eat this year".format(name, amount))
break
def create_report():
""" Create Report """
page_break()
list_names = [item[0] for item in donor_chart.items()]
new_list = []
for donor in donor_chart.items():
sum_don = sum(donor[1])
new_list.append(sum_don)
col_lab = ["Donor Name", "Total Given", "Num Gifts", "Average Gift"]
max_name = max([len(x) for x in list_names])
max_don = []
for don in donor_chart.items():
max_don.append(max(don[1]))
max_donl = len(str(max(max_don)))
max_gift = len(col_lab[2])
if max_donl < len(col_lab[1]):
max_donl = len(col_lab[1])
format_col = "\n{:<" + "{}".format(max_name+5) + "}|{:^"
format_col += "{}".format(max_donl+5)
format_col += "}|{:^" + "{}".format(max_gift+5)
format_col += "}|{:>" + "{}".format(max_donl+5) + "}"
print(format_col.format(*col_lab))
print("-"*len(format_col.format(*col_lab)))
sorted_list = sorted(donor_chart.items(), key=get_key, reverse=True)
for donor in sorted_list:
num_gifts = len(donor[1])
avg_gift = sum(donor[1])/num_gifts
format_item = "{:<" + "{}".format(max_name+5) + "}${:>"
format_item += "{}".format(max_donl+5) + ".2f}{:>"
format_item += "{}".format(max_gift+5) + "d} ${:>"
format_item += "{}".format(max_donl+5) + ".2f}"
print(format_item.format(donor[0], sum(donor[1]), num_gifts, avg_gift))
def send_letters():
""" Write letters to each donor in the donor chart and
save them in a user specified directory """
while True:
try:
dir_path = input("Please type the desired directory "
"to save the letters: ")
letter_form = ("Dear {},\n\n\tThank you for your very "
"kind donation of ${:.2f}!")
letter_form += ("\n\n\tNow all of the kittens will "
"get to eat this year!")
letter_form += ("\n\n\t\t\t\t Cheers! \n\t\t\t\t "
"-The Team")
if dir_path.lower() == "Exit":
break
if not os.path.exists(dir_path):
print("That is not a valid directory, using working directory")
dir_path = os.getcwd()
for name, donation in donor_chart.items():
file_name = ("{}.txt".format(name))
path_name = dir_path + "/" + file_name
with open(path_name, 'w') as file:
file.write(letter_form.format(name, sum(donation)))
break
except ValueError:
print("\nsomething went wrong please try again: ")
def add_donation(name, amount, donor_bool):
""" add a donation for a new or existing donor """
if donor_bool is False:
        donor_chart[name].append(amount)  # append this gift to the existing donor's record
else:
donor_chart.update({name: [amount]})
return
def menu_quit():
""" return quit for menus """
return "Quit"
if __name__ == '__main__':
donor_chart = {"Justin Thyme": [1, 1, 1],
"Beau Andarrow": [207.121324, 400.321234, 12345.001234],
"Crystal Clearwater": [80082],
"Harry Shins": [1.00, 2.00, 3.00],
"Bob Zuruncle": [0.53, 7.00],
"Al Kaseltzer": [1010101, 666.00],
"Joe Somebody": [25]}
options = range(1, 5)
menus = (send_thanks, create_report, send_letters, menu_quit)
menu_dict = dict(zip(options, menus))
option = 0
while True:
page_break()
try:
option = menu_page()
if menu_dict[option]() == "Quit":
break
except KeyError:
print("You have made an invalid choice, try again.")
page_break()
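
# A minimal sketch of the ordering used by create_report, on a hypothetical
# sample_chart: sorted() receives each (name, donations) item, the key sums the
# donation list exactly as get_key does, and reverse=True puts the largest
# lifetime totals first.
sample_chart = {"Ann": [10.00, 5.00], "Bo": [100.00], "Cy": [1.00, 2.00, 3.00]}
ranked = sorted(sample_chart.items(), key=lambda item: sum(item[1]), reverse=True)
# ranked == [('Bo', [100.0]), ('Ann', [10.0, 5.0]), ('Cy', [1.0, 2.0, 3.0])]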
|
flexible
|
{
"blob_id": "8a192fc08a65c80b8733a9d07374156c09f36598",
"index": 2823,
"step-1": "<mask token>\n\n\ndef get_amount():\n \"\"\"Get valid donation amount from user\"\"\"\n while True:\n try:\n amount = input('How much did they donate: ')\n if str(amount).lower() == 'exit':\n return amount\n else:\n return float(amount)\n except ValueError:\n print('you have made an invalid choice, try again.')\n\n\ndef get_key(donor_chart):\n \"\"\" Return key for sorted function \"\"\"\n return sum(donor_chart[1])\n\n\ndef menu_page():\n \"\"\" Return valid menu option from user \"\"\"\n while True:\n try:\n print(\n \"\"\"Please choose one of the following options(1,2,3):\n1. Send a Thank you. \n2. Create a report\n3. Send Letters to Everyone \n4. Quit\"\"\"\n )\n option = int(input('--->'))\n except ValueError:\n print('You have made an invalid choice, try again.')\n page_break()\n return option\n\n\ndef send_thanks():\n \"\"\" Send Thanks \"\"\"\n page_break()\n while True:\n list_names = [item[0] for item in donor_chart.items()]\n try:\n print(\n \"\"\"To whom would you like to say thank you?\n(type \"list\" for a full list of names or\"exit\" to return to the menu)\"\"\"\n )\n name = input('--->')\n except ValueError:\n print('you have made an invalid choice, try again.')\n page_break()\n continue\n if name == 'list':\n print(('{}\\n' * len(list_names)).format(*list_names))\n continue\n elif name in list_names:\n amount = get_amount()\n new_donor = False\n elif name.lower() == 'exit':\n break\n else:\n addname = input(\n 'The name you selected is not in the list, would you like to add it(y/n)? '\n )\n if addname[0].lower() == 'y':\n amount = get_amount()\n new_donor = True\n elif addname.lower() == 'exit':\n break\n else:\n print('\\nName was not added, try again\\n')\n continue\n if amount == 'exit':\n break\n add_donation(name, amount, new_donor)\n print(\n \"\"\"\nDear {} \nThank you for your generous donation of ${:.2f}!!\nNow all of the kittens will get to eat this year\"\"\"\n .format(name, amount))\n break\n\n\n<mask token>\n\n\ndef send_letters():\n \"\"\" Write letters to each donor in the donor chart and\n save them in a user specified directory \"\"\"\n while True:\n try:\n dir_path = input(\n 'Please type the desired directory to save the letters: ')\n letter_form = (\n 'Dear {},\\n\\n\\tThank you for your very kind donation of ${:.2f}!'\n )\n letter_form += (\n '\\n\\n\\tNow all of the kittens will get to eat this year!')\n letter_form += '\\n\\n\\t\\t\\t\\t Cheers! \\n\\t\\t\\t\\t -The Team'\n if dir_path.lower() == 'Exit':\n break\n if not os.path.exists(dir_path):\n print('That is not a valid directory, using working directory')\n dir_path = os.getcwd()\n for name, donation in donor_chart.items():\n file_name = '{}.txt'.format(name)\n path_name = dir_path + '/' + file_name\n with open(path_name, 'w') as file:\n file.write(letter_form.format(name, sum(donation)))\n break\n except ValueError:\n print('\\nsomething went wrong please try again: ')\n\n\n<mask token>\n\n\ndef menu_quit():\n \"\"\" return quit for menus \"\"\"\n return 'Quit'\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef get_amount():\n \"\"\"Get valid donation amount from user\"\"\"\n while True:\n try:\n amount = input('How much did they donate: ')\n if str(amount).lower() == 'exit':\n return amount\n else:\n return float(amount)\n except ValueError:\n print('you have made an invalid choice, try again.')\n\n\ndef get_key(donor_chart):\n \"\"\" Return key for sorted function \"\"\"\n return sum(donor_chart[1])\n\n\ndef menu_page():\n \"\"\" Return valid menu option from user \"\"\"\n while True:\n try:\n print(\n \"\"\"Please choose one of the following options(1,2,3):\n1. Send a Thank you. \n2. Create a report\n3. Send Letters to Everyone \n4. Quit\"\"\"\n )\n option = int(input('--->'))\n except ValueError:\n print('You have made an invalid choice, try again.')\n page_break()\n return option\n\n\ndef send_thanks():\n \"\"\" Send Thanks \"\"\"\n page_break()\n while True:\n list_names = [item[0] for item in donor_chart.items()]\n try:\n print(\n \"\"\"To whom would you like to say thank you?\n(type \"list\" for a full list of names or\"exit\" to return to the menu)\"\"\"\n )\n name = input('--->')\n except ValueError:\n print('you have made an invalid choice, try again.')\n page_break()\n continue\n if name == 'list':\n print(('{}\\n' * len(list_names)).format(*list_names))\n continue\n elif name in list_names:\n amount = get_amount()\n new_donor = False\n elif name.lower() == 'exit':\n break\n else:\n addname = input(\n 'The name you selected is not in the list, would you like to add it(y/n)? '\n )\n if addname[0].lower() == 'y':\n amount = get_amount()\n new_donor = True\n elif addname.lower() == 'exit':\n break\n else:\n print('\\nName was not added, try again\\n')\n continue\n if amount == 'exit':\n break\n add_donation(name, amount, new_donor)\n print(\n \"\"\"\nDear {} \nThank you for your generous donation of ${:.2f}!!\nNow all of the kittens will get to eat this year\"\"\"\n .format(name, amount))\n break\n\n\n<mask token>\n\n\ndef send_letters():\n \"\"\" Write letters to each donor in the donor chart and\n save them in a user specified directory \"\"\"\n while True:\n try:\n dir_path = input(\n 'Please type the desired directory to save the letters: ')\n letter_form = (\n 'Dear {},\\n\\n\\tThank you for your very kind donation of ${:.2f}!'\n )\n letter_form += (\n '\\n\\n\\tNow all of the kittens will get to eat this year!')\n letter_form += '\\n\\n\\t\\t\\t\\t Cheers! \\n\\t\\t\\t\\t -The Team'\n if dir_path.lower() == 'Exit':\n break\n if not os.path.exists(dir_path):\n print('That is not a valid directory, using working directory')\n dir_path = os.getcwd()\n for name, donation in donor_chart.items():\n file_name = '{}.txt'.format(name)\n path_name = dir_path + '/' + file_name\n with open(path_name, 'w') as file:\n file.write(letter_form.format(name, sum(donation)))\n break\n except ValueError:\n print('\\nsomething went wrong please try again: ')\n\n\ndef add_donation(name, amount, donor_bool):\n \"\"\" add a donation for a new or existing donor \"\"\"\n if donor_bool is False:\n donor_chart.get(list_names.index(name), [1]).append(amount)\n else:\n donor_chart.update({name: [amount]})\n return\n\n\ndef menu_quit():\n \"\"\" return quit for menus \"\"\"\n return 'Quit'\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef page_break():\n \"\"\" Print a separator to distinguish new 'pages'\"\"\"\n print('_' * 75 + '\\n')\n\n\ndef get_amount():\n \"\"\"Get valid donation amount from user\"\"\"\n while True:\n try:\n amount = input('How much did they donate: ')\n if str(amount).lower() == 'exit':\n return amount\n else:\n return float(amount)\n except ValueError:\n print('you have made an invalid choice, try again.')\n\n\ndef get_key(donor_chart):\n \"\"\" Return key for sorted function \"\"\"\n return sum(donor_chart[1])\n\n\ndef menu_page():\n \"\"\" Return valid menu option from user \"\"\"\n while True:\n try:\n print(\n \"\"\"Please choose one of the following options(1,2,3):\n1. Send a Thank you. \n2. Create a report\n3. Send Letters to Everyone \n4. Quit\"\"\"\n )\n option = int(input('--->'))\n except ValueError:\n print('You have made an invalid choice, try again.')\n page_break()\n return option\n\n\ndef send_thanks():\n \"\"\" Send Thanks \"\"\"\n page_break()\n while True:\n list_names = [item[0] for item in donor_chart.items()]\n try:\n print(\n \"\"\"To whom would you like to say thank you?\n(type \"list\" for a full list of names or\"exit\" to return to the menu)\"\"\"\n )\n name = input('--->')\n except ValueError:\n print('you have made an invalid choice, try again.')\n page_break()\n continue\n if name == 'list':\n print(('{}\\n' * len(list_names)).format(*list_names))\n continue\n elif name in list_names:\n amount = get_amount()\n new_donor = False\n elif name.lower() == 'exit':\n break\n else:\n addname = input(\n 'The name you selected is not in the list, would you like to add it(y/n)? '\n )\n if addname[0].lower() == 'y':\n amount = get_amount()\n new_donor = True\n elif addname.lower() == 'exit':\n break\n else:\n print('\\nName was not added, try again\\n')\n continue\n if amount == 'exit':\n break\n add_donation(name, amount, new_donor)\n print(\n \"\"\"\nDear {} \nThank you for your generous donation of ${:.2f}!!\nNow all of the kittens will get to eat this year\"\"\"\n .format(name, amount))\n break\n\n\n<mask token>\n\n\ndef send_letters():\n \"\"\" Write letters to each donor in the donor chart and\n save them in a user specified directory \"\"\"\n while True:\n try:\n dir_path = input(\n 'Please type the desired directory to save the letters: ')\n letter_form = (\n 'Dear {},\\n\\n\\tThank you for your very kind donation of ${:.2f}!'\n )\n letter_form += (\n '\\n\\n\\tNow all of the kittens will get to eat this year!')\n letter_form += '\\n\\n\\t\\t\\t\\t Cheers! \\n\\t\\t\\t\\t -The Team'\n if dir_path.lower() == 'Exit':\n break\n if not os.path.exists(dir_path):\n print('That is not a valid directory, using working directory')\n dir_path = os.getcwd()\n for name, donation in donor_chart.items():\n file_name = '{}.txt'.format(name)\n path_name = dir_path + '/' + file_name\n with open(path_name, 'w') as file:\n file.write(letter_form.format(name, sum(donation)))\n break\n except ValueError:\n print('\\nsomething went wrong please try again: ')\n\n\ndef add_donation(name, amount, donor_bool):\n \"\"\" add a donation for a new or existing donor \"\"\"\n if donor_bool is False:\n donor_chart.get(list_names.index(name), [1]).append(amount)\n else:\n donor_chart.update({name: [amount]})\n return\n\n\ndef menu_quit():\n \"\"\" return quit for menus \"\"\"\n return 'Quit'\n\n\n<mask token>\n",
"step-4": "<mask token>\n\n\ndef page_break():\n \"\"\" Print a separator to distinguish new 'pages'\"\"\"\n print('_' * 75 + '\\n')\n\n\ndef get_amount():\n \"\"\"Get valid donation amount from user\"\"\"\n while True:\n try:\n amount = input('How much did they donate: ')\n if str(amount).lower() == 'exit':\n return amount\n else:\n return float(amount)\n except ValueError:\n print('you have made an invalid choice, try again.')\n\n\ndef get_key(donor_chart):\n \"\"\" Return key for sorted function \"\"\"\n return sum(donor_chart[1])\n\n\ndef menu_page():\n \"\"\" Return valid menu option from user \"\"\"\n while True:\n try:\n print(\n \"\"\"Please choose one of the following options(1,2,3):\n1. Send a Thank you. \n2. Create a report\n3. Send Letters to Everyone \n4. Quit\"\"\"\n )\n option = int(input('--->'))\n except ValueError:\n print('You have made an invalid choice, try again.')\n page_break()\n return option\n\n\ndef send_thanks():\n \"\"\" Send Thanks \"\"\"\n page_break()\n while True:\n list_names = [item[0] for item in donor_chart.items()]\n try:\n print(\n \"\"\"To whom would you like to say thank you?\n(type \"list\" for a full list of names or\"exit\" to return to the menu)\"\"\"\n )\n name = input('--->')\n except ValueError:\n print('you have made an invalid choice, try again.')\n page_break()\n continue\n if name == 'list':\n print(('{}\\n' * len(list_names)).format(*list_names))\n continue\n elif name in list_names:\n amount = get_amount()\n new_donor = False\n elif name.lower() == 'exit':\n break\n else:\n addname = input(\n 'The name you selected is not in the list, would you like to add it(y/n)? '\n )\n if addname[0].lower() == 'y':\n amount = get_amount()\n new_donor = True\n elif addname.lower() == 'exit':\n break\n else:\n print('\\nName was not added, try again\\n')\n continue\n if amount == 'exit':\n break\n add_donation(name, amount, new_donor)\n print(\n \"\"\"\nDear {} \nThank you for your generous donation of ${:.2f}!!\nNow all of the kittens will get to eat this year\"\"\"\n .format(name, amount))\n break\n\n\ndef create_report():\n \"\"\" Create Report \"\"\"\n page_break()\n list_names = [item[0] for item in donor_chart.items()]\n new_list = []\n for donor in donor_chart.items():\n sum_don = sum(donor[1])\n new_list.append(sum_don)\n col_lab = ['Donor Name', 'Total Given', 'Num Gifts', 'Average Gift']\n max_name = max([len(x) for x in list_names])\n max_don = []\n for don in donor_chart.items():\n max_don.append(max(don[1]))\n max_donl = len(str(max(max_don)))\n max_gift = len(col_lab[2])\n if max_donl < len(col_lab[1]):\n max_donl = len(col_lab[1])\n format_col = '\\n{:<' + '{}'.format(max_name + 5) + '}|{:^'\n format_col += '{}'.format(max_donl + 5)\n format_col += '}|{:^' + '{}'.format(max_gift + 5)\n format_col += '}|{:>' + '{}'.format(max_donl + 5) + '}'\n print(format_col.format(*col_lab))\n print('-' * len(format_col.format(*col_lab)))\n sorted_list = sorted(donor_chart.items(), key=get_key, reverse=True)\n for donor in sorted_list:\n num_gifts = len(donor[1])\n avg_gift = sum(donor[1]) / num_gifts\n format_item = '{:<' + '{}'.format(max_name + 5) + '}${:>'\n format_item += '{}'.format(max_donl + 5) + '.2f}{:>'\n format_item += '{}'.format(max_gift + 5) + 'd} ${:>'\n format_item += '{}'.format(max_donl + 5) + '.2f}'\n print(format_item.format(donor[0], sum(donor[1]), num_gifts, avg_gift))\n\n\ndef send_letters():\n \"\"\" Write letters to each donor in the donor chart and\n save them in a user specified directory \"\"\"\n while True:\n try:\n 
dir_path = input(\n 'Please type the desired directory to save the letters: ')\n letter_form = (\n 'Dear {},\\n\\n\\tThank you for your very kind donation of ${:.2f}!'\n )\n letter_form += (\n '\\n\\n\\tNow all of the kittens will get to eat this year!')\n letter_form += '\\n\\n\\t\\t\\t\\t Cheers! \\n\\t\\t\\t\\t -The Team'\n if dir_path.lower() == 'Exit':\n break\n if not os.path.exists(dir_path):\n print('That is not a valid directory, using working directory')\n dir_path = os.getcwd()\n for name, donation in donor_chart.items():\n file_name = '{}.txt'.format(name)\n path_name = dir_path + '/' + file_name\n with open(path_name, 'w') as file:\n file.write(letter_form.format(name, sum(donation)))\n break\n except ValueError:\n print('\\nsomething went wrong please try again: ')\n\n\ndef add_donation(name, amount, donor_bool):\n \"\"\" add a donation for a new or existing donor \"\"\"\n if donor_bool is False:\n donor_chart.get(list_names.index(name), [1]).append(amount)\n else:\n donor_chart.update({name: [amount]})\n return\n\n\ndef menu_quit():\n \"\"\" return quit for menus \"\"\"\n return 'Quit'\n\n\n<mask token>\n",
"step-5": "#!/usr/bin/env python3\n\n# Lesson_5 Activity 2 Mailroom Part 2\n\nimport os\n\n\ndef page_break():\n \"\"\" Print a separator to distinguish new 'pages'\"\"\"\n print(\"_\"*75+\"\\n\")\n\n\ndef get_amount():\n \"\"\"Get valid donation amount from user\"\"\"\n while True:\n try:\n amount = input(\"How much did they donate: \")\n if str(amount).lower() == 'exit':\n return amount\n else:\n return float(amount)\n except ValueError:\n print(\"you have made an invalid choice, try again.\")\n\n\ndef get_key(donor_chart):\n \"\"\" Return key for sorted function \"\"\"\n return(sum(donor_chart[1]))\n\n\ndef menu_page():\n \"\"\" Return valid menu option from user \"\"\"\n while True:\n try:\n print(\"Please choose one of the following options(1,2,3):\"\n \"\\n1. Send a Thank you. \\n2. Create a report\"\n \"\\n3. Send Letters to Everyone \\n4. Quit\")\n option = int(input('--->'))\n except ValueError:\n print(\"You have made an invalid choice, try again.\")\n page_break()\n return option\n\n\ndef send_thanks():\n \"\"\" Send Thanks \"\"\"\n page_break()\n while True:\n list_names = [item[0] for item in donor_chart.items()]\n try:\n print(\"To whom would you like to say thank you?\\n\"\n \"(type \\\"list\\\" for a full list of names or\"\n \"\\\"exit\\\" to return to the menu)\")\n name = input(\"--->\")\n except ValueError:\n print(\"you have made an invalid choice, try again.\")\n page_break()\n continue\n if name == 'list':\n print((\"{}\\n\"*len(list_names)).format(*list_names))\n continue\n elif name in list_names:\n amount = get_amount()\n new_donor = False\n elif name.lower() == 'exit':\n break\n else:\n addname = input(\"The name you selected is not in the list,\"\n \" would you like to add it(y/n)? \")\n if addname[0].lower() == 'y':\n amount = get_amount()\n new_donor = True\n elif addname.lower() == 'exit':\n break\n else:\n print(\"\\nName was not added, try again\\n\")\n continue\n if amount == \"exit\":\n break\n add_donation(name, amount, new_donor)\n print(\"\\nDear {} \\nThank you for your generous donation of ${:.2f}!!\\n\"\n \"Now all of the kittens will get \"\n \"to eat this year\".format(name, amount))\n break\n\n\ndef create_report():\n \"\"\" Create Report \"\"\"\n page_break()\n list_names = [item[0] for item in donor_chart.items()]\n new_list = []\n for donor in donor_chart.items():\n sum_don = sum(donor[1])\n new_list.append(sum_don)\n col_lab = [\"Donor Name\", \"Total Given\", \"Num Gifts\", \"Average Gift\"]\n max_name = max([len(x) for x in list_names])\n max_don = []\n for don in donor_chart.items():\n max_don.append(max(don[1]))\n max_donl = len(str(max(max_don)))\n max_gift = len(col_lab[2])\n if max_donl < len(col_lab[1]):\n max_donl = len(col_lab[1])\n format_col = \"\\n{:<\" + \"{}\".format(max_name+5) + \"}|{:^\"\n format_col += \"{}\".format(max_donl+5)\n format_col += \"}|{:^\" + \"{}\".format(max_gift+5)\n format_col += \"}|{:>\" + \"{}\".format(max_donl+5) + \"}\"\n print(format_col.format(*col_lab))\n print(\"-\"*len(format_col.format(*col_lab)))\n sorted_list = sorted(donor_chart.items(), key=get_key, reverse=True)\n for donor in sorted_list:\n num_gifts = len(donor[1])\n avg_gift = sum(donor[1])/num_gifts\n format_item = \"{:<\" + \"{}\".format(max_name+5) + \"}${:>\"\n format_item += \"{}\".format(max_donl+5) + \".2f}{:>\"\n format_item += \"{}\".format(max_gift+5) + \"d} ${:>\"\n format_item += \"{}\".format(max_donl+5) + \".2f}\"\n print(format_item.format(donor[0], sum(donor[1]), num_gifts, avg_gift))\n\n\ndef send_letters():\n \"\"\" 
Write letters to each donor in the donor chart and\n save them in a user specified directory \"\"\"\n while True:\n try:\n dir_path = input(\"Please type the desired directory \"\n \"to save the letters: \")\n letter_form = (\"Dear {},\\n\\n\\tThank you for your very \"\n \"kind donation of ${:.2f}!\")\n letter_form += (\"\\n\\n\\tNow all of the kittens will \"\n \"get to eat this year!\")\n letter_form += (\"\\n\\n\\t\\t\\t\\t Cheers! \\n\\t\\t\\t\\t \"\n \"-The Team\")\n if dir_path.lower() == \"Exit\":\n break\n if not os.path.exists(dir_path):\n print(\"That is not a valid directory, using working directory\")\n dir_path = os.getcwd()\n for name, donation in donor_chart.items():\n file_name = (\"{}.txt\".format(name))\n path_name = dir_path + \"/\" + file_name\n with open(path_name, 'w') as file:\n file.write(letter_form.format(name, sum(donation)))\n break\n except ValueError:\n print(\"\\nsomething went wrong please try again: \")\n\n\ndef add_donation(name, amount, donor_bool):\n \"\"\" add a donation for a new or existing donor \"\"\"\n if donor_bool is False:\n donor_chart.get(list_names.index(name), [1]).append(amount)\n else:\n donor_chart.update({name: [amount]})\n return\n\n\ndef menu_quit():\n \"\"\" return quit for menus \"\"\"\n return \"Quit\"\n\nif __name__ == '__main__':\n donor_chart = {\"Justin Thyme\": [1, 1, 1],\n \"Beau Andarrow\": [207.121324, 400.321234, 12345.001234],\n \"Crystal Clearwater\": [80082],\n \"Harry Shins\": [1.00, 2.00, 3.00],\n \"Bob Zuruncle\": [0.53, 7.00],\n \"Al Kaseltzer\": [1010101, 666.00],\n \"Joe Somebody\": [25]}\n\n options = range(1, 5)\n menus = (send_thanks, create_report, send_letters, menu_quit)\n menu_dict = dict(zip(options, menus))\n\n option = 0\n while True:\n page_break()\n try:\n option = menu_page()\n if menu_dict[option]() == \"Quit\":\n break\n except KeyError:\n print(\"You have made an invalid choice, try again.\")\n page_break()\n",
"step-ids": [
6,
7,
8,
9,
12
]
}
|
[
6,
7,
8,
9,
12
] |
# Web Scraping
# Make sure you have bs4 and requests installed as third-party modules (webbrowser ships with the standard library)
import bs4, webbrowser, requests
try:
data = requests.get("http://en.wikipedia.org/wiki/Python")
data.raise_for_status()
my_data = bs4.BeautifulSoup(data.text, "lxml")
print("List of all the header tags: \n\n")
for the_data in my_data.find_all("a"):
try:
print(the_data.attrs["href"])
except Exception as err:
print(err)
except Exception as err:
print(err)
print("\nNo website matches your search.")
|
normal
|
{
"blob_id": "27e9635adf6109f3ab13b9d8dd5809973b61ca03",
"index": 413,
"step-1": "<mask token>\n",
"step-2": "<mask token>\ntry:\n data = requests.get('http://en.wikipedia.org/wiki/Python')\n data.raise_for_status()\n my_data = bs4.BeautifulSoup(data.text, 'lxml')\n print('List of all the header tags: \\n\\n')\n for the_data in my_data.find_all('a'):\n try:\n print(the_data.attrs['href'])\n except Exception as err:\n print(err)\nexcept Exception as err:\n print(err)\n print('\\nNo website matches your search.')\n",
"step-3": "import bs4, webbrowser, requests\ntry:\n data = requests.get('http://en.wikipedia.org/wiki/Python')\n data.raise_for_status()\n my_data = bs4.BeautifulSoup(data.text, 'lxml')\n print('List of all the header tags: \\n\\n')\n for the_data in my_data.find_all('a'):\n try:\n print(the_data.attrs['href'])\n except Exception as err:\n print(err)\nexcept Exception as err:\n print(err)\n print('\\nNo website matches your search.')\n",
"step-4": "#Web Scraping\r\n#Make sure you have bs4, webbrowser and request installed as your third party modules\r\n\r\nimport bs4, webbrowser, requests\r\n\r\ntry:\r\n data = requests.get(\"http://en.wikipedia.org/wiki/Python\")\r\n data.raise_for_status()\r\n \r\n my_data = bs4.BeautifulSoup(data.text, \"lxml\")\r\n \r\n print(\"List of all the header tags: \\n\\n\")\r\n for the_data in my_data.find_all(\"a\"):\r\n try:\r\n print(the_data.attrs[\"href\"])\r\n except Exception as err:\r\n print(err)\r\n \r\nexcept Exception as err:\r\n print(err)\r\n print(\"\\nNo website matches your search.\")\r\n\r\n\r\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
print('CrawlerSlaveYoke')
print('CSY-000000023.py')
<|reserved_special_token_1|>
# Author: Andreas Francois Vermeulen
print("CrawlerSlaveYoke")
print("CSY-000000023.py")
|
flexible
|
{
"blob_id": "322795bce189428823c45a26477555052c7d5022",
"index": 8933,
"step-1": "<mask token>\n",
"step-2": "print('CrawlerSlaveYoke')\nprint('CSY-000000023.py')\n",
"step-3": "# Author: Andreas Francois Vermeulen\nprint(\"CrawlerSlaveYoke\")\nprint(\"CSY-000000023.py\")\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
def pixels_generator(w, h):
i = 0
while i < (w * h):
yield divmod(i, w)
i = i + 1
|
normal
|
{
"blob_id": "bb481fa038835abc6d61a4985b1e30c7c00bff96",
"index": 158,
"step-1": "<mask token>\n",
"step-2": "def pixels_generator(w, h):\n i = 0\n while i < w * h:\n yield divmod(i, w)\n i = i + 1\n",
"step-3": "def pixels_generator(w, h):\n i = 0\n while i < (w * h):\n yield divmod(i, w)\n i = i + 1\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
for category in ('beauty', 'fashion', 'mobile'):
with open('%s/%s_data_info_val_competition.csv' % (data_dir, category), 'r'
) as infile:
next(infile)
for line in infile:
curr_id = line.strip().split(',')[0]
valid_id[curr_id] = True
with open('submission_977.csv', 'w') as outfile:
outfile.write('id,tagging\n')
with open('%s/submission_2103.csv' % output_dir, 'r') as infile:
next(infile)
for line in infile:
curr_id = line.strip().split('_')[0]
if curr_id in valid_id:
outfile.write(line.strip() + '\n')
<|reserved_special_token_1|>
data_dir = '../data'
output_dir = './'
valid_id = dict()
for category in ('beauty', 'fashion', 'mobile'):
with open('%s/%s_data_info_val_competition.csv' % (data_dir, category), 'r'
) as infile:
next(infile)
for line in infile:
curr_id = line.strip().split(',')[0]
valid_id[curr_id] = True
with open('submission_977.csv', 'w') as outfile:
outfile.write('id,tagging\n')
with open('%s/submission_2103.csv' % output_dir, 'r') as infile:
next(infile)
for line in infile:
curr_id = line.strip().split('_')[0]
if curr_id in valid_id:
outfile.write(line.strip() + '\n')
<|reserved_special_token_1|>
data_dir = "../data"
output_dir = './'
valid_id = dict()
for category in ("beauty", "fashion", "mobile"):
with open("%s/%s_data_info_val_competition.csv" % (data_dir, category), "r") as infile:
next(infile)
for line in infile:
curr_id = line.strip().split(',')[0]
valid_id[curr_id] = True
# This is the new output submission file containing 977987 rows
with open("submission_977.csv", "w") as outfile:
outfile.write("id,tagging\n")
# Please change the file below to your current submission filename containing 1174802 rows
# with open("submission-in.csv", "r") as infile:
with open("%s/submission_2103.csv" % output_dir, "r") as infile:
next(infile)
for line in infile:
curr_id = line.strip().split('_')[0]
if curr_id in valid_id:
outfile.write(line.strip() + '\n')
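
# A small illustration of the filtering rule above, using hypothetical rows:
# ids in the validation CSVs sit before the first ',', ids in the submission sit
# before the first '_', and only submission rows whose id was collected into
# valid_id are written back out.
sample_valid = {"101": True, "303": True}
sample_rows = ["101_name,foo bar", "202_name,baz", "303_brand,qux"]
kept = [row for row in sample_rows if row.split('_')[0] in sample_valid]
# kept == ['101_name,foo bar', '303_brand,qux']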
|
flexible
|
{
"blob_id": "82556291c456b9e43e4e589ea4a77d320430344b",
"index": 7478,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nfor category in ('beauty', 'fashion', 'mobile'):\n with open('%s/%s_data_info_val_competition.csv' % (data_dir, category), 'r'\n ) as infile:\n next(infile)\n for line in infile:\n curr_id = line.strip().split(',')[0]\n valid_id[curr_id] = True\nwith open('submission_977.csv', 'w') as outfile:\n outfile.write('id,tagging\\n')\n with open('%s/submission_2103.csv' % output_dir, 'r') as infile:\n next(infile)\n for line in infile:\n curr_id = line.strip().split('_')[0]\n if curr_id in valid_id:\n outfile.write(line.strip() + '\\n')\n",
"step-3": "data_dir = '../data'\noutput_dir = './'\nvalid_id = dict()\nfor category in ('beauty', 'fashion', 'mobile'):\n with open('%s/%s_data_info_val_competition.csv' % (data_dir, category), 'r'\n ) as infile:\n next(infile)\n for line in infile:\n curr_id = line.strip().split(',')[0]\n valid_id[curr_id] = True\nwith open('submission_977.csv', 'w') as outfile:\n outfile.write('id,tagging\\n')\n with open('%s/submission_2103.csv' % output_dir, 'r') as infile:\n next(infile)\n for line in infile:\n curr_id = line.strip().split('_')[0]\n if curr_id in valid_id:\n outfile.write(line.strip() + '\\n')\n",
"step-4": "data_dir = \"../data\"\noutput_dir = './'\nvalid_id = dict()\n\nfor category in (\"beauty\", \"fashion\", \"mobile\"):\n with open(\"%s/%s_data_info_val_competition.csv\" % (data_dir, category), \"r\") as infile:\n next(infile)\n for line in infile:\n curr_id = line.strip().split(',')[0]\n valid_id[curr_id] = True\n\n# This is the new output submission file containing 977987 rows\nwith open(\"submission_977.csv\", \"w\") as outfile:\n outfile.write(\"id,tagging\\n\")\n \n # Please change the file below to your current submission filename containing 1174802 rows\n # with open(\"submission-in.csv\", \"r\") as infile:\n with open(\"%s/submission_2103.csv\" % output_dir, \"r\") as infile:\n next(infile)\n for line in infile:\n curr_id = line.strip().split('_')[0]\n if curr_id in valid_id:\n outfile.write(line.strip() + '\\n')",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from sklearn import linear_model
from features import calculateTargets
currency = 'EURUSD'
interval = '1440'
df = pd.read_csv(
r'../data/' + currency.upper() + interval + '.csv',
names=['date', 'time', 'open', 'high', 'low', 'close', 'volume'],
dtype={'open': 'float', 'high': 'float', 'low': 'float', 'close': 'float', 'volume': 'int'},
#parse_dates=[[0, 1]],
# index_col=0,
)
#print df.head()
#print df.tail()
# Use only one feature
#diabetes_X = diabetes.data[:, np.newaxis]
#diabetes_X_temp = diabetes_X[:, :, 2]
## Split the data into training/testing sets
#diabetes_X_train = diabetes_X_temp[:-20]
#diabetes_X_test = diabetes_X_temp[-20:]
# Split the targets into training/testing sets
calculateTargets(df)
print('targets calculated')
#diabetes_y_train = diabetes.target[:-20]
#diabetes_y_test = diabetes.target[-20:]
## Create linear regression object
#regr = linear_model.LinearRegression()
#
## Train the model using the training sets
#regr.fit(diabetes_X_train, diabetes_y_train)
#
## The coefficients
#print('Coefficients: \n', regr.coef_)
## The mean square error
#print("Residual sum of squares: %.2f"
# % np.mean((regr.predict(diabetes_X_test) - diabetes_y_test) ** 2))
## Explained variance score: 1 is perfect prediction
#print('Variance score: %.2f' % regr.score(diabetes_X_test, diabetes_y_test))
#
## Plot outputs
#plt.scatter(diabetes_X_test, diabetes_y_test, color='black')
#plt.plot(diabetes_X_test, regr.predict(diabetes_X_test), color='blue',
# linewidth=3)
#
#plt.xticks(())
#plt.yticks(())
#
#plt.show()
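
# A minimal sketch of the commented-out regression workflow above, run on synthetic
# arrays because the real feature and target columns produced from df by
# calculateTargets are not defined in this file; it reuses the np and linear_model
# imports from the top of the script.
regr = linear_model.LinearRegression()
X = np.arange(20, dtype=float).reshape(-1, 1)  # a single feature column
y = 2.0 * X.ravel() + 1.0                      # a perfectly linear target
regr.fit(X[:-5], y[:-5])                       # train on all but the last rows
print('Coefficients:', regr.coef_)
print("Residual sum of squares: %.2f"
      % np.mean((regr.predict(X[-5:]) - y[-5:]) ** 2))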
|
normal
|
{
"blob_id": "8c0bae9e49c5ea9fbdee7c5c864afff16cc9f8b8",
"index": 3757,
"step-1": "import matplotlib.pyplot as plt\nimport numpy as np\nimport pandas as pd\nfrom sklearn import linear_model\nfrom features import calculateTargets\n\ncurrency = 'EURUSD'\ninterval = '1440'\n\ndf = pd.read_csv(\n r'../data/' + currency.upper() + interval + '.csv',\n names=['date', 'time', 'open', 'high', 'low', 'close', 'volume'],\n dtype={'open': 'float', 'high': 'float', 'low': 'float', 'close': 'float', 'volume': 'int'},\n #parse_dates=[[0, 1]],\n # index_col=0,\n)\n#print df.head()\n#print df.tail()\n\n# Use only one feature\n#diabetes_X = diabetes.data[:, np.newaxis]\n#diabetes_X_temp = diabetes_X[:, :, 2]\n\n## Split the data into training/testing sets\n#diabetes_X_train = diabetes_X_temp[:-20]\n#diabetes_X_test = diabetes_X_temp[-20:]\n\n# Split the targets into training/testing sets\ncalculateTargets(df)\nprint 'targets calculated'\n#diabetes_y_train = diabetes.target[:-20]\n#diabetes_y_test = diabetes.target[-20:]\n\n## Create linear regression object\n#regr = linear_model.LinearRegression()\n#\n## Train the model using the training sets\n#regr.fit(diabetes_X_train, diabetes_y_train)\n#\n## The coefficients\n#print('Coefficients: \\n', regr.coef_)\n## The mean square error\n#print(\"Residual sum of squares: %.2f\"\n# % np.mean((regr.predict(diabetes_X_test) - diabetes_y_test) ** 2))\n## Explained variance score: 1 is perfect prediction\n#print('Variance score: %.2f' % regr.score(diabetes_X_test, diabetes_y_test))\n#\n## Plot outputs\n#plt.scatter(diabetes_X_test, diabetes_y_test, color='black')\n#plt.plot(diabetes_X_test, regr.predict(diabetes_X_test), color='blue',\n# linewidth=3)\n#\n#plt.xticks(())\n#plt.yticks(())\n#\n#plt.show()",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
<|reserved_special_token_0|>
def _mako_get_namespace(context, name):
try:
return context.namespaces[__name__, name]
except KeyError:
_mako_generate_namespaces(context)
return context.namespaces[__name__, name]
<|reserved_special_token_0|>
def _mako_inherit(template, context):
_mako_generate_namespaces(context)
return runtime._inherit_from(context, 'base_ajax.htm', _template_uri)
<|reserved_special_token_0|>
def render_content(context, **pageargs):
__M_caller = context.caller_stack._push_frame()
try:
int = context.get('int', UNDEFINED)
str = context.get('str', UNDEFINED)
rentals = context.get('rentals', UNDEFINED)
def content():
return render_content(context)
request = context.get('request', UNDEFINED)
STATIC_URL = context.get('STATIC_URL', UNDEFINED)
__M_writer = context.writer()
__M_writer(
'\r\n\r\n<table class="table-responsive table-striped">\r\n <th></th>\r\n <th>#</th>\r\n <th>Name</th>\r\n <th>Price per Day</th>\r\n <th># of Days Rented</th>\r\n'
)
for item in rentals:
__M_writer(' <tr>\r\n <td><button rel="')
__M_writer(str(item.id))
__M_writer(
'" class="btn btn-danger btn-sm deleter">Remove</button></td>\r\n <td class="img-col"><img class="shopping_cart_image" src="'
)
__M_writer(str(STATIC_URL))
__M_writer(str(item.photo.image))
__M_writer('"/></td>\r\n <td class="name-col">')
__M_writer(str(noww))
__M_writer('</td>\r\n <td class="price-col">')
__M_writer(str(item.price_per_day))
__M_writer('</td>\r\n <td class="qty-col">')
__M_writer(str(int(request.session['rental_cart'][str(item.id)])))
__M_writer('</td>\r\n </tr>\r\n')
__M_writer(
'</table>\r\n<table id="button-table" class="table-responsive">\r\n <tr>\r\n <td id="space"></td>\r\n'
)
if request.user.is_authenticated():
__M_writer(
' <td id=\'checkout\'><a href="/account.checkout" class="btn btn-warning">Checkout</a></td>\r\n'
)
else:
__M_writer(
' <td id=\'checkout\'><a href="/mylogin.cartlogin" class="btn btn-warning">Checkout</a></td>\r\n'
)
__M_writer(' </tr>\r\n</table>\r\n')
return ''
finally:
context.caller_stack._pop_frame()
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def _mako_get_namespace(context, name):
try:
return context.namespaces[__name__, name]
except KeyError:
_mako_generate_namespaces(context)
return context.namespaces[__name__, name]
def _mako_generate_namespaces(context):
pass
def _mako_inherit(template, context):
_mako_generate_namespaces(context)
return runtime._inherit_from(context, 'base_ajax.htm', _template_uri)
def render_body(context, **pageargs):
__M_caller = context.caller_stack._push_frame()
try:
__M_locals = __M_dict_builtin(pageargs=pageargs)
int = context.get('int', UNDEFINED)
str = context.get('str', UNDEFINED)
rentals = context.get('rentals', UNDEFINED)
def content():
return render_content(context._locals(__M_locals))
request = context.get('request', UNDEFINED)
STATIC_URL = context.get('STATIC_URL', UNDEFINED)
__M_writer = context.writer()
__M_writer('\r\n')
__M_writer('\r\n')
__M_writer(str(nowww=noww - timedelta(days=3)))
__M_writer('\r\n')
if 'parent' not in context._data or not hasattr(context._data[
'parent'], 'content'):
context['self'].content(**pageargs)
__M_writer('\r\n\r\n')
return ''
finally:
context.caller_stack._pop_frame()
def render_content(context, **pageargs):
__M_caller = context.caller_stack._push_frame()
try:
int = context.get('int', UNDEFINED)
str = context.get('str', UNDEFINED)
rentals = context.get('rentals', UNDEFINED)
def content():
return render_content(context)
request = context.get('request', UNDEFINED)
STATIC_URL = context.get('STATIC_URL', UNDEFINED)
__M_writer = context.writer()
__M_writer(
'\r\n\r\n<table class="table-responsive table-striped">\r\n <th></th>\r\n <th>#</th>\r\n <th>Name</th>\r\n <th>Price per Day</th>\r\n <th># of Days Rented</th>\r\n'
)
for item in rentals:
__M_writer(' <tr>\r\n <td><button rel="')
__M_writer(str(item.id))
__M_writer(
'" class="btn btn-danger btn-sm deleter">Remove</button></td>\r\n <td class="img-col"><img class="shopping_cart_image" src="'
)
__M_writer(str(STATIC_URL))
__M_writer(str(item.photo.image))
__M_writer('"/></td>\r\n <td class="name-col">')
__M_writer(str(noww))
__M_writer('</td>\r\n <td class="price-col">')
__M_writer(str(item.price_per_day))
__M_writer('</td>\r\n <td class="qty-col">')
__M_writer(str(int(request.session['rental_cart'][str(item.id)])))
__M_writer('</td>\r\n </tr>\r\n')
__M_writer(
'</table>\r\n<table id="button-table" class="table-responsive">\r\n <tr>\r\n <td id="space"></td>\r\n'
)
if request.user.is_authenticated():
__M_writer(
' <td id=\'checkout\'><a href="/account.checkout" class="btn btn-warning">Checkout</a></td>\r\n'
)
else:
__M_writer(
' <td id=\'checkout\'><a href="/mylogin.cartlogin" class="btn btn-warning">Checkout</a></td>\r\n'
)
__M_writer(' </tr>\r\n</table>\r\n')
return ''
finally:
context.caller_stack._pop_frame()
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
UNDEFINED = runtime.UNDEFINED
__M_dict_builtin = dict
__M_locals_builtin = locals
_magic_number = 10
_modified_time = 1428612037.145222
_enable_loop = True
_template_filename = (
'C:\\Users\\Cody\\Desktop\\Heritage\\chf\\templates/account.rentalcart.html'
)
_template_uri = '/account.rentalcart.html'
_source_encoding = 'ascii'
<|reserved_special_token_0|>
_exports = ['content']
<|reserved_special_token_0|>
now = datetime.now()
noww = now.strftime('%B %d, %Y')
def _mako_get_namespace(context, name):
try:
return context.namespaces[__name__, name]
except KeyError:
_mako_generate_namespaces(context)
return context.namespaces[__name__, name]
def _mako_generate_namespaces(context):
pass
def _mako_inherit(template, context):
_mako_generate_namespaces(context)
return runtime._inherit_from(context, 'base_ajax.htm', _template_uri)
def render_body(context, **pageargs):
__M_caller = context.caller_stack._push_frame()
try:
__M_locals = __M_dict_builtin(pageargs=pageargs)
int = context.get('int', UNDEFINED)
str = context.get('str', UNDEFINED)
rentals = context.get('rentals', UNDEFINED)
def content():
return render_content(context._locals(__M_locals))
request = context.get('request', UNDEFINED)
STATIC_URL = context.get('STATIC_URL', UNDEFINED)
__M_writer = context.writer()
__M_writer('\r\n')
__M_writer('\r\n')
__M_writer(str(nowww=noww - timedelta(days=3)))
__M_writer('\r\n')
if 'parent' not in context._data or not hasattr(context._data[
'parent'], 'content'):
context['self'].content(**pageargs)
__M_writer('\r\n\r\n')
return ''
finally:
context.caller_stack._pop_frame()
def render_content(context, **pageargs):
__M_caller = context.caller_stack._push_frame()
try:
int = context.get('int', UNDEFINED)
str = context.get('str', UNDEFINED)
rentals = context.get('rentals', UNDEFINED)
def content():
return render_content(context)
request = context.get('request', UNDEFINED)
STATIC_URL = context.get('STATIC_URL', UNDEFINED)
__M_writer = context.writer()
__M_writer(
'\r\n\r\n<table class="table-responsive table-striped">\r\n <th></th>\r\n <th>#</th>\r\n <th>Name</th>\r\n <th>Price per Day</th>\r\n <th># of Days Rented</th>\r\n'
)
for item in rentals:
__M_writer(' <tr>\r\n <td><button rel="')
__M_writer(str(item.id))
__M_writer(
'" class="btn btn-danger btn-sm deleter">Remove</button></td>\r\n <td class="img-col"><img class="shopping_cart_image" src="'
)
__M_writer(str(STATIC_URL))
__M_writer(str(item.photo.image))
__M_writer('"/></td>\r\n <td class="name-col">')
__M_writer(str(noww))
__M_writer('</td>\r\n <td class="price-col">')
__M_writer(str(item.price_per_day))
__M_writer('</td>\r\n <td class="qty-col">')
__M_writer(str(int(request.session['rental_cart'][str(item.id)])))
__M_writer('</td>\r\n </tr>\r\n')
__M_writer(
'</table>\r\n<table id="button-table" class="table-responsive">\r\n <tr>\r\n <td id="space"></td>\r\n'
)
if request.user.is_authenticated():
__M_writer(
' <td id=\'checkout\'><a href="/account.checkout" class="btn btn-warning">Checkout</a></td>\r\n'
)
else:
__M_writer(
' <td id=\'checkout\'><a href="/mylogin.cartlogin" class="btn btn-warning">Checkout</a></td>\r\n'
)
__M_writer(' </tr>\r\n</table>\r\n')
return ''
finally:
context.caller_stack._pop_frame()
<|reserved_special_token_0|>
<|reserved_special_token_1|>
from mako import runtime, filters, cache
UNDEFINED = runtime.UNDEFINED
__M_dict_builtin = dict
__M_locals_builtin = locals
_magic_number = 10
_modified_time = 1428612037.145222
_enable_loop = True
_template_filename = (
'C:\\Users\\Cody\\Desktop\\Heritage\\chf\\templates/account.rentalcart.html'
)
_template_uri = '/account.rentalcart.html'
_source_encoding = 'ascii'
import os, os.path, re
_exports = ['content']
from datetime import datetime, timedelta
now = datetime.now()
noww = now.strftime('%B %d, %Y')
def _mako_get_namespace(context, name):
try:
return context.namespaces[__name__, name]
except KeyError:
_mako_generate_namespaces(context)
return context.namespaces[__name__, name]
def _mako_generate_namespaces(context):
pass
def _mako_inherit(template, context):
_mako_generate_namespaces(context)
return runtime._inherit_from(context, 'base_ajax.htm', _template_uri)
def render_body(context, **pageargs):
__M_caller = context.caller_stack._push_frame()
try:
__M_locals = __M_dict_builtin(pageargs=pageargs)
int = context.get('int', UNDEFINED)
str = context.get('str', UNDEFINED)
rentals = context.get('rentals', UNDEFINED)
def content():
return render_content(context._locals(__M_locals))
request = context.get('request', UNDEFINED)
STATIC_URL = context.get('STATIC_URL', UNDEFINED)
__M_writer = context.writer()
__M_writer('\r\n')
__M_writer('\r\n')
__M_writer(str(nowww=noww - timedelta(days=3)))
__M_writer('\r\n')
if 'parent' not in context._data or not hasattr(context._data[
'parent'], 'content'):
context['self'].content(**pageargs)
__M_writer('\r\n\r\n')
return ''
finally:
context.caller_stack._pop_frame()
def render_content(context, **pageargs):
__M_caller = context.caller_stack._push_frame()
try:
int = context.get('int', UNDEFINED)
str = context.get('str', UNDEFINED)
rentals = context.get('rentals', UNDEFINED)
def content():
return render_content(context)
request = context.get('request', UNDEFINED)
STATIC_URL = context.get('STATIC_URL', UNDEFINED)
__M_writer = context.writer()
__M_writer(
'\r\n\r\n<table class="table-responsive table-striped">\r\n <th></th>\r\n <th>#</th>\r\n <th>Name</th>\r\n <th>Price per Day</th>\r\n <th># of Days Rented</th>\r\n'
)
for item in rentals:
__M_writer(' <tr>\r\n <td><button rel="')
__M_writer(str(item.id))
__M_writer(
'" class="btn btn-danger btn-sm deleter">Remove</button></td>\r\n <td class="img-col"><img class="shopping_cart_image" src="'
)
__M_writer(str(STATIC_URL))
__M_writer(str(item.photo.image))
__M_writer('"/></td>\r\n <td class="name-col">')
__M_writer(str(noww))
__M_writer('</td>\r\n <td class="price-col">')
__M_writer(str(item.price_per_day))
__M_writer('</td>\r\n <td class="qty-col">')
__M_writer(str(int(request.session['rental_cart'][str(item.id)])))
__M_writer('</td>\r\n </tr>\r\n')
__M_writer(
'</table>\r\n<table id="button-table" class="table-responsive">\r\n <tr>\r\n <td id="space"></td>\r\n'
)
if request.user.is_authenticated():
__M_writer(
' <td id=\'checkout\'><a href="/account.checkout" class="btn btn-warning">Checkout</a></td>\r\n'
)
else:
__M_writer(
' <td id=\'checkout\'><a href="/mylogin.cartlogin" class="btn btn-warning">Checkout</a></td>\r\n'
)
__M_writer(' </tr>\r\n</table>\r\n')
return ''
finally:
context.caller_stack._pop_frame()
<|reserved_special_token_0|>
<|reserved_special_token_1|>
# -*- coding:ascii -*-
from mako import runtime, filters, cache
UNDEFINED = runtime.UNDEFINED
__M_dict_builtin = dict
__M_locals_builtin = locals
_magic_number = 10
_modified_time = 1428612037.145222
_enable_loop = True
_template_filename = 'C:\\Users\\Cody\\Desktop\\Heritage\\chf\\templates/account.rentalcart.html'
_template_uri = '/account.rentalcart.html'
_source_encoding = 'ascii'
import os, os.path, re
_exports = ['content']
from datetime import datetime, timedelta
now = datetime.now()
noww = now.strftime('%B %d, %Y')
def _mako_get_namespace(context, name):
try:
return context.namespaces[(__name__, name)]
except KeyError:
_mako_generate_namespaces(context)
return context.namespaces[(__name__, name)]
def _mako_generate_namespaces(context):
pass
def _mako_inherit(template, context):
_mako_generate_namespaces(context)
return runtime._inherit_from(context, 'base_ajax.htm', _template_uri)
def render_body(context,**pageargs):
__M_caller = context.caller_stack._push_frame()
try:
__M_locals = __M_dict_builtin(pageargs=pageargs)
int = context.get('int', UNDEFINED)
str = context.get('str', UNDEFINED)
rentals = context.get('rentals', UNDEFINED)
def content():
return render_content(context._locals(__M_locals))
request = context.get('request', UNDEFINED)
STATIC_URL = context.get('STATIC_URL', UNDEFINED)
__M_writer = context.writer()
__M_writer('\r\n')
__M_writer('\r\n')
__M_writer(str(nowww = noww - timedelta(days=3)))
__M_writer('\r\n')
if 'parent' not in context._data or not hasattr(context._data['parent'], 'content'):
context['self'].content(**pageargs)
__M_writer('\r\n\r\n')
return ''
finally:
context.caller_stack._pop_frame()
def render_content(context,**pageargs):
__M_caller = context.caller_stack._push_frame()
try:
int = context.get('int', UNDEFINED)
str = context.get('str', UNDEFINED)
rentals = context.get('rentals', UNDEFINED)
def content():
return render_content(context)
request = context.get('request', UNDEFINED)
STATIC_URL = context.get('STATIC_URL', UNDEFINED)
__M_writer = context.writer()
__M_writer('\r\n\r\n<table class="table-responsive table-striped">\r\n <th></th>\r\n <th>#</th>\r\n <th>Name</th>\r\n <th>Price per Day</th>\r\n <th># of Days Rented</th>\r\n')
for item in rentals:
__M_writer(' <tr>\r\n <td><button rel="')
__M_writer(str( item.id ))
__M_writer('" class="btn btn-danger btn-sm deleter">Remove</button></td>\r\n <td class="img-col"><img class="shopping_cart_image" src="')
__M_writer(str(STATIC_URL))
__M_writer(str( item.photo.image ))
__M_writer('"/></td>\r\n <td class="name-col">')
__M_writer(str( noww ))
__M_writer('</td>\r\n <td class="price-col">')
__M_writer(str( item.price_per_day ))
__M_writer('</td>\r\n <td class="qty-col">')
__M_writer(str(int(request.session['rental_cart'][str(item.id)])))
__M_writer('</td>\r\n </tr>\r\n')
__M_writer('</table>\r\n<table id="button-table" class="table-responsive">\r\n <tr>\r\n <td id="space"></td>\r\n')
if request.user.is_authenticated():
__M_writer(' <td id=\'checkout\'><a href="/account.checkout" class="btn btn-warning">Checkout</a></td>\r\n')
else:
__M_writer(' <td id=\'checkout\'><a href="/mylogin.cartlogin" class="btn btn-warning">Checkout</a></td>\r\n')
__M_writer(' </tr>\r\n</table>\r\n')
return ''
finally:
context.caller_stack._pop_frame()
"""
__M_BEGIN_METADATA
{"uri": "/account.rentalcart.html", "line_map": {"70": 8, "71": 16, "72": 17, "73": 18, "74": 18, "75": 19, "76": 19, "77": 19, "78": 20, "79": 20, "80": 21, "81": 21, "82": 22, "83": 22, "84": 25, "85": 29, "86": 30, "87": 31, "88": 32, "89": 34, "95": 89, "33": 0, "16": 2, "45": 1, "46": 6, "47": 7, "48": 7, "53": 36, "59": 8}, "filename": "C:\\Users\\Cody\\Desktop\\Heritage\\chf\\templates/account.rentalcart.html", "source_encoding": "ascii"}
__M_END_METADATA
"""
|
flexible
|
{
"blob_id": "57967f36a45bb3ea62708bbbb5b2f4ddb0f4bb16",
"index": 29,
"step-1": "<mask token>\n\n\ndef _mako_get_namespace(context, name):\n try:\n return context.namespaces[__name__, name]\n except KeyError:\n _mako_generate_namespaces(context)\n return context.namespaces[__name__, name]\n\n\n<mask token>\n\n\ndef _mako_inherit(template, context):\n _mako_generate_namespaces(context)\n return runtime._inherit_from(context, 'base_ajax.htm', _template_uri)\n\n\n<mask token>\n\n\ndef render_content(context, **pageargs):\n __M_caller = context.caller_stack._push_frame()\n try:\n int = context.get('int', UNDEFINED)\n str = context.get('str', UNDEFINED)\n rentals = context.get('rentals', UNDEFINED)\n\n def content():\n return render_content(context)\n request = context.get('request', UNDEFINED)\n STATIC_URL = context.get('STATIC_URL', UNDEFINED)\n __M_writer = context.writer()\n __M_writer(\n '\\r\\n\\r\\n<table class=\"table-responsive table-striped\">\\r\\n <th></th>\\r\\n <th>#</th>\\r\\n <th>Name</th>\\r\\n <th>Price per Day</th>\\r\\n <th># of Days Rented</th>\\r\\n'\n )\n for item in rentals:\n __M_writer(' <tr>\\r\\n <td><button rel=\"')\n __M_writer(str(item.id))\n __M_writer(\n '\" class=\"btn btn-danger btn-sm deleter\">Remove</button></td>\\r\\n <td class=\"img-col\"><img class=\"shopping_cart_image\" src=\"'\n )\n __M_writer(str(STATIC_URL))\n __M_writer(str(item.photo.image))\n __M_writer('\"/></td>\\r\\n <td class=\"name-col\">')\n __M_writer(str(noww))\n __M_writer('</td>\\r\\n <td class=\"price-col\">')\n __M_writer(str(item.price_per_day))\n __M_writer('</td>\\r\\n <td class=\"qty-col\">')\n __M_writer(str(int(request.session['rental_cart'][str(item.id)])))\n __M_writer('</td>\\r\\n </tr>\\r\\n')\n __M_writer(\n '</table>\\r\\n<table id=\"button-table\" class=\"table-responsive\">\\r\\n <tr>\\r\\n <td id=\"space\"></td>\\r\\n'\n )\n if request.user.is_authenticated():\n __M_writer(\n ' <td id=\\'checkout\\'><a href=\"/account.checkout\" class=\"btn btn-warning\">Checkout</a></td>\\r\\n'\n )\n else:\n __M_writer(\n ' <td id=\\'checkout\\'><a href=\"/mylogin.cartlogin\" class=\"btn btn-warning\">Checkout</a></td>\\r\\n'\n )\n __M_writer(' </tr>\\r\\n</table>\\r\\n')\n return ''\n finally:\n context.caller_stack._pop_frame()\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef _mako_get_namespace(context, name):\n try:\n return context.namespaces[__name__, name]\n except KeyError:\n _mako_generate_namespaces(context)\n return context.namespaces[__name__, name]\n\n\ndef _mako_generate_namespaces(context):\n pass\n\n\ndef _mako_inherit(template, context):\n _mako_generate_namespaces(context)\n return runtime._inherit_from(context, 'base_ajax.htm', _template_uri)\n\n\ndef render_body(context, **pageargs):\n __M_caller = context.caller_stack._push_frame()\n try:\n __M_locals = __M_dict_builtin(pageargs=pageargs)\n int = context.get('int', UNDEFINED)\n str = context.get('str', UNDEFINED)\n rentals = context.get('rentals', UNDEFINED)\n\n def content():\n return render_content(context._locals(__M_locals))\n request = context.get('request', UNDEFINED)\n STATIC_URL = context.get('STATIC_URL', UNDEFINED)\n __M_writer = context.writer()\n __M_writer('\\r\\n')\n __M_writer('\\r\\n')\n __M_writer(str(nowww=noww - timedelta(days=3)))\n __M_writer('\\r\\n')\n if 'parent' not in context._data or not hasattr(context._data[\n 'parent'], 'content'):\n context['self'].content(**pageargs)\n __M_writer('\\r\\n\\r\\n')\n return ''\n finally:\n context.caller_stack._pop_frame()\n\n\ndef render_content(context, **pageargs):\n __M_caller = context.caller_stack._push_frame()\n try:\n int = context.get('int', UNDEFINED)\n str = context.get('str', UNDEFINED)\n rentals = context.get('rentals', UNDEFINED)\n\n def content():\n return render_content(context)\n request = context.get('request', UNDEFINED)\n STATIC_URL = context.get('STATIC_URL', UNDEFINED)\n __M_writer = context.writer()\n __M_writer(\n '\\r\\n\\r\\n<table class=\"table-responsive table-striped\">\\r\\n <th></th>\\r\\n <th>#</th>\\r\\n <th>Name</th>\\r\\n <th>Price per Day</th>\\r\\n <th># of Days Rented</th>\\r\\n'\n )\n for item in rentals:\n __M_writer(' <tr>\\r\\n <td><button rel=\"')\n __M_writer(str(item.id))\n __M_writer(\n '\" class=\"btn btn-danger btn-sm deleter\">Remove</button></td>\\r\\n <td class=\"img-col\"><img class=\"shopping_cart_image\" src=\"'\n )\n __M_writer(str(STATIC_URL))\n __M_writer(str(item.photo.image))\n __M_writer('\"/></td>\\r\\n <td class=\"name-col\">')\n __M_writer(str(noww))\n __M_writer('</td>\\r\\n <td class=\"price-col\">')\n __M_writer(str(item.price_per_day))\n __M_writer('</td>\\r\\n <td class=\"qty-col\">')\n __M_writer(str(int(request.session['rental_cart'][str(item.id)])))\n __M_writer('</td>\\r\\n </tr>\\r\\n')\n __M_writer(\n '</table>\\r\\n<table id=\"button-table\" class=\"table-responsive\">\\r\\n <tr>\\r\\n <td id=\"space\"></td>\\r\\n'\n )\n if request.user.is_authenticated():\n __M_writer(\n ' <td id=\\'checkout\\'><a href=\"/account.checkout\" class=\"btn btn-warning\">Checkout</a></td>\\r\\n'\n )\n else:\n __M_writer(\n ' <td id=\\'checkout\\'><a href=\"/mylogin.cartlogin\" class=\"btn btn-warning\">Checkout</a></td>\\r\\n'\n )\n __M_writer(' </tr>\\r\\n</table>\\r\\n')\n return ''\n finally:\n context.caller_stack._pop_frame()\n\n\n<mask token>\n",
"step-3": "<mask token>\nUNDEFINED = runtime.UNDEFINED\n__M_dict_builtin = dict\n__M_locals_builtin = locals\n_magic_number = 10\n_modified_time = 1428612037.145222\n_enable_loop = True\n_template_filename = (\n 'C:\\\\Users\\\\Cody\\\\Desktop\\\\Heritage\\\\chf\\\\templates/account.rentalcart.html'\n )\n_template_uri = '/account.rentalcart.html'\n_source_encoding = 'ascii'\n<mask token>\n_exports = ['content']\n<mask token>\nnow = datetime.now()\nnoww = now.strftime('%B %d, %Y')\n\n\ndef _mako_get_namespace(context, name):\n try:\n return context.namespaces[__name__, name]\n except KeyError:\n _mako_generate_namespaces(context)\n return context.namespaces[__name__, name]\n\n\ndef _mako_generate_namespaces(context):\n pass\n\n\ndef _mako_inherit(template, context):\n _mako_generate_namespaces(context)\n return runtime._inherit_from(context, 'base_ajax.htm', _template_uri)\n\n\ndef render_body(context, **pageargs):\n __M_caller = context.caller_stack._push_frame()\n try:\n __M_locals = __M_dict_builtin(pageargs=pageargs)\n int = context.get('int', UNDEFINED)\n str = context.get('str', UNDEFINED)\n rentals = context.get('rentals', UNDEFINED)\n\n def content():\n return render_content(context._locals(__M_locals))\n request = context.get('request', UNDEFINED)\n STATIC_URL = context.get('STATIC_URL', UNDEFINED)\n __M_writer = context.writer()\n __M_writer('\\r\\n')\n __M_writer('\\r\\n')\n __M_writer(str(nowww=noww - timedelta(days=3)))\n __M_writer('\\r\\n')\n if 'parent' not in context._data or not hasattr(context._data[\n 'parent'], 'content'):\n context['self'].content(**pageargs)\n __M_writer('\\r\\n\\r\\n')\n return ''\n finally:\n context.caller_stack._pop_frame()\n\n\ndef render_content(context, **pageargs):\n __M_caller = context.caller_stack._push_frame()\n try:\n int = context.get('int', UNDEFINED)\n str = context.get('str', UNDEFINED)\n rentals = context.get('rentals', UNDEFINED)\n\n def content():\n return render_content(context)\n request = context.get('request', UNDEFINED)\n STATIC_URL = context.get('STATIC_URL', UNDEFINED)\n __M_writer = context.writer()\n __M_writer(\n '\\r\\n\\r\\n<table class=\"table-responsive table-striped\">\\r\\n <th></th>\\r\\n <th>#</th>\\r\\n <th>Name</th>\\r\\n <th>Price per Day</th>\\r\\n <th># of Days Rented</th>\\r\\n'\n )\n for item in rentals:\n __M_writer(' <tr>\\r\\n <td><button rel=\"')\n __M_writer(str(item.id))\n __M_writer(\n '\" class=\"btn btn-danger btn-sm deleter\">Remove</button></td>\\r\\n <td class=\"img-col\"><img class=\"shopping_cart_image\" src=\"'\n )\n __M_writer(str(STATIC_URL))\n __M_writer(str(item.photo.image))\n __M_writer('\"/></td>\\r\\n <td class=\"name-col\">')\n __M_writer(str(noww))\n __M_writer('</td>\\r\\n <td class=\"price-col\">')\n __M_writer(str(item.price_per_day))\n __M_writer('</td>\\r\\n <td class=\"qty-col\">')\n __M_writer(str(int(request.session['rental_cart'][str(item.id)])))\n __M_writer('</td>\\r\\n </tr>\\r\\n')\n __M_writer(\n '</table>\\r\\n<table id=\"button-table\" class=\"table-responsive\">\\r\\n <tr>\\r\\n <td id=\"space\"></td>\\r\\n'\n )\n if request.user.is_authenticated():\n __M_writer(\n ' <td id=\\'checkout\\'><a href=\"/account.checkout\" class=\"btn btn-warning\">Checkout</a></td>\\r\\n'\n )\n else:\n __M_writer(\n ' <td id=\\'checkout\\'><a href=\"/mylogin.cartlogin\" class=\"btn btn-warning\">Checkout</a></td>\\r\\n'\n )\n __M_writer(' </tr>\\r\\n</table>\\r\\n')\n return ''\n finally:\n context.caller_stack._pop_frame()\n\n\n<mask token>\n",
"step-4": "from mako import runtime, filters, cache\nUNDEFINED = runtime.UNDEFINED\n__M_dict_builtin = dict\n__M_locals_builtin = locals\n_magic_number = 10\n_modified_time = 1428612037.145222\n_enable_loop = True\n_template_filename = (\n 'C:\\\\Users\\\\Cody\\\\Desktop\\\\Heritage\\\\chf\\\\templates/account.rentalcart.html'\n )\n_template_uri = '/account.rentalcart.html'\n_source_encoding = 'ascii'\nimport os, os.path, re\n_exports = ['content']\nfrom datetime import datetime, timedelta\nnow = datetime.now()\nnoww = now.strftime('%B %d, %Y')\n\n\ndef _mako_get_namespace(context, name):\n try:\n return context.namespaces[__name__, name]\n except KeyError:\n _mako_generate_namespaces(context)\n return context.namespaces[__name__, name]\n\n\ndef _mako_generate_namespaces(context):\n pass\n\n\ndef _mako_inherit(template, context):\n _mako_generate_namespaces(context)\n return runtime._inherit_from(context, 'base_ajax.htm', _template_uri)\n\n\ndef render_body(context, **pageargs):\n __M_caller = context.caller_stack._push_frame()\n try:\n __M_locals = __M_dict_builtin(pageargs=pageargs)\n int = context.get('int', UNDEFINED)\n str = context.get('str', UNDEFINED)\n rentals = context.get('rentals', UNDEFINED)\n\n def content():\n return render_content(context._locals(__M_locals))\n request = context.get('request', UNDEFINED)\n STATIC_URL = context.get('STATIC_URL', UNDEFINED)\n __M_writer = context.writer()\n __M_writer('\\r\\n')\n __M_writer('\\r\\n')\n __M_writer(str(nowww=noww - timedelta(days=3)))\n __M_writer('\\r\\n')\n if 'parent' not in context._data or not hasattr(context._data[\n 'parent'], 'content'):\n context['self'].content(**pageargs)\n __M_writer('\\r\\n\\r\\n')\n return ''\n finally:\n context.caller_stack._pop_frame()\n\n\ndef render_content(context, **pageargs):\n __M_caller = context.caller_stack._push_frame()\n try:\n int = context.get('int', UNDEFINED)\n str = context.get('str', UNDEFINED)\n rentals = context.get('rentals', UNDEFINED)\n\n def content():\n return render_content(context)\n request = context.get('request', UNDEFINED)\n STATIC_URL = context.get('STATIC_URL', UNDEFINED)\n __M_writer = context.writer()\n __M_writer(\n '\\r\\n\\r\\n<table class=\"table-responsive table-striped\">\\r\\n <th></th>\\r\\n <th>#</th>\\r\\n <th>Name</th>\\r\\n <th>Price per Day</th>\\r\\n <th># of Days Rented</th>\\r\\n'\n )\n for item in rentals:\n __M_writer(' <tr>\\r\\n <td><button rel=\"')\n __M_writer(str(item.id))\n __M_writer(\n '\" class=\"btn btn-danger btn-sm deleter\">Remove</button></td>\\r\\n <td class=\"img-col\"><img class=\"shopping_cart_image\" src=\"'\n )\n __M_writer(str(STATIC_URL))\n __M_writer(str(item.photo.image))\n __M_writer('\"/></td>\\r\\n <td class=\"name-col\">')\n __M_writer(str(noww))\n __M_writer('</td>\\r\\n <td class=\"price-col\">')\n __M_writer(str(item.price_per_day))\n __M_writer('</td>\\r\\n <td class=\"qty-col\">')\n __M_writer(str(int(request.session['rental_cart'][str(item.id)])))\n __M_writer('</td>\\r\\n </tr>\\r\\n')\n __M_writer(\n '</table>\\r\\n<table id=\"button-table\" class=\"table-responsive\">\\r\\n <tr>\\r\\n <td id=\"space\"></td>\\r\\n'\n )\n if request.user.is_authenticated():\n __M_writer(\n ' <td id=\\'checkout\\'><a href=\"/account.checkout\" class=\"btn btn-warning\">Checkout</a></td>\\r\\n'\n )\n else:\n __M_writer(\n ' <td id=\\'checkout\\'><a href=\"/mylogin.cartlogin\" class=\"btn btn-warning\">Checkout</a></td>\\r\\n'\n )\n __M_writer(' </tr>\\r\\n</table>\\r\\n')\n return ''\n finally:\n 
context.caller_stack._pop_frame()\n\n\n<mask token>\n",
"step-5": "# -*- coding:ascii -*-\nfrom mako import runtime, filters, cache\nUNDEFINED = runtime.UNDEFINED\n__M_dict_builtin = dict\n__M_locals_builtin = locals\n_magic_number = 10\n_modified_time = 1428612037.145222\n_enable_loop = True\n_template_filename = 'C:\\\\Users\\\\Cody\\\\Desktop\\\\Heritage\\\\chf\\\\templates/account.rentalcart.html'\n_template_uri = '/account.rentalcart.html'\n_source_encoding = 'ascii'\nimport os, os.path, re\n_exports = ['content']\n\n\n\nfrom datetime import datetime, timedelta\nnow = datetime.now()\nnoww = now.strftime('%B %d, %Y')\n\n\ndef _mako_get_namespace(context, name):\n try:\n return context.namespaces[(__name__, name)]\n except KeyError:\n _mako_generate_namespaces(context)\n return context.namespaces[(__name__, name)]\ndef _mako_generate_namespaces(context):\n pass\ndef _mako_inherit(template, context):\n _mako_generate_namespaces(context)\n return runtime._inherit_from(context, 'base_ajax.htm', _template_uri)\ndef render_body(context,**pageargs):\n __M_caller = context.caller_stack._push_frame()\n try:\n __M_locals = __M_dict_builtin(pageargs=pageargs)\n int = context.get('int', UNDEFINED)\n str = context.get('str', UNDEFINED)\n rentals = context.get('rentals', UNDEFINED)\n def content():\n return render_content(context._locals(__M_locals))\n request = context.get('request', UNDEFINED)\n STATIC_URL = context.get('STATIC_URL', UNDEFINED)\n __M_writer = context.writer()\n __M_writer('\\r\\n')\n __M_writer('\\r\\n')\n __M_writer(str(nowww = noww - timedelta(days=3)))\n __M_writer('\\r\\n')\n if 'parent' not in context._data or not hasattr(context._data['parent'], 'content'):\n context['self'].content(**pageargs)\n \n\n __M_writer('\\r\\n\\r\\n')\n return ''\n finally:\n context.caller_stack._pop_frame()\n\n\ndef render_content(context,**pageargs):\n __M_caller = context.caller_stack._push_frame()\n try:\n int = context.get('int', UNDEFINED)\n str = context.get('str', UNDEFINED)\n rentals = context.get('rentals', UNDEFINED)\n def content():\n return render_content(context)\n request = context.get('request', UNDEFINED)\n STATIC_URL = context.get('STATIC_URL', UNDEFINED)\n __M_writer = context.writer()\n __M_writer('\\r\\n\\r\\n<table class=\"table-responsive table-striped\">\\r\\n <th></th>\\r\\n <th>#</th>\\r\\n <th>Name</th>\\r\\n <th>Price per Day</th>\\r\\n <th># of Days Rented</th>\\r\\n')\n for item in rentals:\n __M_writer(' <tr>\\r\\n <td><button rel=\"')\n __M_writer(str( item.id ))\n __M_writer('\" class=\"btn btn-danger btn-sm deleter\">Remove</button></td>\\r\\n <td class=\"img-col\"><img class=\"shopping_cart_image\" src=\"')\n __M_writer(str(STATIC_URL))\n __M_writer(str( item.photo.image ))\n __M_writer('\"/></td>\\r\\n <td class=\"name-col\">')\n __M_writer(str( noww ))\n __M_writer('</td>\\r\\n <td class=\"price-col\">')\n __M_writer(str( item.price_per_day ))\n __M_writer('</td>\\r\\n <td class=\"qty-col\">')\n __M_writer(str(int(request.session['rental_cart'][str(item.id)])))\n __M_writer('</td>\\r\\n </tr>\\r\\n')\n __M_writer('</table>\\r\\n<table id=\"button-table\" class=\"table-responsive\">\\r\\n <tr>\\r\\n <td id=\"space\"></td>\\r\\n')\n if request.user.is_authenticated():\n __M_writer(' <td id=\\'checkout\\'><a href=\"/account.checkout\" class=\"btn btn-warning\">Checkout</a></td>\\r\\n')\n else:\n __M_writer(' <td id=\\'checkout\\'><a href=\"/mylogin.cartlogin\" class=\"btn btn-warning\">Checkout</a></td>\\r\\n')\n __M_writer(' </tr>\\r\\n</table>\\r\\n')\n return ''\n finally:\n 
context.caller_stack._pop_frame()\n\n\n\"\"\"\n__M_BEGIN_METADATA\n{\"uri\": \"/account.rentalcart.html\", \"line_map\": {\"70\": 8, \"71\": 16, \"72\": 17, \"73\": 18, \"74\": 18, \"75\": 19, \"76\": 19, \"77\": 19, \"78\": 20, \"79\": 20, \"80\": 21, \"81\": 21, \"82\": 22, \"83\": 22, \"84\": 25, \"85\": 29, \"86\": 30, \"87\": 31, \"88\": 32, \"89\": 34, \"95\": 89, \"33\": 0, \"16\": 2, \"45\": 1, \"46\": 6, \"47\": 7, \"48\": 7, \"53\": 36, \"59\": 8}, \"filename\": \"C:\\\\Users\\\\Cody\\\\Desktop\\\\Heritage\\\\chf\\\\templates/account.rentalcart.html\", \"source_encoding\": \"ascii\"}\n__M_END_METADATA\n\"\"\"\n",
"step-ids": [
3,
5,
6,
7,
8
]
}
|
[
3,
5,
6,
7,
8
] |
from random import random
def random_numbers():
print('start generator')
while True:
val = random()
print(f'will yield {val}')
yield val
def run_random_numbers():
print(f'{random_numbers=}')
rnd_gen = random_numbers()
print(f'{rnd_gen=}')
print(f'{next(rnd_gen)=}')
print(f'{next(rnd_gen)=}')
    # but we can have two-way communication
print(f'{rnd_gen.send(None)=}')
print(f'{rnd_gen.send(42)=}')
# rnd_gen.throw(Exception)
# rnd_gen.close()
# next(rnd_gen)
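    # Note: send(None) behaves exactly like next(). throw() re-raises the given
    # exception inside the generator at the paused yield (killing it here, since
    # nothing catches it), and after close() any further next() raises
    # StopIteration -- which is why the three lines above stay commented out.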
def inout_gen():
print('init')
ret_val = None
while True:
x = yield ret_val
if x is not None:
ret_val = x
def run_input_gen():
inout_g = inout_gen()
next(inout_g)
print(f'{next(inout_g)}')
print(f'{inout_g.send(22)}')
print(f'{next(inout_g)}')
def exercise_gen(ret_val, times):
"""Return `ret_value` `times` times.
If generator will receive some value from outside, update `ret_value`"""
def exercise1():
"""Make it pass"""
g1 = exercise_gen(42, 3)
assert next(g1) == 42
assert g1.send('new val') == 'new val'
assert next(g1) == 'new val'
try:
next(g1)
except StopIteration:
# ok
pass
else:
raise Exception('Generator should be invalid')
def exercise2():
"""Update `exercise_gen`, so it will ignore all exceptions"""
g1 = exercise_gen("I'll ignore errors", 300)
assert next(g1) == "I'll ignore errors"
assert g1.send('new val') == 'new val'
assert g1.throw(Exception) == 'new val'
assert next(g1) == 'new val'
if __name__ == '__main__':
run_random_numbers()
run_input_gen()
exercise1()
exercise2()
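# One possible solution (a sketch, not part of the original exercise file):
# giving the empty `exercise_gen` above this body makes both exercise1 and
# exercise2 pass -- each yield is wrapped in try/except so values thrown into
# the generator are swallowed, and a sent value replaces `ret_val` for all
# remaining yields. The name below is only illustrative.
def exercise_gen_solution(ret_val, times):
    for _ in range(times):
        try:
            x = yield ret_val
        except Exception:
            x = None  # ignore exceptions thrown into the generator (exercise 2)
        if x is not None:
            ret_val = x  # a sent value becomes the new yielded value (exercise 1)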
|
normal
|
{
"blob_id": "e5979aeb7cff0e2a75966924382bae87aebcfcb2",
"index": 3312,
"step-1": "<mask token>\n\n\ndef exercise_gen(ret_val, times):\n \"\"\"Return `ret_value` `times` times.\n If generator will receive some value from outside, update `ret_value`\"\"\"\n\n\ndef exercise1():\n \"\"\"Make it pass\"\"\"\n g1 = exercise_gen(42, 3)\n assert next(g1) == 42\n assert g1.send('new val') == 'new val'\n assert next(g1) == 'new val'\n try:\n next(g1)\n except StopIteration:\n pass\n else:\n raise Exception('Generator should be invalid')\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef random_numbers():\n print('start generator')\n while True:\n val = random()\n print(f'will yield {val}')\n yield val\n\n\n<mask token>\n\n\ndef inout_gen():\n print('init')\n ret_val = None\n while True:\n x = yield ret_val\n if x is not None:\n ret_val = x\n\n\n<mask token>\n\n\ndef exercise_gen(ret_val, times):\n \"\"\"Return `ret_value` `times` times.\n If generator will receive some value from outside, update `ret_value`\"\"\"\n\n\ndef exercise1():\n \"\"\"Make it pass\"\"\"\n g1 = exercise_gen(42, 3)\n assert next(g1) == 42\n assert g1.send('new val') == 'new val'\n assert next(g1) == 'new val'\n try:\n next(g1)\n except StopIteration:\n pass\n else:\n raise Exception('Generator should be invalid')\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef random_numbers():\n print('start generator')\n while True:\n val = random()\n print(f'will yield {val}')\n yield val\n\n\n<mask token>\n\n\ndef inout_gen():\n print('init')\n ret_val = None\n while True:\n x = yield ret_val\n if x is not None:\n ret_val = x\n\n\ndef run_input_gen():\n inout_g = inout_gen()\n next(inout_g)\n print(f'{next(inout_g)}')\n print(f'{inout_g.send(22)}')\n print(f'{next(inout_g)}')\n\n\ndef exercise_gen(ret_val, times):\n \"\"\"Return `ret_value` `times` times.\n If generator will receive some value from outside, update `ret_value`\"\"\"\n\n\ndef exercise1():\n \"\"\"Make it pass\"\"\"\n g1 = exercise_gen(42, 3)\n assert next(g1) == 42\n assert g1.send('new val') == 'new val'\n assert next(g1) == 'new val'\n try:\n next(g1)\n except StopIteration:\n pass\n else:\n raise Exception('Generator should be invalid')\n\n\ndef exercise2():\n \"\"\"Update `exercise_gen`, so it will ignore all exceptions\"\"\"\n g1 = exercise_gen(\"I'll ignore errors\", 300)\n assert next(g1) == \"I'll ignore errors\"\n assert g1.send('new val') == 'new val'\n assert g1.throw(Exception) == 'new val'\n assert next(g1) == 'new val'\n\n\n<mask token>\n",
"step-4": "from random import random\n\n\ndef random_numbers():\n print('start generator')\n while True:\n val = random()\n print(f'will yield {val}')\n yield val\n\n\ndef run_random_numbers():\n print(f'random_numbers={random_numbers!r}')\n rnd_gen = random_numbers()\n print(f'rnd_gen={rnd_gen!r}')\n print(f'next(rnd_gen)={next(rnd_gen)!r}')\n print(f'next(rnd_gen)={next(rnd_gen)!r}')\n print(f'rnd_gen.send(None)={rnd_gen.send(None)!r}')\n print(f'rnd_gen.send(42)={rnd_gen.send(42)!r}')\n\n\ndef inout_gen():\n print('init')\n ret_val = None\n while True:\n x = yield ret_val\n if x is not None:\n ret_val = x\n\n\ndef run_input_gen():\n inout_g = inout_gen()\n next(inout_g)\n print(f'{next(inout_g)}')\n print(f'{inout_g.send(22)}')\n print(f'{next(inout_g)}')\n\n\ndef exercise_gen(ret_val, times):\n \"\"\"Return `ret_value` `times` times.\n If generator will receive some value from outside, update `ret_value`\"\"\"\n\n\ndef exercise1():\n \"\"\"Make it pass\"\"\"\n g1 = exercise_gen(42, 3)\n assert next(g1) == 42\n assert g1.send('new val') == 'new val'\n assert next(g1) == 'new val'\n try:\n next(g1)\n except StopIteration:\n pass\n else:\n raise Exception('Generator should be invalid')\n\n\ndef exercise2():\n \"\"\"Update `exercise_gen`, so it will ignore all exceptions\"\"\"\n g1 = exercise_gen(\"I'll ignore errors\", 300)\n assert next(g1) == \"I'll ignore errors\"\n assert g1.send('new val') == 'new val'\n assert g1.throw(Exception) == 'new val'\n assert next(g1) == 'new val'\n\n\nif __name__ == '__main__':\n run_random_numbers()\n run_input_gen()\n exercise1()\n exercise2()\n",
"step-5": "from random import random\n\n\ndef random_numbers():\n print('start generator')\n while True:\n val = random()\n print(f'will yield {val}')\n yield val\n\n\ndef run_random_numbers():\n print(f'{random_numbers=}')\n rnd_gen = random_numbers()\n print(f'{rnd_gen=}')\n print(f'{next(rnd_gen)=}')\n print(f'{next(rnd_gen)=}')\n\n # but we can have two way communication\n print(f'{rnd_gen.send(None)=}')\n print(f'{rnd_gen.send(42)=}')\n # rnd_gen.throw(Exception)\n # rnd_gen.close()\n # next(rnd_gen)\n\n\ndef inout_gen():\n print('init')\n ret_val = None\n while True:\n x = yield ret_val\n if x is not None:\n ret_val = x\n\n\ndef run_input_gen():\n inout_g = inout_gen()\n next(inout_g)\n\n print(f'{next(inout_g)}')\n print(f'{inout_g.send(22)}')\n print(f'{next(inout_g)}')\n\n\ndef exercise_gen(ret_val, times):\n \"\"\"Return `ret_value` `times` times.\n If generator will receive some value from outside, update `ret_value`\"\"\"\n\n\ndef exercise1():\n \"\"\"Make it pass\"\"\"\n g1 = exercise_gen(42, 3)\n assert next(g1) == 42\n assert g1.send('new val') == 'new val'\n assert next(g1) == 'new val'\n try:\n next(g1)\n except StopIteration:\n # ok\n pass\n else:\n raise Exception('Generator should be invalid')\n\n\ndef exercise2():\n \"\"\"Update `exercise_gen`, so it will ignore all exceptions\"\"\"\n g1 = exercise_gen(\"I'll ignore errors\", 300)\n assert next(g1) == \"I'll ignore errors\"\n assert g1.send('new val') == 'new val'\n assert g1.throw(Exception) == 'new val'\n assert next(g1) == 'new val'\n\n\nif __name__ == '__main__':\n run_random_numbers()\n run_input_gen()\n exercise1()\n exercise2()\n",
"step-ids": [
2,
4,
6,
9,
10
]
}
|
[
2,
4,
6,
9,
10
] |
import os
import random
import pygame
# Class for all the game's obstacles
class Obstacle(pygame.sprite.Sprite):
# Class constructor
def __init__(self, game_params, game_speed):
self.obs_type = random.randrange(0, 3)
# Becomes a pterodactyl obstacle
if (self.obs_type == 0):
self.create_pterodactyl(game_params)
# Becomes large cacti obstacle
elif (self.obs_type == 1):
self.create_lg_cacti(game_params)
# Becomes small cacti obstacle
else:
self.create_sm_cacti(game_params)
# Gets the sprites and rect of the obstacle
pygame.sprite.Sprite.__init__(self, self.containers)
self.sprites = self.load_sprites()
self.rect = self.sprites[0].get_rect()
self.sprite_idx = random.randrange(0, self.sprite_num)
self.image = self.sprites[self.sprite_idx]
self.counter = 0
# Sets the obstacle's position and movement
self.rect.bottom = self.y_pos
self.rect.left = game_params['scr_width']
self.speed = game_speed
self.movement = [-self.speed, 0]
        # To detect if the dino successfully avoids an obstacle
self.reward_rect = pygame.Rect((game_params['scr_width'], # left
0, # top
self.width, # width
game_params['scr_height'])) # height
self.avoided = False
self.min_gap_coeff = game_params['min_gap_coeff']
self.max_gap_coeff = game_params['max_gap_coeff']
# To determine when to create a new obstacle
self.min_gap = round(self.width * game_speed
+ self.gap * self.min_gap_coeff)
self.max_gap = round(self.min_gap * self.max_gap_coeff)
# Creates a pterodactyl using the parameters in game_params
def create_pterodactyl(self, game_params):
idx = random.randrange(0, len(game_params['pter_y_pos']))
self.y_pos = game_params['pter_y_pos'][idx]
self.width = game_params['pter_width']
self.height = game_params['pter_height']
self.gap = game_params['pter_gap']
self.sprite_num = 2
self.sprite_move = True
self.img_name = game_params['pter_img']
# Creates large cacti using the parameters in game_params
def create_lg_cacti(self, game_params):
length = random.randrange(1, game_params['max_cacti_length']+1)
self.y_pos = game_params['ground_pos']
self.width = length * game_params['lg_cacti_width']
self.height = game_params['lg_cacti_height']
self.gap = game_params['lg_cacti_gap']
        self.sprite_num = 6 // length  # sprite sheets hold 6 frames total; use integer division
self.sprite_move = False
self.img_name = game_params['lg_cacti_img']
# Creates small cacti using the parameters in game_params
def create_sm_cacti(self, game_params):
length = random.randrange(1, game_params['max_cacti_length']+1)
self.y_pos = game_params['ground_pos']
self.width = length * game_params['sm_cacti_width']
self.height = game_params['sm_cacti_height']
self.gap = game_params['sm_cacti_gap']
        self.sprite_num = 6 // length  # integer frame count, as above
self.sprite_move = False
self.img_name = game_params['sm_cacti_img']
# Returns a list of images corresponding to this
# obstacle's sprites.
def load_sprites(self):
# Loads the sprite sheet
path = os.path.join('game_classes/sprites', self.img_name)
sheet = pygame.image.load(path).convert()
sheet_rect = sheet.get_rect()
# Gets the original dimensions for each sprite
size_x = sheet_rect.width/self.sprite_num
size_y = sheet_rect.height
sprites = []
# Loops through all sprites in the sprite sheet
# and appends them to the sprites list
for i in range(int(self.sprite_num)):
rect = pygame.Rect((i*size_x, 0, size_x, size_y))
image = pygame.Surface(rect.size).convert()
image.blit(sheet, (0, 0), rect)
colorkey = image.get_at((0, 0))
image.set_colorkey(colorkey, pygame.RLEACCEL)
image = pygame.transform.scale(image, (self.width, self.height))
sprites.append(image)
return sprites
    # Updates the min and max gaps between this obstacle and a new
# obstacle based on this obstacle's speed
def update_gaps(self):
self.min_gap = round(self.rect.width * self.speed
+ self.gap * self.min_gap_coeff)
self.max_gap = round(self.min_gap * self.max_gap_coeff)
# Draws the obstacle on the screen
def draw(self, screen):
screen.blit(self.image, self.rect)
# Updates the obstacle's speed, position, and sprite
def update(self, game_speed):
# updates the obstacle's speed
self.speed = game_speed
self.movement[0] = -self.speed
# Updates this obstacles sprites
if self.counter % 10 == 0 and self.sprite_move:
self.sprite_idx = (self.sprite_idx+1) % self.sprite_num
self.image = self.sprites[self.sprite_idx]
self.counter += 1
# Updates the obstacle's position
self.rect = self.rect.move(self.movement)
self.reward_rect = self.reward_rect.move(self.movement)
self.update_gaps()
# Removes obstacle from screen if it moves beyond screen
if self.rect.right < 0:
self.kill()
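# Usage sketch (illustrative, not part of the original game): the class relies on
# a `containers` class attribute (the usual pygame sprite-group pattern), an
# initialised display (convert() needs one), and sprite sheets on disk under
# game_classes/sprites/. Every value and file name in game_params below is an
# assumption for demonstration only.
if __name__ == '__main__':
    pygame.init()
    screen = pygame.display.set_mode((600, 150))
    obstacles = pygame.sprite.Group()
    Obstacle.containers = obstacles
    game_params = {
        'scr_width': 600, 'scr_height': 150, 'ground_pos': 135,
        'min_gap_coeff': 0.6, 'max_gap_coeff': 1.5,
        'pter_y_pos': [50, 75, 100], 'pter_width': 46, 'pter_height': 40,
        'pter_gap': 150, 'pter_img': 'ptera.png',
        'max_cacti_length': 3,
        'lg_cacti_width': 25, 'lg_cacti_height': 50, 'lg_cacti_gap': 120,
        'lg_cacti_img': 'cacti-big.png',
        'sm_cacti_width': 17, 'sm_cacti_height': 35, 'sm_cacti_gap': 90,
        'sm_cacti_img': 'cacti-small.png',
    }
    Obstacle(game_params, game_speed=4)   # registers itself in `obstacles`
    obstacles.update(4)                   # move everything one frame to the left
    obstacles.draw(screen)                # Group.draw blits each sprite's image at its rect
    pygame.display.flip()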
|
normal
|
{
"blob_id": "09dac7bfe98a15b3e79edcb0d0a53c0ab4d771ca",
"index": 7053,
"step-1": "<mask token>\n\n\nclass Obstacle(pygame.sprite.Sprite):\n\n def __init__(self, game_params, game_speed):\n self.obs_type = random.randrange(0, 3)\n if self.obs_type == 0:\n self.create_pterodactyl(game_params)\n elif self.obs_type == 1:\n self.create_lg_cacti(game_params)\n else:\n self.create_sm_cacti(game_params)\n pygame.sprite.Sprite.__init__(self, self.containers)\n self.sprites = self.load_sprites()\n self.rect = self.sprites[0].get_rect()\n self.sprite_idx = random.randrange(0, self.sprite_num)\n self.image = self.sprites[self.sprite_idx]\n self.counter = 0\n self.rect.bottom = self.y_pos\n self.rect.left = game_params['scr_width']\n self.speed = game_speed\n self.movement = [-self.speed, 0]\n self.reward_rect = pygame.Rect((game_params['scr_width'], 0, self.\n width, game_params['scr_height']))\n self.avoided = False\n self.min_gap_coeff = game_params['min_gap_coeff']\n self.max_gap_coeff = game_params['max_gap_coeff']\n self.min_gap = round(self.width * game_speed + self.gap * self.\n min_gap_coeff)\n self.max_gap = round(self.min_gap * self.max_gap_coeff)\n <mask token>\n <mask token>\n\n def create_sm_cacti(self, game_params):\n length = random.randrange(1, game_params['max_cacti_length'] + 1)\n self.y_pos = game_params['ground_pos']\n self.width = length * game_params['sm_cacti_width']\n self.height = game_params['sm_cacti_height']\n self.gap = game_params['sm_cacti_gap']\n self.sprite_num = 6 / length\n self.sprite_move = False\n self.img_name = game_params['sm_cacti_img']\n\n def load_sprites(self):\n path = os.path.join('game_classes/sprites', self.img_name)\n sheet = pygame.image.load(path).convert()\n sheet_rect = sheet.get_rect()\n size_x = sheet_rect.width / self.sprite_num\n size_y = sheet_rect.height\n sprites = []\n for i in range(int(self.sprite_num)):\n rect = pygame.Rect((i * size_x, 0, size_x, size_y))\n image = pygame.Surface(rect.size).convert()\n image.blit(sheet, (0, 0), rect)\n colorkey = image.get_at((0, 0))\n image.set_colorkey(colorkey, pygame.RLEACCEL)\n image = pygame.transform.scale(image, (self.width, self.height))\n sprites.append(image)\n return sprites\n\n def update_gaps(self):\n self.min_gap = round(self.rect.width * self.speed + self.gap * self\n .min_gap_coeff)\n self.max_gap = round(self.min_gap * self.max_gap_coeff)\n\n def draw(self, screen):\n screen.blit(self.image, self.rect)\n\n def update(self, game_speed):\n self.speed = game_speed\n self.movement[0] = -self.speed\n if self.counter % 10 == 0 and self.sprite_move:\n self.sprite_idx = (self.sprite_idx + 1) % self.sprite_num\n self.image = self.sprites[self.sprite_idx]\n self.counter += 1\n self.rect = self.rect.move(self.movement)\n self.reward_rect = self.reward_rect.move(self.movement)\n self.update_gaps()\n if self.rect.right < 0:\n self.kill()\n",
"step-2": "<mask token>\n\n\nclass Obstacle(pygame.sprite.Sprite):\n\n def __init__(self, game_params, game_speed):\n self.obs_type = random.randrange(0, 3)\n if self.obs_type == 0:\n self.create_pterodactyl(game_params)\n elif self.obs_type == 1:\n self.create_lg_cacti(game_params)\n else:\n self.create_sm_cacti(game_params)\n pygame.sprite.Sprite.__init__(self, self.containers)\n self.sprites = self.load_sprites()\n self.rect = self.sprites[0].get_rect()\n self.sprite_idx = random.randrange(0, self.sprite_num)\n self.image = self.sprites[self.sprite_idx]\n self.counter = 0\n self.rect.bottom = self.y_pos\n self.rect.left = game_params['scr_width']\n self.speed = game_speed\n self.movement = [-self.speed, 0]\n self.reward_rect = pygame.Rect((game_params['scr_width'], 0, self.\n width, game_params['scr_height']))\n self.avoided = False\n self.min_gap_coeff = game_params['min_gap_coeff']\n self.max_gap_coeff = game_params['max_gap_coeff']\n self.min_gap = round(self.width * game_speed + self.gap * self.\n min_gap_coeff)\n self.max_gap = round(self.min_gap * self.max_gap_coeff)\n\n def create_pterodactyl(self, game_params):\n idx = random.randrange(0, len(game_params['pter_y_pos']))\n self.y_pos = game_params['pter_y_pos'][idx]\n self.width = game_params['pter_width']\n self.height = game_params['pter_height']\n self.gap = game_params['pter_gap']\n self.sprite_num = 2\n self.sprite_move = True\n self.img_name = game_params['pter_img']\n <mask token>\n\n def create_sm_cacti(self, game_params):\n length = random.randrange(1, game_params['max_cacti_length'] + 1)\n self.y_pos = game_params['ground_pos']\n self.width = length * game_params['sm_cacti_width']\n self.height = game_params['sm_cacti_height']\n self.gap = game_params['sm_cacti_gap']\n self.sprite_num = 6 / length\n self.sprite_move = False\n self.img_name = game_params['sm_cacti_img']\n\n def load_sprites(self):\n path = os.path.join('game_classes/sprites', self.img_name)\n sheet = pygame.image.load(path).convert()\n sheet_rect = sheet.get_rect()\n size_x = sheet_rect.width / self.sprite_num\n size_y = sheet_rect.height\n sprites = []\n for i in range(int(self.sprite_num)):\n rect = pygame.Rect((i * size_x, 0, size_x, size_y))\n image = pygame.Surface(rect.size).convert()\n image.blit(sheet, (0, 0), rect)\n colorkey = image.get_at((0, 0))\n image.set_colorkey(colorkey, pygame.RLEACCEL)\n image = pygame.transform.scale(image, (self.width, self.height))\n sprites.append(image)\n return sprites\n\n def update_gaps(self):\n self.min_gap = round(self.rect.width * self.speed + self.gap * self\n .min_gap_coeff)\n self.max_gap = round(self.min_gap * self.max_gap_coeff)\n\n def draw(self, screen):\n screen.blit(self.image, self.rect)\n\n def update(self, game_speed):\n self.speed = game_speed\n self.movement[0] = -self.speed\n if self.counter % 10 == 0 and self.sprite_move:\n self.sprite_idx = (self.sprite_idx + 1) % self.sprite_num\n self.image = self.sprites[self.sprite_idx]\n self.counter += 1\n self.rect = self.rect.move(self.movement)\n self.reward_rect = self.reward_rect.move(self.movement)\n self.update_gaps()\n if self.rect.right < 0:\n self.kill()\n",
"step-3": "<mask token>\n\n\nclass Obstacle(pygame.sprite.Sprite):\n\n def __init__(self, game_params, game_speed):\n self.obs_type = random.randrange(0, 3)\n if self.obs_type == 0:\n self.create_pterodactyl(game_params)\n elif self.obs_type == 1:\n self.create_lg_cacti(game_params)\n else:\n self.create_sm_cacti(game_params)\n pygame.sprite.Sprite.__init__(self, self.containers)\n self.sprites = self.load_sprites()\n self.rect = self.sprites[0].get_rect()\n self.sprite_idx = random.randrange(0, self.sprite_num)\n self.image = self.sprites[self.sprite_idx]\n self.counter = 0\n self.rect.bottom = self.y_pos\n self.rect.left = game_params['scr_width']\n self.speed = game_speed\n self.movement = [-self.speed, 0]\n self.reward_rect = pygame.Rect((game_params['scr_width'], 0, self.\n width, game_params['scr_height']))\n self.avoided = False\n self.min_gap_coeff = game_params['min_gap_coeff']\n self.max_gap_coeff = game_params['max_gap_coeff']\n self.min_gap = round(self.width * game_speed + self.gap * self.\n min_gap_coeff)\n self.max_gap = round(self.min_gap * self.max_gap_coeff)\n\n def create_pterodactyl(self, game_params):\n idx = random.randrange(0, len(game_params['pter_y_pos']))\n self.y_pos = game_params['pter_y_pos'][idx]\n self.width = game_params['pter_width']\n self.height = game_params['pter_height']\n self.gap = game_params['pter_gap']\n self.sprite_num = 2\n self.sprite_move = True\n self.img_name = game_params['pter_img']\n\n def create_lg_cacti(self, game_params):\n length = random.randrange(1, game_params['max_cacti_length'] + 1)\n self.y_pos = game_params['ground_pos']\n self.width = length * game_params['lg_cacti_width']\n self.height = game_params['lg_cacti_height']\n self.gap = game_params['lg_cacti_gap']\n self.sprite_num = 6 / length\n self.sprite_move = False\n self.img_name = game_params['lg_cacti_img']\n\n def create_sm_cacti(self, game_params):\n length = random.randrange(1, game_params['max_cacti_length'] + 1)\n self.y_pos = game_params['ground_pos']\n self.width = length * game_params['sm_cacti_width']\n self.height = game_params['sm_cacti_height']\n self.gap = game_params['sm_cacti_gap']\n self.sprite_num = 6 / length\n self.sprite_move = False\n self.img_name = game_params['sm_cacti_img']\n\n def load_sprites(self):\n path = os.path.join('game_classes/sprites', self.img_name)\n sheet = pygame.image.load(path).convert()\n sheet_rect = sheet.get_rect()\n size_x = sheet_rect.width / self.sprite_num\n size_y = sheet_rect.height\n sprites = []\n for i in range(int(self.sprite_num)):\n rect = pygame.Rect((i * size_x, 0, size_x, size_y))\n image = pygame.Surface(rect.size).convert()\n image.blit(sheet, (0, 0), rect)\n colorkey = image.get_at((0, 0))\n image.set_colorkey(colorkey, pygame.RLEACCEL)\n image = pygame.transform.scale(image, (self.width, self.height))\n sprites.append(image)\n return sprites\n\n def update_gaps(self):\n self.min_gap = round(self.rect.width * self.speed + self.gap * self\n .min_gap_coeff)\n self.max_gap = round(self.min_gap * self.max_gap_coeff)\n\n def draw(self, screen):\n screen.blit(self.image, self.rect)\n\n def update(self, game_speed):\n self.speed = game_speed\n self.movement[0] = -self.speed\n if self.counter % 10 == 0 and self.sprite_move:\n self.sprite_idx = (self.sprite_idx + 1) % self.sprite_num\n self.image = self.sprites[self.sprite_idx]\n self.counter += 1\n self.rect = self.rect.move(self.movement)\n self.reward_rect = self.reward_rect.move(self.movement)\n self.update_gaps()\n if self.rect.right < 0:\n self.kill()\n",
"step-4": "import os\nimport random\nimport pygame\n\n\nclass Obstacle(pygame.sprite.Sprite):\n\n def __init__(self, game_params, game_speed):\n self.obs_type = random.randrange(0, 3)\n if self.obs_type == 0:\n self.create_pterodactyl(game_params)\n elif self.obs_type == 1:\n self.create_lg_cacti(game_params)\n else:\n self.create_sm_cacti(game_params)\n pygame.sprite.Sprite.__init__(self, self.containers)\n self.sprites = self.load_sprites()\n self.rect = self.sprites[0].get_rect()\n self.sprite_idx = random.randrange(0, self.sprite_num)\n self.image = self.sprites[self.sprite_idx]\n self.counter = 0\n self.rect.bottom = self.y_pos\n self.rect.left = game_params['scr_width']\n self.speed = game_speed\n self.movement = [-self.speed, 0]\n self.reward_rect = pygame.Rect((game_params['scr_width'], 0, self.\n width, game_params['scr_height']))\n self.avoided = False\n self.min_gap_coeff = game_params['min_gap_coeff']\n self.max_gap_coeff = game_params['max_gap_coeff']\n self.min_gap = round(self.width * game_speed + self.gap * self.\n min_gap_coeff)\n self.max_gap = round(self.min_gap * self.max_gap_coeff)\n\n def create_pterodactyl(self, game_params):\n idx = random.randrange(0, len(game_params['pter_y_pos']))\n self.y_pos = game_params['pter_y_pos'][idx]\n self.width = game_params['pter_width']\n self.height = game_params['pter_height']\n self.gap = game_params['pter_gap']\n self.sprite_num = 2\n self.sprite_move = True\n self.img_name = game_params['pter_img']\n\n def create_lg_cacti(self, game_params):\n length = random.randrange(1, game_params['max_cacti_length'] + 1)\n self.y_pos = game_params['ground_pos']\n self.width = length * game_params['lg_cacti_width']\n self.height = game_params['lg_cacti_height']\n self.gap = game_params['lg_cacti_gap']\n self.sprite_num = 6 / length\n self.sprite_move = False\n self.img_name = game_params['lg_cacti_img']\n\n def create_sm_cacti(self, game_params):\n length = random.randrange(1, game_params['max_cacti_length'] + 1)\n self.y_pos = game_params['ground_pos']\n self.width = length * game_params['sm_cacti_width']\n self.height = game_params['sm_cacti_height']\n self.gap = game_params['sm_cacti_gap']\n self.sprite_num = 6 / length\n self.sprite_move = False\n self.img_name = game_params['sm_cacti_img']\n\n def load_sprites(self):\n path = os.path.join('game_classes/sprites', self.img_name)\n sheet = pygame.image.load(path).convert()\n sheet_rect = sheet.get_rect()\n size_x = sheet_rect.width / self.sprite_num\n size_y = sheet_rect.height\n sprites = []\n for i in range(int(self.sprite_num)):\n rect = pygame.Rect((i * size_x, 0, size_x, size_y))\n image = pygame.Surface(rect.size).convert()\n image.blit(sheet, (0, 0), rect)\n colorkey = image.get_at((0, 0))\n image.set_colorkey(colorkey, pygame.RLEACCEL)\n image = pygame.transform.scale(image, (self.width, self.height))\n sprites.append(image)\n return sprites\n\n def update_gaps(self):\n self.min_gap = round(self.rect.width * self.speed + self.gap * self\n .min_gap_coeff)\n self.max_gap = round(self.min_gap * self.max_gap_coeff)\n\n def draw(self, screen):\n screen.blit(self.image, self.rect)\n\n def update(self, game_speed):\n self.speed = game_speed\n self.movement[0] = -self.speed\n if self.counter % 10 == 0 and self.sprite_move:\n self.sprite_idx = (self.sprite_idx + 1) % self.sprite_num\n self.image = self.sprites[self.sprite_idx]\n self.counter += 1\n self.rect = self.rect.move(self.movement)\n self.reward_rect = self.reward_rect.move(self.movement)\n self.update_gaps()\n if self.rect.right < 
0:\n self.kill()\n",
"step-5": "import os\nimport random\nimport pygame\n\n\n# Class for all the game's obstacles\nclass Obstacle(pygame.sprite.Sprite):\n # Class constructor\n def __init__(self, game_params, game_speed):\n self.obs_type = random.randrange(0, 3)\n # Becomes a pterodactyl obstacle\n if (self.obs_type == 0):\n self.create_pterodactyl(game_params)\n # Becomes large cacti obstacle\n elif (self.obs_type == 1):\n self.create_lg_cacti(game_params)\n # Becomes small cacti obstacle\n else:\n self.create_sm_cacti(game_params)\n\n # Gets the sprites and rect of the obstacle\n pygame.sprite.Sprite.__init__(self, self.containers)\n self.sprites = self.load_sprites()\n self.rect = self.sprites[0].get_rect()\n self.sprite_idx = random.randrange(0, self.sprite_num)\n self.image = self.sprites[self.sprite_idx]\n self.counter = 0\n\n # Sets the obstacle's position and movement\n self.rect.bottom = self.y_pos\n self.rect.left = game_params['scr_width']\n self.speed = game_speed\n self.movement = [-self.speed, 0]\n\n # To detect if dino succesfully avoids an obstacle\n self.reward_rect = pygame.Rect((game_params['scr_width'], # left\n 0, # top\n self.width, # width\n game_params['scr_height'])) # height\n self.avoided = False\n\n self.min_gap_coeff = game_params['min_gap_coeff']\n self.max_gap_coeff = game_params['max_gap_coeff']\n\n # To determine when to create a new obstacle\n self.min_gap = round(self.width * game_speed\n + self.gap * self.min_gap_coeff)\n self.max_gap = round(self.min_gap * self.max_gap_coeff)\n\n # Creates a pterodactyl using the parameters in game_params\n def create_pterodactyl(self, game_params):\n idx = random.randrange(0, len(game_params['pter_y_pos']))\n self.y_pos = game_params['pter_y_pos'][idx]\n self.width = game_params['pter_width']\n self.height = game_params['pter_height']\n self.gap = game_params['pter_gap']\n self.sprite_num = 2\n self.sprite_move = True\n self.img_name = game_params['pter_img']\n\n # Creates large cacti using the parameters in game_params\n def create_lg_cacti(self, game_params):\n length = random.randrange(1, game_params['max_cacti_length']+1)\n self.y_pos = game_params['ground_pos']\n self.width = length * game_params['lg_cacti_width']\n self.height = game_params['lg_cacti_height']\n self.gap = game_params['lg_cacti_gap']\n self.sprite_num = 6 / length\n self.sprite_move = False\n self.img_name = game_params['lg_cacti_img']\n\n # Creates small cacti using the parameters in game_params\n def create_sm_cacti(self, game_params):\n length = random.randrange(1, game_params['max_cacti_length']+1)\n self.y_pos = game_params['ground_pos']\n self.width = length * game_params['sm_cacti_width']\n self.height = game_params['sm_cacti_height']\n self.gap = game_params['sm_cacti_gap']\n self.sprite_num = 6 / length\n self.sprite_move = False\n self.img_name = game_params['sm_cacti_img']\n\n # Returns a list of images corresponding to this\n # obstacle's sprites.\n def load_sprites(self):\n # Loads the sprite sheet\n path = os.path.join('game_classes/sprites', self.img_name)\n sheet = pygame.image.load(path).convert()\n sheet_rect = sheet.get_rect()\n\n # Gets the original dimensions for each sprite\n size_x = sheet_rect.width/self.sprite_num\n size_y = sheet_rect.height\n\n sprites = []\n\n # Loops through all sprites in the sprite sheet\n # and appends them to the sprites list\n for i in range(int(self.sprite_num)):\n rect = pygame.Rect((i*size_x, 0, size_x, size_y))\n\n image = pygame.Surface(rect.size).convert()\n image.blit(sheet, (0, 0), rect)\n\n colorkey = 
image.get_at((0, 0))\n image.set_colorkey(colorkey, pygame.RLEACCEL)\n\n image = pygame.transform.scale(image, (self.width, self.height))\n sprites.append(image)\n\n return sprites\n\n # Update's the min and max gaps between this obstacle and a new\n # obstacle based on this obstacle's speed\n def update_gaps(self):\n self.min_gap = round(self.rect.width * self.speed\n + self.gap * self.min_gap_coeff)\n self.max_gap = round(self.min_gap * self.max_gap_coeff)\n\n # Draws the obstacle on the screen\n def draw(self, screen):\n screen.blit(self.image, self.rect)\n\n # Updates the obstacle's speed, position, and sprite\n def update(self, game_speed):\n # updates the obstacle's speed\n self.speed = game_speed\n self.movement[0] = -self.speed\n\n # Updates this obstacles sprites\n if self.counter % 10 == 0 and self.sprite_move:\n self.sprite_idx = (self.sprite_idx+1) % self.sprite_num\n self.image = self.sprites[self.sprite_idx]\n self.counter += 1\n\n # Updates the obstacle's position\n self.rect = self.rect.move(self.movement)\n self.reward_rect = self.reward_rect.move(self.movement)\n self.update_gaps()\n\n # Removes obstacle from screen if it moves beyond screen\n if self.rect.right < 0:\n self.kill()\n",
"step-ids": [
7,
8,
9,
10,
11
]
}
|
[
7,
8,
9,
10,
11
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def find_roads(probability_map, *, input_threshold=0.3, max_roads=None,
min_strength=0.17, num_angles=720, roads_min_angle=np.pi / 8,
roads_min_distance=50, debugimage=None, debugprint=None):
"""
Finds full-image roads in probability map (image).
Parameters:
    probability_map -- a numpy.ndarray with probabilities per pixel (*)
(*) i.e., the array is shaped HxW, with pixel values from 0 to 1
Keyword-Only Parameters:
input_threshold -- threshold applied to probability_map
max_roads -- maximum number of roads to be found
min_strength -- minimum strength of roads to be found
num_angles -- angular resolution used in hough transforms
roads_min_angle -- minimum required angle between roads
roads_min_distance -- minimum required distance between roads
Returns:
roads -- roads that have been found (*)
shape -- shape of probability_map (vector with 2 elements)
(*) A numpy.ndarray with floating point type of shape Nx4, with N being
the number of roads found, and 4 corresponding to columns 'strength',
'angle', 'distance', 'width'. Strength is the response for the road
(the "probability"), 'angle' and 'distance' correspond to the values
returned by skimage.transform.hough_line, and 'width' is the
identified road width (can currently be 12, 32 or 48).
"""
im = probability_map
theta = np.linspace(-np.pi / 2, np.pi / 2, num_angles)
if im.ndim == 3:
if im.shape[2] == 4:
im = im[:, :, :3]
im = im.mean(axis=2)
if debugimage:
debugimage('original', im, 0, 1, 'jet')
assert im.ndim == 2
if debugimage:
hspace, _, _ = hough_line(im, theta)
debugimage('original_hough_hspace', hspace)
im[im >= input_threshold] = 1
im[im < input_threshold] = 0
if debugimage:
debugimage('threshold_applied', im)
hspace, angles, distances = hough_line(im, theta)
hspace = np.asarray(hspace, dtype=np.float32)
hspace /= hspace.max()
if debugimage:
debugimage('hough_hspace', hspace)
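    # Matched filters along the Hough accumulator's distance axis (axis=0): a road
    # of width w lights up roughly w consecutive distance bins at its angle, so a
    # kernel of w ones flanked by w/2 minus-ones on each side peaks exactly on
    # roads of that width and penalises wider blobs. Three widths are tried.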
w12 = np.concatenate([-np.ones(6), np.ones(12), -np.ones(6)])
w32 = np.concatenate([-np.ones(16), np.ones(32), -np.ones(16)])
w48 = np.concatenate([-np.ones(24), np.ones(48), -np.ones(24)])
im12 = ndi.filters.convolve1d(hspace, w12, axis=0)
im32 = ndi.filters.convolve1d(hspace, w32, axis=0)
im48 = ndi.filters.convolve1d(hspace, w48, axis=0)
im12 /= 12
im32 /= 32
im48 /= 48
ca = None, None, 'jet'
if debugimage:
debugimage('hough_hspace_conv12', im12, *ca)
if debugimage:
debugimage('hough_hspace_conv32', im32, *ca)
if debugimage:
debugimage('hough_hspace_conv48', im48, *ca)
if debugimage:
debugimage('hough_hspace_combined', np.hstack([im12, im32, im48]), *ca)
seq = np.stack((im12, im32, im48)).flatten()
sor = np.argsort(seq)
roads = np.column_stack((seq, np.tile(np.tile(angles, distances.shape[0
]), 3), np.tile(np.repeat(distances, angles.shape[0]), 3), np.
repeat([12, 32, 48], distances.shape[0] * angles.shape[0])))[sor][::-1]
found_roads = np.asarray([]).reshape(0, 4)
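    # Greedy non-maximum suppression: walk candidates from strongest to weakest and
    # keep one only if no already-kept road is closer than roads_min_angle /
    # roads_min_distance, accounting for the angle wrap-around at +/- pi/2
    # (where the signed distance flips sign).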
for i in range(roads.shape[0]):
if roads[i, 0] < min_strength:
break
a = roads[i, 1]
d = roads[i, 2]
close = np.logical_or(np.logical_and(np.abs(found_roads[:, 1] - a) <
roads_min_angle, np.abs(found_roads[:, 2] - d) <
roads_min_distance), np.logical_and(np.pi - np.abs(found_roads[
:, 1] - a) < roads_min_angle, np.abs(found_roads[:, 2] + d) <
roads_min_distance))
if not np.any(close):
found_roads = np.vstack((found_roads, roads[i]))
if max_roads is not None and found_roads.shape[0] >= max_roads:
break
return found_roads, im.shape
def _get_line_box_cuts(angle, distance, width, height):
    # Intersect the Hough line x*cos(angle) + y*sin(angle) = distance
    # with the borders of a (width x height) image box.
    a = np.cos(angle)
    b = np.sin(angle)
    d = distance
    x0 = d / a                    # x where the line crosses y = 0
    x1 = (d - b * height) / a     # x where the line crosses y = height
    y0 = d / b                    # y where the line crosses x = 0
    y1 = (d - a * width) / b      # y where the line crosses x = width
intersections = []
if x0 >= 0 and x0 <= width:
intersections.append((x0, 0))
if x1 >= 0 and x1 <= width:
intersections.append((x1, height))
if y0 >= 0 and y0 <= height:
intersections.append((0, y0))
if y1 >= 0 and y1 <= height:
intersections.append((width, y1))
if len(intersections) == 0:
return None
assert len(intersections) == 2, (x0, x1, y0, y1)
return intersections
def _road_polygon(endpoints, width):
    # Build a rectangle of the given width around the segment a-b,
    # extended by half a width beyond both endpoints.
    a, b = endpoints
    a = np.asarray(a)
    b = np.asarray(b)
    n = b - a
    n /= np.linalg.norm(n)        # unit vector along the road
    n *= width / 2
    s = np.dot(np.array([[0, -1], [1, 0]]), n)  # half-width vector perpendicular to the road
xy = np.array([a - n - s, a - n + s, b + n + s, b + n - s])
x = xy[:, 0]
y = xy[:, 1]
return [x, y]
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def draw_roads(roads, shape):
"""
Creates an image with roads drawn as full lines.
Parameters:
roads -- ndarray describing all roads to be drawn
shape -- shape (size) of image
The parameters are exactly what is returned by find_roads (see there).
Returns:
        A numpy.ndarray with shape 'shape' and floating point type, where
background has probability 0 and roads have been drawn on top of
each other, with pixel values equal to the road strength, from
lowest to highest strength.
"""
im = np.zeros(shape)
for i in reversed(range(roads.shape[0])):
strength, angle, distance, width = roads[i]
coord = _get_line_box_cuts(angle, distance, *shape)
if coord is None:
continue
coord = np.asarray(coord)
x, y = _road_polygon(coord, width)
rr, cc = polygon(y, x, shape)
im[rr, cc] = strength
return im
def find_roads(probability_map, *, input_threshold=0.3, max_roads=None,
min_strength=0.17, num_angles=720, roads_min_angle=np.pi / 8,
roads_min_distance=50, debugimage=None, debugprint=None):
"""
Finds full-image roads in probability map (image).
Parameters:
    probability_map -- a numpy.ndarray with probabilities per pixel (*)
(*) i.e., the array is shaped HxW, with pixel values from 0 to 1
Keyword-Only Parameters:
input_threshold -- threshold applied to probability_map
max_roads -- maximum number of roads to be found
min_strength -- minimum strength of roads to be found
num_angles -- angular resolution used in hough transforms
roads_min_angle -- minimum required angle between roads
roads_min_distance -- minimum required distance between roads
Returns:
roads -- roads that have been found (*)
shape -- shape of probability_map (vector with 2 elements)
(*) A numpy.ndarray with floating point type of shape Nx4, with N being
the number of roads found, and 4 corresponding to columns 'strength',
'angle', 'distance', 'width'. Strength is the response for the road
(the "probability"), 'angle' and 'distance' correspond to the values
returned by skimage.transform.hough_line, and 'width' is the
identified road width (can currently be 12, 32 or 48).
"""
im = probability_map
theta = np.linspace(-np.pi / 2, np.pi / 2, num_angles)
if im.ndim == 3:
if im.shape[2] == 4:
im = im[:, :, :3]
im = im.mean(axis=2)
if debugimage:
debugimage('original', im, 0, 1, 'jet')
assert im.ndim == 2
if debugimage:
hspace, _, _ = hough_line(im, theta)
debugimage('original_hough_hspace', hspace)
im[im >= input_threshold] = 1
im[im < input_threshold] = 0
if debugimage:
debugimage('threshold_applied', im)
hspace, angles, distances = hough_line(im, theta)
hspace = np.asarray(hspace, dtype=np.float32)
hspace /= hspace.max()
if debugimage:
debugimage('hough_hspace', hspace)
w12 = np.concatenate([-np.ones(6), np.ones(12), -np.ones(6)])
w32 = np.concatenate([-np.ones(16), np.ones(32), -np.ones(16)])
w48 = np.concatenate([-np.ones(24), np.ones(48), -np.ones(24)])
im12 = ndi.filters.convolve1d(hspace, w12, axis=0)
im32 = ndi.filters.convolve1d(hspace, w32, axis=0)
im48 = ndi.filters.convolve1d(hspace, w48, axis=0)
im12 /= 12
im32 /= 32
im48 /= 48
ca = None, None, 'jet'
if debugimage:
debugimage('hough_hspace_conv12', im12, *ca)
if debugimage:
debugimage('hough_hspace_conv32', im32, *ca)
if debugimage:
debugimage('hough_hspace_conv48', im48, *ca)
if debugimage:
debugimage('hough_hspace_combined', np.hstack([im12, im32, im48]), *ca)
seq = np.stack((im12, im32, im48)).flatten()
sor = np.argsort(seq)
roads = np.column_stack((seq, np.tile(np.tile(angles, distances.shape[0
]), 3), np.tile(np.repeat(distances, angles.shape[0]), 3), np.
repeat([12, 32, 48], distances.shape[0] * angles.shape[0])))[sor][::-1]
found_roads = np.asarray([]).reshape(0, 4)
for i in range(roads.shape[0]):
if roads[i, 0] < min_strength:
break
a = roads[i, 1]
d = roads[i, 2]
close = np.logical_or(np.logical_and(np.abs(found_roads[:, 1] - a) <
roads_min_angle, np.abs(found_roads[:, 2] - d) <
roads_min_distance), np.logical_and(np.pi - np.abs(found_roads[
:, 1] - a) < roads_min_angle, np.abs(found_roads[:, 2] + d) <
roads_min_distance))
if not np.any(close):
found_roads = np.vstack((found_roads, roads[i]))
if max_roads is not None and found_roads.shape[0] >= max_roads:
break
return found_roads, im.shape
def _get_line_box_cuts(angle, distance, width, height):
a = np.cos(angle)
b = np.sin(angle)
d = distance
x0 = d / a
x1 = (d - b * height) / a
y0 = d / b
y1 = (d - a * width) / b
intersections = []
if x0 >= 0 and x0 <= width:
intersections.append((x0, 0))
if x1 >= 0 and x1 <= width:
intersections.append((x1, height))
if y0 >= 0 and y0 <= height:
intersections.append((0, y0))
if y1 >= 0 and y1 <= height:
intersections.append((width, y1))
if len(intersections) == 0:
return None
assert len(intersections) == 2, (x0, x1, y0, y1)
return intersections
def _road_polygon(endpoints, width):
a, b = endpoints
a = np.asarray(a)
b = np.asarray(b)
n = b - a
n /= np.linalg.norm(n)
n *= width / 2
s = np.dot(np.array([[0, -1], [1, 0]]), n)
xy = np.array([a - n - s, a - n + s, b + n + s, b + n - s])
x = xy[:, 0]
y = xy[:, 1]
return [x, y]
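# Usage sketch (illustrative; 'probability.png' and the output file name are
# assumptions, not part of this module): feed a grayscale road-probability image
# scaled to [0, 1] to find_roads, then rasterise the detections with draw_roads.
if __name__ == '__main__':
    from skimage import io, img_as_float

    prob = img_as_float(io.imread('probability.png', as_gray=True))
    roads, shape = find_roads(prob, max_roads=5)
    for strength, angle, distance, width in roads:
        print(f'strength={strength:.2f}  angle={angle:.2f} rad  '
              f'distance={distance:.0f} px  width={width:.0f} px')
    mask = draw_roads(roads, shape)          # pixel value = road strength
    io.imsave('roads_overlay.png', (mask * 255).astype('uint8'))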
<|reserved_special_token_1|>
<|reserved_special_token_0|>
import numpy as np
import scipy.ndimage as ndi
from skimage.draw import polygon
from skimage.transform import hough_line
def draw_roads(roads, shape):
"""
Creates an image with roads drawn as full lines.
Parameters:
roads -- ndarray describing all roads to be drawn
shape -- shape (size) of image
The parameters are exactly what is returned by find_roads (see there).
Returns:
        A numpy.ndarray with shape 'shape' and floating point type, where
background has probability 0 and roads have been drawn on top of
each other, with pixel values equal to the road strength, from
lowest to highest strength.
"""
im = np.zeros(shape)
for i in reversed(range(roads.shape[0])):
strength, angle, distance, width = roads[i]
coord = _get_line_box_cuts(angle, distance, *shape)
if coord is None:
continue
coord = np.asarray(coord)
x, y = _road_polygon(coord, width)
rr, cc = polygon(y, x, shape)
im[rr, cc] = strength
return im
def find_roads(probability_map, *, input_threshold=0.3, max_roads=None,
min_strength=0.17, num_angles=720, roads_min_angle=np.pi / 8,
roads_min_distance=50, debugimage=None, debugprint=None):
"""
Finds full-image roads in probability map (image).
Parameters:
        probability_map -- a numpy.ndarray with probabilities per pixel (*)
(*) i.e., the array is shaped HxW, with pixel values from 0 to 1
Keyword-Only Parameters:
input_threshold -- threshold applied to probability_map
max_roads -- maximum number of roads to be found
min_strength -- minimum strength of roads to be found
        num_angles -- angular resolution used in Hough transforms
roads_min_angle -- minimum required angle between roads
roads_min_distance -- minimum required distance between roads
Returns:
roads -- roads that have been found (*)
shape -- shape of probability_map (vector with 2 elements)
(*) A numpy.ndarray with floating point type of shape Nx4, with N being
the number of roads found, and 4 corresponding to columns 'strength',
'angle', 'distance', 'width'. Strength is the response for the road
(the "probability"), 'angle' and 'distance' correspond to the values
returned by skimage.transform.hough_line, and 'width' is the
identified road width (can currently be 12, 32 or 48).
"""
im = probability_map
theta = np.linspace(-np.pi / 2, np.pi / 2, num_angles)
if im.ndim == 3:
if im.shape[2] == 4:
im = im[:, :, :3]
im = im.mean(axis=2)
if debugimage:
debugimage('original', im, 0, 1, 'jet')
assert im.ndim == 2
if debugimage:
hspace, _, _ = hough_line(im, theta)
debugimage('original_hough_hspace', hspace)
im[im >= input_threshold] = 1
im[im < input_threshold] = 0
if debugimage:
debugimage('threshold_applied', im)
hspace, angles, distances = hough_line(im, theta)
hspace = np.asarray(hspace, dtype=np.float32)
hspace /= hspace.max()
if debugimage:
debugimage('hough_hspace', hspace)
w12 = np.concatenate([-np.ones(6), np.ones(12), -np.ones(6)])
w32 = np.concatenate([-np.ones(16), np.ones(32), -np.ones(16)])
w48 = np.concatenate([-np.ones(24), np.ones(48), -np.ones(24)])
im12 = ndi.filters.convolve1d(hspace, w12, axis=0)
im32 = ndi.filters.convolve1d(hspace, w32, axis=0)
im48 = ndi.filters.convolve1d(hspace, w48, axis=0)
im12 /= 12
im32 /= 32
im48 /= 48
ca = None, None, 'jet'
if debugimage:
debugimage('hough_hspace_conv12', im12, *ca)
if debugimage:
debugimage('hough_hspace_conv32', im32, *ca)
if debugimage:
debugimage('hough_hspace_conv48', im48, *ca)
if debugimage:
debugimage('hough_hspace_combined', np.hstack([im12, im32, im48]), *ca)
seq = np.stack((im12, im32, im48)).flatten()
sor = np.argsort(seq)
roads = np.column_stack((seq, np.tile(np.tile(angles, distances.shape[0
]), 3), np.tile(np.repeat(distances, angles.shape[0]), 3), np.
repeat([12, 32, 48], distances.shape[0] * angles.shape[0])))[sor][::-1]
found_roads = np.asarray([]).reshape(0, 4)
for i in range(roads.shape[0]):
if roads[i, 0] < min_strength:
break
a = roads[i, 1]
d = roads[i, 2]
close = np.logical_or(np.logical_and(np.abs(found_roads[:, 1] - a) <
roads_min_angle, np.abs(found_roads[:, 2] - d) <
roads_min_distance), np.logical_and(np.pi - np.abs(found_roads[
:, 1] - a) < roads_min_angle, np.abs(found_roads[:, 2] + d) <
roads_min_distance))
if not np.any(close):
found_roads = np.vstack((found_roads, roads[i]))
if max_roads is not None and found_roads.shape[0] >= max_roads:
break
return found_roads, im.shape
def _get_line_box_cuts(angle, distance, width, height):
a = np.cos(angle)
b = np.sin(angle)
d = distance
x0 = d / a
x1 = (d - b * height) / a
y0 = d / b
y1 = (d - a * width) / b
intersections = []
if x0 >= 0 and x0 <= width:
intersections.append((x0, 0))
if x1 >= 0 and x1 <= width:
intersections.append((x1, height))
if y0 >= 0 and y0 <= height:
intersections.append((0, y0))
if y1 >= 0 and y1 <= height:
intersections.append((width, y1))
if len(intersections) == 0:
return None
assert len(intersections) == 2, (x0, x1, y0, y1)
return intersections
def _road_polygon(endpoints, width):
a, b = endpoints
a = np.asarray(a)
b = np.asarray(b)
n = b - a
n /= np.linalg.norm(n)
n *= width / 2
s = np.dot(np.array([[0, -1], [1, 0]]), n)
xy = np.array([a - n - s, a - n + s, b + n + s, b + n - s])
x = xy[:, 0]
y = xy[:, 1]
return [x, y]
<|reserved_special_token_1|>
#!/usr/bin/env python3
"""
This file contains all the required methods for the street prediction utilizing
the Hough transform.
"""
import numpy as np
import scipy.ndimage as ndi
from skimage.draw import polygon
from skimage.transform import hough_line
def draw_roads(roads, shape):
"""
Creates an image with roads drawn as full lines.
Parameters:
roads -- ndarray describing all roads to be drawn
shape -- shape (size) of image
The parameters are exactly what is returned by find_roads (see there).
Returns:
        A numpy.ndarray with shape 'shape' and floating point type, where
background has probability 0 and roads have been drawn on top of
each other, with pixel values equal to the road strength, from
lowest to highest strength.
"""
im = np.zeros(shape)
for i in reversed(range(roads.shape[0])):
strength, angle, distance, width = roads[i]
coord = _get_line_box_cuts(angle, distance, *shape)
if coord is None: continue # do not abort on bogus angle/distance
coord = np.asarray(coord)
x, y = _road_polygon(coord, width)
rr, cc = polygon(y, x, shape)
im[rr,cc] = strength
return im
def find_roads(
probability_map,
*,
input_threshold=0.3,
max_roads=None,
min_strength=0.17, #0.2,
num_angles=720,
roads_min_angle=np.pi/8,
roads_min_distance=50,
debugimage=None, # for debugging ...
debugprint=None): # for debugging ...
"""
Finds full-image roads in probability map (image).
Parameters:
        probability_map -- a numpy.ndarray with probabilities per pixel (*)
(*) i.e., the array is shaped HxW, with pixel values from 0 to 1
Keyword-Only Parameters:
input_threshold -- threshold applied to probability_map
max_roads -- maximum number of roads to be found
min_strength -- minimum strength of roads to be found
        num_angles -- angular resolution used in Hough transforms
roads_min_angle -- minimum required angle between roads
roads_min_distance -- minimum required distance between roads
Returns:
roads -- roads that have been found (*)
shape -- shape of probability_map (vector with 2 elements)
(*) A numpy.ndarray with floating point type of shape Nx4, with N being
the number of roads found, and 4 corresponding to columns 'strength',
'angle', 'distance', 'width'. Strength is the response for the road
(the "probability"), 'angle' and 'distance' correspond to the values
returned by skimage.transform.hough_line, and 'width' is the
identified road width (can currently be 12, 32 or 48).
"""
# shorthand
im = probability_map
# the angles to be used in the Hough transform
theta = np.linspace(-np.pi/2, np.pi/2, num_angles)
# normalize almost anything to grayscale
if im.ndim == 3:
if im.shape[2] == 4:
im = im[:,:,:3] # throw away alpha
im = im.mean(axis=2) # convert RGB to grayscale
if debugimage: debugimage('original', im, 0, 1, 'jet')
assert im.ndim == 2
if debugimage:
hspace, _, _ = hough_line(im, theta)
debugimage('original_hough_hspace', hspace)
# create monochrome/binary input map
im[im >= input_threshold] = 1
im[im < input_threshold] = 0
if debugimage: debugimage('threshold_applied', im)
# Hough transform
hspace, angles, distances = hough_line(im, theta)
hspace = np.asarray(hspace, dtype=np.float32)
hspace /= hspace.max() # normalize
if debugimage: debugimage('hough_hspace', hspace)
# convolution filters, rectangular, tuned for widths of 12, 32, 48 pixels
w12 = np.concatenate([-np.ones((6)), np.ones((12)), -np.ones((6))])
w32 = np.concatenate([-np.ones((16)), np.ones((32)), -np.ones((16))])
w48 = np.concatenate([-np.ones((24)), np.ones((48)), -np.ones((24))])
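    # Each kernel is zero-mean (e.g. -6 + 12 - 6 = 0), so convolving it along the
    # distance axis of the Hough space acts as a crude matched filter: a band of
    # collinear votes roughly as wide as the kernel's positive part (12, 32 or 48
    # pixels) gives a strong positive response, while uniform regions give ~0.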
# convolve
im12 = ndi.filters.convolve1d(hspace, w12, axis=0)
im32 = ndi.filters.convolve1d(hspace, w32, axis=0)
im48 = ndi.filters.convolve1d(hspace, w48, axis=0)
# normalize signal strengths for different road widths
im12 /= 12
im32 /= 32
im48 /= 48
ca = (None, None, 'jet',)
if debugimage: debugimage('hough_hspace_conv12', im12, *ca)
if debugimage: debugimage('hough_hspace_conv32', im32, *ca)
if debugimage: debugimage('hough_hspace_conv48', im48, *ca)
if debugimage:
debugimage('hough_hspace_combined',
np.hstack([im12, im32, im48]), *ca)
# compute possible roads of all widths, sorted by signal strength
seq = np.stack((im12, im32, im48)).flatten()
sor = np.argsort(seq)
roads = np.column_stack((
seq,
np.tile(np.tile(angles, distances.shape[0]), 3),
np.tile(np.repeat(distances, angles.shape[0]), 3),
np.repeat([12, 32, 48], distances.shape[0] * angles.shape[0])
))[sor][::-1]
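    # The tiling mirrors the flattening order of 'seq': np.stack((im12, im32, im48))
    # has shape (3, n_distances, n_angles), so after flatten() the angle index varies
    # fastest, then the distance, then the width -- exactly how the angle, distance
    # and width columns are tiled/repeated above.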
# columns: strength, angle, distance, width
found_roads = np.asarray([]).reshape(0, 4)
    # find as many strong roads as desired, while dropping roads that are too
# similar to roads already found (non-max suppression)
for i in range(roads.shape[0]):
if roads[i,0] < min_strength:
break
a = roads[i,1]
d = roads[i,2]
close = (
np.logical_or(
np.logical_and(
np.abs(found_roads[:,1]-a) < roads_min_angle,
np.abs(found_roads[:,2]-d) < roads_min_distance),
np.logical_and(
np.pi - np.abs(found_roads[:,1]-a) < roads_min_angle,
np.abs(found_roads[:,2]+d) < roads_min_distance)))
if not np.any(close):
found_roads = np.vstack((found_roads, roads[i]))
if max_roads is not None and found_roads.shape[0] >= max_roads:
break
return found_roads, im.shape
# find begin and end coordinates of an intersection of a box (0, 0, width,
# height) with a line (given by angle and distance, as per Hough transform)
def _get_line_box_cuts(angle, distance, width, height):
a = np.cos(angle)
b = np.sin(angle)
d = distance
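    # In the Hough parameterization a line satisfies x*cos(angle) + y*sin(angle) = d.
    # Solving for the four box edges gives the candidate crossing points below:
    #   y = 0      ->  x0 = d / a
    #   y = height ->  x1 = (d - b*height) / a
    #   x = 0      ->  y0 = d / b
    #   x = width  ->  y1 = (d - a*width) / b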
# TODO: handle divide-by-zero
x0 = d/a
x1 = (d-b*height)/a
y0 = d/b
y1 = (d-a*width)/b
intersections = []
if x0 >= 0 and x0 <= width: intersections.append((x0, 0))
if x1 >= 0 and x1 <= width: intersections.append((x1, height))
if y0 >= 0 and y0 <= height: intersections.append((0, y0))
if y1 >= 0 and y1 <= height: intersections.append((width, y1))
# TODO: what about degenerate cases?
if len(intersections) == 0: return None
assert len(intersections) == 2, (x0, x1, y0, y1)
return intersections
# return a list of pixel coordinates, usable to index 2D ndarrays, that
# correspond to the shape of a line segment with the given width
def _road_polygon(endpoints, width):
a, b = endpoints
a = np.asarray(a)
b = np.asarray(b)
n = b-a
n /= np.linalg.norm(n)
n *= width / 2
s = np.dot(np.array([[0, -1], [1, 0]]), n)
xy = np.array([
a - n - s,
a - n + s,
b + n + s,
b + n - s
])
x = xy[:,0]
y = xy[:,1]
return [x, y]
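

# --- Illustrative usage sketch (not part of the original module) ---
# A minimal end-to-end example of how find_roads() and draw_roads() fit together,
# using a synthetic probability map. The map size, noise level and band position
# are arbitrary choices for demonstration only, and the module's
# ndi.filters.convolve1d calls assume a SciPy version that still provides the
# scipy.ndimage.filters namespace.
if __name__ == "__main__":
    rng = np.random.default_rng(0)
    prob_map = rng.random((200, 200)) * 0.2   # low-probability background noise
    prob_map[94:106, :] = 0.9                 # a horizontal "road" roughly 12 px wide
    roads, shape = find_roads(prob_map, max_roads=3)
    overlay = draw_roads(roads, shape)
    print("roads found:", roads.shape[0])
    print(roads)  # columns: strength, angle, distance, width
    print("overlay max strength:", overlay.max())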
|
flexible
|
{
"blob_id": "f76185095ebb1adbf7ae22ffb500ffc3d6b0a30d",
"index": 6019,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef find_roads(probability_map, *, input_threshold=0.3, max_roads=None,\n min_strength=0.17, num_angles=720, roads_min_angle=np.pi / 8,\n roads_min_distance=50, debugimage=None, debugprint=None):\n \"\"\"\n Finds full-image roads in probability map (image).\n\n Parameters:\n probability_map -- an numpy.ndarray with probabilities per pixel (*)\n\n (*) i.e., the array is shaped HxW, with pixel values from 0 to 1\n\n Keyword-Only Parameters:\n input_threshold -- threshold applied to probability_map\n max_roads -- maximum number of roads to be found\n min_strength -- minimum strength of roads to be found\n num_angles -- angular resolution used in hough transforms\n roads_min_angle -- minimum required angle between roads\n roads_min_distance -- minimum required distance between roads\n\n Returns:\n roads -- roads that have been found (*)\n shape -- shape of probability_map (vector with 2 elements)\n\n (*) A numpy.ndarray with floating point type of shape Nx4, with N being\n the number of roads found, and 4 corresponding to columns 'strength',\n 'angle', 'distance', 'width'. Strength is the response for the road\n (the \"probability\"), 'angle' and 'distance' correspond to the values\n returned by skimage.transform.hough_line, and 'width' is the\n identified road width (can currently be 12, 32 or 48).\n\n \"\"\"\n im = probability_map\n theta = np.linspace(-np.pi / 2, np.pi / 2, num_angles)\n if im.ndim == 3:\n if im.shape[2] == 4:\n im = im[:, :, :3]\n im = im.mean(axis=2)\n if debugimage:\n debugimage('original', im, 0, 1, 'jet')\n assert im.ndim == 2\n if debugimage:\n hspace, _, _ = hough_line(im, theta)\n debugimage('original_hough_hspace', hspace)\n im[im >= input_threshold] = 1\n im[im < input_threshold] = 0\n if debugimage:\n debugimage('threshold_applied', im)\n hspace, angles, distances = hough_line(im, theta)\n hspace = np.asarray(hspace, dtype=np.float32)\n hspace /= hspace.max()\n if debugimage:\n debugimage('hough_hspace', hspace)\n w12 = np.concatenate([-np.ones(6), np.ones(12), -np.ones(6)])\n w32 = np.concatenate([-np.ones(16), np.ones(32), -np.ones(16)])\n w48 = np.concatenate([-np.ones(24), np.ones(48), -np.ones(24)])\n im12 = ndi.filters.convolve1d(hspace, w12, axis=0)\n im32 = ndi.filters.convolve1d(hspace, w32, axis=0)\n im48 = ndi.filters.convolve1d(hspace, w48, axis=0)\n im12 /= 12\n im32 /= 32\n im48 /= 48\n ca = None, None, 'jet'\n if debugimage:\n debugimage('hough_hspace_conv12', im12, *ca)\n if debugimage:\n debugimage('hough_hspace_conv32', im32, *ca)\n if debugimage:\n debugimage('hough_hspace_conv48', im48, *ca)\n if debugimage:\n debugimage('hough_hspace_combined', np.hstack([im12, im32, im48]), *ca)\n seq = np.stack((im12, im32, im48)).flatten()\n sor = np.argsort(seq)\n roads = np.column_stack((seq, np.tile(np.tile(angles, distances.shape[0\n ]), 3), np.tile(np.repeat(distances, angles.shape[0]), 3), np.\n repeat([12, 32, 48], distances.shape[0] * angles.shape[0])))[sor][::-1]\n found_roads = np.asarray([]).reshape(0, 4)\n for i in range(roads.shape[0]):\n if roads[i, 0] < min_strength:\n break\n a = roads[i, 1]\n d = roads[i, 2]\n close = np.logical_or(np.logical_and(np.abs(found_roads[:, 1] - a) <\n roads_min_angle, np.abs(found_roads[:, 2] - d) <\n roads_min_distance), np.logical_and(np.pi - np.abs(found_roads[\n :, 1] - a) < roads_min_angle, np.abs(found_roads[:, 2] + d) <\n roads_min_distance))\n if not np.any(close):\n found_roads = np.vstack((found_roads, roads[i]))\n if max_roads is not None and found_roads.shape[0] >= 
max_roads:\n break\n return found_roads, im.shape\n\n\ndef _get_line_box_cuts(angle, distance, width, height):\n a = np.cos(angle)\n b = np.sin(angle)\n d = distance\n x0 = d / a\n x1 = (d - b * height) / a\n y0 = d / b\n y1 = (d - a * width) / b\n intersections = []\n if x0 >= 0 and x0 <= width:\n intersections.append((x0, 0))\n if x1 >= 0 and x1 <= width:\n intersections.append((x1, height))\n if y0 >= 0 and y0 <= height:\n intersections.append((0, y0))\n if y1 >= 0 and y1 <= height:\n intersections.append((width, y1))\n if len(intersections) == 0:\n return None\n assert len(intersections) == 2, (x0, x1, y0, y1)\n return intersections\n\n\ndef _road_polygon(endpoints, width):\n a, b = endpoints\n a = np.asarray(a)\n b = np.asarray(b)\n n = b - a\n n /= np.linalg.norm(n)\n n *= width / 2\n s = np.dot(np.array([[0, -1], [1, 0]]), n)\n xy = np.array([a - n - s, a - n + s, b + n + s, b + n - s])\n x = xy[:, 0]\n y = xy[:, 1]\n return [x, y]\n",
"step-3": "<mask token>\n\n\ndef draw_roads(roads, shape):\n \"\"\"\n Creates an image with roads drawn as full lines.\n\n Parameters:\n roads -- ndarray describing all roads to be drawn\n shape -- shape (size) of image\n\n The parameters are exactly what is returned by find_roads (see there).\n\n Returns:\n An numpy.ndarray with shape 'shape' and floating point type, where\n background has probability 0 and roads have been drawn on top of\n each other, with pixel values equal to the road strength, from\n lowest to highest strength.\n\n \"\"\"\n im = np.zeros(shape)\n for i in reversed(range(roads.shape[0])):\n strength, angle, distance, width = roads[i]\n coord = _get_line_box_cuts(angle, distance, *shape)\n if coord is None:\n continue\n coord = np.asarray(coord)\n x, y = _road_polygon(coord, width)\n rr, cc = polygon(y, x, shape)\n im[rr, cc] = strength\n return im\n\n\ndef find_roads(probability_map, *, input_threshold=0.3, max_roads=None,\n min_strength=0.17, num_angles=720, roads_min_angle=np.pi / 8,\n roads_min_distance=50, debugimage=None, debugprint=None):\n \"\"\"\n Finds full-image roads in probability map (image).\n\n Parameters:\n probability_map -- an numpy.ndarray with probabilities per pixel (*)\n\n (*) i.e., the array is shaped HxW, with pixel values from 0 to 1\n\n Keyword-Only Parameters:\n input_threshold -- threshold applied to probability_map\n max_roads -- maximum number of roads to be found\n min_strength -- minimum strength of roads to be found\n num_angles -- angular resolution used in hough transforms\n roads_min_angle -- minimum required angle between roads\n roads_min_distance -- minimum required distance between roads\n\n Returns:\n roads -- roads that have been found (*)\n shape -- shape of probability_map (vector with 2 elements)\n\n (*) A numpy.ndarray with floating point type of shape Nx4, with N being\n the number of roads found, and 4 corresponding to columns 'strength',\n 'angle', 'distance', 'width'. 
Strength is the response for the road\n (the \"probability\"), 'angle' and 'distance' correspond to the values\n returned by skimage.transform.hough_line, and 'width' is the\n identified road width (can currently be 12, 32 or 48).\n\n \"\"\"\n im = probability_map\n theta = np.linspace(-np.pi / 2, np.pi / 2, num_angles)\n if im.ndim == 3:\n if im.shape[2] == 4:\n im = im[:, :, :3]\n im = im.mean(axis=2)\n if debugimage:\n debugimage('original', im, 0, 1, 'jet')\n assert im.ndim == 2\n if debugimage:\n hspace, _, _ = hough_line(im, theta)\n debugimage('original_hough_hspace', hspace)\n im[im >= input_threshold] = 1\n im[im < input_threshold] = 0\n if debugimage:\n debugimage('threshold_applied', im)\n hspace, angles, distances = hough_line(im, theta)\n hspace = np.asarray(hspace, dtype=np.float32)\n hspace /= hspace.max()\n if debugimage:\n debugimage('hough_hspace', hspace)\n w12 = np.concatenate([-np.ones(6), np.ones(12), -np.ones(6)])\n w32 = np.concatenate([-np.ones(16), np.ones(32), -np.ones(16)])\n w48 = np.concatenate([-np.ones(24), np.ones(48), -np.ones(24)])\n im12 = ndi.filters.convolve1d(hspace, w12, axis=0)\n im32 = ndi.filters.convolve1d(hspace, w32, axis=0)\n im48 = ndi.filters.convolve1d(hspace, w48, axis=0)\n im12 /= 12\n im32 /= 32\n im48 /= 48\n ca = None, None, 'jet'\n if debugimage:\n debugimage('hough_hspace_conv12', im12, *ca)\n if debugimage:\n debugimage('hough_hspace_conv32', im32, *ca)\n if debugimage:\n debugimage('hough_hspace_conv48', im48, *ca)\n if debugimage:\n debugimage('hough_hspace_combined', np.hstack([im12, im32, im48]), *ca)\n seq = np.stack((im12, im32, im48)).flatten()\n sor = np.argsort(seq)\n roads = np.column_stack((seq, np.tile(np.tile(angles, distances.shape[0\n ]), 3), np.tile(np.repeat(distances, angles.shape[0]), 3), np.\n repeat([12, 32, 48], distances.shape[0] * angles.shape[0])))[sor][::-1]\n found_roads = np.asarray([]).reshape(0, 4)\n for i in range(roads.shape[0]):\n if roads[i, 0] < min_strength:\n break\n a = roads[i, 1]\n d = roads[i, 2]\n close = np.logical_or(np.logical_and(np.abs(found_roads[:, 1] - a) <\n roads_min_angle, np.abs(found_roads[:, 2] - d) <\n roads_min_distance), np.logical_and(np.pi - np.abs(found_roads[\n :, 1] - a) < roads_min_angle, np.abs(found_roads[:, 2] + d) <\n roads_min_distance))\n if not np.any(close):\n found_roads = np.vstack((found_roads, roads[i]))\n if max_roads is not None and found_roads.shape[0] >= max_roads:\n break\n return found_roads, im.shape\n\n\ndef _get_line_box_cuts(angle, distance, width, height):\n a = np.cos(angle)\n b = np.sin(angle)\n d = distance\n x0 = d / a\n x1 = (d - b * height) / a\n y0 = d / b\n y1 = (d - a * width) / b\n intersections = []\n if x0 >= 0 and x0 <= width:\n intersections.append((x0, 0))\n if x1 >= 0 and x1 <= width:\n intersections.append((x1, height))\n if y0 >= 0 and y0 <= height:\n intersections.append((0, y0))\n if y1 >= 0 and y1 <= height:\n intersections.append((width, y1))\n if len(intersections) == 0:\n return None\n assert len(intersections) == 2, (x0, x1, y0, y1)\n return intersections\n\n\ndef _road_polygon(endpoints, width):\n a, b = endpoints\n a = np.asarray(a)\n b = np.asarray(b)\n n = b - a\n n /= np.linalg.norm(n)\n n *= width / 2\n s = np.dot(np.array([[0, -1], [1, 0]]), n)\n xy = np.array([a - n - s, a - n + s, b + n + s, b + n - s])\n x = xy[:, 0]\n y = xy[:, 1]\n return [x, y]\n",
"step-4": "<mask token>\nimport numpy as np\nimport scipy.ndimage as ndi\nfrom skimage.draw import polygon\nfrom skimage.transform import hough_line\n\n\ndef draw_roads(roads, shape):\n \"\"\"\n Creates an image with roads drawn as full lines.\n\n Parameters:\n roads -- ndarray describing all roads to be drawn\n shape -- shape (size) of image\n\n The parameters are exactly what is returned by find_roads (see there).\n\n Returns:\n An numpy.ndarray with shape 'shape' and floating point type, where\n background has probability 0 and roads have been drawn on top of\n each other, with pixel values equal to the road strength, from\n lowest to highest strength.\n\n \"\"\"\n im = np.zeros(shape)\n for i in reversed(range(roads.shape[0])):\n strength, angle, distance, width = roads[i]\n coord = _get_line_box_cuts(angle, distance, *shape)\n if coord is None:\n continue\n coord = np.asarray(coord)\n x, y = _road_polygon(coord, width)\n rr, cc = polygon(y, x, shape)\n im[rr, cc] = strength\n return im\n\n\ndef find_roads(probability_map, *, input_threshold=0.3, max_roads=None,\n min_strength=0.17, num_angles=720, roads_min_angle=np.pi / 8,\n roads_min_distance=50, debugimage=None, debugprint=None):\n \"\"\"\n Finds full-image roads in probability map (image).\n\n Parameters:\n probability_map -- an numpy.ndarray with probabilities per pixel (*)\n\n (*) i.e., the array is shaped HxW, with pixel values from 0 to 1\n\n Keyword-Only Parameters:\n input_threshold -- threshold applied to probability_map\n max_roads -- maximum number of roads to be found\n min_strength -- minimum strength of roads to be found\n num_angles -- angular resolution used in hough transforms\n roads_min_angle -- minimum required angle between roads\n roads_min_distance -- minimum required distance between roads\n\n Returns:\n roads -- roads that have been found (*)\n shape -- shape of probability_map (vector with 2 elements)\n\n (*) A numpy.ndarray with floating point type of shape Nx4, with N being\n the number of roads found, and 4 corresponding to columns 'strength',\n 'angle', 'distance', 'width'. 
Strength is the response for the road\n (the \"probability\"), 'angle' and 'distance' correspond to the values\n returned by skimage.transform.hough_line, and 'width' is the\n identified road width (can currently be 12, 32 or 48).\n\n \"\"\"\n im = probability_map\n theta = np.linspace(-np.pi / 2, np.pi / 2, num_angles)\n if im.ndim == 3:\n if im.shape[2] == 4:\n im = im[:, :, :3]\n im = im.mean(axis=2)\n if debugimage:\n debugimage('original', im, 0, 1, 'jet')\n assert im.ndim == 2\n if debugimage:\n hspace, _, _ = hough_line(im, theta)\n debugimage('original_hough_hspace', hspace)\n im[im >= input_threshold] = 1\n im[im < input_threshold] = 0\n if debugimage:\n debugimage('threshold_applied', im)\n hspace, angles, distances = hough_line(im, theta)\n hspace = np.asarray(hspace, dtype=np.float32)\n hspace /= hspace.max()\n if debugimage:\n debugimage('hough_hspace', hspace)\n w12 = np.concatenate([-np.ones(6), np.ones(12), -np.ones(6)])\n w32 = np.concatenate([-np.ones(16), np.ones(32), -np.ones(16)])\n w48 = np.concatenate([-np.ones(24), np.ones(48), -np.ones(24)])\n im12 = ndi.filters.convolve1d(hspace, w12, axis=0)\n im32 = ndi.filters.convolve1d(hspace, w32, axis=0)\n im48 = ndi.filters.convolve1d(hspace, w48, axis=0)\n im12 /= 12\n im32 /= 32\n im48 /= 48\n ca = None, None, 'jet'\n if debugimage:\n debugimage('hough_hspace_conv12', im12, *ca)\n if debugimage:\n debugimage('hough_hspace_conv32', im32, *ca)\n if debugimage:\n debugimage('hough_hspace_conv48', im48, *ca)\n if debugimage:\n debugimage('hough_hspace_combined', np.hstack([im12, im32, im48]), *ca)\n seq = np.stack((im12, im32, im48)).flatten()\n sor = np.argsort(seq)\n roads = np.column_stack((seq, np.tile(np.tile(angles, distances.shape[0\n ]), 3), np.tile(np.repeat(distances, angles.shape[0]), 3), np.\n repeat([12, 32, 48], distances.shape[0] * angles.shape[0])))[sor][::-1]\n found_roads = np.asarray([]).reshape(0, 4)\n for i in range(roads.shape[0]):\n if roads[i, 0] < min_strength:\n break\n a = roads[i, 1]\n d = roads[i, 2]\n close = np.logical_or(np.logical_and(np.abs(found_roads[:, 1] - a) <\n roads_min_angle, np.abs(found_roads[:, 2] - d) <\n roads_min_distance), np.logical_and(np.pi - np.abs(found_roads[\n :, 1] - a) < roads_min_angle, np.abs(found_roads[:, 2] + d) <\n roads_min_distance))\n if not np.any(close):\n found_roads = np.vstack((found_roads, roads[i]))\n if max_roads is not None and found_roads.shape[0] >= max_roads:\n break\n return found_roads, im.shape\n\n\ndef _get_line_box_cuts(angle, distance, width, height):\n a = np.cos(angle)\n b = np.sin(angle)\n d = distance\n x0 = d / a\n x1 = (d - b * height) / a\n y0 = d / b\n y1 = (d - a * width) / b\n intersections = []\n if x0 >= 0 and x0 <= width:\n intersections.append((x0, 0))\n if x1 >= 0 and x1 <= width:\n intersections.append((x1, height))\n if y0 >= 0 and y0 <= height:\n intersections.append((0, y0))\n if y1 >= 0 and y1 <= height:\n intersections.append((width, y1))\n if len(intersections) == 0:\n return None\n assert len(intersections) == 2, (x0, x1, y0, y1)\n return intersections\n\n\ndef _road_polygon(endpoints, width):\n a, b = endpoints\n a = np.asarray(a)\n b = np.asarray(b)\n n = b - a\n n /= np.linalg.norm(n)\n n *= width / 2\n s = np.dot(np.array([[0, -1], [1, 0]]), n)\n xy = np.array([a - n - s, a - n + s, b + n + s, b + n - s])\n x = xy[:, 0]\n y = xy[:, 1]\n return [x, y]\n",
"step-5": "#!/usr/bin/env python3\n\n\"\"\"\nThis file contains all the required methods for the street prediction utilizing\nthe Hough transform.\n\"\"\"\n\nimport numpy as np\nimport scipy.ndimage as ndi\n\nfrom skimage.draw import polygon\nfrom skimage.transform import hough_line\n\n\ndef draw_roads(roads, shape):\n \"\"\"\n Creates an image with roads drawn as full lines.\n\n Parameters:\n roads -- ndarray describing all roads to be drawn\n shape -- shape (size) of image\n\n The parameters are exactly what is returned by find_roads (see there).\n\n Returns:\n An numpy.ndarray with shape 'shape' and floating point type, where\n background has probability 0 and roads have been drawn on top of\n each other, with pixel values equal to the road strength, from\n lowest to highest strength.\n\n \"\"\"\n\n im = np.zeros(shape)\n\n for i in reversed(range(roads.shape[0])):\n strength, angle, distance, width = roads[i]\n coord = _get_line_box_cuts(angle, distance, *shape)\n if coord is None: continue # do not abort on bogus angle/distance\n coord = np.asarray(coord)\n x, y = _road_polygon(coord, width)\n rr, cc = polygon(y, x, shape)\n im[rr,cc] = strength\n\n return im\n\n\ndef find_roads(\n probability_map,\n *,\n input_threshold=0.3,\n max_roads=None,\n min_strength=0.17, #0.2,\n num_angles=720,\n roads_min_angle=np.pi/8,\n roads_min_distance=50,\n debugimage=None, # for debugging ...\n debugprint=None): # for debugging ...\n \"\"\"\n Finds full-image roads in probability map (image).\n\n Parameters:\n probability_map -- an numpy.ndarray with probabilities per pixel (*)\n\n (*) i.e., the array is shaped HxW, with pixel values from 0 to 1\n\n Keyword-Only Parameters:\n input_threshold -- threshold applied to probability_map\n max_roads -- maximum number of roads to be found\n min_strength -- minimum strength of roads to be found\n num_angles -- angular resolution used in hough transforms\n roads_min_angle -- minimum required angle between roads\n roads_min_distance -- minimum required distance between roads\n\n Returns:\n roads -- roads that have been found (*)\n shape -- shape of probability_map (vector with 2 elements)\n\n (*) A numpy.ndarray with floating point type of shape Nx4, with N being\n the number of roads found, and 4 corresponding to columns 'strength',\n 'angle', 'distance', 'width'. 
Strength is the response for the road\n (the \"probability\"), 'angle' and 'distance' correspond to the values\n returned by skimage.transform.hough_line, and 'width' is the\n identified road width (can currently be 12, 32 or 48).\n\n \"\"\"\n\n # shorthand\n im = probability_map\n\n # the angles to be used in the Hough transform\n theta = np.linspace(-np.pi/2, np.pi/2, num_angles)\n\n # normalize almost anything to grayscale\n if im.ndim == 3:\n if im.shape[2] == 4:\n im = im[:,:,:3] # throw away alpha\n im = im.mean(axis=2) # convert RGB to grayscale\n\n if debugimage: debugimage('original', im, 0, 1, 'jet')\n\n assert im.ndim == 2\n\n if debugimage:\n hspace, _, _ = hough_line(im, theta)\n debugimage('original_hough_hspace', hspace)\n\n # create monochrome/binary input map\n im[im >= input_threshold] = 1\n im[im < input_threshold] = 0\n\n if debugimage: debugimage('threshold_applied', im)\n\n # Hough transform\n hspace, angles, distances = hough_line(im, theta)\n\n hspace = np.asarray(hspace, dtype=np.float32)\n hspace /= hspace.max() # normalize\n\n if debugimage: debugimage('hough_hspace', hspace)\n\n # convolution filters, rectangular, tuned for widths of 12, 32, 48 pixels\n w12 = np.concatenate([-np.ones((6)), np.ones((12)), -np.ones((6))])\n w32 = np.concatenate([-np.ones((16)), np.ones((32)), -np.ones((16))])\n w48 = np.concatenate([-np.ones((24)), np.ones((48)), -np.ones((24))])\n\n # convolve\n im12 = ndi.filters.convolve1d(hspace, w12, axis=0)\n im32 = ndi.filters.convolve1d(hspace, w32, axis=0)\n im48 = ndi.filters.convolve1d(hspace, w48, axis=0)\n\n # normalize signal strengths for different road widths\n im12 /= 12\n im32 /= 32\n im48 /= 48\n\n ca = (None, None, 'jet',)\n if debugimage: debugimage('hough_hspace_conv12', im12, *ca)\n if debugimage: debugimage('hough_hspace_conv32', im32, *ca)\n if debugimage: debugimage('hough_hspace_conv48', im48, *ca)\n if debugimage:\n debugimage('hough_hspace_combined',\n np.hstack([im12, im32, im48]), *ca)\n\n # compute possible roads of all widths, sorted by signal strength\n seq = np.stack((im12, im32, im48)).flatten()\n sor = np.argsort(seq)\n roads = np.column_stack((\n seq,\n np.tile(np.tile(angles, distances.shape[0]), 3),\n np.tile(np.repeat(distances, angles.shape[0]), 3),\n np.repeat([12, 32, 48], distances.shape[0] * angles.shape[0])\n ))[sor][::-1]\n\n # columns: strength, angle, distance, width\n found_roads = np.asarray([]).reshape(0, 4)\n\n # find as many as strong roads as desired, while dropping roads that are too\n # similar to roads already found (non-max suppression)\n for i in range(roads.shape[0]):\n if roads[i,0] < min_strength:\n break\n a = roads[i,1]\n d = roads[i,2]\n close = (\n np.logical_or(\n np.logical_and(\n np.abs(found_roads[:,1]-a) < roads_min_angle,\n np.abs(found_roads[:,2]-d) < roads_min_distance),\n np.logical_and(\n np.pi - np.abs(found_roads[:,1]-a) < roads_min_angle,\n np.abs(found_roads[:,2]+d) < roads_min_distance)))\n if not np.any(close):\n found_roads = np.vstack((found_roads, roads[i]))\n if max_roads is not None and found_roads.shape[0] >= max_roads:\n break\n\n return found_roads, im.shape\n\n\n# find begin and end coordinates of an intersection of a box (0, 0, width,\n# height) with a line (given by angle and distance, as per Hough transform)\ndef _get_line_box_cuts(angle, distance, width, height):\n a = np.cos(angle)\n b = np.sin(angle)\n d = distance\n # TODO: handle divide-by-zero\n x0 = d/a\n x1 = (d-b*height)/a\n y0 = d/b\n y1 = (d-a*width)/b\n intersections = []\n if x0 >= 0 and x0 
<= width: intersections.append((x0, 0))\n if x1 >= 0 and x1 <= width: intersections.append((x1, height))\n if y0 >= 0 and y0 <= height: intersections.append((0, y0))\n if y1 >= 0 and y1 <= height: intersections.append((width, y1))\n # TODO: what about degenerate cases?\n if len(intersections) == 0: return None\n assert len(intersections) == 2, (x0, x1, y0, y1)\n return intersections\n\n\n# return a list of pixel coordinates, usable to index 2D ndarrays, that\n# correspond to the shape of line segment with given width\ndef _road_polygon(endpoints, width):\n a, b = endpoints\n a = np.asarray(a)\n b = np.asarray(b)\n n = b-a\n n /= np.linalg.norm(n)\n n *= width / 2\n s = np.dot(np.array([[0, -1], [1, 0]]), n)\n xy = np.array([\n a - n - s,\n a - n + s,\n b + n + s,\n b + n - s\n ])\n x = xy[:,0]\n y = xy[:,1]\n return [x, y]\n",
"step-ids": [
0,
3,
4,
5,
6
]
}
|
[
0,
3,
4,
5,
6
] |
"""Test Assert module."""
import unittest
from physalia import asserts
from physalia.fixtures.models import create_random_sample
from physalia.models import Measurement
# pylint: disable=missing-docstring
class TestAssert(unittest.TestCase):
TEST_CSV_STORAGE = "./test_asserts_db.csv"
def setUp(self):
Measurement.csv_storage = self.TEST_CSV_STORAGE
self.addCleanup(Measurement.clear_database)
def test_consumption_below(self):
sample = create_random_sample(10, 1)
asserts.consumption_below(sample, 11)
with self.assertRaises(Exception):
asserts.consumption_below(sample, 9)
def test_consumption_lower_than_app(self):
sample_low_energy = create_random_sample(
9, 1,
app_pkg='com.sample',
use_case='login'
)
sample_high_energy = create_random_sample(
12, 1,
app_pkg='com.sample',
use_case='login'
)
existing_sample_one = create_random_sample(
10, 1,
app_pkg='com.persisted',
use_case='login'
)
existing_sample_two = create_random_sample(
11, 1,
app_pkg='com.persisted',
use_case='logout'
)
for measurement in existing_sample_one+existing_sample_two:
measurement.persist()
asserts.consumption_lower_than_app(
sample_low_energy, "com.persisted"
)
asserts.consumption_lower_than_app(
sample_low_energy, "com.persisted", "login"
)
with self.assertRaises(Exception):
asserts.consumption_lower_than_app(
sample_high_energy, "com.persisted"
)
with self.assertRaises(Exception):
asserts.consumption_lower_than_app(
sample_high_energy, "com.persisted", "login"
)
def test_top_percentile(self):
sample = create_random_sample(
11, 1,
app_pkg='com.sample',
use_case='login'
)
for i in range(100):
existing_sample = create_random_sample(
i, 1,
app_pkg=('com.persisted.{}'.format(i)),
use_case='login'
)
for measurement in existing_sample:
measurement.persist()
asserts.top_percentile(sample, 12)
with self.assertRaises(Exception):
asserts.top_percentile(sample, 11)
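

# Not part of the original file: a conventional entry point so the suite can also be
# run directly as a script, in addition to a runner such as pytest or "python -m unittest".
if __name__ == '__main__':
    unittest.main()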
|
normal
|
{
"blob_id": "eda1c1db5371f5171f0e1929e98d09e10fdcef24",
"index": 1677,
"step-1": "<mask token>\n\n\nclass TestAssert(unittest.TestCase):\n <mask token>\n <mask token>\n\n def test_consumption_below(self):\n sample = create_random_sample(10, 1)\n asserts.consumption_below(sample, 11)\n with self.assertRaises(Exception):\n asserts.consumption_below(sample, 9)\n\n def test_consumption_lower_than_app(self):\n sample_low_energy = create_random_sample(9, 1, app_pkg='com.sample',\n use_case='login')\n sample_high_energy = create_random_sample(12, 1, app_pkg=\n 'com.sample', use_case='login')\n existing_sample_one = create_random_sample(10, 1, app_pkg=\n 'com.persisted', use_case='login')\n existing_sample_two = create_random_sample(11, 1, app_pkg=\n 'com.persisted', use_case='logout')\n for measurement in (existing_sample_one + existing_sample_two):\n measurement.persist()\n asserts.consumption_lower_than_app(sample_low_energy, 'com.persisted')\n asserts.consumption_lower_than_app(sample_low_energy,\n 'com.persisted', 'login')\n with self.assertRaises(Exception):\n asserts.consumption_lower_than_app(sample_high_energy,\n 'com.persisted')\n with self.assertRaises(Exception):\n asserts.consumption_lower_than_app(sample_high_energy,\n 'com.persisted', 'login')\n\n def test_top_percentile(self):\n sample = create_random_sample(11, 1, app_pkg='com.sample', use_case\n ='login')\n for i in range(100):\n existing_sample = create_random_sample(i, 1, app_pkg=\n 'com.persisted.{}'.format(i), use_case='login')\n for measurement in existing_sample:\n measurement.persist()\n asserts.top_percentile(sample, 12)\n with self.assertRaises(Exception):\n asserts.top_percentile(sample, 11)\n",
"step-2": "<mask token>\n\n\nclass TestAssert(unittest.TestCase):\n <mask token>\n\n def setUp(self):\n Measurement.csv_storage = self.TEST_CSV_STORAGE\n self.addCleanup(Measurement.clear_database)\n\n def test_consumption_below(self):\n sample = create_random_sample(10, 1)\n asserts.consumption_below(sample, 11)\n with self.assertRaises(Exception):\n asserts.consumption_below(sample, 9)\n\n def test_consumption_lower_than_app(self):\n sample_low_energy = create_random_sample(9, 1, app_pkg='com.sample',\n use_case='login')\n sample_high_energy = create_random_sample(12, 1, app_pkg=\n 'com.sample', use_case='login')\n existing_sample_one = create_random_sample(10, 1, app_pkg=\n 'com.persisted', use_case='login')\n existing_sample_two = create_random_sample(11, 1, app_pkg=\n 'com.persisted', use_case='logout')\n for measurement in (existing_sample_one + existing_sample_two):\n measurement.persist()\n asserts.consumption_lower_than_app(sample_low_energy, 'com.persisted')\n asserts.consumption_lower_than_app(sample_low_energy,\n 'com.persisted', 'login')\n with self.assertRaises(Exception):\n asserts.consumption_lower_than_app(sample_high_energy,\n 'com.persisted')\n with self.assertRaises(Exception):\n asserts.consumption_lower_than_app(sample_high_energy,\n 'com.persisted', 'login')\n\n def test_top_percentile(self):\n sample = create_random_sample(11, 1, app_pkg='com.sample', use_case\n ='login')\n for i in range(100):\n existing_sample = create_random_sample(i, 1, app_pkg=\n 'com.persisted.{}'.format(i), use_case='login')\n for measurement in existing_sample:\n measurement.persist()\n asserts.top_percentile(sample, 12)\n with self.assertRaises(Exception):\n asserts.top_percentile(sample, 11)\n",
"step-3": "<mask token>\n\n\nclass TestAssert(unittest.TestCase):\n TEST_CSV_STORAGE = './test_asserts_db.csv'\n\n def setUp(self):\n Measurement.csv_storage = self.TEST_CSV_STORAGE\n self.addCleanup(Measurement.clear_database)\n\n def test_consumption_below(self):\n sample = create_random_sample(10, 1)\n asserts.consumption_below(sample, 11)\n with self.assertRaises(Exception):\n asserts.consumption_below(sample, 9)\n\n def test_consumption_lower_than_app(self):\n sample_low_energy = create_random_sample(9, 1, app_pkg='com.sample',\n use_case='login')\n sample_high_energy = create_random_sample(12, 1, app_pkg=\n 'com.sample', use_case='login')\n existing_sample_one = create_random_sample(10, 1, app_pkg=\n 'com.persisted', use_case='login')\n existing_sample_two = create_random_sample(11, 1, app_pkg=\n 'com.persisted', use_case='logout')\n for measurement in (existing_sample_one + existing_sample_two):\n measurement.persist()\n asserts.consumption_lower_than_app(sample_low_energy, 'com.persisted')\n asserts.consumption_lower_than_app(sample_low_energy,\n 'com.persisted', 'login')\n with self.assertRaises(Exception):\n asserts.consumption_lower_than_app(sample_high_energy,\n 'com.persisted')\n with self.assertRaises(Exception):\n asserts.consumption_lower_than_app(sample_high_energy,\n 'com.persisted', 'login')\n\n def test_top_percentile(self):\n sample = create_random_sample(11, 1, app_pkg='com.sample', use_case\n ='login')\n for i in range(100):\n existing_sample = create_random_sample(i, 1, app_pkg=\n 'com.persisted.{}'.format(i), use_case='login')\n for measurement in existing_sample:\n measurement.persist()\n asserts.top_percentile(sample, 12)\n with self.assertRaises(Exception):\n asserts.top_percentile(sample, 11)\n",
"step-4": "<mask token>\nimport unittest\nfrom physalia import asserts\nfrom physalia.fixtures.models import create_random_sample\nfrom physalia.models import Measurement\n\n\nclass TestAssert(unittest.TestCase):\n TEST_CSV_STORAGE = './test_asserts_db.csv'\n\n def setUp(self):\n Measurement.csv_storage = self.TEST_CSV_STORAGE\n self.addCleanup(Measurement.clear_database)\n\n def test_consumption_below(self):\n sample = create_random_sample(10, 1)\n asserts.consumption_below(sample, 11)\n with self.assertRaises(Exception):\n asserts.consumption_below(sample, 9)\n\n def test_consumption_lower_than_app(self):\n sample_low_energy = create_random_sample(9, 1, app_pkg='com.sample',\n use_case='login')\n sample_high_energy = create_random_sample(12, 1, app_pkg=\n 'com.sample', use_case='login')\n existing_sample_one = create_random_sample(10, 1, app_pkg=\n 'com.persisted', use_case='login')\n existing_sample_two = create_random_sample(11, 1, app_pkg=\n 'com.persisted', use_case='logout')\n for measurement in (existing_sample_one + existing_sample_two):\n measurement.persist()\n asserts.consumption_lower_than_app(sample_low_energy, 'com.persisted')\n asserts.consumption_lower_than_app(sample_low_energy,\n 'com.persisted', 'login')\n with self.assertRaises(Exception):\n asserts.consumption_lower_than_app(sample_high_energy,\n 'com.persisted')\n with self.assertRaises(Exception):\n asserts.consumption_lower_than_app(sample_high_energy,\n 'com.persisted', 'login')\n\n def test_top_percentile(self):\n sample = create_random_sample(11, 1, app_pkg='com.sample', use_case\n ='login')\n for i in range(100):\n existing_sample = create_random_sample(i, 1, app_pkg=\n 'com.persisted.{}'.format(i), use_case='login')\n for measurement in existing_sample:\n measurement.persist()\n asserts.top_percentile(sample, 12)\n with self.assertRaises(Exception):\n asserts.top_percentile(sample, 11)\n",
"step-5": "\"\"\"Test Assert module.\"\"\"\n\nimport unittest\nfrom physalia import asserts\nfrom physalia.fixtures.models import create_random_sample\nfrom physalia.models import Measurement\n\n# pylint: disable=missing-docstring\n\nclass TestAssert(unittest.TestCase):\n TEST_CSV_STORAGE = \"./test_asserts_db.csv\"\n\n def setUp(self):\n Measurement.csv_storage = self.TEST_CSV_STORAGE\n self.addCleanup(Measurement.clear_database)\n\n def test_consumption_below(self):\n sample = create_random_sample(10, 1)\n asserts.consumption_below(sample, 11)\n with self.assertRaises(Exception):\n asserts.consumption_below(sample, 9)\n\n def test_consumption_lower_than_app(self):\n sample_low_energy = create_random_sample(\n 9, 1,\n app_pkg='com.sample',\n use_case='login'\n )\n sample_high_energy = create_random_sample(\n 12, 1,\n app_pkg='com.sample',\n use_case='login'\n )\n existing_sample_one = create_random_sample(\n 10, 1,\n app_pkg='com.persisted',\n use_case='login'\n )\n existing_sample_two = create_random_sample(\n 11, 1,\n app_pkg='com.persisted',\n use_case='logout'\n )\n\n for measurement in existing_sample_one+existing_sample_two:\n measurement.persist()\n\n asserts.consumption_lower_than_app(\n sample_low_energy, \"com.persisted\"\n )\n asserts.consumption_lower_than_app(\n sample_low_energy, \"com.persisted\", \"login\"\n )\n with self.assertRaises(Exception):\n asserts.consumption_lower_than_app(\n sample_high_energy, \"com.persisted\"\n )\n with self.assertRaises(Exception):\n asserts.consumption_lower_than_app(\n sample_high_energy, \"com.persisted\", \"login\"\n )\n\n def test_top_percentile(self):\n sample = create_random_sample(\n 11, 1,\n app_pkg='com.sample',\n use_case='login'\n )\n for i in range(100):\n existing_sample = create_random_sample(\n i, 1,\n app_pkg=('com.persisted.{}'.format(i)),\n use_case='login'\n )\n for measurement in existing_sample:\n measurement.persist()\n asserts.top_percentile(sample, 12)\n with self.assertRaises(Exception):\n asserts.top_percentile(sample, 11)\n",
"step-ids": [
4,
5,
6,
7,
8
]
}
|
[
4,
5,
6,
7,
8
] |
#connect4_JayNa.py
#Jay Na
#CS111 Spring 2018
#This file creates a version of the game Connect4, where the user plays against an AI
from graphics import *
import random
class ConnectWindow:
def __init__(self):
self.window = GraphWin("Connect Four", 690, 590)
self.window.setMouseHandler(self.handleClick)
self.startScreen()
self.currentUser = 1
self.limitCounter = 0
def startScreen(self):
        '''This function creates the board and initializes the board count for each column'''
#draws blue rectangle as the background
self.background = Rectangle(Point(0,0), Point(690,590))
self.background.setFill('blue')
self.background.draw(self.window)
#draws white circles to represent the spots for the game
for i in range(7):
for j in range(6):
self.Circles = Circle(Point(i*100+50,j*100+50),(30))
self.Circles.setFill('white')
self.Circles.draw(self.window)
#draws lines to separate circles in rectangle
for i in range(6):
self.horizLine = Line(Point(0,i*100+100), Point(900,i*100+100))
self.vertLine = Line(Point(100*i+100,0), Point(100*i+100,900))
self.horizLine.draw(self.window)
self.vertLine.draw(self.window)
        #initializes counts for each column and creates the grid
self.grid = [[],[],[],[],[],[],[]]
self.boardCount = [0,0,0,0,0,0,0]
counter = 2
#help from CS Major, Joh Farmer
for x in range(7):
for y in range(6):
self.grid[x].append(counter)
counter += 1
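        # Note: every empty cell receives a distinct placeholder value (2, 3, 4, ...),
        # so four empty cells can never compare equal in isWon(); a cell only becomes
        # "owned" when it is overwritten with 1 (user) or -1 (computer).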
def validClick(self, x):
        '''This function checks if there is room left in the chosen column for the move to be valid'''
if self.boardCount[x] >= 6:
print("Invalid Move")
return False
else:
return True
def drawUmove(self):
        '''This function draws the user's piece onto the board at the given position'''
piece = Circle(Point(self.x*100+50, 600-(self.y*100+50)),30)
piece.setFill('red')
piece.draw(self.window)
return
def handleClick(self, point):
        '''This function records the user's move in the board count and grid, triggers the computer's reply if the game is not over, and draws the user's piece'''
self.newX = point.getX()
self.x = self.newX//100
self.y = self.boardCount[self.x]
if self.validClick(self.x):
self.boardCount[self.x] += 1
self.limitCounter += 1
self.grid[self.x][self.y] = self.currentUser
if self.isWon() == False:
self.limitCounter += 1
self.computerMove()
self.drawUmove()
def isWon(self):
'''This function checks if there is a winner in the game (True/False) and calls printWinner function'''
#checks to see if there is a winner vertically
for i in range(7):
for j in range(3):
self.square1 = self.grid[i][j]
self.square2 = self.grid[i][j+1]
self.square3 = self.grid[i][j+2]
self.square4 = self.grid[i][j+3]
if self.square1 == self.square2 and self.square2 == self.square3 and self.square3 == self.square4:
self.printWinner(self.square1)
return True
#checks to see if there is a winner diagonally from lower left to upper right
for i in range(4):
for j in range(3):
self.square1 = self.grid[i][j]
self.square2 = self.grid[i+1][j+1]
self.square3 = self.grid[i+2][j+2]
self.square4 = self.grid[i+3][j+3]
if self.square1 == self.square2 and self.square2 == self.square3 and self.square3 == self.square4:
self.printWinner(self.square1)
return True
#checks to see if there is a winner diagonally from upper left to lower right
for i in range(3,7):
for j in range(3):
self.square1 = self.grid[i][j]
self.square2 = self.grid[i-1][j+1]
self.square3 = self.grid[i-2][j+2]
self.square4 = self.grid[i-3][j+3]
if self.square1 == self.square2 and self.square2 == self.square3 and self.square3 == self.square4:
self.printWinner(self.square1)
return True
#checks to see if there is a winner horizontally
for i in range(4):
for j in range(6):
self.square1 = self.grid[i][j]
self.square2 = self.grid[i+1][j]
self.square3 = self.grid[i+2][j]
self.square4 = self.grid[i+3][j]
if self.square1 == self.square2 and self.square2 == self.square3 and self.square3 == self.square4:
self.printWinner(self.square1)
return True
#checks if board is full without a winner (tie)
if self.limitCounter == 42:
self.printWinner(3)
return True
return False
def printWinner(self, winner):
'''This function prints who the winner is or if it is a tie'''
#if input is 3 from isWon() fxn, game is tied and so "Tie Game!" is printed
if winner == 3:
txt = Text(Point(345, 300), "Tie Game!")
txt.setFill('white')
txt.setSize(35)
txt.draw(self.window)
return
else:
#prints "You Won!" if user wins
if winner == 1:
txt = Text(Point(345, 300), "You Won!")
txt.setFill('white')
txt.setSize(35)
txt.draw(self.window)
return
else:
#prints "Computer Won!" if computer wins
txt = Text(Point(345, 300), "Computer Won!")
txt.setFill('white')
txt.setSize(35)
txt.draw(self.window)
return
def validCmove(self, x, y):
'''This function checks if the computer's move will be valid'''
        # invalid if the target row is below the column's current height (that cell is already occupied)
if self.boardCount[x] > y:
return False
        # invalid if the target row is above the next open slot (the piece would float)
if self.boardCount[x] < y:
return False
        # invalid if the column already holds 6 pieces
if self.boardCount[x] >= 6:
return False
else:
return True
def drawCmove(self, x ,y):
'''This function adds the computer's move to the game board and adds it to the board count'''
piece = Circle(Point((x)*100+50, 600 - ((y)*100+50)),30)
piece.setFill('yellow')
piece.draw(self.window)
self.boardCount[x] += 1
self.grid[x][y] = -1
return
def computerMove(self):
'''This function computes where the computer will put its next move and calls the drawCmove() fxn to do so.
        The computer first completes or blocks any three-in-a-row of either color, then extends or blocks
        two-in-a-row runs; if no such run is found, the move is placed randomly.'''
        #checks if there are three pieces lined up vertically in a row and places its move to win or prevent the win
for i in range(7):
for j in range(3):
self.square1 = self.grid[i][j]
self.square2 = self.grid[i][j+1]
self.square3 = self.grid[i][j+2]
if self.square1 == self.square2 and self.square2 == self.square3:
if self.validCmove(i,j+3):
self.drawCmove(i,j+3)
return
else:
self.randomMove()
return
#checks if there are three pieces lined up diagonally from lower left to upper right and places its move to win or prevent the win
#help from CS major, Joh Farmer
for i in range(4):
for j in range(3):
self.square1 = self.grid[i][j]
self.square2 = self.grid[i+1][j+1]
self.square3 = self.grid[i+2][j+2]
if self.square1 == self.square2 and self.square2 == self.square3:
if self.validCmove(i+3,j+3):
self.drawCmove(i+3,j+3)
return
if self.validCmove(i-1,j-1):
self.drawCmove(i-1,j-1)
else:
self.randomMove()
return
#checks if there are three pieces lined up diagonally from lower right to upper left and places its move to win or prevent the win
for i in range(3,7):
for j in range(3):
self.square1 = self.grid[i][j]
self.square2 = self.grid[i-1][j+1]
self.square3 = self.grid[i-2][j+2]
if self.square1 == self.square2 and self.square2 == self.square3:
if self.validCmove(i-3,j+3):
self.drawCmove(i-3,j+3)
return
if self.validCmove(i+1,j-1):
self.drawCmove(i+1,j-1)
else:
self.randomMove()
return
        #checks if there are three pieces lined up horizontally in a row and places its move to win or prevent the win (either side)
for i in range(4):
for j in range(6):
self.square1 = self.grid[i][j]
self.square2 = self.grid[i+1][j]
self.square3 = self.grid[i+2][j]
if self.square1 == self.square2 and self.square2 == self.square3:
if self.validCmove(i+3,j):
self.drawCmove(i+3,j)
return
if self.validCmove(i-1,j):
self.drawCmove(i-1,j)
return
else:
self.randomMove()
return
#checks if there are two in a row diagonally from lower left to upper right and places its move accordingly
for i in range(4):
for j in range(3):
self.square1 = self.grid[i][j]
self.square2 = self.grid[i+1][j+1]
if self.square1 == self.square2:
if self.validCmove(i+2,j+2):
self.drawCmove(i+2,j+2)
return
if self.validCmove(i-1,j-1):
self.drawCmove(i-1,j-1)
else:
self.randomMove()
return
#checks if there are two in a row vertically and places its move accordingly
for i in range(7):
for j in range(3):
self.square1 = self.grid[i][j]
self.square2 = self.grid[i][j+1]
if self.square1 == self.square2:
if self.validCmove(i,j+2):
self.drawCmove(i,j+2)
return
if self.validCmove(i,j-1):
self.drawCmove(i,j-1)
return
else:
self.randomMove()
return
#checks if there are two in a row diagonally from lower right to upper left and places its move accordingly
for i in range(3,7):
for j in range(3):
self.square1 = self.grid[i][j]
self.square2 = self.grid[i-1][j+1]
if self.square1 == self.square2:
if self.validCmove(i-2,j+2):
self.drawCmove(i-2,j+2)
return
if self.validCmove(i+1,j-1):
self.drawCmove(i+1,j-1)
else:
self.randomMove()
return
#checks if there are two in a row horizontally and places its move accordingly
for i in range(4):
for j in range(6):
self.square1 = self.grid[i][j]
self.square2 = self.grid[i+1][j]
if self.square1 == self.square2:
if self.validCmove(i+2,j):
self.drawCmove(i+2,j)
return
if self.validCmove(i-1,j):
self.drawCmove(i-1,j)
return
else:
self.randomMove()
return
#places move randomly if no pieces are being placed in a row
else:
self.randomMove()
def randomMove(self):
        '''This function picks a random column and row for the computer's move, checks that the pick is valid,
        then draws the move. It keeps retrying until it finds a move that is valid for the current board'''
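        # randY is used as the column index (0-6) and randX as the candidate row;
        # validCmove() only accepts the pick when the row equals the column's current
        # height, so invalid picks simply recurse until a legal spot is found.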
randY = random.randint(0,6)
randX = random.randint(0,7)
if self.validCmove(randY,randX):
self.drawCmove(randY,randX)
return
else:
self.randomMove()
def main():
gameOver = False
connect4 = ConnectWindow()
while gameOver == False:
connect4.window.getMouse()
gameOver = connect4.isWon()
input("Hit enter to quit")
main()
|
normal
|
{
"blob_id": "abbad57e945d2195021948a0e0838c6bfd9c6a1e",
"index": 769,
"step-1": "<mask token>\n\n\nclass ConnectWindow:\n <mask token>\n\n def startScreen(self):\n \"\"\"This function creates the board and intializes the board count for each column\"\"\"\n self.background = Rectangle(Point(0, 0), Point(690, 590))\n self.background.setFill('blue')\n self.background.draw(self.window)\n for i in range(7):\n for j in range(6):\n self.Circles = Circle(Point(i * 100 + 50, j * 100 + 50), 30)\n self.Circles.setFill('white')\n self.Circles.draw(self.window)\n for i in range(6):\n self.horizLine = Line(Point(0, i * 100 + 100), Point(900, i * \n 100 + 100))\n self.vertLine = Line(Point(100 * i + 100, 0), Point(100 * i + \n 100, 900))\n self.horizLine.draw(self.window)\n self.vertLine.draw(self.window)\n self.grid = [[], [], [], [], [], [], []]\n self.boardCount = [0, 0, 0, 0, 0, 0, 0]\n counter = 2\n for x in range(7):\n for y in range(6):\n self.grid[x].append(counter)\n counter += 1\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass ConnectWindow:\n <mask token>\n\n def startScreen(self):\n \"\"\"This function creates the board and intializes the board count for each column\"\"\"\n self.background = Rectangle(Point(0, 0), Point(690, 590))\n self.background.setFill('blue')\n self.background.draw(self.window)\n for i in range(7):\n for j in range(6):\n self.Circles = Circle(Point(i * 100 + 50, j * 100 + 50), 30)\n self.Circles.setFill('white')\n self.Circles.draw(self.window)\n for i in range(6):\n self.horizLine = Line(Point(0, i * 100 + 100), Point(900, i * \n 100 + 100))\n self.vertLine = Line(Point(100 * i + 100, 0), Point(100 * i + \n 100, 900))\n self.horizLine.draw(self.window)\n self.vertLine.draw(self.window)\n self.grid = [[], [], [], [], [], [], []]\n self.boardCount = [0, 0, 0, 0, 0, 0, 0]\n counter = 2\n for x in range(7):\n for y in range(6):\n self.grid[x].append(counter)\n counter += 1\n\n def validClick(self, x):\n \"\"\"This function checks if there is enough space vertically for move to be valid\"\"\"\n if self.boardCount[x] >= 6:\n print('Invalid Move')\n return False\n else:\n return True\n <mask token>\n\n def handleClick(self, point):\n \"\"\"This function works with the user to add each move into the board count and to the current grid\"\"\"\n self.newX = point.getX()\n self.x = self.newX // 100\n self.y = self.boardCount[self.x]\n if self.validClick(self.x):\n self.boardCount[self.x] += 1\n self.limitCounter += 1\n self.grid[self.x][self.y] = self.currentUser\n if self.isWon() == False:\n self.limitCounter += 1\n self.computerMove()\n self.drawUmove()\n <mask token>\n\n def printWinner(self, winner):\n \"\"\"This function prints who the winner is or if it is a tie\"\"\"\n if winner == 3:\n txt = Text(Point(345, 300), 'Tie Game!')\n txt.setFill('white')\n txt.setSize(35)\n txt.draw(self.window)\n return\n elif winner == 1:\n txt = Text(Point(345, 300), 'You Won!')\n txt.setFill('white')\n txt.setSize(35)\n txt.draw(self.window)\n return\n else:\n txt = Text(Point(345, 300), 'Computer Won!')\n txt.setFill('white')\n txt.setSize(35)\n txt.draw(self.window)\n return\n\n def validCmove(self, x, y):\n \"\"\"This function checks if the computer's move will be valid\"\"\"\n if self.boardCount[x] > y:\n return False\n \"\"\" if it tries to place below the highest piece\"\"\"\n if self.boardCount[x] < y:\n return False\n \"\"\"if it tries to place it in a column with 6 pieces already\"\"\"\n if self.boardCount[x] >= 6:\n return False\n else:\n return True\n\n def drawCmove(self, x, y):\n \"\"\"This function adds the computer's move to the game board and adds it to the board count\"\"\"\n piece = Circle(Point(x * 100 + 50, 600 - (y * 100 + 50)), 30)\n piece.setFill('yellow')\n piece.draw(self.window)\n self.boardCount[x] += 1\n self.grid[x][y] = -1\n return\n <mask token>\n\n def randomMove(self):\n \"\"\"This function creates a random coordinate for its move, checks if it's valid, then prints the move.\n\t\tIt will continue to run until numbers are valid for current board\"\"\"\n randY = random.randint(0, 6)\n randX = random.randint(0, 7)\n if self.validCmove(randY, randX):\n self.drawCmove(randY, randX)\n return\n else:\n self.randomMove()\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass ConnectWindow:\n <mask token>\n\n def startScreen(self):\n \"\"\"This function creates the board and intializes the board count for each column\"\"\"\n self.background = Rectangle(Point(0, 0), Point(690, 590))\n self.background.setFill('blue')\n self.background.draw(self.window)\n for i in range(7):\n for j in range(6):\n self.Circles = Circle(Point(i * 100 + 50, j * 100 + 50), 30)\n self.Circles.setFill('white')\n self.Circles.draw(self.window)\n for i in range(6):\n self.horizLine = Line(Point(0, i * 100 + 100), Point(900, i * \n 100 + 100))\n self.vertLine = Line(Point(100 * i + 100, 0), Point(100 * i + \n 100, 900))\n self.horizLine.draw(self.window)\n self.vertLine.draw(self.window)\n self.grid = [[], [], [], [], [], [], []]\n self.boardCount = [0, 0, 0, 0, 0, 0, 0]\n counter = 2\n for x in range(7):\n for y in range(6):\n self.grid[x].append(counter)\n counter += 1\n\n def validClick(self, x):\n \"\"\"This function checks if there is enough space vertically for move to be valid\"\"\"\n if self.boardCount[x] >= 6:\n print('Invalid Move')\n return False\n else:\n return True\n\n def drawUmove(self):\n \"\"\"This function prints the pieces onto the board at the given position from the user\"\"\"\n piece = Circle(Point(self.x * 100 + 50, 600 - (self.y * 100 + 50)), 30)\n piece.setFill('red')\n piece.draw(self.window)\n return\n\n def handleClick(self, point):\n \"\"\"This function works with the user to add each move into the board count and to the current grid\"\"\"\n self.newX = point.getX()\n self.x = self.newX // 100\n self.y = self.boardCount[self.x]\n if self.validClick(self.x):\n self.boardCount[self.x] += 1\n self.limitCounter += 1\n self.grid[self.x][self.y] = self.currentUser\n if self.isWon() == False:\n self.limitCounter += 1\n self.computerMove()\n self.drawUmove()\n\n def isWon(self):\n \"\"\"This function checks if there is a winner in the game (True/False) and calls printWinner function\"\"\"\n for i in range(7):\n for j in range(3):\n self.square1 = self.grid[i][j]\n self.square2 = self.grid[i][j + 1]\n self.square3 = self.grid[i][j + 2]\n self.square4 = self.grid[i][j + 3]\n if (self.square1 == self.square2 and self.square2 == self.\n square3 and self.square3 == self.square4):\n self.printWinner(self.square1)\n return True\n for i in range(4):\n for j in range(3):\n self.square1 = self.grid[i][j]\n self.square2 = self.grid[i + 1][j + 1]\n self.square3 = self.grid[i + 2][j + 2]\n self.square4 = self.grid[i + 3][j + 3]\n if (self.square1 == self.square2 and self.square2 == self.\n square3 and self.square3 == self.square4):\n self.printWinner(self.square1)\n return True\n for i in range(3, 7):\n for j in range(3):\n self.square1 = self.grid[i][j]\n self.square2 = self.grid[i - 1][j + 1]\n self.square3 = self.grid[i - 2][j + 2]\n self.square4 = self.grid[i - 3][j + 3]\n if (self.square1 == self.square2 and self.square2 == self.\n square3 and self.square3 == self.square4):\n self.printWinner(self.square1)\n return True\n for i in range(4):\n for j in range(6):\n self.square1 = self.grid[i][j]\n self.square2 = self.grid[i + 1][j]\n self.square3 = self.grid[i + 2][j]\n self.square4 = self.grid[i + 3][j]\n if (self.square1 == self.square2 and self.square2 == self.\n square3 and self.square3 == self.square4):\n self.printWinner(self.square1)\n return True\n if self.limitCounter == 42:\n self.printWinner(3)\n return True\n return False\n\n def printWinner(self, winner):\n \"\"\"This function prints who the winner is or if it is a tie\"\"\"\n if 
winner == 3:\n txt = Text(Point(345, 300), 'Tie Game!')\n txt.setFill('white')\n txt.setSize(35)\n txt.draw(self.window)\n return\n elif winner == 1:\n txt = Text(Point(345, 300), 'You Won!')\n txt.setFill('white')\n txt.setSize(35)\n txt.draw(self.window)\n return\n else:\n txt = Text(Point(345, 300), 'Computer Won!')\n txt.setFill('white')\n txt.setSize(35)\n txt.draw(self.window)\n return\n\n def validCmove(self, x, y):\n \"\"\"This function checks if the computer's move will be valid\"\"\"\n if self.boardCount[x] > y:\n return False\n \"\"\" if it tries to place below the highest piece\"\"\"\n if self.boardCount[x] < y:\n return False\n \"\"\"if it tries to place it in a column with 6 pieces already\"\"\"\n if self.boardCount[x] >= 6:\n return False\n else:\n return True\n\n def drawCmove(self, x, y):\n \"\"\"This function adds the computer's move to the game board and adds it to the board count\"\"\"\n piece = Circle(Point(x * 100 + 50, 600 - (y * 100 + 50)), 30)\n piece.setFill('yellow')\n piece.draw(self.window)\n self.boardCount[x] += 1\n self.grid[x][y] = -1\n return\n <mask token>\n\n def randomMove(self):\n \"\"\"This function creates a random coordinate for its move, checks if it's valid, then prints the move.\n\t\tIt will continue to run until numbers are valid for current board\"\"\"\n randY = random.randint(0, 6)\n randX = random.randint(0, 7)\n if self.validCmove(randY, randX):\n self.drawCmove(randY, randX)\n return\n else:\n self.randomMove()\n\n\n<mask token>\n",
"step-4": "<mask token>\n\n\nclass ConnectWindow:\n <mask token>\n\n def startScreen(self):\n \"\"\"This function creates the board and intializes the board count for each column\"\"\"\n self.background = Rectangle(Point(0, 0), Point(690, 590))\n self.background.setFill('blue')\n self.background.draw(self.window)\n for i in range(7):\n for j in range(6):\n self.Circles = Circle(Point(i * 100 + 50, j * 100 + 50), 30)\n self.Circles.setFill('white')\n self.Circles.draw(self.window)\n for i in range(6):\n self.horizLine = Line(Point(0, i * 100 + 100), Point(900, i * \n 100 + 100))\n self.vertLine = Line(Point(100 * i + 100, 0), Point(100 * i + \n 100, 900))\n self.horizLine.draw(self.window)\n self.vertLine.draw(self.window)\n self.grid = [[], [], [], [], [], [], []]\n self.boardCount = [0, 0, 0, 0, 0, 0, 0]\n counter = 2\n for x in range(7):\n for y in range(6):\n self.grid[x].append(counter)\n counter += 1\n\n def validClick(self, x):\n \"\"\"This function checks if there is enough space vertically for move to be valid\"\"\"\n if self.boardCount[x] >= 6:\n print('Invalid Move')\n return False\n else:\n return True\n\n def drawUmove(self):\n \"\"\"This function prints the pieces onto the board at the given position from the user\"\"\"\n piece = Circle(Point(self.x * 100 + 50, 600 - (self.y * 100 + 50)), 30)\n piece.setFill('red')\n piece.draw(self.window)\n return\n\n def handleClick(self, point):\n \"\"\"This function works with the user to add each move into the board count and to the current grid\"\"\"\n self.newX = point.getX()\n self.x = self.newX // 100\n self.y = self.boardCount[self.x]\n if self.validClick(self.x):\n self.boardCount[self.x] += 1\n self.limitCounter += 1\n self.grid[self.x][self.y] = self.currentUser\n if self.isWon() == False:\n self.limitCounter += 1\n self.computerMove()\n self.drawUmove()\n\n def isWon(self):\n \"\"\"This function checks if there is a winner in the game (True/False) and calls printWinner function\"\"\"\n for i in range(7):\n for j in range(3):\n self.square1 = self.grid[i][j]\n self.square2 = self.grid[i][j + 1]\n self.square3 = self.grid[i][j + 2]\n self.square4 = self.grid[i][j + 3]\n if (self.square1 == self.square2 and self.square2 == self.\n square3 and self.square3 == self.square4):\n self.printWinner(self.square1)\n return True\n for i in range(4):\n for j in range(3):\n self.square1 = self.grid[i][j]\n self.square2 = self.grid[i + 1][j + 1]\n self.square3 = self.grid[i + 2][j + 2]\n self.square4 = self.grid[i + 3][j + 3]\n if (self.square1 == self.square2 and self.square2 == self.\n square3 and self.square3 == self.square4):\n self.printWinner(self.square1)\n return True\n for i in range(3, 7):\n for j in range(3):\n self.square1 = self.grid[i][j]\n self.square2 = self.grid[i - 1][j + 1]\n self.square3 = self.grid[i - 2][j + 2]\n self.square4 = self.grid[i - 3][j + 3]\n if (self.square1 == self.square2 and self.square2 == self.\n square3 and self.square3 == self.square4):\n self.printWinner(self.square1)\n return True\n for i in range(4):\n for j in range(6):\n self.square1 = self.grid[i][j]\n self.square2 = self.grid[i + 1][j]\n self.square3 = self.grid[i + 2][j]\n self.square4 = self.grid[i + 3][j]\n if (self.square1 == self.square2 and self.square2 == self.\n square3 and self.square3 == self.square4):\n self.printWinner(self.square1)\n return True\n if self.limitCounter == 42:\n self.printWinner(3)\n return True\n return False\n\n def printWinner(self, winner):\n \"\"\"This function prints who the winner is or if it is a tie\"\"\"\n if 
winner == 3:\n txt = Text(Point(345, 300), 'Tie Game!')\n txt.setFill('white')\n txt.setSize(35)\n txt.draw(self.window)\n return\n elif winner == 1:\n txt = Text(Point(345, 300), 'You Won!')\n txt.setFill('white')\n txt.setSize(35)\n txt.draw(self.window)\n return\n else:\n txt = Text(Point(345, 300), 'Computer Won!')\n txt.setFill('white')\n txt.setSize(35)\n txt.draw(self.window)\n return\n\n def validCmove(self, x, y):\n \"\"\"This function checks if the computer's move will be valid\"\"\"\n if self.boardCount[x] > y:\n return False\n \"\"\" if it tries to place below the highest piece\"\"\"\n if self.boardCount[x] < y:\n return False\n \"\"\"if it tries to place it in a column with 6 pieces already\"\"\"\n if self.boardCount[x] >= 6:\n return False\n else:\n return True\n\n def drawCmove(self, x, y):\n \"\"\"This function adds the computer's move to the game board and adds it to the board count\"\"\"\n piece = Circle(Point(x * 100 + 50, 600 - (y * 100 + 50)), 30)\n piece.setFill('yellow')\n piece.draw(self.window)\n self.boardCount[x] += 1\n self.grid[x][y] = -1\n return\n\n def computerMove(self):\n \"\"\"This function computes where the computer will put its next move and calls the drawCmove() fxn to do so.\n\t\tThe computer will add its piece to wherever there are three in a row in either color then looks to see when \n\t\tthere are two in a row. Move will be placed randomly if no pieces are placed in a row\"\"\"\n for i in range(7):\n for j in range(3):\n self.square1 = self.grid[i][j]\n self.square2 = self.grid[i][j + 1]\n self.square3 = self.grid[i][j + 2]\n if (self.square1 == self.square2 and self.square2 == self.\n square3):\n if self.validCmove(i, j + 3):\n self.drawCmove(i, j + 3)\n return\n else:\n self.randomMove()\n return\n for i in range(4):\n for j in range(3):\n self.square1 = self.grid[i][j]\n self.square2 = self.grid[i + 1][j + 1]\n self.square3 = self.grid[i + 2][j + 2]\n if (self.square1 == self.square2 and self.square2 == self.\n square3):\n if self.validCmove(i + 3, j + 3):\n self.drawCmove(i + 3, j + 3)\n return\n if self.validCmove(i - 1, j - 1):\n self.drawCmove(i - 1, j - 1)\n else:\n self.randomMove()\n return\n for i in range(3, 7):\n for j in range(3):\n self.square1 = self.grid[i][j]\n self.square2 = self.grid[i - 1][j + 1]\n self.square3 = self.grid[i - 2][j + 2]\n if (self.square1 == self.square2 and self.square2 == self.\n square3):\n if self.validCmove(i - 3, j + 3):\n self.drawCmove(i - 3, j + 3)\n return\n if self.validCmove(i + 1, j - 1):\n self.drawCmove(i + 1, j - 1)\n else:\n self.randomMove()\n return\n for i in range(4):\n for j in range(6):\n self.square1 = self.grid[i][j]\n self.square2 = self.grid[i + 1][j]\n self.square3 = self.grid[i + 2][j]\n if (self.square1 == self.square2 and self.square2 == self.\n square3):\n if self.validCmove(i + 3, j):\n self.drawCmove(i + 3, j)\n return\n if self.validCmove(i - 1, j):\n self.drawCmove(i - 1, j)\n return\n else:\n self.randomMove()\n return\n for i in range(4):\n for j in range(3):\n self.square1 = self.grid[i][j]\n self.square2 = self.grid[i + 1][j + 1]\n if self.square1 == self.square2:\n if self.validCmove(i + 2, j + 2):\n self.drawCmove(i + 2, j + 2)\n return\n if self.validCmove(i - 1, j - 1):\n self.drawCmove(i - 1, j - 1)\n else:\n self.randomMove()\n return\n for i in range(7):\n for j in range(3):\n self.square1 = self.grid[i][j]\n self.square2 = self.grid[i][j + 1]\n if self.square1 == self.square2:\n if self.validCmove(i, j + 2):\n self.drawCmove(i, j + 2)\n return\n if 
self.validCmove(i, j - 1):\n self.drawCmove(i, j - 1)\n return\n else:\n self.randomMove()\n return\n for i in range(3, 7):\n for j in range(3):\n self.square1 = self.grid[i][j]\n self.square2 = self.grid[i - 1][j + 1]\n if self.square1 == self.square2:\n if self.validCmove(i - 2, j + 2):\n self.drawCmove(i - 2, j + 2)\n return\n if self.validCmove(i + 1, j - 1):\n self.drawCmove(i + 1, j - 1)\n else:\n self.randomMove()\n return\n for i in range(4):\n for j in range(6):\n self.square1 = self.grid[i][j]\n self.square2 = self.grid[i + 1][j]\n if self.square1 == self.square2:\n if self.validCmove(i + 2, j):\n self.drawCmove(i + 2, j)\n return\n if self.validCmove(i - 1, j):\n self.drawCmove(i - 1, j)\n return\n else:\n self.randomMove()\n return\n else:\n self.randomMove()\n\n def randomMove(self):\n \"\"\"This function creates a random coordinate for its move, checks if it's valid, then prints the move.\n\t\tIt will continue to run until numbers are valid for current board\"\"\"\n randY = random.randint(0, 6)\n randX = random.randint(0, 7)\n if self.validCmove(randY, randX):\n self.drawCmove(randY, randX)\n return\n else:\n self.randomMove()\n\n\n<mask token>\n",
"step-5": "#connect4_JayNa.py\n#Jay Na\n#CS111 Spring 2018\n#This file creates a version of the game Connect4, where the user plays against an AI\n\nfrom graphics import *\nimport random\n\nclass ConnectWindow:\n\n\tdef __init__(self):\n\t\tself.window = GraphWin(\"Connect Four\", 690, 590)\n\t\tself.window.setMouseHandler(self.handleClick)\n\t\tself.startScreen()\n\t\tself.currentUser = 1\n\t\tself.limitCounter = 0\n\t\t\n\n\tdef startScreen(self):\n\t\t'''This function creates the board and intializes the board count for each column'''\n\n\t#draws blue rectangle as the background\n\t\tself.background = Rectangle(Point(0,0), Point(690,590))\n\t\tself.background.setFill('blue')\n\t\tself.background.draw(self.window)\n\t\t\n\t#draws white circles to represent the spots for the game\t\n\t\tfor i in range(7):\n\t\t\tfor j in range(6):\n\t\t\t\tself.Circles = Circle(Point(i*100+50,j*100+50),(30))\n\t\t\t\tself.Circles.setFill('white')\n\t\t\t\tself.Circles.draw(self.window)\n\t\t\t\t\n\t#draws lines to separate circles in rectangle\n\t\tfor i in range(6):\n\t\t\tself.horizLine = Line(Point(0,i*100+100), Point(900,i*100+100))\n\t\t\tself.vertLine = Line(Point(100*i+100,0), Point(100*i+100,900))\n\t\t\tself.horizLine.draw(self.window)\n\t\t\tself.vertLine.draw(self.window)\n\t\t\t\n\t#initiates counts for each column and creates grid\n\t\tself.grid = [[],[],[],[],[],[],[]]\n\t\tself.boardCount = [0,0,0,0,0,0,0]\n\t\tcounter = 2\n\t\t\n\t\t#help from CS Major, Joh Farmer\n\t\tfor x in range(7):\n\t\t\tfor y in range(6):\n\t\t\t\tself.grid[x].append(counter)\n\t\t\t\tcounter += 1\n\t\t\t\t\n\n\tdef validClick(self, x):\n\t\t'''This function checks if there is enough space vertically for move to be valid'''\n\t\t\n\t\tif self.boardCount[x] >= 6:\n\t\t\tprint(\"Invalid Move\")\n\t\t\treturn False\n\t\telse:\n\t\t\treturn True\n\n\n\tdef drawUmove(self):\n\t\t'''This function prints the pieces onto the board at the given position from the user'''\n\t\t\n\t\tpiece = Circle(Point(self.x*100+50, 600-(self.y*100+50)),30)\n\t\tpiece.setFill('red')\n\t\tpiece.draw(self.window)\n\t\treturn\n\n\tdef handleClick(self, point):\n\t\t'''This function works with the user to add each move into the board count and to the current grid'''\n\n\t\tself.newX = point.getX()\n\t\tself.x = self.newX//100\n\t\tself.y = self.boardCount[self.x]\n\t\t\n\t\tif self.validClick(self.x):\n\t\t\tself.boardCount[self.x] += 1\n\t\t\tself.limitCounter += 1\n\t\t\tself.grid[self.x][self.y] = self.currentUser\n\t\t\t\n\t\tif self.isWon() == False:\n\t\t\tself.limitCounter += 1\n\t\t\tself.computerMove()\n\t\t\tself.drawUmove()\n\n\n\tdef isWon(self):\n\t\t'''This function checks if there is a winner in the game (True/False) and calls printWinner function'''\n\n\t#checks to see if there is a winner vertically\n\t\tfor i in range(7):\n\t\t\tfor j in range(3):\n\t\t\t\tself.square1 = self.grid[i][j]\n\t\t\t\tself.square2 = self.grid[i][j+1]\n\t\t\t\tself.square3 = self.grid[i][j+2]\n\t\t\t\tself.square4 = self.grid[i][j+3]\n\t\t\t\tif self.square1 == self.square2 and self.square2 == self.square3 and self.square3 == self.square4:\n\t\t\t\t\tself.printWinner(self.square1)\n\t\t\t\t\treturn True\n\n\t#checks to see if there is a winner diagonally from lower left to upper right\n\t\tfor i in range(4):\n\t\t\tfor j in range(3):\n\t\t\t\tself.square1 = self.grid[i][j]\n\t\t\t\tself.square2 = self.grid[i+1][j+1]\n\t\t\t\tself.square3 = self.grid[i+2][j+2]\n\t\t\t\tself.square4 = self.grid[i+3][j+3]\n\t\t\t\tif self.square1 == self.square2 and 
self.square2 == self.square3 and self.square3 == self.square4:\n\t\t\t\t\tself.printWinner(self.square1)\n\t\t\t\t\treturn True\n\t\t\t\t\t\n\t#checks to see if there is a winner diagonally from upper left to lower right\n\t\tfor i in range(3,7):\n\t\t\tfor j in range(3):\n\t\t\t\tself.square1 = self.grid[i][j]\n\t\t\t\tself.square2 = self.grid[i-1][j+1]\n\t\t\t\tself.square3 = self.grid[i-2][j+2]\n\t\t\t\tself.square4 = self.grid[i-3][j+3]\n\t\t\t\tif self.square1 == self.square2 and self.square2 == self.square3 and self.square3 == self.square4:\n\t\t\t\t\tself.printWinner(self.square1)\n\t\t\t\t\treturn True\t\t\t\n\t\t\t\t\t\n\t#checks to see if there is a winner horizontally\n\t\tfor i in range(4):\n\t\t\tfor j in range(6):\n\t\t\t\tself.square1 = self.grid[i][j]\n\t\t\t\tself.square2 = self.grid[i+1][j]\n\t\t\t\tself.square3 = self.grid[i+2][j]\n\t\t\t\tself.square4 = self.grid[i+3][j]\n\t\t\t\tif self.square1 == self.square2 and self.square2 == self.square3 and self.square3 == self.square4: \n\t\t\t\t\tself.printWinner(self.square1)\n\t\t\t\t\treturn True\t\t\t\t\n\t\t\t\t\t\n\t#checks if board is full without a winner (tie)\n\t\tif self.limitCounter == 42:\n\t\t\tself.printWinner(3)\n\t\t\treturn True\n\t\treturn False\n\n\n\tdef printWinner(self, winner):\n\t\t'''This function prints who the winner is or if it is a tie'''\n\t\t\n\t#if input is 3 from isWon() fxn, game is tied and so \"Tie Game!\" is printed \n\t\tif winner == 3:\n\t\t\ttxt = Text(Point(345, 300), \"Tie Game!\")\n\t\t\ttxt.setFill('white')\n\t\t\ttxt.setSize(35)\n\t\t\ttxt.draw(self.window)\n\t\t\treturn\t\t\n\t\telse:\n\t#prints \"You Won!\" if user wins\n\t\t\tif winner == 1:\n\t\t\t\ttxt = Text(Point(345, 300), \"You Won!\")\n\t\t\t\ttxt.setFill('white')\n\t\t\t\ttxt.setSize(35)\n\t\t\t\ttxt.draw(self.window)\n\t\t\t\treturn\n\t\t\telse:\n\t#prints \"Computer Won!\" if computer wins\n\t\t\t\ttxt = Text(Point(345, 300), \"Computer Won!\")\n\t\t\t\ttxt.setFill('white')\n\t\t\t\ttxt.setSize(35)\n\t\t\t\ttxt.draw(self.window)\n\t\t\t\treturn\n\n\n\tdef validCmove(self, x, y):\n\t\t'''This function checks if the computer's move will be valid'''\n\t\n\t#checks if \t'''if it tries to place it higher than the highest piece'''\n\t\tif self.boardCount[x] > y:\n\t\t\treturn False\n\t\t''' if it tries to place below the highest piece'''\n\t\tif self.boardCount[x] < y:\n\t\t\treturn False\n\t\t'''if it tries to place it in a column with 6 pieces already'''\n\t\tif self.boardCount[x] >= 6:\n\t\t\treturn False\n\t\telse:\n\t\t\treturn True\n\t\n\n\tdef drawCmove(self, x ,y):\n\t\t'''This function adds the computer's move to the game board and adds it to the board count'''\n\n\t\tpiece = Circle(Point((x)*100+50, 600 - ((y)*100+50)),30)\n\t\tpiece.setFill('yellow')\n\t\tpiece.draw(self.window)\n\t\tself.boardCount[x] += 1\n\t\tself.grid[x][y] = -1\n\t\treturn\n\n\n\tdef computerMove(self):\n\t\t'''This function computes where the computer will put its next move and calls the drawCmove() fxn to do so.\n\t\tThe computer will add its piece to wherever there are three in a row in either color then looks to see when \n\t\tthere are two in a row. 
Move will be placed randomly if no pieces are placed in a row'''\n\n\t#checks if there are three pieces lined up vertically in a row and places its move to win or prevent the win'''\n\t\tfor i in range(7):\n\t\t\tfor j in range(3):\n\t\t\t\tself.square1 = self.grid[i][j]\n\t\t\t\tself.square2 = self.grid[i][j+1]\n\t\t\t\tself.square3 = self.grid[i][j+2]\n\t\t\t\tif self.square1 == self.square2 and self.square2 == self.square3:\n\t\t\t\t\tif self.validCmove(i,j+3):\n\t\t\t\t\t\tself.drawCmove(i,j+3)\n\t\t\t\t\t\treturn\n\t\t\t\t\telse:\n\t\t\t\t\t\tself.randomMove()\n\t\t\t\t\t\treturn\n\t\t\t\t\t\t\t\t\n\t#checks if there are three pieces lined up diagonally from lower left to upper right and places its move to win or prevent the win\n\t#help from CS major, Joh Farmer\n\t\tfor i in range(4):\n\t\t\tfor j in range(3):\n\t\t\t\tself.square1 = self.grid[i][j]\n\t\t\t\tself.square2 = self.grid[i+1][j+1]\n\t\t\t\tself.square3 = self.grid[i+2][j+2]\n\t\t\t\tif self.square1 == self.square2 and self.square2 == self.square3:\n\t\t\t\t\tif self.validCmove(i+3,j+3):\n\t\t\t\t\t\tself.drawCmove(i+3,j+3)\n\t\t\t\t\t\treturn\n\t\t\t\t\tif self.validCmove(i-1,j-1):\n\t\t\t\t\t\tself.drawCmove(i-1,j-1)\n\n\t\t\t\t\telse:\n\t\t\t\t\t\tself.randomMove()\n\t\t\t\t\t\treturn\n\t\t\t\t\t\t\n\t#checks if there are three pieces lined up diagonally from lower right to upper left and places its move to win or prevent the win\t\t\n\t\tfor i in range(3,7):\n\t\t\tfor j in range(3):\n\t\t\t\tself.square1 = self.grid[i][j]\n\t\t\t\tself.square2 = self.grid[i-1][j+1]\n\t\t\t\tself.square3 = self.grid[i-2][j+2]\n\t\t\t\tif self.square1 == self.square2 and self.square2 == self.square3:\n\t\t\t\t\tif self.validCmove(i-3,j+3):\n\t\t\t\t\t\tself.drawCmove(i-3,j+3)\n\t\t\t\t\t\treturn\n\t\t\t\t\tif self.validCmove(i+1,j-1):\n\t\t\t\t\t\tself.drawCmove(i+1,j-1)\n\t\t\t\t\telse:\n\t\t\t\t\t\tself.randomMove()\n\t\t\t\t\t\treturn\n\t\t\t\t\t\t\n\t#checks if there are three pieces lined up horizontally in a row and places its move to win or prevent the win (either side)'''\n\t\tfor i in range(4):\n\t\t\tfor j in range(6):\n\t\t\t\tself.square1 = self.grid[i][j]\n\t\t\t\tself.square2 = self.grid[i+1][j]\n\t\t\t\tself.square3 = self.grid[i+2][j]\n\t\t\t\tif self.square1 == self.square2 and self.square2 == self.square3:\n\t\t\t\t\tif self.validCmove(i+3,j):\n\t\t\t\t\t\tself.drawCmove(i+3,j)\n\t\t\t\t\t\treturn\n\t\t\t\t\tif self.validCmove(i-1,j):\n\t\t\t\t\t\tself.drawCmove(i-1,j)\n\t\t\t\t\t\treturn\n\t\t\t\t\telse:\n\t\t\t\t\t\tself.randomMove()\n\t\t\t\t\t\treturn\n\n\n\n\t#checks if there are two in a row diagonally from lower left to upper right and places its move accordingly\n\t\tfor i in range(4):\n\t\t\tfor j in range(3):\n\t\t\t\tself.square1 = self.grid[i][j]\n\t\t\t\tself.square2 = self.grid[i+1][j+1]\n\t\t\t\tif self.square1 == self.square2:\n\t\t\t\t\tif self.validCmove(i+2,j+2):\n\t\t\t\t\t\tself.drawCmove(i+2,j+2)\n\t\t\t\t\t\treturn\n\t\t\t\t\tif self.validCmove(i-1,j-1):\n\t\t\t\t\t\tself.drawCmove(i-1,j-1)\n\t\t\t\t\telse:\n\t\t\t\t\t\tself.randomMove()\n\t\t\t\t\t\treturn\n\t\t\t\t\t\t\n\t#checks if there are two in a row vertically and places its move accordingly\n\t\tfor i in range(7):\n\t\t\tfor j in range(3):\n\t\t\t\tself.square1 = self.grid[i][j]\n\t\t\t\tself.square2 = self.grid[i][j+1]\n\t\t\t\tif self.square1 == self.square2:\n\t\t\t\t\tif self.validCmove(i,j+2):\n\t\t\t\t\t\tself.drawCmove(i,j+2)\n\t\t\t\t\t\treturn\n\t\t\t\t\tif 
self.validCmove(i,j-1):\n\t\t\t\t\t\tself.drawCmove(i,j-1)\n\t\t\t\t\t\treturn\n\t\t\t\t\telse:\n\t\t\t\t\t\tself.randomMove()\n\t\t\t\t\t\treturn\t\t\t\t\t\n\t\t\t\t\t\t\n\t#checks if there are two in a row diagonally from lower right to upper left and places its move accordingly\t\n\t\tfor i in range(3,7):\n\t\t\tfor j in range(3):\n\t\t\t\tself.square1 = self.grid[i][j]\n\t\t\t\tself.square2 = self.grid[i-1][j+1]\n\t\t\t\tif self.square1 == self.square2:\n\t\t\t\t\tif self.validCmove(i-2,j+2):\n\t\t\t\t\t\tself.drawCmove(i-2,j+2)\n\t\t\t\t\t\treturn\n\t\t\t\t\tif self.validCmove(i+1,j-1):\n\t\t\t\t\t\tself.drawCmove(i+1,j-1)\n\t\t\t\t\telse:\n\t\t\t\t\t\tself.randomMove()\n\t\t\t\t\t\treturn\n\t\t\t\t\t\n\t#checks if there are two in a row horizontally and places its move accordingly\n\t\tfor i in range(4):\n\t\t\tfor j in range(6):\n\t\t\t\tself.square1 = self.grid[i][j]\n\t\t\t\tself.square2 = self.grid[i+1][j]\n\t\t\t\tif self.square1 == self.square2:\n\t\t\t\t\tif self.validCmove(i+2,j):\n\t\t\t\t\t\tself.drawCmove(i+2,j)\n\t\t\t\t\t\treturn\n\t\t\t\t\tif self.validCmove(i-1,j):\n\t\t\t\t\t\tself.drawCmove(i-1,j)\n\t\t\t\t\t\treturn\n\t\t\t\t\telse:\n\t\t\t\t\t\tself.randomMove()\n\t\t\t\t\t\treturn\n\n\t#places move randomly if no pieces are being placed in a row\n\t\telse:\n\t\t\tself.randomMove()\n\n\n\tdef randomMove(self):\n\t\t'''This function creates a random coordinate for its move, checks if it's valid, then prints the move.\n\t\tIt will continue to run until numbers are valid for current board'''\n\t\n\t\trandY = random.randint(0,6)\n\t\trandX = random.randint(0,7)\n\t\t\n\t\tif self.validCmove(randY,randX):\n\t\t\tself.drawCmove(randY,randX)\n\t\t\treturn\n\t\telse:\n\t\t\tself.randomMove()\n\n\ndef main():\n\tgameOver = False\n\tconnect4 = ConnectWindow()\n\twhile gameOver == False:\n\t\tconnect4.window.getMouse()\n\t\tgameOver = connect4.isWon()\n\tinput(\"Hit enter to quit\")\n\n\t\nmain()\n\n\n\n\n\n\n\n",
"step-ids": [
2,
8,
10,
11,
16
]
}
|
[
2,
8,
10,
11,
16
] |
from ipyleaflet import Map, DrawControl, Marker, Rectangle
from sentinelhub import BBox, CRS
from ipywidgets import widgets as w
class BBoxSelector:
def __init__(self, bbox, zoom=8, resolution=10):
center = (bbox.min_y + bbox.max_y) / 2, (bbox.min_x + bbox.max_x) / 2
self.map = Map(center=center, zoom=zoom, scroll_wheel_zoom=True)
self.resolution = resolution
control = DrawControl()
control.rectangle = {
"shapeOptions": {
"fillColor": "#fabd14",
"color": "#fa6814",
"fillOpacity": 0.2
}
}
#Disable the rest of draw options
control.polyline = {}
control.circle = {}
control.circlemarker = {}
control.polygon = {}
control.edit = False
control.remove = False
control.on_draw(self._handle_draw)
self.map.add_control(control)
self.bbox = None
self.size = None
self.rectangle = None
self.add_rectangle(bbox.min_x, bbox.min_y, bbox.max_x, bbox.max_y)
# self.out = w.Output(layout=w.Layout(width='100%', height='50px', overflow_y='scroll'))
# self.vbox = w.VBox([self.map, self.out])
def add_rectangle(self, min_x, min_y, max_x, max_y):
if self.rectangle:
self.map.remove_layer(self.rectangle)
self.rectangle = Rectangle(
bounds=((min_y, min_x), (max_y, max_x)),
color="#fa6814",
fill=True,
fill_color="#fabd14",
fill_opacity=0.2,
weight=1
)
self.map.add_layer(self.rectangle)
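        # Convert the WGS84 lon/lat corners to Popular Web Mercator so the extent is in metres,
        # then derive the raster size in pixels at the requested metres-per-pixel resolution.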
self.bbox = BBox(((min_x, min_y), (max_x, max_y)), CRS.WGS84).transform(CRS.POP_WEB)
# self.out.append_display_data((min_x, min_y, max_x, max_y))
size_x = abs(int((self.bbox.max_x - self.bbox.min_x) / self.resolution))
size_y = abs(int((self.bbox.max_y - self.bbox.min_y) / self.resolution))
self.size = size_x, size_y
def _handle_draw(self, control, action, geo_json):
control.clear_rectangles()
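        # The drawn rectangle arrives as a closed GeoJSON ring; points 0 and 2 are opposite corners.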
bbox_geom = geo_json['geometry']['coordinates'][0]
min_x, min_y = bbox_geom[0]
max_x, max_y = bbox_geom[2]
self.add_rectangle(min_x, min_y, max_x, max_y)
def show(self):
return self.map
# return self.vbox
|
normal
|
{
"blob_id": "0545aff80e19e47cb9e5b1941e92ff5cb109f9e6",
"index": 1921,
"step-1": "<mask token>\n\n\nclass BBoxSelector:\n\n def __init__(self, bbox, zoom=8, resolution=10):\n center = (bbox.min_y + bbox.max_y) / 2, (bbox.min_x + bbox.max_x) / 2\n self.map = Map(center=center, zoom=zoom, scroll_wheel_zoom=True)\n self.resolution = resolution\n control = DrawControl()\n control.rectangle = {'shapeOptions': {'fillColor': '#fabd14',\n 'color': '#fa6814', 'fillOpacity': 0.2}}\n control.polyline = {}\n control.circle = {}\n control.circlemarker = {}\n control.polygon = {}\n control.edit = False\n control.remove = False\n control.on_draw(self._handle_draw)\n self.map.add_control(control)\n self.bbox = None\n self.size = None\n self.rectangle = None\n self.add_rectangle(bbox.min_x, bbox.min_y, bbox.max_x, bbox.max_y)\n\n def add_rectangle(self, min_x, min_y, max_x, max_y):\n if self.rectangle:\n self.map.remove_layer(self.rectangle)\n self.rectangle = Rectangle(bounds=((min_y, min_x), (max_y, max_x)),\n color='#fa6814', fill=True, fill_color='#fabd14', fill_opacity=\n 0.2, weight=1)\n self.map.add_layer(self.rectangle)\n self.bbox = BBox(((min_x, min_y), (max_x, max_y)), CRS.WGS84\n ).transform(CRS.POP_WEB)\n size_x = abs(int((self.bbox.max_x - self.bbox.min_x) / self.resolution)\n )\n size_y = abs(int((self.bbox.max_y - self.bbox.min_y) / self.resolution)\n )\n self.size = size_x, size_y\n <mask token>\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass BBoxSelector:\n\n def __init__(self, bbox, zoom=8, resolution=10):\n center = (bbox.min_y + bbox.max_y) / 2, (bbox.min_x + bbox.max_x) / 2\n self.map = Map(center=center, zoom=zoom, scroll_wheel_zoom=True)\n self.resolution = resolution\n control = DrawControl()\n control.rectangle = {'shapeOptions': {'fillColor': '#fabd14',\n 'color': '#fa6814', 'fillOpacity': 0.2}}\n control.polyline = {}\n control.circle = {}\n control.circlemarker = {}\n control.polygon = {}\n control.edit = False\n control.remove = False\n control.on_draw(self._handle_draw)\n self.map.add_control(control)\n self.bbox = None\n self.size = None\n self.rectangle = None\n self.add_rectangle(bbox.min_x, bbox.min_y, bbox.max_x, bbox.max_y)\n\n def add_rectangle(self, min_x, min_y, max_x, max_y):\n if self.rectangle:\n self.map.remove_layer(self.rectangle)\n self.rectangle = Rectangle(bounds=((min_y, min_x), (max_y, max_x)),\n color='#fa6814', fill=True, fill_color='#fabd14', fill_opacity=\n 0.2, weight=1)\n self.map.add_layer(self.rectangle)\n self.bbox = BBox(((min_x, min_y), (max_x, max_y)), CRS.WGS84\n ).transform(CRS.POP_WEB)\n size_x = abs(int((self.bbox.max_x - self.bbox.min_x) / self.resolution)\n )\n size_y = abs(int((self.bbox.max_y - self.bbox.min_y) / self.resolution)\n )\n self.size = size_x, size_y\n <mask token>\n\n def show(self):\n return self.map\n",
"step-3": "<mask token>\n\n\nclass BBoxSelector:\n\n def __init__(self, bbox, zoom=8, resolution=10):\n center = (bbox.min_y + bbox.max_y) / 2, (bbox.min_x + bbox.max_x) / 2\n self.map = Map(center=center, zoom=zoom, scroll_wheel_zoom=True)\n self.resolution = resolution\n control = DrawControl()\n control.rectangle = {'shapeOptions': {'fillColor': '#fabd14',\n 'color': '#fa6814', 'fillOpacity': 0.2}}\n control.polyline = {}\n control.circle = {}\n control.circlemarker = {}\n control.polygon = {}\n control.edit = False\n control.remove = False\n control.on_draw(self._handle_draw)\n self.map.add_control(control)\n self.bbox = None\n self.size = None\n self.rectangle = None\n self.add_rectangle(bbox.min_x, bbox.min_y, bbox.max_x, bbox.max_y)\n\n def add_rectangle(self, min_x, min_y, max_x, max_y):\n if self.rectangle:\n self.map.remove_layer(self.rectangle)\n self.rectangle = Rectangle(bounds=((min_y, min_x), (max_y, max_x)),\n color='#fa6814', fill=True, fill_color='#fabd14', fill_opacity=\n 0.2, weight=1)\n self.map.add_layer(self.rectangle)\n self.bbox = BBox(((min_x, min_y), (max_x, max_y)), CRS.WGS84\n ).transform(CRS.POP_WEB)\n size_x = abs(int((self.bbox.max_x - self.bbox.min_x) / self.resolution)\n )\n size_y = abs(int((self.bbox.max_y - self.bbox.min_y) / self.resolution)\n )\n self.size = size_x, size_y\n\n def _handle_draw(self, control, action, geo_json):\n control.clear_rectangles()\n bbox_geom = geo_json['geometry']['coordinates'][0]\n min_x, min_y = bbox_geom[0]\n max_x, max_y = bbox_geom[2]\n self.add_rectangle(min_x, min_y, max_x, max_y)\n\n def show(self):\n return self.map\n",
"step-4": "from ipyleaflet import Map, DrawControl, Marker, Rectangle\nfrom sentinelhub import BBox, CRS\nfrom ipywidgets import widgets as w\n\n\nclass BBoxSelector:\n\n def __init__(self, bbox, zoom=8, resolution=10):\n center = (bbox.min_y + bbox.max_y) / 2, (bbox.min_x + bbox.max_x) / 2\n self.map = Map(center=center, zoom=zoom, scroll_wheel_zoom=True)\n self.resolution = resolution\n control = DrawControl()\n control.rectangle = {'shapeOptions': {'fillColor': '#fabd14',\n 'color': '#fa6814', 'fillOpacity': 0.2}}\n control.polyline = {}\n control.circle = {}\n control.circlemarker = {}\n control.polygon = {}\n control.edit = False\n control.remove = False\n control.on_draw(self._handle_draw)\n self.map.add_control(control)\n self.bbox = None\n self.size = None\n self.rectangle = None\n self.add_rectangle(bbox.min_x, bbox.min_y, bbox.max_x, bbox.max_y)\n\n def add_rectangle(self, min_x, min_y, max_x, max_y):\n if self.rectangle:\n self.map.remove_layer(self.rectangle)\n self.rectangle = Rectangle(bounds=((min_y, min_x), (max_y, max_x)),\n color='#fa6814', fill=True, fill_color='#fabd14', fill_opacity=\n 0.2, weight=1)\n self.map.add_layer(self.rectangle)\n self.bbox = BBox(((min_x, min_y), (max_x, max_y)), CRS.WGS84\n ).transform(CRS.POP_WEB)\n size_x = abs(int((self.bbox.max_x - self.bbox.min_x) / self.resolution)\n )\n size_y = abs(int((self.bbox.max_y - self.bbox.min_y) / self.resolution)\n )\n self.size = size_x, size_y\n\n def _handle_draw(self, control, action, geo_json):\n control.clear_rectangles()\n bbox_geom = geo_json['geometry']['coordinates'][0]\n min_x, min_y = bbox_geom[0]\n max_x, max_y = bbox_geom[2]\n self.add_rectangle(min_x, min_y, max_x, max_y)\n\n def show(self):\n return self.map\n",
"step-5": "from ipyleaflet import Map, DrawControl, Marker, Rectangle\nfrom sentinelhub import BBox, CRS\n\nfrom ipywidgets import widgets as w\n\n\nclass BBoxSelector:\n def __init__(self, bbox, zoom=8, resolution=10):\n center = (bbox.min_y + bbox.max_y) / 2, (bbox.min_x + bbox.max_x) / 2\n self.map = Map(center=center, zoom=zoom, scroll_wheel_zoom=True)\n\n self.resolution = resolution\n\n control = DrawControl()\n\n control.rectangle = {\n \"shapeOptions\": {\n \"fillColor\": \"#fabd14\",\n \"color\": \"#fa6814\",\n \"fillOpacity\": 0.2\n }\n }\n\n #Disable the rest of draw options\n control.polyline = {}\n control.circle = {}\n control.circlemarker = {}\n control.polygon = {}\n control.edit = False\n control.remove = False\n\n control.on_draw(self._handle_draw)\n\n self.map.add_control(control)\n\n self.bbox = None\n self.size = None\n self.rectangle = None\n self.add_rectangle(bbox.min_x, bbox.min_y, bbox.max_x, bbox.max_y)\n\n # self.out = w.Output(layout=w.Layout(width='100%', height='50px', overflow_y='scroll'))\n # self.vbox = w.VBox([self.map, self.out])\n\n def add_rectangle(self, min_x, min_y, max_x, max_y):\n if self.rectangle:\n self.map.remove_layer(self.rectangle)\n\n self.rectangle = Rectangle(\n bounds=((min_y, min_x), (max_y, max_x)),\n color=\"#fa6814\",\n fill=True,\n fill_color=\"#fabd14\",\n fill_opacity=0.2,\n weight=1\n )\n\n self.map.add_layer(self.rectangle)\n\n self.bbox = BBox(((min_x, min_y), (max_x, max_y)), CRS.WGS84).transform(CRS.POP_WEB)\n\n # self.out.append_display_data((min_x, min_y, max_x, max_y))\n\n size_x = abs(int((self.bbox.max_x - self.bbox.min_x) / self.resolution))\n size_y = abs(int((self.bbox.max_y - self.bbox.min_y) / self.resolution))\n\n self.size = size_x, size_y\n\n def _handle_draw(self, control, action, geo_json):\n control.clear_rectangles()\n\n bbox_geom = geo_json['geometry']['coordinates'][0]\n\n min_x, min_y = bbox_geom[0]\n max_x, max_y = bbox_geom[2]\n\n self.add_rectangle(min_x, min_y, max_x, max_y)\n\n def show(self):\n return self.map\n # return self.vbox\n",
"step-ids": [
3,
4,
5,
6,
7
]
}
|
[
3,
4,
5,
6,
7
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Migration(migrations.Migration):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Migration(migrations.Migration):
dependencies = [('PDPAPI', '0011_auto_20181105_1021')]
operations = [migrations.RemoveField(model_name='optionvoting', name=
'totalVotes'), migrations.AddField(model_name='mcqoption', name=
'totalVotes', field=models.IntegerField(default=0))]
<|reserved_special_token_1|>
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [('PDPAPI', '0011_auto_20181105_1021')]
operations = [migrations.RemoveField(model_name='optionvoting', name=
'totalVotes'), migrations.AddField(model_name='mcqoption', name=
'totalVotes', field=models.IntegerField(default=0))]
<|reserved_special_token_1|>
# Generated by Django 2.1.2 on 2018-11-05 12:00
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('PDPAPI', '0011_auto_20181105_1021'),
]
operations = [
migrations.RemoveField(
model_name='optionvoting',
name='totalVotes',
),
migrations.AddField(
model_name='mcqoption',
name='totalVotes',
field=models.IntegerField(default=0),
),
]
|
flexible
|
{
"blob_id": "53519c704ca9aff62140f187d4246208350fa9ba",
"index": 4610,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass Migration(migrations.Migration):\n <mask token>\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass Migration(migrations.Migration):\n dependencies = [('PDPAPI', '0011_auto_20181105_1021')]\n operations = [migrations.RemoveField(model_name='optionvoting', name=\n 'totalVotes'), migrations.AddField(model_name='mcqoption', name=\n 'totalVotes', field=models.IntegerField(default=0))]\n",
"step-4": "from django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n dependencies = [('PDPAPI', '0011_auto_20181105_1021')]\n operations = [migrations.RemoveField(model_name='optionvoting', name=\n 'totalVotes'), migrations.AddField(model_name='mcqoption', name=\n 'totalVotes', field=models.IntegerField(default=0))]\n",
"step-5": "# Generated by Django 2.1.2 on 2018-11-05 12:00\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('PDPAPI', '0011_auto_20181105_1021'),\n ]\n\n operations = [\n migrations.RemoveField(\n model_name='optionvoting',\n name='totalVotes',\n ),\n migrations.AddField(\n model_name='mcqoption',\n name='totalVotes',\n field=models.IntegerField(default=0),\n ),\n ]\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
class PinEnvDiscrete(Env):
<|reserved_special_token_0|>
def __init__(self, simulation, x, y, trajectory, scorer=0,
max_displacement=False, predict=False, original=False, sample=False):
self.simulation = simulation
height, width = simulation.cloth.initial_params[0]
self.os_dim = height * width * 5
self.simulation.reset()
self.tensioner = self.simulation.pin_position(x, y, max_displacement)
self.scorer = Scorer(scorer)
self.trajectory = trajectory
self.traj_index = 0
self.pinx, self.piny = x, y
self.predict = predict
self.original = original
self.sample = sample
@property
def observation_space(self):
if self.original:
return Box(low=np.array([0, -self.tensioner.max_displacement, -
self.tensioner.max_displacement, -self.tensioner.
max_displacement]), high=np.array([len(self.trajectory) + 1,
self.tensioner.max_displacement, self.tensioner.
max_displacement, self.tensioner.max_displacement]))
if not self.predict:
return Box(low=np.array([0, -self.tensioner.max_displacement, -
self.tensioner.max_displacement, -self.tensioner.
max_displacement] + len(self.simulation.cloth.blobs) * [0,
0, -800]), high=np.array([len(self.trajectory) + 1, self.
tensioner.max_displacement, self.tensioner.max_displacement,
self.tensioner.max_displacement] + len(self.simulation.
cloth.blobs) * [500, 500, 800]))
return Box(low=np.array([0, -self.tensioner.max_displacement, -self
.tensioner.max_displacement, -self.tensioner.max_displacement] +
len(self.simulation.cloth.blobs) * [0, 0, -800] + 3 * [-1000, -
1000, -1000, -1000, -3.2] + [0, 0]), high=np.array([len(self.
trajectory) + 1, self.tensioner.max_displacement, self.
tensioner.max_displacement, self.tensioner.max_displacement] +
len(self.simulation.cloth.blobs) * [500, 500, 800] + 3 * [800,
800, 800, 800, 3.2] + [600, 600]))
@property
def action_space(self):
return Discrete(7)
@property
def _state(self):
scissors = self.simulation.mouse.x, self.simulation.mouse.y
centroids = np.ravel(np.array(self.simulation.cloth.centroids)).tolist(
)
if self.original:
return np.array([self.traj_index] + list(self.tensioner.
displacement))
if not self.predict:
return np.array([self.traj_index] + list(self.tensioner.
displacement) + centroids)
next_position3 = [-1000, -1000]
closest_shape3 = [-1000, -1000]
angle3 = 0
next_position2 = [-1000, -1000]
closest_shape2 = [-1000, -1000]
angle2 = 0
next_position = [-1000, -1000]
closest_shape = [-1000, -1000]
angle = 0
if self.traj_index < len(self.trajectory) - 1:
next_position = [self.trajectory[self.traj_index + 1][0], self.
trajectory[self.traj_index + 1][1]]
closest_shape = list(self.simulation.cloth.find_closest_shapept
(next_position[0], next_position[1]))
angle = self.simulation.cloth.find_dtheta(scissors[0], scissors
[1], next_position[0], next_position[1], closest_shape[0],
closest_shape[1])
if self.traj_index < len(self.trajectory) - 5:
next_position2 = [self.trajectory[self.traj_index + 5][0],
self.trajectory[self.traj_index + 5][1]]
if np.linalg.norm(np.array(next_position2) - np.array(
next_position)) < 100:
closest_shape2 = list(self.simulation.cloth.
find_closest_shapept(next_position2[0],
next_position2[1]))
angle2 = self.simulation.cloth.find_dtheta(next_position
[0], next_position[1], next_position2[0],
next_position2[1], closest_shape2[0], closest_shape2[1]
)
if self.traj_index < len(self.trajectory) - 10:
next_position3 = [self.trajectory[self.traj_index +
10][0], self.trajectory[self.traj_index + 10][1]]
if np.linalg.norm(np.array(next_position3) - np.
array(next_position2)) < 100:
closest_shape3 = list(self.simulation.cloth.
find_closest_shapept(next_position3[0],
next_position3[1]))
angle3 = self.simulation.cloth.find_dtheta(
next_position2[0], next_position2[1],
next_position3[0], next_position3[1],
closest_shape3[0], closest_shape3[1])
return np.array([self.traj_index] + list(self.tensioner.
displacement) + centroids + next_position + closest_shape + [
angle] + next_position2 + closest_shape2 + [angle2] +
next_position3 + closest_shape3 + [angle3] + list(scissors))
@property
def _score(self):
disp = np.linalg.norm(self._state[1])
score = self.scorer.score(self.simulation.cloth)
if disp >= self.tensioner.max_displacement - 2:
score -= 100
return score
def reset(self):
self.simulation.reset()
self.tensioner = self.simulation.pin_position(self.pinx, self.piny,
self.tensioner.max_displacement)
self.traj_index = 0
observation = np.copy(self._state)
return observation
def step(self, action):
x, y, z = self.MAPPING[action]
self.tensioner.tension(x, y, z)
self.simulation.move_mouse(self.trajectory[self.traj_index][0],
self.trajectory[self.traj_index][1])
reward = self.simulation.update() * self.traj_index / 10
self.traj_index += 1
self.simulation.move_mouse(self.trajectory[self.traj_index][0],
self.trajectory[self.traj_index][1])
reward += self.simulation.update() * self.traj_index / 10
done = self.traj_index >= len(self.trajectory) - 2
if done:
reward = self.simulation.cloth.evaluate()
else:
reward = 0
next_observation = np.copy(self._state)
self.traj_index += 1
return Step(observation=next_observation, reward=reward, done=done)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class PinEnvDiscrete(Env):
<|reserved_special_token_0|>
def __init__(self, simulation, x, y, trajectory, scorer=0,
max_displacement=False, predict=False, original=False, sample=False):
self.simulation = simulation
height, width = simulation.cloth.initial_params[0]
self.os_dim = height * width * 5
self.simulation.reset()
self.tensioner = self.simulation.pin_position(x, y, max_displacement)
self.scorer = Scorer(scorer)
self.trajectory = trajectory
self.traj_index = 0
self.pinx, self.piny = x, y
self.predict = predict
self.original = original
self.sample = sample
@property
def observation_space(self):
if self.original:
return Box(low=np.array([0, -self.tensioner.max_displacement, -
self.tensioner.max_displacement, -self.tensioner.
max_displacement]), high=np.array([len(self.trajectory) + 1,
self.tensioner.max_displacement, self.tensioner.
max_displacement, self.tensioner.max_displacement]))
if not self.predict:
return Box(low=np.array([0, -self.tensioner.max_displacement, -
self.tensioner.max_displacement, -self.tensioner.
max_displacement] + len(self.simulation.cloth.blobs) * [0,
0, -800]), high=np.array([len(self.trajectory) + 1, self.
tensioner.max_displacement, self.tensioner.max_displacement,
self.tensioner.max_displacement] + len(self.simulation.
cloth.blobs) * [500, 500, 800]))
return Box(low=np.array([0, -self.tensioner.max_displacement, -self
.tensioner.max_displacement, -self.tensioner.max_displacement] +
len(self.simulation.cloth.blobs) * [0, 0, -800] + 3 * [-1000, -
1000, -1000, -1000, -3.2] + [0, 0]), high=np.array([len(self.
trajectory) + 1, self.tensioner.max_displacement, self.
tensioner.max_displacement, self.tensioner.max_displacement] +
len(self.simulation.cloth.blobs) * [500, 500, 800] + 3 * [800,
800, 800, 800, 3.2] + [600, 600]))
@property
def action_space(self):
return Discrete(7)
@property
def _state(self):
scissors = self.simulation.mouse.x, self.simulation.mouse.y
centroids = np.ravel(np.array(self.simulation.cloth.centroids)).tolist(
)
if self.original:
return np.array([self.traj_index] + list(self.tensioner.
displacement))
if not self.predict:
return np.array([self.traj_index] + list(self.tensioner.
displacement) + centroids)
next_position3 = [-1000, -1000]
closest_shape3 = [-1000, -1000]
angle3 = 0
next_position2 = [-1000, -1000]
closest_shape2 = [-1000, -1000]
angle2 = 0
next_position = [-1000, -1000]
closest_shape = [-1000, -1000]
angle = 0
if self.traj_index < len(self.trajectory) - 1:
next_position = [self.trajectory[self.traj_index + 1][0], self.
trajectory[self.traj_index + 1][1]]
closest_shape = list(self.simulation.cloth.find_closest_shapept
(next_position[0], next_position[1]))
angle = self.simulation.cloth.find_dtheta(scissors[0], scissors
[1], next_position[0], next_position[1], closest_shape[0],
closest_shape[1])
if self.traj_index < len(self.trajectory) - 5:
next_position2 = [self.trajectory[self.traj_index + 5][0],
self.trajectory[self.traj_index + 5][1]]
if np.linalg.norm(np.array(next_position2) - np.array(
next_position)) < 100:
closest_shape2 = list(self.simulation.cloth.
find_closest_shapept(next_position2[0],
next_position2[1]))
angle2 = self.simulation.cloth.find_dtheta(next_position
[0], next_position[1], next_position2[0],
next_position2[1], closest_shape2[0], closest_shape2[1]
)
if self.traj_index < len(self.trajectory) - 10:
next_position3 = [self.trajectory[self.traj_index +
10][0], self.trajectory[self.traj_index + 10][1]]
if np.linalg.norm(np.array(next_position3) - np.
array(next_position2)) < 100:
closest_shape3 = list(self.simulation.cloth.
find_closest_shapept(next_position3[0],
next_position3[1]))
angle3 = self.simulation.cloth.find_dtheta(
next_position2[0], next_position2[1],
next_position3[0], next_position3[1],
closest_shape3[0], closest_shape3[1])
return np.array([self.traj_index] + list(self.tensioner.
displacement) + centroids + next_position + closest_shape + [
angle] + next_position2 + closest_shape2 + [angle2] +
next_position3 + closest_shape3 + [angle3] + list(scissors))
@property
def _score(self):
disp = np.linalg.norm(self._state[1])
score = self.scorer.score(self.simulation.cloth)
if disp >= self.tensioner.max_displacement - 2:
score -= 100
return score
def reset(self):
self.simulation.reset()
self.tensioner = self.simulation.pin_position(self.pinx, self.piny,
self.tensioner.max_displacement)
self.traj_index = 0
observation = np.copy(self._state)
return observation
def step(self, action):
x, y, z = self.MAPPING[action]
self.tensioner.tension(x, y, z)
self.simulation.move_mouse(self.trajectory[self.traj_index][0],
self.trajectory[self.traj_index][1])
reward = self.simulation.update() * self.traj_index / 10
self.traj_index += 1
self.simulation.move_mouse(self.trajectory[self.traj_index][0],
self.trajectory[self.traj_index][1])
reward += self.simulation.update() * self.traj_index / 10
done = self.traj_index >= len(self.trajectory) - 2
if done:
reward = self.simulation.cloth.evaluate()
else:
reward = 0
next_observation = np.copy(self._state)
self.traj_index += 1
return Step(observation=next_observation, reward=reward, done=done)
def render(self):
self.simulation.render_sim()
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class PinEnvDiscrete(Env):
MAPPING = {(0): (0, 0, 0), (1): (1, 0, 0), (2): (0, 1, 0), (3): (0, 0,
1), (4): (-1, 0, 0), (5): (0, -1, 0), (6): (0, 0, -1)}
def __init__(self, simulation, x, y, trajectory, scorer=0,
max_displacement=False, predict=False, original=False, sample=False):
self.simulation = simulation
height, width = simulation.cloth.initial_params[0]
self.os_dim = height * width * 5
self.simulation.reset()
self.tensioner = self.simulation.pin_position(x, y, max_displacement)
self.scorer = Scorer(scorer)
self.trajectory = trajectory
self.traj_index = 0
self.pinx, self.piny = x, y
self.predict = predict
self.original = original
self.sample = sample
@property
def observation_space(self):
if self.original:
return Box(low=np.array([0, -self.tensioner.max_displacement, -
self.tensioner.max_displacement, -self.tensioner.
max_displacement]), high=np.array([len(self.trajectory) + 1,
self.tensioner.max_displacement, self.tensioner.
max_displacement, self.tensioner.max_displacement]))
if not self.predict:
return Box(low=np.array([0, -self.tensioner.max_displacement, -
self.tensioner.max_displacement, -self.tensioner.
max_displacement] + len(self.simulation.cloth.blobs) * [0,
0, -800]), high=np.array([len(self.trajectory) + 1, self.
tensioner.max_displacement, self.tensioner.max_displacement,
self.tensioner.max_displacement] + len(self.simulation.
cloth.blobs) * [500, 500, 800]))
return Box(low=np.array([0, -self.tensioner.max_displacement, -self
.tensioner.max_displacement, -self.tensioner.max_displacement] +
len(self.simulation.cloth.blobs) * [0, 0, -800] + 3 * [-1000, -
1000, -1000, -1000, -3.2] + [0, 0]), high=np.array([len(self.
trajectory) + 1, self.tensioner.max_displacement, self.
tensioner.max_displacement, self.tensioner.max_displacement] +
len(self.simulation.cloth.blobs) * [500, 500, 800] + 3 * [800,
800, 800, 800, 3.2] + [600, 600]))
@property
def action_space(self):
return Discrete(7)
@property
def _state(self):
scissors = self.simulation.mouse.x, self.simulation.mouse.y
centroids = np.ravel(np.array(self.simulation.cloth.centroids)).tolist(
)
if self.original:
return np.array([self.traj_index] + list(self.tensioner.
displacement))
if not self.predict:
return np.array([self.traj_index] + list(self.tensioner.
displacement) + centroids)
next_position3 = [-1000, -1000]
closest_shape3 = [-1000, -1000]
angle3 = 0
next_position2 = [-1000, -1000]
closest_shape2 = [-1000, -1000]
angle2 = 0
next_position = [-1000, -1000]
closest_shape = [-1000, -1000]
angle = 0
if self.traj_index < len(self.trajectory) - 1:
next_position = [self.trajectory[self.traj_index + 1][0], self.
trajectory[self.traj_index + 1][1]]
closest_shape = list(self.simulation.cloth.find_closest_shapept
(next_position[0], next_position[1]))
angle = self.simulation.cloth.find_dtheta(scissors[0], scissors
[1], next_position[0], next_position[1], closest_shape[0],
closest_shape[1])
if self.traj_index < len(self.trajectory) - 5:
next_position2 = [self.trajectory[self.traj_index + 5][0],
self.trajectory[self.traj_index + 5][1]]
if np.linalg.norm(np.array(next_position2) - np.array(
next_position)) < 100:
closest_shape2 = list(self.simulation.cloth.
find_closest_shapept(next_position2[0],
next_position2[1]))
angle2 = self.simulation.cloth.find_dtheta(next_position
[0], next_position[1], next_position2[0],
next_position2[1], closest_shape2[0], closest_shape2[1]
)
if self.traj_index < len(self.trajectory) - 10:
next_position3 = [self.trajectory[self.traj_index +
10][0], self.trajectory[self.traj_index + 10][1]]
if np.linalg.norm(np.array(next_position3) - np.
array(next_position2)) < 100:
closest_shape3 = list(self.simulation.cloth.
find_closest_shapept(next_position3[0],
next_position3[1]))
angle3 = self.simulation.cloth.find_dtheta(
next_position2[0], next_position2[1],
next_position3[0], next_position3[1],
closest_shape3[0], closest_shape3[1])
return np.array([self.traj_index] + list(self.tensioner.
displacement) + centroids + next_position + closest_shape + [
angle] + next_position2 + closest_shape2 + [angle2] +
next_position3 + closest_shape3 + [angle3] + list(scissors))
@property
def _score(self):
disp = np.linalg.norm(self._state[1])
score = self.scorer.score(self.simulation.cloth)
if disp >= self.tensioner.max_displacement - 2:
score -= 100
return score
def reset(self):
self.simulation.reset()
self.tensioner = self.simulation.pin_position(self.pinx, self.piny,
self.tensioner.max_displacement)
self.traj_index = 0
observation = np.copy(self._state)
return observation
def step(self, action):
x, y, z = self.MAPPING[action]
self.tensioner.tension(x, y, z)
self.simulation.move_mouse(self.trajectory[self.traj_index][0],
self.trajectory[self.traj_index][1])
reward = self.simulation.update() * self.traj_index / 10
self.traj_index += 1
self.simulation.move_mouse(self.trajectory[self.traj_index][0],
self.trajectory[self.traj_index][1])
reward += self.simulation.update() * self.traj_index / 10
done = self.traj_index >= len(self.trajectory) - 2
if done:
reward = self.simulation.cloth.evaluate()
else:
reward = 0
next_observation = np.copy(self._state)
self.traj_index += 1
return Step(observation=next_observation, reward=reward, done=done)
def render(self):
self.simulation.render_sim()
<|reserved_special_token_1|>
from rllab.envs.base import Env
from rllab.spaces import Discrete
from rllab.spaces import Box
from rllab.envs.base import Step
import numpy as np
import sys, pickle, os
sys.path.append(os.path.dirname(os.getcwd()))
from os.path import dirname
sys.path.append(dirname(dirname(dirname(os.getcwd()))))
from simulation import *
from scorer import *
from shapecloth import *
from tensioner import *
<|reserved_special_token_0|>
class PinEnvDiscrete(Env):
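    # Discrete action index -> (dx, dy, dz) unit displacement of the pinned tensioner:
    # action 0 is a no-op, actions 1-6 move one unit along +x, +y, +z, -x, -y, -z respectively.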
MAPPING = {(0): (0, 0, 0), (1): (1, 0, 0), (2): (0, 1, 0), (3): (0, 0,
1), (4): (-1, 0, 0), (5): (0, -1, 0), (6): (0, 0, -1)}
def __init__(self, simulation, x, y, trajectory, scorer=0,
max_displacement=False, predict=False, original=False, sample=False):
self.simulation = simulation
height, width = simulation.cloth.initial_params[0]
self.os_dim = height * width * 5
self.simulation.reset()
self.tensioner = self.simulation.pin_position(x, y, max_displacement)
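        # pin_position pins the cloth at (x, y) and returns the tensioner that the discrete
        # actions displace (its max_displacement bounds the observation space).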
self.scorer = Scorer(scorer)
self.trajectory = trajectory
self.traj_index = 0
self.pinx, self.piny = x, y
self.predict = predict
self.original = original
self.sample = sample
@property
def observation_space(self):
if self.original:
return Box(low=np.array([0, -self.tensioner.max_displacement, -
self.tensioner.max_displacement, -self.tensioner.
max_displacement]), high=np.array([len(self.trajectory) + 1,
self.tensioner.max_displacement, self.tensioner.
max_displacement, self.tensioner.max_displacement]))
if not self.predict:
return Box(low=np.array([0, -self.tensioner.max_displacement, -
self.tensioner.max_displacement, -self.tensioner.
max_displacement] + len(self.simulation.cloth.blobs) * [0,
0, -800]), high=np.array([len(self.trajectory) + 1, self.
tensioner.max_displacement, self.tensioner.max_displacement,
self.tensioner.max_displacement] + len(self.simulation.
cloth.blobs) * [500, 500, 800]))
return Box(low=np.array([0, -self.tensioner.max_displacement, -self
.tensioner.max_displacement, -self.tensioner.max_displacement] +
len(self.simulation.cloth.blobs) * [0, 0, -800] + 3 * [-1000, -
1000, -1000, -1000, -3.2] + [0, 0]), high=np.array([len(self.
trajectory) + 1, self.tensioner.max_displacement, self.
tensioner.max_displacement, self.tensioner.max_displacement] +
len(self.simulation.cloth.blobs) * [500, 500, 800] + 3 * [800,
800, 800, 800, 3.2] + [600, 600]))
@property
def action_space(self):
return Discrete(7)
@property
def _state(self):
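        # Observation layout: [trajectory index, 3-D tensioner displacement]; unless `original`,
        # the flattened blob centroids are appended; in `predict` mode, up to three look-ahead
        # trajectory points (1, 5 and 10 steps ahead), the closest shape point and relative angle
        # for each, and the scissor position are appended as well.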
scissors = self.simulation.mouse.x, self.simulation.mouse.y
centroids = np.ravel(np.array(self.simulation.cloth.centroids)).tolist(
)
if self.original:
return np.array([self.traj_index] + list(self.tensioner.
displacement))
if not self.predict:
return np.array([self.traj_index] + list(self.tensioner.
displacement) + centroids)
next_position3 = [-1000, -1000]
closest_shape3 = [-1000, -1000]
angle3 = 0
next_position2 = [-1000, -1000]
closest_shape2 = [-1000, -1000]
angle2 = 0
next_position = [-1000, -1000]
closest_shape = [-1000, -1000]
angle = 0
if self.traj_index < len(self.trajectory) - 1:
next_position = [self.trajectory[self.traj_index + 1][0], self.
trajectory[self.traj_index + 1][1]]
closest_shape = list(self.simulation.cloth.find_closest_shapept
(next_position[0], next_position[1]))
angle = self.simulation.cloth.find_dtheta(scissors[0], scissors
[1], next_position[0], next_position[1], closest_shape[0],
closest_shape[1])
if self.traj_index < len(self.trajectory) - 5:
next_position2 = [self.trajectory[self.traj_index + 5][0],
self.trajectory[self.traj_index + 5][1]]
if np.linalg.norm(np.array(next_position2) - np.array(
next_position)) < 100:
closest_shape2 = list(self.simulation.cloth.
find_closest_shapept(next_position2[0],
next_position2[1]))
angle2 = self.simulation.cloth.find_dtheta(next_position
[0], next_position[1], next_position2[0],
next_position2[1], closest_shape2[0], closest_shape2[1]
)
if self.traj_index < len(self.trajectory) - 10:
next_position3 = [self.trajectory[self.traj_index +
10][0], self.trajectory[self.traj_index + 10][1]]
if np.linalg.norm(np.array(next_position3) - np.
array(next_position2)) < 100:
closest_shape3 = list(self.simulation.cloth.
find_closest_shapept(next_position3[0],
next_position3[1]))
angle3 = self.simulation.cloth.find_dtheta(
next_position2[0], next_position2[1],
next_position3[0], next_position3[1],
closest_shape3[0], closest_shape3[1])
return np.array([self.traj_index] + list(self.tensioner.
displacement) + centroids + next_position + closest_shape + [
angle] + next_position2 + closest_shape2 + [angle2] +
next_position3 + closest_shape3 + [angle3] + list(scissors))
@property
def _score(self):
disp = np.linalg.norm(self._state[1])
score = self.scorer.score(self.simulation.cloth)
if disp >= self.tensioner.max_displacement - 2:
score -= 100
return score
def reset(self):
self.simulation.reset()
self.tensioner = self.simulation.pin_position(self.pinx, self.piny,
self.tensioner.max_displacement)
self.traj_index = 0
observation = np.copy(self._state)
return observation
def step(self, action):
x, y, z = self.MAPPING[action]
self.tensioner.tension(x, y, z)
self.simulation.move_mouse(self.trajectory[self.traj_index][0],
self.trajectory[self.traj_index][1])
reward = self.simulation.update() * self.traj_index / 10
self.traj_index += 1
self.simulation.move_mouse(self.trajectory[self.traj_index][0],
self.trajectory[self.traj_index][1])
reward += self.simulation.update() * self.traj_index / 10
done = self.traj_index >= len(self.trajectory) - 2
if done:
reward = self.simulation.cloth.evaluate()
else:
reward = 0
next_observation = np.copy(self._state)
self.traj_index += 1
return Step(observation=next_observation, reward=reward, done=done)
def render(self):
self.simulation.render_sim()
<|reserved_special_token_1|>
from rllab.envs.base import Env
from rllab.spaces import Discrete
from rllab.spaces import Box
from rllab.envs.base import Step
import numpy as np
import sys, pickle, os
sys.path.append(os.path.dirname(os.getcwd()))
from os.path import dirname
sys.path.append(dirname(dirname(dirname(os.getcwd()))))
from simulation import *
from scorer import *
from shapecloth import *
from tensioner import *
"""
A Rllab Environment for the tensioning policy experiments.
"""
class PinEnvDiscrete(Env):
MAPPING = {
0 : (0,0,0),
1 : (1,0,0),
2 : (0,1,0),
3 : (0,0,1),
4 : (-1,0,0),
5 : (0,-1,0),
6 : (0,0,-1)
}
def __init__(self, simulation, x, y, trajectory, scorer=0, max_displacement=False, predict=False, original=False, sample=False):
self.simulation = simulation
height, width = simulation.cloth.initial_params[0]
self.os_dim = height * width * 5
self.simulation.reset()
self.tensioner = self.simulation.pin_position(x, y, max_displacement)
self.scorer = Scorer(scorer)
self.trajectory = trajectory
self.traj_index = 0
self.pinx, self.piny = x, y
self.predict = predict
self.original = original
self.sample = sample
@property
def observation_space(self):
if self.original:
return Box(low=np.array([0, -self.tensioner.max_displacement, -self.tensioner.max_displacement, -self.tensioner.max_displacement]),
high=np.array([len(self.trajectory) + 1, self.tensioner.max_displacement, self.tensioner.max_displacement, self.tensioner.max_displacement]))
if not self.predict:
return Box(low=np.array([0, -self.tensioner.max_displacement, -self.tensioner.max_displacement, -self.tensioner.max_displacement] + len(self.simulation.cloth.blobs) * [0, 0, -800]),
high=np.array([len(self.trajectory) + 1, self.tensioner.max_displacement, self.tensioner.max_displacement, self.tensioner.max_displacement]
+ len(self.simulation.cloth.blobs) * [500, 500, 800]))
return Box(low=np.array([0, -self.tensioner.max_displacement, -self.tensioner.max_displacement, -self.tensioner.max_displacement] + len(self.simulation.cloth.blobs) * [0, 0, -800] + 3 * [-1000, -1000, -1000, -1000, -3.2] + [0, 0]),
high=np.array([len(self.trajectory) + 1, self.tensioner.max_displacement, self.tensioner.max_displacement, self.tensioner.max_displacement]
+ len(self.simulation.cloth.blobs) * [500, 500, 800] + 3 * [800, 800, 800, 800, 3.2] + [600, 600]))
@property
def action_space(self):
return Discrete(7)
@property
def _state(self):
scissors = self.simulation.mouse.x, self.simulation.mouse.y
centroids = np.ravel(np.array(self.simulation.cloth.centroids)).tolist()
if self.original:
return np.array([self.traj_index] + list(self.tensioner.displacement))
if not self.predict:
return np.array([self.traj_index] + list(self.tensioner.displacement) + centroids)
        # Look-ahead features for waypoints 1, 5 and 10 steps ahead; the sentinel
        # values below are left in place when the trajectory ends before that point.
        next_position3 = [-1000, -1000]
        closest_shape3 = [-1000, -1000]
        angle3 = 0
next_position2 = [-1000, -1000]
closest_shape2 = [-1000, -1000]
angle2 = 0
next_position = [-1000, -1000]
closest_shape = [-1000, -1000]
angle = 0
if self.traj_index < len(self.trajectory) - 1:
next_position = [self.trajectory[self.traj_index+1][0], self.trajectory[self.traj_index+1][1]]
closest_shape = list(self.simulation.cloth.find_closest_shapept(next_position[0], next_position[1]))
angle = self.simulation.cloth.find_dtheta(scissors[0], scissors[1], next_position[0], next_position[1], closest_shape[0], closest_shape[1])
if self.traj_index < len(self.trajectory) - 5:
next_position2 = [self.trajectory[self.traj_index+5][0], self.trajectory[self.traj_index+5][1]]
if np.linalg.norm(np.array(next_position2) - np.array(next_position)) < 100:
closest_shape2 = list(self.simulation.cloth.find_closest_shapept(next_position2[0], next_position2[1]))
angle2 = self.simulation.cloth.find_dtheta(next_position[0], next_position[1], next_position2[0], next_position2[1], closest_shape2[0], closest_shape2[1])
if self.traj_index < len(self.trajectory) - 10:
next_position3 = [self.trajectory[self.traj_index+10][0], self.trajectory[self.traj_index+10][1]]
if np.linalg.norm(np.array(next_position3) - np.array(next_position2)) < 100:
closest_shape3 = list(self.simulation.cloth.find_closest_shapept(next_position3[0], next_position3[1]))
angle3 = self.simulation.cloth.find_dtheta(next_position2[0], next_position2[1], next_position3[0], next_position3[1], closest_shape3[0], closest_shape3[1])
return np.array([self.traj_index] + list(self.tensioner.displacement) + centroids + next_position + closest_shape + [angle] + next_position2 + closest_shape2 + [angle2]
+ next_position3 + closest_shape3 + [angle3] + list(scissors))
@property
def _score(self):
disp = np.linalg.norm(self._state[1])
score = self.scorer.score(self.simulation.cloth)
if disp >= self.tensioner.max_displacement - 2:
score -= 100
return score
def reset(self):
self.simulation.reset()
self.tensioner = self.simulation.pin_position(self.pinx, self.piny, self.tensioner.max_displacement)
self.traj_index = 0
observation = np.copy(self._state)
return observation
    def step(self, action):
        # Map the discrete action to a unit tension adjustment and apply it to the pinned point.
        x, y, z = self.MAPPING[action]
        self.tensioner.tension(x, y, z)
        # Advance the cutter along two consecutive trajectory waypoints per environment step.
        self.simulation.move_mouse(self.trajectory[self.traj_index][0], self.trajectory[self.traj_index][1])
        reward = self.simulation.update() * self.traj_index/10
        self.traj_index += 1
        self.simulation.move_mouse(self.trajectory[self.traj_index][0], self.trajectory[self.traj_index][1])
        reward += self.simulation.update() * self.traj_index/10
        done = self.traj_index >= len(self.trajectory) - 2
        # Note that the per-step shaping reward accumulated above is discarded here:
        # the episode reward is the final cloth score on termination and 0 otherwise.
        if done:
            reward = self.simulation.cloth.evaluate()
        else:
            reward = 0
        next_observation = np.copy(self._state)
        self.traj_index += 1
        return Step(observation=next_observation, reward=reward, done=done)
def render(self):
self.simulation.render_sim()
# def local_angles(self, n=5):
# if self.
# for i in range(n):
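
# --- Usage sketch (not part of the original module) ---
# Assumptions: `simulation` is a Simulation built around a ShapeCloth and
# `trajectory` is the list of (x, y) cutting waypoints this environment expects;
# how those are constructed is repository-specific and not shown here. Only the
# PinEnvDiscrete API defined above is taken from the original code.
def run_random_episode(simulation, trajectory, pin_x=300, pin_y=300):
    env = PinEnvDiscrete(simulation, pin_x, pin_y, trajectory)
    env.reset()
    total_reward = 0
    done = False
    while not done:
        action = np.random.randint(7)  # one of the seven discrete tension moves in MAPPING
        result = env.step(action)
        total_reward += result.reward
        done = result.done
    return total_reward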
|
flexible
|
{
"blob_id": "21974274b1e7800b83eb9582ab21714f04230549",
"index": 4299,
"step-1": "<mask token>\n\n\nclass PinEnvDiscrete(Env):\n <mask token>\n\n def __init__(self, simulation, x, y, trajectory, scorer=0,\n max_displacement=False, predict=False, original=False, sample=False):\n self.simulation = simulation\n height, width = simulation.cloth.initial_params[0]\n self.os_dim = height * width * 5\n self.simulation.reset()\n self.tensioner = self.simulation.pin_position(x, y, max_displacement)\n self.scorer = Scorer(scorer)\n self.trajectory = trajectory\n self.traj_index = 0\n self.pinx, self.piny = x, y\n self.predict = predict\n self.original = original\n self.sample = sample\n\n @property\n def observation_space(self):\n if self.original:\n return Box(low=np.array([0, -self.tensioner.max_displacement, -\n self.tensioner.max_displacement, -self.tensioner.\n max_displacement]), high=np.array([len(self.trajectory) + 1,\n self.tensioner.max_displacement, self.tensioner.\n max_displacement, self.tensioner.max_displacement]))\n if not self.predict:\n return Box(low=np.array([0, -self.tensioner.max_displacement, -\n self.tensioner.max_displacement, -self.tensioner.\n max_displacement] + len(self.simulation.cloth.blobs) * [0, \n 0, -800]), high=np.array([len(self.trajectory) + 1, self.\n tensioner.max_displacement, self.tensioner.max_displacement,\n self.tensioner.max_displacement] + len(self.simulation.\n cloth.blobs) * [500, 500, 800]))\n return Box(low=np.array([0, -self.tensioner.max_displacement, -self\n .tensioner.max_displacement, -self.tensioner.max_displacement] +\n len(self.simulation.cloth.blobs) * [0, 0, -800] + 3 * [-1000, -\n 1000, -1000, -1000, -3.2] + [0, 0]), high=np.array([len(self.\n trajectory) + 1, self.tensioner.max_displacement, self.\n tensioner.max_displacement, self.tensioner.max_displacement] + \n len(self.simulation.cloth.blobs) * [500, 500, 800] + 3 * [800, \n 800, 800, 800, 3.2] + [600, 600]))\n\n @property\n def action_space(self):\n return Discrete(7)\n\n @property\n def _state(self):\n scissors = self.simulation.mouse.x, self.simulation.mouse.y\n centroids = np.ravel(np.array(self.simulation.cloth.centroids)).tolist(\n )\n if self.original:\n return np.array([self.traj_index] + list(self.tensioner.\n displacement))\n if not self.predict:\n return np.array([self.traj_index] + list(self.tensioner.\n displacement) + centroids)\n next_position3 = [-1000, -1000]\n closest_shape3 = [-1000, -1000]\n angle3 = 0\n next_position2 = [-1000, -1000]\n closest_shape2 = [-1000, -1000]\n angle2 = 0\n next_position = [-1000, -1000]\n closest_shape = [-1000, -1000]\n angle = 0\n if self.traj_index < len(self.trajectory) - 1:\n next_position = [self.trajectory[self.traj_index + 1][0], self.\n trajectory[self.traj_index + 1][1]]\n closest_shape = list(self.simulation.cloth.find_closest_shapept\n (next_position[0], next_position[1]))\n angle = self.simulation.cloth.find_dtheta(scissors[0], scissors\n [1], next_position[0], next_position[1], closest_shape[0],\n closest_shape[1])\n if self.traj_index < len(self.trajectory) - 5:\n next_position2 = [self.trajectory[self.traj_index + 5][0],\n self.trajectory[self.traj_index + 5][1]]\n if np.linalg.norm(np.array(next_position2) - np.array(\n next_position)) < 100:\n closest_shape2 = list(self.simulation.cloth.\n find_closest_shapept(next_position2[0],\n next_position2[1]))\n angle2 = self.simulation.cloth.find_dtheta(next_position\n [0], next_position[1], next_position2[0],\n next_position2[1], closest_shape2[0], closest_shape2[1]\n )\n if self.traj_index < len(self.trajectory) - 10:\n next_position3 = 
[self.trajectory[self.traj_index +\n 10][0], self.trajectory[self.traj_index + 10][1]]\n if np.linalg.norm(np.array(next_position3) - np.\n array(next_position2)) < 100:\n closest_shape3 = list(self.simulation.cloth.\n find_closest_shapept(next_position3[0],\n next_position3[1]))\n angle3 = self.simulation.cloth.find_dtheta(\n next_position2[0], next_position2[1],\n next_position3[0], next_position3[1],\n closest_shape3[0], closest_shape3[1])\n return np.array([self.traj_index] + list(self.tensioner.\n displacement) + centroids + next_position + closest_shape + [\n angle] + next_position2 + closest_shape2 + [angle2] +\n next_position3 + closest_shape3 + [angle3] + list(scissors))\n\n @property\n def _score(self):\n disp = np.linalg.norm(self._state[1])\n score = self.scorer.score(self.simulation.cloth)\n if disp >= self.tensioner.max_displacement - 2:\n score -= 100\n return score\n\n def reset(self):\n self.simulation.reset()\n self.tensioner = self.simulation.pin_position(self.pinx, self.piny,\n self.tensioner.max_displacement)\n self.traj_index = 0\n observation = np.copy(self._state)\n return observation\n\n def step(self, action):\n x, y, z = self.MAPPING[action]\n self.tensioner.tension(x, y, z)\n self.simulation.move_mouse(self.trajectory[self.traj_index][0],\n self.trajectory[self.traj_index][1])\n reward = self.simulation.update() * self.traj_index / 10\n self.traj_index += 1\n self.simulation.move_mouse(self.trajectory[self.traj_index][0],\n self.trajectory[self.traj_index][1])\n reward += self.simulation.update() * self.traj_index / 10\n done = self.traj_index >= len(self.trajectory) - 2\n if done:\n reward = self.simulation.cloth.evaluate()\n else:\n reward = 0\n next_observation = np.copy(self._state)\n self.traj_index += 1\n return Step(observation=next_observation, reward=reward, done=done)\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass PinEnvDiscrete(Env):\n <mask token>\n\n def __init__(self, simulation, x, y, trajectory, scorer=0,\n max_displacement=False, predict=False, original=False, sample=False):\n self.simulation = simulation\n height, width = simulation.cloth.initial_params[0]\n self.os_dim = height * width * 5\n self.simulation.reset()\n self.tensioner = self.simulation.pin_position(x, y, max_displacement)\n self.scorer = Scorer(scorer)\n self.trajectory = trajectory\n self.traj_index = 0\n self.pinx, self.piny = x, y\n self.predict = predict\n self.original = original\n self.sample = sample\n\n @property\n def observation_space(self):\n if self.original:\n return Box(low=np.array([0, -self.tensioner.max_displacement, -\n self.tensioner.max_displacement, -self.tensioner.\n max_displacement]), high=np.array([len(self.trajectory) + 1,\n self.tensioner.max_displacement, self.tensioner.\n max_displacement, self.tensioner.max_displacement]))\n if not self.predict:\n return Box(low=np.array([0, -self.tensioner.max_displacement, -\n self.tensioner.max_displacement, -self.tensioner.\n max_displacement] + len(self.simulation.cloth.blobs) * [0, \n 0, -800]), high=np.array([len(self.trajectory) + 1, self.\n tensioner.max_displacement, self.tensioner.max_displacement,\n self.tensioner.max_displacement] + len(self.simulation.\n cloth.blobs) * [500, 500, 800]))\n return Box(low=np.array([0, -self.tensioner.max_displacement, -self\n .tensioner.max_displacement, -self.tensioner.max_displacement] +\n len(self.simulation.cloth.blobs) * [0, 0, -800] + 3 * [-1000, -\n 1000, -1000, -1000, -3.2] + [0, 0]), high=np.array([len(self.\n trajectory) + 1, self.tensioner.max_displacement, self.\n tensioner.max_displacement, self.tensioner.max_displacement] + \n len(self.simulation.cloth.blobs) * [500, 500, 800] + 3 * [800, \n 800, 800, 800, 3.2] + [600, 600]))\n\n @property\n def action_space(self):\n return Discrete(7)\n\n @property\n def _state(self):\n scissors = self.simulation.mouse.x, self.simulation.mouse.y\n centroids = np.ravel(np.array(self.simulation.cloth.centroids)).tolist(\n )\n if self.original:\n return np.array([self.traj_index] + list(self.tensioner.\n displacement))\n if not self.predict:\n return np.array([self.traj_index] + list(self.tensioner.\n displacement) + centroids)\n next_position3 = [-1000, -1000]\n closest_shape3 = [-1000, -1000]\n angle3 = 0\n next_position2 = [-1000, -1000]\n closest_shape2 = [-1000, -1000]\n angle2 = 0\n next_position = [-1000, -1000]\n closest_shape = [-1000, -1000]\n angle = 0\n if self.traj_index < len(self.trajectory) - 1:\n next_position = [self.trajectory[self.traj_index + 1][0], self.\n trajectory[self.traj_index + 1][1]]\n closest_shape = list(self.simulation.cloth.find_closest_shapept\n (next_position[0], next_position[1]))\n angle = self.simulation.cloth.find_dtheta(scissors[0], scissors\n [1], next_position[0], next_position[1], closest_shape[0],\n closest_shape[1])\n if self.traj_index < len(self.trajectory) - 5:\n next_position2 = [self.trajectory[self.traj_index + 5][0],\n self.trajectory[self.traj_index + 5][1]]\n if np.linalg.norm(np.array(next_position2) - np.array(\n next_position)) < 100:\n closest_shape2 = list(self.simulation.cloth.\n find_closest_shapept(next_position2[0],\n next_position2[1]))\n angle2 = self.simulation.cloth.find_dtheta(next_position\n [0], next_position[1], next_position2[0],\n next_position2[1], closest_shape2[0], closest_shape2[1]\n )\n if self.traj_index < len(self.trajectory) - 10:\n next_position3 = 
[self.trajectory[self.traj_index +\n 10][0], self.trajectory[self.traj_index + 10][1]]\n if np.linalg.norm(np.array(next_position3) - np.\n array(next_position2)) < 100:\n closest_shape3 = list(self.simulation.cloth.\n find_closest_shapept(next_position3[0],\n next_position3[1]))\n angle3 = self.simulation.cloth.find_dtheta(\n next_position2[0], next_position2[1],\n next_position3[0], next_position3[1],\n closest_shape3[0], closest_shape3[1])\n return np.array([self.traj_index] + list(self.tensioner.\n displacement) + centroids + next_position + closest_shape + [\n angle] + next_position2 + closest_shape2 + [angle2] +\n next_position3 + closest_shape3 + [angle3] + list(scissors))\n\n @property\n def _score(self):\n disp = np.linalg.norm(self._state[1])\n score = self.scorer.score(self.simulation.cloth)\n if disp >= self.tensioner.max_displacement - 2:\n score -= 100\n return score\n\n def reset(self):\n self.simulation.reset()\n self.tensioner = self.simulation.pin_position(self.pinx, self.piny,\n self.tensioner.max_displacement)\n self.traj_index = 0\n observation = np.copy(self._state)\n return observation\n\n def step(self, action):\n x, y, z = self.MAPPING[action]\n self.tensioner.tension(x, y, z)\n self.simulation.move_mouse(self.trajectory[self.traj_index][0],\n self.trajectory[self.traj_index][1])\n reward = self.simulation.update() * self.traj_index / 10\n self.traj_index += 1\n self.simulation.move_mouse(self.trajectory[self.traj_index][0],\n self.trajectory[self.traj_index][1])\n reward += self.simulation.update() * self.traj_index / 10\n done = self.traj_index >= len(self.trajectory) - 2\n if done:\n reward = self.simulation.cloth.evaluate()\n else:\n reward = 0\n next_observation = np.copy(self._state)\n self.traj_index += 1\n return Step(observation=next_observation, reward=reward, done=done)\n\n def render(self):\n self.simulation.render_sim()\n",
"step-3": "<mask token>\n\n\nclass PinEnvDiscrete(Env):\n MAPPING = {(0): (0, 0, 0), (1): (1, 0, 0), (2): (0, 1, 0), (3): (0, 0, \n 1), (4): (-1, 0, 0), (5): (0, -1, 0), (6): (0, 0, -1)}\n\n def __init__(self, simulation, x, y, trajectory, scorer=0,\n max_displacement=False, predict=False, original=False, sample=False):\n self.simulation = simulation\n height, width = simulation.cloth.initial_params[0]\n self.os_dim = height * width * 5\n self.simulation.reset()\n self.tensioner = self.simulation.pin_position(x, y, max_displacement)\n self.scorer = Scorer(scorer)\n self.trajectory = trajectory\n self.traj_index = 0\n self.pinx, self.piny = x, y\n self.predict = predict\n self.original = original\n self.sample = sample\n\n @property\n def observation_space(self):\n if self.original:\n return Box(low=np.array([0, -self.tensioner.max_displacement, -\n self.tensioner.max_displacement, -self.tensioner.\n max_displacement]), high=np.array([len(self.trajectory) + 1,\n self.tensioner.max_displacement, self.tensioner.\n max_displacement, self.tensioner.max_displacement]))\n if not self.predict:\n return Box(low=np.array([0, -self.tensioner.max_displacement, -\n self.tensioner.max_displacement, -self.tensioner.\n max_displacement] + len(self.simulation.cloth.blobs) * [0, \n 0, -800]), high=np.array([len(self.trajectory) + 1, self.\n tensioner.max_displacement, self.tensioner.max_displacement,\n self.tensioner.max_displacement] + len(self.simulation.\n cloth.blobs) * [500, 500, 800]))\n return Box(low=np.array([0, -self.tensioner.max_displacement, -self\n .tensioner.max_displacement, -self.tensioner.max_displacement] +\n len(self.simulation.cloth.blobs) * [0, 0, -800] + 3 * [-1000, -\n 1000, -1000, -1000, -3.2] + [0, 0]), high=np.array([len(self.\n trajectory) + 1, self.tensioner.max_displacement, self.\n tensioner.max_displacement, self.tensioner.max_displacement] + \n len(self.simulation.cloth.blobs) * [500, 500, 800] + 3 * [800, \n 800, 800, 800, 3.2] + [600, 600]))\n\n @property\n def action_space(self):\n return Discrete(7)\n\n @property\n def _state(self):\n scissors = self.simulation.mouse.x, self.simulation.mouse.y\n centroids = np.ravel(np.array(self.simulation.cloth.centroids)).tolist(\n )\n if self.original:\n return np.array([self.traj_index] + list(self.tensioner.\n displacement))\n if not self.predict:\n return np.array([self.traj_index] + list(self.tensioner.\n displacement) + centroids)\n next_position3 = [-1000, -1000]\n closest_shape3 = [-1000, -1000]\n angle3 = 0\n next_position2 = [-1000, -1000]\n closest_shape2 = [-1000, -1000]\n angle2 = 0\n next_position = [-1000, -1000]\n closest_shape = [-1000, -1000]\n angle = 0\n if self.traj_index < len(self.trajectory) - 1:\n next_position = [self.trajectory[self.traj_index + 1][0], self.\n trajectory[self.traj_index + 1][1]]\n closest_shape = list(self.simulation.cloth.find_closest_shapept\n (next_position[0], next_position[1]))\n angle = self.simulation.cloth.find_dtheta(scissors[0], scissors\n [1], next_position[0], next_position[1], closest_shape[0],\n closest_shape[1])\n if self.traj_index < len(self.trajectory) - 5:\n next_position2 = [self.trajectory[self.traj_index + 5][0],\n self.trajectory[self.traj_index + 5][1]]\n if np.linalg.norm(np.array(next_position2) - np.array(\n next_position)) < 100:\n closest_shape2 = list(self.simulation.cloth.\n find_closest_shapept(next_position2[0],\n next_position2[1]))\n angle2 = self.simulation.cloth.find_dtheta(next_position\n [0], next_position[1], next_position2[0],\n next_position2[1], 
closest_shape2[0], closest_shape2[1]\n )\n if self.traj_index < len(self.trajectory) - 10:\n next_position3 = [self.trajectory[self.traj_index +\n 10][0], self.trajectory[self.traj_index + 10][1]]\n if np.linalg.norm(np.array(next_position3) - np.\n array(next_position2)) < 100:\n closest_shape3 = list(self.simulation.cloth.\n find_closest_shapept(next_position3[0],\n next_position3[1]))\n angle3 = self.simulation.cloth.find_dtheta(\n next_position2[0], next_position2[1],\n next_position3[0], next_position3[1],\n closest_shape3[0], closest_shape3[1])\n return np.array([self.traj_index] + list(self.tensioner.\n displacement) + centroids + next_position + closest_shape + [\n angle] + next_position2 + closest_shape2 + [angle2] +\n next_position3 + closest_shape3 + [angle3] + list(scissors))\n\n @property\n def _score(self):\n disp = np.linalg.norm(self._state[1])\n score = self.scorer.score(self.simulation.cloth)\n if disp >= self.tensioner.max_displacement - 2:\n score -= 100\n return score\n\n def reset(self):\n self.simulation.reset()\n self.tensioner = self.simulation.pin_position(self.pinx, self.piny,\n self.tensioner.max_displacement)\n self.traj_index = 0\n observation = np.copy(self._state)\n return observation\n\n def step(self, action):\n x, y, z = self.MAPPING[action]\n self.tensioner.tension(x, y, z)\n self.simulation.move_mouse(self.trajectory[self.traj_index][0],\n self.trajectory[self.traj_index][1])\n reward = self.simulation.update() * self.traj_index / 10\n self.traj_index += 1\n self.simulation.move_mouse(self.trajectory[self.traj_index][0],\n self.trajectory[self.traj_index][1])\n reward += self.simulation.update() * self.traj_index / 10\n done = self.traj_index >= len(self.trajectory) - 2\n if done:\n reward = self.simulation.cloth.evaluate()\n else:\n reward = 0\n next_observation = np.copy(self._state)\n self.traj_index += 1\n return Step(observation=next_observation, reward=reward, done=done)\n\n def render(self):\n self.simulation.render_sim()\n",
"step-4": "from rllab.envs.base import Env\nfrom rllab.spaces import Discrete\nfrom rllab.spaces import Box\nfrom rllab.envs.base import Step\nimport numpy as np\nimport sys, pickle, os\nsys.path.append(os.path.dirname(os.getcwd()))\nfrom os.path import dirname\nsys.path.append(dirname(dirname(dirname(os.getcwd()))))\nfrom simulation import *\nfrom scorer import *\nfrom shapecloth import *\nfrom tensioner import *\n<mask token>\n\n\nclass PinEnvDiscrete(Env):\n MAPPING = {(0): (0, 0, 0), (1): (1, 0, 0), (2): (0, 1, 0), (3): (0, 0, \n 1), (4): (-1, 0, 0), (5): (0, -1, 0), (6): (0, 0, -1)}\n\n def __init__(self, simulation, x, y, trajectory, scorer=0,\n max_displacement=False, predict=False, original=False, sample=False):\n self.simulation = simulation\n height, width = simulation.cloth.initial_params[0]\n self.os_dim = height * width * 5\n self.simulation.reset()\n self.tensioner = self.simulation.pin_position(x, y, max_displacement)\n self.scorer = Scorer(scorer)\n self.trajectory = trajectory\n self.traj_index = 0\n self.pinx, self.piny = x, y\n self.predict = predict\n self.original = original\n self.sample = sample\n\n @property\n def observation_space(self):\n if self.original:\n return Box(low=np.array([0, -self.tensioner.max_displacement, -\n self.tensioner.max_displacement, -self.tensioner.\n max_displacement]), high=np.array([len(self.trajectory) + 1,\n self.tensioner.max_displacement, self.tensioner.\n max_displacement, self.tensioner.max_displacement]))\n if not self.predict:\n return Box(low=np.array([0, -self.tensioner.max_displacement, -\n self.tensioner.max_displacement, -self.tensioner.\n max_displacement] + len(self.simulation.cloth.blobs) * [0, \n 0, -800]), high=np.array([len(self.trajectory) + 1, self.\n tensioner.max_displacement, self.tensioner.max_displacement,\n self.tensioner.max_displacement] + len(self.simulation.\n cloth.blobs) * [500, 500, 800]))\n return Box(low=np.array([0, -self.tensioner.max_displacement, -self\n .tensioner.max_displacement, -self.tensioner.max_displacement] +\n len(self.simulation.cloth.blobs) * [0, 0, -800] + 3 * [-1000, -\n 1000, -1000, -1000, -3.2] + [0, 0]), high=np.array([len(self.\n trajectory) + 1, self.tensioner.max_displacement, self.\n tensioner.max_displacement, self.tensioner.max_displacement] + \n len(self.simulation.cloth.blobs) * [500, 500, 800] + 3 * [800, \n 800, 800, 800, 3.2] + [600, 600]))\n\n @property\n def action_space(self):\n return Discrete(7)\n\n @property\n def _state(self):\n scissors = self.simulation.mouse.x, self.simulation.mouse.y\n centroids = np.ravel(np.array(self.simulation.cloth.centroids)).tolist(\n )\n if self.original:\n return np.array([self.traj_index] + list(self.tensioner.\n displacement))\n if not self.predict:\n return np.array([self.traj_index] + list(self.tensioner.\n displacement) + centroids)\n next_position3 = [-1000, -1000]\n closest_shape3 = [-1000, -1000]\n angle3 = 0\n next_position2 = [-1000, -1000]\n closest_shape2 = [-1000, -1000]\n angle2 = 0\n next_position = [-1000, -1000]\n closest_shape = [-1000, -1000]\n angle = 0\n if self.traj_index < len(self.trajectory) - 1:\n next_position = [self.trajectory[self.traj_index + 1][0], self.\n trajectory[self.traj_index + 1][1]]\n closest_shape = list(self.simulation.cloth.find_closest_shapept\n (next_position[0], next_position[1]))\n angle = self.simulation.cloth.find_dtheta(scissors[0], scissors\n [1], next_position[0], next_position[1], closest_shape[0],\n closest_shape[1])\n if self.traj_index < len(self.trajectory) - 5:\n next_position2 
= [self.trajectory[self.traj_index + 5][0],\n self.trajectory[self.traj_index + 5][1]]\n if np.linalg.norm(np.array(next_position2) - np.array(\n next_position)) < 100:\n closest_shape2 = list(self.simulation.cloth.\n find_closest_shapept(next_position2[0],\n next_position2[1]))\n angle2 = self.simulation.cloth.find_dtheta(next_position\n [0], next_position[1], next_position2[0],\n next_position2[1], closest_shape2[0], closest_shape2[1]\n )\n if self.traj_index < len(self.trajectory) - 10:\n next_position3 = [self.trajectory[self.traj_index +\n 10][0], self.trajectory[self.traj_index + 10][1]]\n if np.linalg.norm(np.array(next_position3) - np.\n array(next_position2)) < 100:\n closest_shape3 = list(self.simulation.cloth.\n find_closest_shapept(next_position3[0],\n next_position3[1]))\n angle3 = self.simulation.cloth.find_dtheta(\n next_position2[0], next_position2[1],\n next_position3[0], next_position3[1],\n closest_shape3[0], closest_shape3[1])\n return np.array([self.traj_index] + list(self.tensioner.\n displacement) + centroids + next_position + closest_shape + [\n angle] + next_position2 + closest_shape2 + [angle2] +\n next_position3 + closest_shape3 + [angle3] + list(scissors))\n\n @property\n def _score(self):\n disp = np.linalg.norm(self._state[1])\n score = self.scorer.score(self.simulation.cloth)\n if disp >= self.tensioner.max_displacement - 2:\n score -= 100\n return score\n\n def reset(self):\n self.simulation.reset()\n self.tensioner = self.simulation.pin_position(self.pinx, self.piny,\n self.tensioner.max_displacement)\n self.traj_index = 0\n observation = np.copy(self._state)\n return observation\n\n def step(self, action):\n x, y, z = self.MAPPING[action]\n self.tensioner.tension(x, y, z)\n self.simulation.move_mouse(self.trajectory[self.traj_index][0],\n self.trajectory[self.traj_index][1])\n reward = self.simulation.update() * self.traj_index / 10\n self.traj_index += 1\n self.simulation.move_mouse(self.trajectory[self.traj_index][0],\n self.trajectory[self.traj_index][1])\n reward += self.simulation.update() * self.traj_index / 10\n done = self.traj_index >= len(self.trajectory) - 2\n if done:\n reward = self.simulation.cloth.evaluate()\n else:\n reward = 0\n next_observation = np.copy(self._state)\n self.traj_index += 1\n return Step(observation=next_observation, reward=reward, done=done)\n\n def render(self):\n self.simulation.render_sim()\n",
"step-5": "from rllab.envs.base import Env\nfrom rllab.spaces import Discrete\nfrom rllab.spaces import Box\nfrom rllab.envs.base import Step\nimport numpy as np\nimport sys, pickle, os\nsys.path.append(os.path.dirname(os.getcwd()))\nfrom os.path import dirname\nsys.path.append(dirname(dirname(dirname(os.getcwd()))))\nfrom simulation import *\nfrom scorer import *\nfrom shapecloth import *\nfrom tensioner import *\n\n\"\"\"\nA Rllab Environment for the tensioning policy experiments.\n\"\"\"\n\n\nclass PinEnvDiscrete(Env):\n\n MAPPING = {\n 0 : (0,0,0),\n 1 : (1,0,0),\n 2 : (0,1,0),\n 3 : (0,0,1),\n 4 : (-1,0,0),\n 5 : (0,-1,0),\n 6 : (0,0,-1)\n }\n\n def __init__(self, simulation, x, y, trajectory, scorer=0, max_displacement=False, predict=False, original=False, sample=False):\n self.simulation = simulation\n height, width = simulation.cloth.initial_params[0]\n self.os_dim = height * width * 5\n self.simulation.reset()\n self.tensioner = self.simulation.pin_position(x, y, max_displacement)\n self.scorer = Scorer(scorer)\n self.trajectory = trajectory\n self.traj_index = 0\n self.pinx, self.piny = x, y\n self.predict = predict\n self.original = original\n self.sample = sample\n\n @property\n def observation_space(self):\n if self.original:\n return Box(low=np.array([0, -self.tensioner.max_displacement, -self.tensioner.max_displacement, -self.tensioner.max_displacement]),\n high=np.array([len(self.trajectory) + 1, self.tensioner.max_displacement, self.tensioner.max_displacement, self.tensioner.max_displacement]))\n if not self.predict:\n return Box(low=np.array([0, -self.tensioner.max_displacement, -self.tensioner.max_displacement, -self.tensioner.max_displacement] + len(self.simulation.cloth.blobs) * [0, 0, -800]),\n high=np.array([len(self.trajectory) + 1, self.tensioner.max_displacement, self.tensioner.max_displacement, self.tensioner.max_displacement]\n + len(self.simulation.cloth.blobs) * [500, 500, 800]))\n return Box(low=np.array([0, -self.tensioner.max_displacement, -self.tensioner.max_displacement, -self.tensioner.max_displacement] + len(self.simulation.cloth.blobs) * [0, 0, -800] + 3 * [-1000, -1000, -1000, -1000, -3.2] + [0, 0]),\n high=np.array([len(self.trajectory) + 1, self.tensioner.max_displacement, self.tensioner.max_displacement, self.tensioner.max_displacement]\n + len(self.simulation.cloth.blobs) * [500, 500, 800] + 3 * [800, 800, 800, 800, 3.2] + [600, 600]))\n\n @property\n def action_space(self):\n return Discrete(7)\n\n\n @property\n def _state(self):\n scissors = self.simulation.mouse.x, self.simulation.mouse.y\n centroids = np.ravel(np.array(self.simulation.cloth.centroids)).tolist()\n if self.original:\n return np.array([self.traj_index] + list(self.tensioner.displacement))\n if not self.predict:\n return np.array([self.traj_index] + list(self.tensioner.displacement) + centroids)\n next_position3 = [-1000, -1000]\n closest_shape3 = [-1000, -1000]\n angle3 = 0\n next_position2 = [-1000, -1000]\n closest_shape2 = [-1000, -1000]\n angle2 = 0\n next_position = [-1000, -1000]\n closest_shape = [-1000, -1000]\n angle = 0\n if self.traj_index < len(self.trajectory) - 1:\n next_position = [self.trajectory[self.traj_index+1][0], self.trajectory[self.traj_index+1][1]]\n closest_shape = list(self.simulation.cloth.find_closest_shapept(next_position[0], next_position[1]))\n angle = self.simulation.cloth.find_dtheta(scissors[0], scissors[1], next_position[0], next_position[1], closest_shape[0], closest_shape[1])\n if self.traj_index < len(self.trajectory) - 5:\n next_position2 = 
[self.trajectory[self.traj_index+5][0], self.trajectory[self.traj_index+5][1]]\n if np.linalg.norm(np.array(next_position2) - np.array(next_position)) < 100:\n closest_shape2 = list(self.simulation.cloth.find_closest_shapept(next_position2[0], next_position2[1]))\n angle2 = self.simulation.cloth.find_dtheta(next_position[0], next_position[1], next_position2[0], next_position2[1], closest_shape2[0], closest_shape2[1])\n if self.traj_index < len(self.trajectory) - 10:\n next_position3 = [self.trajectory[self.traj_index+10][0], self.trajectory[self.traj_index+10][1]]\n if np.linalg.norm(np.array(next_position3) - np.array(next_position2)) < 100:\n closest_shape3 = list(self.simulation.cloth.find_closest_shapept(next_position3[0], next_position3[1]))\n angle3 = self.simulation.cloth.find_dtheta(next_position2[0], next_position2[1], next_position3[0], next_position3[1], closest_shape3[0], closest_shape3[1])\n return np.array([self.traj_index] + list(self.tensioner.displacement) + centroids + next_position + closest_shape + [angle] + next_position2 + closest_shape2 + [angle2]\n + next_position3 + closest_shape3 + [angle3] + list(scissors))\n \n @property\n def _score(self):\n disp = np.linalg.norm(self._state[1])\n score = self.scorer.score(self.simulation.cloth)\n if disp >= self.tensioner.max_displacement - 2:\n score -= 100\n return score\n\n\n def reset(self):\n self.simulation.reset()\n self.tensioner = self.simulation.pin_position(self.pinx, self.piny, self.tensioner.max_displacement)\n self.traj_index = 0\n observation = np.copy(self._state)\n return observation\n\n def step(self, action):\n x, y, z = self.MAPPING[action]\n self.tensioner.tension(x, y, z)\n self.simulation.move_mouse(self.trajectory[self.traj_index][0], self.trajectory[self.traj_index][1])\n reward = self.simulation.update() * self.traj_index/10\n self.traj_index += 1\n self.simulation.move_mouse(self.trajectory[self.traj_index][0], self.trajectory[self.traj_index][1])\n reward += self.simulation.update() * self.traj_index/10\n done = self.traj_index >= len(self.trajectory) - 2\n if done:\n reward = self.simulation.cloth.evaluate()\n else:\n reward = 0\n next_observation = np.copy(self._state)\n self.traj_index += 1\n return Step(observation=next_observation, reward=reward, done=done)\n\n def render(self):\n self.simulation.render_sim()\n\n # def local_angles(self, n=5):\n # if self.\n # for i in range(n):\n\n",
"step-ids": [
8,
9,
10,
12,
13
]
}
|
[
8,
9,
10,
12,
13
] |
<|reserved_special_token_0|>
class ebiz_supplier_account_create(osv.osv_memory):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def create_supplier_action(self, cr, uid, ids, context=None):
active_ids = context.get('active_ids', False)
supplier_ids = self.pool['ebiz.supplier.account.line'
].create_ebiz_supplier_account_line(cr, uid, active_ids,
context=context)
return {'view_type': 'form', 'view_mode': 'tree', 'res_model':
'ebiz.supplier.account.line', 'type': 'ir.actions.act_window',
'domain': [('id', 'in', supplier_ids or [0])]}
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class ebiz_supplier_account_create(osv.osv_memory):
_name = 'ebiz.supplier.account.create.wizard'
_description = 'Ebiz Supplier Account'
def create_supplier_action(self, cr, uid, ids, context=None):
active_ids = context.get('active_ids', False)
supplier_ids = self.pool['ebiz.supplier.account.line'
].create_ebiz_supplier_account_line(cr, uid, active_ids,
context=context)
return {'view_type': 'form', 'view_mode': 'tree', 'res_model':
'ebiz.supplier.account.line', 'type': 'ir.actions.act_window',
'domain': [('id', 'in', supplier_ids or [0])]}
ebiz_supplier_account_create()
<|reserved_special_token_1|>
<|reserved_special_token_0|>
logger = logging.getLogger(__name__)
class ebiz_supplier_account_create(osv.osv_memory):
_name = 'ebiz.supplier.account.create.wizard'
_description = 'Ebiz Supplier Account'
def create_supplier_action(self, cr, uid, ids, context=None):
active_ids = context.get('active_ids', False)
supplier_ids = self.pool['ebiz.supplier.account.line'
].create_ebiz_supplier_account_line(cr, uid, active_ids,
context=context)
return {'view_type': 'form', 'view_mode': 'tree', 'res_model':
'ebiz.supplier.account.line', 'type': 'ir.actions.act_window',
'domain': [('id', 'in', supplier_ids or [0])]}
ebiz_supplier_account_create()
<|reserved_special_token_1|>
import time
from openerp.osv import osv, fields
import logging
import openerp.addons.decimal_precision as dp
logger = logging.getLogger(__name__)
class ebiz_supplier_account_create(osv.osv_memory):
_name = 'ebiz.supplier.account.create.wizard'
_description = 'Ebiz Supplier Account'
def create_supplier_action(self, cr, uid, ids, context=None):
active_ids = context.get('active_ids', False)
supplier_ids = self.pool['ebiz.supplier.account.line'
].create_ebiz_supplier_account_line(cr, uid, active_ids,
context=context)
return {'view_type': 'form', 'view_mode': 'tree', 'res_model':
'ebiz.supplier.account.line', 'type': 'ir.actions.act_window',
'domain': [('id', 'in', supplier_ids or [0])]}
ebiz_supplier_account_create()
<|reserved_special_token_1|>
# -*- coding: utf-8 -*- #
import time
from openerp.osv import osv, fields
import logging
import openerp.addons.decimal_precision as dp
logger = logging.getLogger(__name__)
class ebiz_supplier_account_create(osv.osv_memory):
_name = 'ebiz.supplier.account.create.wizard'
_description = "Ebiz Supplier Account"
def create_supplier_action(self, cr, uid, ids, context=None):
active_ids = context.get('active_ids',False)
supplier_ids = self.pool['ebiz.supplier.account.line'].create_ebiz_supplier_account_line(cr, uid, active_ids, context=context)
return {
'view_type': 'form',
'view_mode': 'tree',
'res_model': 'ebiz.supplier.account.line',
'type': 'ir.actions.act_window',
'domain':[('id','in',supplier_ids or [0])],
}
ebiz_supplier_account_create()
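
# --- Illustrative stand-in (not part of the original addon) ---
# The wizard above delegates to an `ebiz.supplier.account.line` model defined
# elsewhere in the module. A minimal stand-in exposing the method the wizard
# calls could look like this; the column and the creation logic are assumptions
# for illustration only and must not be installed next to the real model.
class ebiz_supplier_account_line_sketch(osv.osv_memory):
    _name = 'ebiz.supplier.account.line'
    _description = 'Ebiz Supplier Account Line (illustrative sketch)'
    _columns = {
        'name': fields.char('Supplier Account', size=64),
    }

    def create_ebiz_supplier_account_line(self, cr, uid, active_ids, context=None):
        # Create one line per selected record and return the new ids,
        # which the wizard then shows through its act_window domain.
        line_ids = []
        for res_id in active_ids or []:
            line_ids.append(self.create(cr, uid, {'name': 'Supplier %s' % res_id}, context=context))
        return line_ids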
|
flexible
|
{
"blob_id": "309f8016dfebcc3595291b127edb4634f72298ec",
"index": 4387,
"step-1": "<mask token>\n\n\nclass ebiz_supplier_account_create(osv.osv_memory):\n <mask token>\n <mask token>\n\n def create_supplier_action(self, cr, uid, ids, context=None):\n active_ids = context.get('active_ids', False)\n supplier_ids = self.pool['ebiz.supplier.account.line'\n ].create_ebiz_supplier_account_line(cr, uid, active_ids,\n context=context)\n return {'view_type': 'form', 'view_mode': 'tree', 'res_model':\n 'ebiz.supplier.account.line', 'type': 'ir.actions.act_window',\n 'domain': [('id', 'in', supplier_ids or [0])]}\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass ebiz_supplier_account_create(osv.osv_memory):\n _name = 'ebiz.supplier.account.create.wizard'\n _description = 'Ebiz Supplier Account'\n\n def create_supplier_action(self, cr, uid, ids, context=None):\n active_ids = context.get('active_ids', False)\n supplier_ids = self.pool['ebiz.supplier.account.line'\n ].create_ebiz_supplier_account_line(cr, uid, active_ids,\n context=context)\n return {'view_type': 'form', 'view_mode': 'tree', 'res_model':\n 'ebiz.supplier.account.line', 'type': 'ir.actions.act_window',\n 'domain': [('id', 'in', supplier_ids or [0])]}\n\n\nebiz_supplier_account_create()\n",
"step-3": "<mask token>\nlogger = logging.getLogger(__name__)\n\n\nclass ebiz_supplier_account_create(osv.osv_memory):\n _name = 'ebiz.supplier.account.create.wizard'\n _description = 'Ebiz Supplier Account'\n\n def create_supplier_action(self, cr, uid, ids, context=None):\n active_ids = context.get('active_ids', False)\n supplier_ids = self.pool['ebiz.supplier.account.line'\n ].create_ebiz_supplier_account_line(cr, uid, active_ids,\n context=context)\n return {'view_type': 'form', 'view_mode': 'tree', 'res_model':\n 'ebiz.supplier.account.line', 'type': 'ir.actions.act_window',\n 'domain': [('id', 'in', supplier_ids or [0])]}\n\n\nebiz_supplier_account_create()\n",
"step-4": "import time\nfrom openerp.osv import osv, fields\nimport logging\nimport openerp.addons.decimal_precision as dp\nlogger = logging.getLogger(__name__)\n\n\nclass ebiz_supplier_account_create(osv.osv_memory):\n _name = 'ebiz.supplier.account.create.wizard'\n _description = 'Ebiz Supplier Account'\n\n def create_supplier_action(self, cr, uid, ids, context=None):\n active_ids = context.get('active_ids', False)\n supplier_ids = self.pool['ebiz.supplier.account.line'\n ].create_ebiz_supplier_account_line(cr, uid, active_ids,\n context=context)\n return {'view_type': 'form', 'view_mode': 'tree', 'res_model':\n 'ebiz.supplier.account.line', 'type': 'ir.actions.act_window',\n 'domain': [('id', 'in', supplier_ids or [0])]}\n\n\nebiz_supplier_account_create()\n",
"step-5": "# -*- coding: utf-8 -*- #\nimport time\nfrom openerp.osv import osv, fields\nimport logging\nimport openerp.addons.decimal_precision as dp\n\nlogger = logging.getLogger(__name__)\n\nclass ebiz_supplier_account_create(osv.osv_memory):\n _name = 'ebiz.supplier.account.create.wizard'\n _description = \"Ebiz Supplier Account\"\n\n def create_supplier_action(self, cr, uid, ids, context=None):\n active_ids = context.get('active_ids',False)\n supplier_ids = self.pool['ebiz.supplier.account.line'].create_ebiz_supplier_account_line(cr, uid, active_ids, context=context)\n return {\n 'view_type': 'form',\n 'view_mode': 'tree',\n 'res_model': 'ebiz.supplier.account.line',\n 'type': 'ir.actions.act_window',\n 'domain':[('id','in',supplier_ids or [0])],\n }\nebiz_supplier_account_create()\n",
"step-ids": [
2,
4,
5,
6,
7
]
}
|
[
2,
4,
5,
6,
7
] |
import os
from linkedin_scraper import get_jobs
chrome_driver_path = os.path.join(os.path.abspath(os.getcwd()), 'chromedriver')
df = get_jobs('Data Scientist', 40, False, chrome_driver_path)
df.to_csv('linkedin_jobs.csv', index=False)
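
# --- Illustrative variant (not part of the original script) ---
# Assumption: get_jobs keeps the signature used above; everything else is just
# a defensive wrapper that fails early when the chromedriver binary is missing.
def scrape_jobs(keyword='Data Scientist', num_jobs=40, verbose=False):
    driver_path = os.path.join(os.path.abspath(os.getcwd()), 'chromedriver')
    if not os.path.isfile(driver_path):
        raise FileNotFoundError('chromedriver not found at %s' % driver_path)
    jobs = get_jobs(keyword, num_jobs, verbose, driver_path)
    jobs.to_csv('linkedin_jobs.csv', index=False)
    return jobs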
|
normal
|
{
"blob_id": "6ae529a5e5658ba409ec3e7284d8b2911c60dd00",
"index": 906,
"step-1": "<mask token>\n",
"step-2": "<mask token>\ndf.to_csv('linkedin_jobs.csv', index=False)\n",
"step-3": "<mask token>\nchrome_driver_path = os.path.join(os.path.abspath(os.getcwd()), 'chromedriver')\ndf = get_jobs('Data Scientist', 40, False, chrome_driver_path)\ndf.to_csv('linkedin_jobs.csv', index=False)\n",
"step-4": "import os\nfrom linkedin_scraper import get_jobs\nchrome_driver_path = os.path.join(os.path.abspath(os.getcwd()), 'chromedriver')\ndf = get_jobs('Data Scientist', 40, False, chrome_driver_path)\ndf.to_csv('linkedin_jobs.csv', index=False)\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
import time
import random
PATH = "C:\\Program Files (x86)\\chromedriver.exe"
destination = "https://news.ycombinator.com/"
class hackernewsUpvoter():
def __init__(self, username, password, website):
self.driver = webdriver.Chrome(PATH)
self.username = username
self.password = password
self.website = website
def sign_in(self, login_page="https://news.ycombinator.com/login"):
# Go to hackernews's website
self.driver.get(login_page)
time.sleep(2)
# Enter username
account = self.driver.find_element_by_name('acct')
account.send_keys(self.username)
# Enter password
password = self.driver.find_element_by_name('pw')
password.send_keys(self.password)
time.sleep(random.randrange(11,35)/10)
# Click enter key
password.send_keys(Keys.RETURN)
def upvoter(self):
upvoteButtons = self.driver.find_elements_by_class_name("votearrow")
# Click every upvote buttons in the page
for button in upvoteButtons:
try:
button.click()
time.sleep(1)
            except Exception:
                # Some rows have no clickable arrow or it disappears once voted.
                print("The upvote button wasn't clickable")
def goto_page(self, page):
self.driver.get("https://news.ycombinator.com/news?p={}".format(page))
def next_page(self):
more = self.driver.find_elements_by_class_name("morelink")
more[0].click()
bot = hackernewsUpvoter(input(), input(), destination)
bot.sign_in()
for i in range(3,5):
bot.upvoter()
bot.goto_page(i)
time.sleep(random.randrange(300,500)/100)
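
# --- Illustrative variant (not part of the original script) ---
# The same login flow written with Selenium's explicit waits instead of fixed
# sleeps. WebDriverWait / expected_conditions / By are standard Selenium APIs;
# the element names ('acct', 'pw') are the ones already used above.
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.by import By

def sign_in_with_waits(bot, login_page="https://news.ycombinator.com/login", timeout=10):
    bot.driver.get(login_page)
    wait = WebDriverWait(bot.driver, timeout)
    account = wait.until(EC.presence_of_element_located((By.NAME, "acct")))
    account.send_keys(bot.username)
    password = bot.driver.find_element_by_name("pw")
    password.send_keys(bot.password)
    password.send_keys(Keys.RETURN)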
|
normal
|
{
"blob_id": "742b655ee6aad2575f67e7329ed7a14c4fb6aa06",
"index": 7242,
"step-1": "<mask token>\n\n\nclass hackernewsUpvoter:\n <mask token>\n\n def sign_in(self, login_page='https://news.ycombinator.com/login'):\n self.driver.get(login_page)\n time.sleep(2)\n account = self.driver.find_element_by_name('acct')\n account.send_keys(self.username)\n password = self.driver.find_element_by_name('pw')\n password.send_keys(self.password)\n time.sleep(random.randrange(11, 35) / 10)\n password.send_keys(Keys.RETURN)\n <mask token>\n\n def goto_page(self, page):\n self.driver.get('https://news.ycombinator.com/news?p={}'.format(page))\n\n def next_page(self):\n more = self.driver.find_elements_by_class_name('morelink')\n more[0].click()\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass hackernewsUpvoter:\n\n def __init__(self, username, password, website):\n self.driver = webdriver.Chrome(PATH)\n self.username = username\n self.password = password\n self.website = website\n\n def sign_in(self, login_page='https://news.ycombinator.com/login'):\n self.driver.get(login_page)\n time.sleep(2)\n account = self.driver.find_element_by_name('acct')\n account.send_keys(self.username)\n password = self.driver.find_element_by_name('pw')\n password.send_keys(self.password)\n time.sleep(random.randrange(11, 35) / 10)\n password.send_keys(Keys.RETURN)\n <mask token>\n\n def goto_page(self, page):\n self.driver.get('https://news.ycombinator.com/news?p={}'.format(page))\n\n def next_page(self):\n more = self.driver.find_elements_by_class_name('morelink')\n more[0].click()\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass hackernewsUpvoter:\n\n def __init__(self, username, password, website):\n self.driver = webdriver.Chrome(PATH)\n self.username = username\n self.password = password\n self.website = website\n\n def sign_in(self, login_page='https://news.ycombinator.com/login'):\n self.driver.get(login_page)\n time.sleep(2)\n account = self.driver.find_element_by_name('acct')\n account.send_keys(self.username)\n password = self.driver.find_element_by_name('pw')\n password.send_keys(self.password)\n time.sleep(random.randrange(11, 35) / 10)\n password.send_keys(Keys.RETURN)\n\n def upvoter(self):\n upvoteButtons = self.driver.find_elements_by_class_name('votearrow')\n for button in upvoteButtons:\n try:\n button.click()\n time.sleep(1)\n except:\n print(\"The upvote button wasn't clickable\")\n pass\n\n def goto_page(self, page):\n self.driver.get('https://news.ycombinator.com/news?p={}'.format(page))\n\n def next_page(self):\n more = self.driver.find_elements_by_class_name('morelink')\n more[0].click()\n\n\n<mask token>\nbot.sign_in()\nfor i in range(3, 5):\n bot.upvoter()\n bot.goto_page(i)\n time.sleep(random.randrange(300, 500) / 100)\n",
"step-4": "<mask token>\nPATH = 'C:\\\\Program Files (x86)\\\\chromedriver.exe'\ndestination = 'https://news.ycombinator.com/'\n\n\nclass hackernewsUpvoter:\n\n def __init__(self, username, password, website):\n self.driver = webdriver.Chrome(PATH)\n self.username = username\n self.password = password\n self.website = website\n\n def sign_in(self, login_page='https://news.ycombinator.com/login'):\n self.driver.get(login_page)\n time.sleep(2)\n account = self.driver.find_element_by_name('acct')\n account.send_keys(self.username)\n password = self.driver.find_element_by_name('pw')\n password.send_keys(self.password)\n time.sleep(random.randrange(11, 35) / 10)\n password.send_keys(Keys.RETURN)\n\n def upvoter(self):\n upvoteButtons = self.driver.find_elements_by_class_name('votearrow')\n for button in upvoteButtons:\n try:\n button.click()\n time.sleep(1)\n except:\n print(\"The upvote button wasn't clickable\")\n pass\n\n def goto_page(self, page):\n self.driver.get('https://news.ycombinator.com/news?p={}'.format(page))\n\n def next_page(self):\n more = self.driver.find_elements_by_class_name('morelink')\n more[0].click()\n\n\nbot = hackernewsUpvoter(input(), input(), destination)\nbot.sign_in()\nfor i in range(3, 5):\n bot.upvoter()\n bot.goto_page(i)\n time.sleep(random.randrange(300, 500) / 100)\n",
"step-5": "from selenium import webdriver\nfrom selenium.webdriver.common.keys import Keys\nimport time\nimport random\n\nPATH = \"C:\\\\Program Files (x86)\\\\chromedriver.exe\"\ndestination = \"https://news.ycombinator.com/\"\n\nclass hackernewsUpvoter():\n def __init__(self, username, password, website):\n self.driver = webdriver.Chrome(PATH) \n self.username = username\n self.password = password\n self.website = website\n\n def sign_in(self, login_page=\"https://news.ycombinator.com/login\"):\n # Go to hackernews's website\n self.driver.get(login_page)\n time.sleep(2)\n\n # Enter username \n account = self.driver.find_element_by_name('acct')\n account.send_keys(self.username)\n\n # Enter password\n password = self.driver.find_element_by_name('pw')\n password.send_keys(self.password)\n time.sleep(random.randrange(11,35)/10)\n\n # Click enter key\n password.send_keys(Keys.RETURN)\n \n def upvoter(self):\n upvoteButtons = self.driver.find_elements_by_class_name(\"votearrow\")\n\n # Click every upvote buttons in the page \n for button in upvoteButtons:\n try: \n button.click()\n time.sleep(1)\n except: \n print(\"The upvote button wasn't clickable\")\n pass\n \n def goto_page(self, page):\n self.driver.get(\"https://news.ycombinator.com/news?p={}\".format(page))\n\n def next_page(self):\n more = self.driver.find_elements_by_class_name(\"morelink\")\n more[0].click()\n\nbot = hackernewsUpvoter(input(), input(), destination)\nbot.sign_in()\n\nfor i in range(3,5):\n bot.upvoter() \n bot.goto_page(i)\n time.sleep(random.randrange(300,500)/100)\n\n\n\n",
"step-ids": [
4,
5,
7,
8,
10
]
}
|
[
4,
5,
7,
8,
10
] |
# This is a sample Python script.
# Press Shift+F10 to execute it or replace it with your code.
# Press Double Shift to search everywhere for classes, files, tool windows, actions, and settings.
from PyQt5 import QtWidgets, uic
import sys
import pymysql
import mysql.connector
class Ui_Login(QtWidgets.QDialog):
def __init__(self):
super(Ui_Login, self).__init__()
uic.loadUi('login.ui', self)
self.icon = self.findChild(QtWidgets.QLabel, 'ilogin')
self.icon.setStyleSheet("image: url(sorce/roundicon.png)")
self.inputUsername = self.findChild(QtWidgets.QLineEdit, 'username')
self.inputPassword = self.findChild(QtWidgets.QLineEdit, 'password')
self.daftarButton = self.findChild(QtWidgets.QPushButton, 'daftarBtn')
self.daftarButton.clicked.connect(self.forDaftar)
self.loginButton = self.findChild(QtWidgets.QPushButton, 'login_2')
self.loginButton.clicked.connect(self.testButton)
self.show()
def testButton(self):
user = self.inputUsername.text()
pw = self.inputPassword.text()
con = pymysql.connect(db='bookingfutsal',
user='root',
passwd='',
host='localhost',
port=3306,
autocommit=True)
cur = con.cursor()
sql = "SELECT * FROM admin WHERE username=%s AND password=%s"
data = cur.execute(sql, (user, pw))
if(len(cur.fetchall()) > 0):
self.close()
super(Ui_Login, self).__init__()
uic.loadUi('booking.ui', self)
self.gambar = self.findChild(QtWidgets.QLabel, 'piclap')
self.gambar.setStyleSheet("background-image: url(sorce/lp2.jpg)")
self.bNamaPembayar = self.findChild(QtWidgets.QLineEdit, 'namapembayar')
self.bNominalDp = self.findChild(QtWidgets.QLineEdit, 'nominaldp')
self.bBooking = self.findChild(QtWidgets.QPushButton, 'booking')
self.bBooking.clicked.connect(self.bookingFunc)
self.show()
def forDaftar(self):
self.close()
super(Ui_Login, self).__init__()
uic.loadUi('daftar.ui', self)
self.dUsername = self.findChild(QtWidgets.QLineEdit, 'username')
self.dPassword = self.findChild(QtWidgets.QLineEdit, 'password')
self.dAlamat = self.findChild(QtWidgets.QLineEdit, 'alamat')
self.dNoTelpU = self.findChild(QtWidgets.QLineEdit, 'notelepon')
self.dDaftarButton = self.findChild(QtWidgets.QPushButton, 'daftar')
self.dDaftarButton.clicked.connect(self.daftarFunc)
self.show()
def daftarFunc(self):
user = self.dUsername.text()
pw = self.dPassword.text()
con = pymysql.connect(db='bookingfutsal',
user='root',
passwd='',
host='localhost',
port=3306,
autocommit=True)
cur = con.cursor()
insert = (user, pw)
sql = "INSERT INTO admin (username, password) VALUES" + str(insert)
data = cur.execute(sql)
self.close()
        self.__init__()
# booking.Ui_Booking().Boking()
# koneksi.Koneksi()
def bookingFunc(self):
nama = self.bNamaPembayar.text()
nominal = self.bNominalDp.text()
con = pymysql.connect(db='bookingfutsal',
user='root',
passwd='',
host='localhost',
port=3306,
autocommit=True)
cur = con.cursor()
insert = (nama, nominal)
sql = "INSERT INTO pembayaran (atasNama, namaPembayaran) VALUES" + str(insert)
data = cur.execute(sql)
app = QtWidgets.QApplication(sys.argv)
window = Ui_Login()
app.exec_()
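
# --- Illustrative variant (not part of the original script) ---
# The INSERT statements above build SQL by concatenating str(tuple); the same
# insert written with DB-API parameter binding avoids quoting problems and SQL
# injection. Table and column names are the ones already used above.
def insert_admin(username, password):
    con = pymysql.connect(db='bookingfutsal', user='root', passwd='',
                          host='localhost', port=3306, autocommit=True)
    try:
        cur = con.cursor()
        cur.execute("INSERT INTO admin (username, password) VALUES (%s, %s)",
                    (username, password))
    finally:
        con.close()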
|
normal
|
{
"blob_id": "0ff6e22f8704a0c6c0ffff3c53761b9d3a531b6d",
"index": 683,
"step-1": "<mask token>\n\n\nclass Ui_Login(QtWidgets.QDialog):\n\n def __init__(self):\n super(Ui_Login, self).__init__()\n uic.loadUi('login.ui', self)\n self.icon = self.findChild(QtWidgets.QLabel, 'ilogin')\n self.icon.setStyleSheet('image: url(sorce/roundicon.png)')\n self.inputUsername = self.findChild(QtWidgets.QLineEdit, 'username')\n self.inputPassword = self.findChild(QtWidgets.QLineEdit, 'password')\n self.daftarButton = self.findChild(QtWidgets.QPushButton, 'daftarBtn')\n self.daftarButton.clicked.connect(self.forDaftar)\n self.loginButton = self.findChild(QtWidgets.QPushButton, 'login_2')\n self.loginButton.clicked.connect(self.testButton)\n self.show()\n\n def testButton(self):\n user = self.inputUsername.text()\n pw = self.inputPassword.text()\n con = pymysql.connect(db='bookingfutsal', user='root', passwd='',\n host='localhost', port=3306, autocommit=True)\n cur = con.cursor()\n sql = 'SELECT * FROM admin WHERE username=%s AND password=%s'\n data = cur.execute(sql, (user, pw))\n if len(cur.fetchall()) > 0:\n self.close()\n super(Ui_Login, self).__init__()\n uic.loadUi('booking.ui', self)\n self.gambar = self.findChild(QtWidgets.QLabel, 'piclap')\n self.gambar.setStyleSheet('background-image: url(sorce/lp2.jpg)')\n self.bNamaPembayar = self.findChild(QtWidgets.QLineEdit,\n 'namapembayar')\n self.bNominalDp = self.findChild(QtWidgets.QLineEdit, 'nominaldp')\n self.bBooking = self.findChild(QtWidgets.QPushButton, 'booking')\n self.bBooking.clicked.connect(self.bookingFunc)\n self.show()\n <mask token>\n\n def daftarFunc(self):\n user = self.dUsername.text()\n pw = self.dPassword.text()\n con = pymysql.connect(db='bookingfutsal', user='root', passwd='',\n host='localhost', port=3306, autocommit=True)\n cur = con.cursor()\n insert = user, pw\n sql = 'INSERT INTO admin (username, password) VALUES' + str(insert)\n data = cur.execute(sql)\n self.close()\n self.__init__()\n\n def bookingFunc(self):\n nama = self.bNamaPembayar.text()\n nominal = self.bNominalDp.text()\n con = pymysql.connect(db='bookingfutsal', user='root', passwd='',\n host='localhost', port=3306, autocommit=True)\n cur = con.cursor()\n insert = nama, nominal\n sql = 'INSERT INTO pembayaran (atasNama, namaPembayaran) VALUES' + str(\n insert)\n data = cur.execute(sql)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass Ui_Login(QtWidgets.QDialog):\n\n def __init__(self):\n super(Ui_Login, self).__init__()\n uic.loadUi('login.ui', self)\n self.icon = self.findChild(QtWidgets.QLabel, 'ilogin')\n self.icon.setStyleSheet('image: url(sorce/roundicon.png)')\n self.inputUsername = self.findChild(QtWidgets.QLineEdit, 'username')\n self.inputPassword = self.findChild(QtWidgets.QLineEdit, 'password')\n self.daftarButton = self.findChild(QtWidgets.QPushButton, 'daftarBtn')\n self.daftarButton.clicked.connect(self.forDaftar)\n self.loginButton = self.findChild(QtWidgets.QPushButton, 'login_2')\n self.loginButton.clicked.connect(self.testButton)\n self.show()\n\n def testButton(self):\n user = self.inputUsername.text()\n pw = self.inputPassword.text()\n con = pymysql.connect(db='bookingfutsal', user='root', passwd='',\n host='localhost', port=3306, autocommit=True)\n cur = con.cursor()\n sql = 'SELECT * FROM admin WHERE username=%s AND password=%s'\n data = cur.execute(sql, (user, pw))\n if len(cur.fetchall()) > 0:\n self.close()\n super(Ui_Login, self).__init__()\n uic.loadUi('booking.ui', self)\n self.gambar = self.findChild(QtWidgets.QLabel, 'piclap')\n self.gambar.setStyleSheet('background-image: url(sorce/lp2.jpg)')\n self.bNamaPembayar = self.findChild(QtWidgets.QLineEdit,\n 'namapembayar')\n self.bNominalDp = self.findChild(QtWidgets.QLineEdit, 'nominaldp')\n self.bBooking = self.findChild(QtWidgets.QPushButton, 'booking')\n self.bBooking.clicked.connect(self.bookingFunc)\n self.show()\n\n def forDaftar(self):\n self.close()\n super(Ui_Login, self).__init__()\n uic.loadUi('daftar.ui', self)\n self.dUsername = self.findChild(QtWidgets.QLineEdit, 'username')\n self.dPassword = self.findChild(QtWidgets.QLineEdit, 'password')\n self.dAlamat = self.findChild(QtWidgets.QLineEdit, 'alamat')\n self.dNoTelpU = self.findChild(QtWidgets.QLineEdit, 'notelepon')\n self.dDaftarButton = self.findChild(QtWidgets.QPushButton, 'daftar')\n self.dDaftarButton.clicked.connect(self.daftarFunc)\n self.show()\n\n def daftarFunc(self):\n user = self.dUsername.text()\n pw = self.dPassword.text()\n con = pymysql.connect(db='bookingfutsal', user='root', passwd='',\n host='localhost', port=3306, autocommit=True)\n cur = con.cursor()\n insert = user, pw\n sql = 'INSERT INTO admin (username, password) VALUES' + str(insert)\n data = cur.execute(sql)\n self.close()\n self.__init__()\n\n def bookingFunc(self):\n nama = self.bNamaPembayar.text()\n nominal = self.bNominalDp.text()\n con = pymysql.connect(db='bookingfutsal', user='root', passwd='',\n host='localhost', port=3306, autocommit=True)\n cur = con.cursor()\n insert = nama, nominal\n sql = 'INSERT INTO pembayaran (atasNama, namaPembayaran) VALUES' + str(\n insert)\n data = cur.execute(sql)\n\n\n<mask token>\napp.exec_()\n",
"step-3": "<mask token>\n\n\nclass Ui_Login(QtWidgets.QDialog):\n\n def __init__(self):\n super(Ui_Login, self).__init__()\n uic.loadUi('login.ui', self)\n self.icon = self.findChild(QtWidgets.QLabel, 'ilogin')\n self.icon.setStyleSheet('image: url(sorce/roundicon.png)')\n self.inputUsername = self.findChild(QtWidgets.QLineEdit, 'username')\n self.inputPassword = self.findChild(QtWidgets.QLineEdit, 'password')\n self.daftarButton = self.findChild(QtWidgets.QPushButton, 'daftarBtn')\n self.daftarButton.clicked.connect(self.forDaftar)\n self.loginButton = self.findChild(QtWidgets.QPushButton, 'login_2')\n self.loginButton.clicked.connect(self.testButton)\n self.show()\n\n def testButton(self):\n user = self.inputUsername.text()\n pw = self.inputPassword.text()\n con = pymysql.connect(db='bookingfutsal', user='root', passwd='',\n host='localhost', port=3306, autocommit=True)\n cur = con.cursor()\n sql = 'SELECT * FROM admin WHERE username=%s AND password=%s'\n data = cur.execute(sql, (user, pw))\n if len(cur.fetchall()) > 0:\n self.close()\n super(Ui_Login, self).__init__()\n uic.loadUi('booking.ui', self)\n self.gambar = self.findChild(QtWidgets.QLabel, 'piclap')\n self.gambar.setStyleSheet('background-image: url(sorce/lp2.jpg)')\n self.bNamaPembayar = self.findChild(QtWidgets.QLineEdit,\n 'namapembayar')\n self.bNominalDp = self.findChild(QtWidgets.QLineEdit, 'nominaldp')\n self.bBooking = self.findChild(QtWidgets.QPushButton, 'booking')\n self.bBooking.clicked.connect(self.bookingFunc)\n self.show()\n\n def forDaftar(self):\n self.close()\n super(Ui_Login, self).__init__()\n uic.loadUi('daftar.ui', self)\n self.dUsername = self.findChild(QtWidgets.QLineEdit, 'username')\n self.dPassword = self.findChild(QtWidgets.QLineEdit, 'password')\n self.dAlamat = self.findChild(QtWidgets.QLineEdit, 'alamat')\n self.dNoTelpU = self.findChild(QtWidgets.QLineEdit, 'notelepon')\n self.dDaftarButton = self.findChild(QtWidgets.QPushButton, 'daftar')\n self.dDaftarButton.clicked.connect(self.daftarFunc)\n self.show()\n\n def daftarFunc(self):\n user = self.dUsername.text()\n pw = self.dPassword.text()\n con = pymysql.connect(db='bookingfutsal', user='root', passwd='',\n host='localhost', port=3306, autocommit=True)\n cur = con.cursor()\n insert = user, pw\n sql = 'INSERT INTO admin (username, password) VALUES' + str(insert)\n data = cur.execute(sql)\n self.close()\n self.__init__()\n\n def bookingFunc(self):\n nama = self.bNamaPembayar.text()\n nominal = self.bNominalDp.text()\n con = pymysql.connect(db='bookingfutsal', user='root', passwd='',\n host='localhost', port=3306, autocommit=True)\n cur = con.cursor()\n insert = nama, nominal\n sql = 'INSERT INTO pembayaran (atasNama, namaPembayaran) VALUES' + str(\n insert)\n data = cur.execute(sql)\n\n\napp = QtWidgets.QApplication(sys.argv)\nwindow = Ui_Login()\napp.exec_()\n",
"step-4": "from PyQt5 import QtWidgets, uic\nimport sys\nimport pymysql\nimport mysql.connector\n\n\nclass Ui_Login(QtWidgets.QDialog):\n\n def __init__(self):\n super(Ui_Login, self).__init__()\n uic.loadUi('login.ui', self)\n self.icon = self.findChild(QtWidgets.QLabel, 'ilogin')\n self.icon.setStyleSheet('image: url(sorce/roundicon.png)')\n self.inputUsername = self.findChild(QtWidgets.QLineEdit, 'username')\n self.inputPassword = self.findChild(QtWidgets.QLineEdit, 'password')\n self.daftarButton = self.findChild(QtWidgets.QPushButton, 'daftarBtn')\n self.daftarButton.clicked.connect(self.forDaftar)\n self.loginButton = self.findChild(QtWidgets.QPushButton, 'login_2')\n self.loginButton.clicked.connect(self.testButton)\n self.show()\n\n def testButton(self):\n user = self.inputUsername.text()\n pw = self.inputPassword.text()\n con = pymysql.connect(db='bookingfutsal', user='root', passwd='',\n host='localhost', port=3306, autocommit=True)\n cur = con.cursor()\n sql = 'SELECT * FROM admin WHERE username=%s AND password=%s'\n data = cur.execute(sql, (user, pw))\n if len(cur.fetchall()) > 0:\n self.close()\n super(Ui_Login, self).__init__()\n uic.loadUi('booking.ui', self)\n self.gambar = self.findChild(QtWidgets.QLabel, 'piclap')\n self.gambar.setStyleSheet('background-image: url(sorce/lp2.jpg)')\n self.bNamaPembayar = self.findChild(QtWidgets.QLineEdit,\n 'namapembayar')\n self.bNominalDp = self.findChild(QtWidgets.QLineEdit, 'nominaldp')\n self.bBooking = self.findChild(QtWidgets.QPushButton, 'booking')\n self.bBooking.clicked.connect(self.bookingFunc)\n self.show()\n\n def forDaftar(self):\n self.close()\n super(Ui_Login, self).__init__()\n uic.loadUi('daftar.ui', self)\n self.dUsername = self.findChild(QtWidgets.QLineEdit, 'username')\n self.dPassword = self.findChild(QtWidgets.QLineEdit, 'password')\n self.dAlamat = self.findChild(QtWidgets.QLineEdit, 'alamat')\n self.dNoTelpU = self.findChild(QtWidgets.QLineEdit, 'notelepon')\n self.dDaftarButton = self.findChild(QtWidgets.QPushButton, 'daftar')\n self.dDaftarButton.clicked.connect(self.daftarFunc)\n self.show()\n\n def daftarFunc(self):\n user = self.dUsername.text()\n pw = self.dPassword.text()\n con = pymysql.connect(db='bookingfutsal', user='root', passwd='',\n host='localhost', port=3306, autocommit=True)\n cur = con.cursor()\n insert = user, pw\n sql = 'INSERT INTO admin (username, password) VALUES' + str(insert)\n data = cur.execute(sql)\n self.close()\n self.__init__()\n\n def bookingFunc(self):\n nama = self.bNamaPembayar.text()\n nominal = self.bNominalDp.text()\n con = pymysql.connect(db='bookingfutsal', user='root', passwd='',\n host='localhost', port=3306, autocommit=True)\n cur = con.cursor()\n insert = nama, nominal\n sql = 'INSERT INTO pembayaran (atasNama, namaPembayaran) VALUES' + str(\n insert)\n data = cur.execute(sql)\n\n\napp = QtWidgets.QApplication(sys.argv)\nwindow = Ui_Login()\napp.exec_()\n",
"step-5": "# This is a sample Python script.\r\n\r\n# Press Shift+F10 to execute it or replace it with your code.\r\n# Press Double Shift to search everywhere for classes, files, tool windows, actions, and settings.\r\n\r\nfrom PyQt5 import QtWidgets, uic\r\nimport sys\r\nimport pymysql\r\n\r\nimport mysql.connector\r\n\r\nclass Ui_Login(QtWidgets.QDialog):\r\n def __init__(self):\r\n super(Ui_Login, self).__init__()\r\n uic.loadUi('login.ui', self)\r\n\r\n self.icon = self.findChild(QtWidgets.QLabel, 'ilogin')\r\n self.icon.setStyleSheet(\"image: url(sorce/roundicon.png)\")\r\n\r\n self.inputUsername = self.findChild(QtWidgets.QLineEdit, 'username')\r\n self.inputPassword = self.findChild(QtWidgets.QLineEdit, 'password')\r\n\r\n self.daftarButton = self.findChild(QtWidgets.QPushButton, 'daftarBtn')\r\n self.daftarButton.clicked.connect(self.forDaftar)\r\n\r\n self.loginButton = self.findChild(QtWidgets.QPushButton, 'login_2')\r\n self.loginButton.clicked.connect(self.testButton)\r\n\r\n self.show()\r\n\r\n def testButton(self):\r\n user = self.inputUsername.text()\r\n pw = self.inputPassword.text()\r\n con = pymysql.connect(db='bookingfutsal',\r\n user='root',\r\n passwd='',\r\n host='localhost',\r\n port=3306,\r\n autocommit=True)\r\n cur = con.cursor()\r\n sql = \"SELECT * FROM admin WHERE username=%s AND password=%s\"\r\n data = cur.execute(sql, (user, pw))\r\n if(len(cur.fetchall()) > 0):\r\n self.close()\r\n\r\n super(Ui_Login, self).__init__()\r\n uic.loadUi('booking.ui', self)\r\n\r\n self.gambar = self.findChild(QtWidgets.QLabel, 'piclap')\r\n self.gambar.setStyleSheet(\"background-image: url(sorce/lp2.jpg)\")\r\n\r\n self.bNamaPembayar = self.findChild(QtWidgets.QLineEdit, 'namapembayar')\r\n self.bNominalDp = self.findChild(QtWidgets.QLineEdit, 'nominaldp')\r\n\r\n self.bBooking = self.findChild(QtWidgets.QPushButton, 'booking')\r\n self.bBooking.clicked.connect(self.bookingFunc)\r\n\r\n self.show()\r\n\r\n\r\n def forDaftar(self):\r\n self.close()\r\n\r\n super(Ui_Login, self).__init__()\r\n uic.loadUi('daftar.ui', self)\r\n\r\n self.dUsername = self.findChild(QtWidgets.QLineEdit, 'username')\r\n self.dPassword = self.findChild(QtWidgets.QLineEdit, 'password')\r\n self.dAlamat = self.findChild(QtWidgets.QLineEdit, 'alamat')\r\n self.dNoTelpU = self.findChild(QtWidgets.QLineEdit, 'notelepon')\r\n\r\n self.dDaftarButton = self.findChild(QtWidgets.QPushButton, 'daftar')\r\n self.dDaftarButton.clicked.connect(self.daftarFunc)\r\n\r\n self.show()\r\n\r\n def daftarFunc(self):\r\n user = self.dUsername.text()\r\n pw = self.dPassword.text()\r\n con = pymysql.connect(db='bookingfutsal',\r\n user='root',\r\n passwd='',\r\n host='localhost',\r\n port=3306,\r\n autocommit=True)\r\n cur = con.cursor()\r\n insert = (user, pw)\r\n sql = \"INSERT INTO admin (username, password) VALUES\" + str(insert)\r\n data = cur.execute(sql)\r\n\r\n self.close()\r\n\r\n self.__init__();\r\n\r\n# booking.Ui_Booking().Boking()\r\n# koneksi.Koneksi()\r\n\r\n def bookingFunc(self):\r\n nama = self.bNamaPembayar.text()\r\n nominal = self.bNominalDp.text()\r\n\r\n con = pymysql.connect(db='bookingfutsal',\r\n user='root',\r\n passwd='',\r\n host='localhost',\r\n port=3306,\r\n autocommit=True)\r\n cur = con.cursor()\r\n insert = (nama, nominal)\r\n sql = \"INSERT INTO pembayaran (atasNama, namaPembayaran) VALUES\" + str(insert)\r\n data = cur.execute(sql)\r\n\r\napp = QtWidgets.QApplication(sys.argv)\r\nwindow = Ui_Login()\r\napp.exec_()",
"step-ids": [
5,
7,
8,
9,
10
]
}
|
[
5,
7,
8,
9,
10
] |
import serial
import mysql.connector
ser = serial.Serial('/dev/serial0', 9600)
while True:
    # readline() returns bytes on Python 3; decode before comparing characters
    data = ser.readline().decode("utf-8", errors="ignore").strip()
    if data.startswith(";"):
        print(data)
        data = data.split(";")
        if data[1] == "1":
            fonction = data[1]
            add = data[2]
            tmp = data[3]
            debit = data[4]
            ser.write(bytes([123]))  # acknowledge the frame to the sender
            # debug display
            print("Save in DB")
            print("fonction :", fonction)
            print("addresse :", add)
            print("temperature :", tmp)
            print("Debit :", debit)

            conn = mysql.connector.connect(host="mysql-ormeaux.alwaysdata.net",
                                           user="ormeaux",
                                           password="pGYw478Vy",
                                           database="ormeaux_29")
            cursor = conn.cursor()

            requete = "INSERT INTO mesures(id_bassins, temperature, debit) VALUES (%s, %s, %s)"
            valeurs = (add, tmp, debit)

            cursor.execute(requete, valeurs)
            conn.commit()
            conn.close()
|
normal
|
{
"blob_id": "b1a6593e7b528238e7be5ea6da4d1bfee0d78067",
"index": 7824,
"step-1": "import serial\nimport mysql.connector\n\nser = serial.Serial('/dev/serial0', 9600)\n\nwhile True:\n\tdata = ser.readline()\n\tif data[0]==\";\":\n\t\tprint(data)\n\t\tdata = data.split(\";\")\n\t\tif data[1] == \"1\":\n\t\t\tfonction = data[1]\n\t\t\tadd = data[2]\n\t\t\ttmp = data[3]\n\t\t\tdebit = data[4]\n\t\t\tser.write([123])\n\t\t\t#test affichage\n\t\t\tprint \"Save in DB\"\n\t\t\tprint \"fonction :\",fonction\n\t\t\tprint \"addresse :\",add\n\t\t\tprint \"temperature :\",tmp\n\t\t\tprint \"Debit :\",debit\n\n\t\t\tconn = mysql.connector.connect(host=\"mysql-ormeaux.alwaysdata.net\",user=\"ormeaux\",password=\"pGYw478Vy\", database=\"ormeaux_29\")\n\t\t\tcursor = conn.cursor()\n\t\t\tcursor = conn.cursor()\n\n\t\t\trequete = \"INSERT INTO mesures(id_bassins,temperature, debit) VALUES (%s, %s, %s)\"\n\t\t\tvaleurs = (add,tmp,debit)\n\t\n\t\t\tcursor.execute(requete,valeurs)\n\t\t\tconn.commit()\n\t\t\tconn.close()\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
<|reserved_special_token_0|>
def ms_matrices(E, Q, matrix_terms, dim):
"""Compute the Möller-Stetter matrices in the monomial basis from a
reduced Macaulay matrix
Parameters
----------
E : (m, k) ndarray
Columns of the reduced Macaulay matrix corresponding to the quotient basis
Q : (l, n) 2d ndarray
Matrix whose columns give the quotient basis in terms of the monomial basis
matrix_terms : 2d ndarray
Array with ordered monomial basis
dim : int
Number of variables
Returns
-------
M : (n, n, dim) ndarray
Array containing the nxn Möller-Stetter matrices, where the matrix
corresponding to multiplication by x_i is M[..., i]
"""
n = Q.shape[1]
m = E.shape[0]
M = np.empty((n, n, dim), dtype=E.dtype)
A = np.vstack((-E, Q))
for i in range(dim):
arr = indexarray(matrix_terms, slice(m, None), i)
M[..., i] = Q.conj().T @ A[arr]
return M
def ms_matrices_cheb(E, Q, matrix_terms, dim):
"""Compute the Möller-Stetter matrices in the Chebyshev basis from a
reduced Macaulay matrix
Parameters
----------
E : (m, k) ndarray
Columns of the reduced Macaulay matrix corresponding to the quotient basis
Q : (l, n) 2d ndarray
Matrix whose columns give the quotient basis in terms of the Chebyshev basis
matrix_terms : 2d ndarray
Array with ordered Chebyshev basis
dim : int
Number of variables
Returns
-------
M : (n, n, dim) ndarray
Array containing the nxn Möller-Stetter matrices, where the matrix
corresponding to multiplication by x_i is M[..., i]
"""
n = Q.shape[1]
m = E.shape[0]
M = np.empty((n, n, dim), dtype=E.dtype)
A = np.vstack((-E, Q))
for i in range(dim):
arr1, arr2 = indexarray_cheb(matrix_terms, slice(m, None), i)
M[..., i] = 0.5 * Q.T.conj() @ (A[arr1] + A[arr2])
return M
<|reserved_special_token_0|>
def ms_matrices_p_cheb(E, P, matrix_terms, dim, cut):
""" Compute the Möller-Stetter matrices in the Chebyshev basis from a
reduced Macaulay matrix (QRP method)
Parameters
----------
E : (m, k) ndarray
Columns of the reduced Macaulay matrix corresponding to the quotient basis
P : (, l) ndarray
Array of pivots returned in QR with pivoting, used to permute the columns.
matrix_terms : 2d ndarray
Array with ordered Chebyshev basis
dim : int
Number of variables
Returns
-------
M : (n, n, dim) ndarray
Array containing the nxn Möller-Stetter matrices, where the matrix
corresponding to multiplication by x_i is M[..., i]
"""
r, n = E.shape
matrix_terms[cut:] = matrix_terms[cut:][P]
M = np.empty((n, n, dim), dtype=E.dtype)
A = np.vstack((-E, np.eye(n)))
for i in range(dim):
arr1, arr2 = indexarray_cheb(matrix_terms, slice(r, None), i)
M[..., i] = 0.5 * (A[arr1] + A[arr2])
return M
def sort_eigs(eigs, diag):
"""Sorts the eigs array to match the order on the diagonal
of the Schur factorization
Parameters
----------
eigs : 1d ndarray
Array of unsorted eigenvalues
diag : 1d complex ndarray
Array containing the diagonal of the approximate Schur factorization
Returns
-------
w : 1d ndarray
Eigenvalues from eigs sorted to match the order in diag
"""
n = diag.shape[0]
lst = list(range(n))
arr = []
for eig in eigs:
i = lst[np.argmin(np.abs(diag[lst] - eig))]
arr.append(i)
lst.remove(i)
return np.argsort(arr)
<|reserved_special_token_0|>
def msroots(M):
"""Computes the roots to a system via the eigenvalues of the Möller-Stetter
matrices. Implicitly performs a random rotation of the coordinate system
to avoid repeated eigenvalues arising from special structure in the underlying
polynomial system. Approximates the joint eigenvalue problem using a Schur
factorization of a linear combination of the matrices.
Parameters
----------
M : (n, n, dim) ndarray
Array containing the nxn Möller-Stetter matrices, where the matrix
corresponding to multiplication by x_i is M[..., i]
Returns
-------
roots : (n, dim) ndarray
Array containing the approximate roots of the system, where each row
is a root.
"""
dim = M.shape[-1]
Q, c = get_Q_c(dim)
M = (Q @ M[..., np.newaxis])[..., 0]
eigs = np.empty((dim, M.shape[0]), dtype='complex')
U = schur((M * c).sum(axis=-1), output='complex')[1]
for i in range(0, dim):
T = U.T.conj() @ M[..., i] @ U
w = eig(M[..., i], right=False)
arr = sort_eigs(w, np.diag(T))
eigs[i] = w[arr]
return (Q.T @ eigs).T
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def indexarray(matrix_terms, which, var):
"""Compute the array mapping monomials under multiplication by x_var
Parameters
----------
matrix_terms : 2d integer ndarray
Array containing the monomials in order. matrix_terms[i] is the array
containing the exponent for each variable in the ith multivariate
monomial
which : slice object
object to index into the matrix_terms for the monomials we want to multiply by var
var : int
Variable to multiply by: x_0, ..., x_(dim-1)
Returns
-------
arr : 1d integer ndarray
Array containing the indices of the lower-degree monomials after multiplication
by x_var
"""
mults = matrix_terms[which].copy()
mults[:, var] += 1
return np.argmin(np.abs(mults[:, np.newaxis] - matrix_terms[np.newaxis]
).sum(axis=-1), axis=1)
<|reserved_special_token_0|>
def ms_matrices(E, Q, matrix_terms, dim):
"""Compute the Möller-Stetter matrices in the monomial basis from a
reduced Macaulay matrix
Parameters
----------
E : (m, k) ndarray
Columns of the reduced Macaulay matrix corresponding to the quotient basis
Q : (l, n) 2d ndarray
Matrix whose columns give the quotient basis in terms of the monomial basis
matrix_terms : 2d ndarray
Array with ordered monomial basis
dim : int
Number of variables
Returns
-------
M : (n, n, dim) ndarray
Array containing the nxn Möller-Stetter matrices, where the matrix
corresponding to multiplication by x_i is M[..., i]
"""
n = Q.shape[1]
m = E.shape[0]
M = np.empty((n, n, dim), dtype=E.dtype)
A = np.vstack((-E, Q))
for i in range(dim):
arr = indexarray(matrix_terms, slice(m, None), i)
M[..., i] = Q.conj().T @ A[arr]
return M
def ms_matrices_cheb(E, Q, matrix_terms, dim):
"""Compute the Möller-Stetter matrices in the Chebyshev basis from a
reduced Macaulay matrix
Parameters
----------
E : (m, k) ndarray
Columns of the reduced Macaulay matrix corresponding to the quotient basis
Q : (l, n) 2d ndarray
Matrix whose columns give the quotient basis in terms of the Chebyshev basis
matrix_terms : 2d ndarray
Array with ordered Chebyshev basis
dim : int
Number of variables
Returns
-------
M : (n, n, dim) ndarray
Array containing the nxn Möller-Stetter matrices, where the matrix
corresponding to multiplication by x_i is M[..., i]
"""
n = Q.shape[1]
m = E.shape[0]
M = np.empty((n, n, dim), dtype=E.dtype)
A = np.vstack((-E, Q))
for i in range(dim):
arr1, arr2 = indexarray_cheb(matrix_terms, slice(m, None), i)
M[..., i] = 0.5 * Q.T.conj() @ (A[arr1] + A[arr2])
return M
<|reserved_special_token_0|>
def ms_matrices_p_cheb(E, P, matrix_terms, dim, cut):
""" Compute the Möller-Stetter matrices in the Chebyshev basis from a
reduced Macaulay matrix (QRP method)
Parameters
----------
E : (m, k) ndarray
Columns of the reduced Macaulay matrix corresponding to the quotient basis
P : (, l) ndarray
Array of pivots returned in QR with pivoting, used to permute the columns.
matrix_terms : 2d ndarray
Array with ordered Chebyshev basis
dim : int
Number of variables
Returns
-------
M : (n, n, dim) ndarray
Array containing the nxn Möller-Stetter matrices, where the matrix
corresponding to multiplication by x_i is M[..., i]
"""
r, n = E.shape
matrix_terms[cut:] = matrix_terms[cut:][P]
M = np.empty((n, n, dim), dtype=E.dtype)
A = np.vstack((-E, np.eye(n)))
for i in range(dim):
arr1, arr2 = indexarray_cheb(matrix_terms, slice(r, None), i)
M[..., i] = 0.5 * (A[arr1] + A[arr2])
return M
def sort_eigs(eigs, diag):
"""Sorts the eigs array to match the order on the diagonal
of the Schur factorization
Parameters
----------
eigs : 1d ndarray
Array of unsorted eigenvalues
diag : 1d complex ndarray
Array containing the diagonal of the approximate Schur factorization
Returns
-------
w : 1d ndarray
Eigenvalues from eigs sorted to match the order in diag
"""
n = diag.shape[0]
lst = list(range(n))
arr = []
for eig in eigs:
i = lst[np.argmin(np.abs(diag[lst] - eig))]
arr.append(i)
lst.remove(i)
return np.argsort(arr)
<|reserved_special_token_0|>
def msroots(M):
"""Computes the roots to a system via the eigenvalues of the Möller-Stetter
matrices. Implicitly performs a random rotation of the coordinate system
to avoid repeated eigenvalues arising from special structure in the underlying
polynomial system. Approximates the joint eigenvalue problem using a Schur
factorization of a linear combination of the matrices.
Parameters
----------
M : (n, n, dim) ndarray
Array containing the nxn Möller-Stetter matrices, where the matrix
corresponding to multiplication by x_i is M[..., i]
Returns
-------
roots : (n, dim) ndarray
Array containing the approximate roots of the system, where each row
is a root.
"""
dim = M.shape[-1]
Q, c = get_Q_c(dim)
M = (Q @ M[..., np.newaxis])[..., 0]
eigs = np.empty((dim, M.shape[0]), dtype='complex')
U = schur((M * c).sum(axis=-1), output='complex')[1]
for i in range(0, dim):
T = U.T.conj() @ M[..., i] @ U
w = eig(M[..., i], right=False)
arr = sort_eigs(w, np.diag(T))
eigs[i] = w[arr]
return (Q.T @ eigs).T
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def indexarray(matrix_terms, which, var):
"""Compute the array mapping monomials under multiplication by x_var
Parameters
----------
matrix_terms : 2d integer ndarray
Array containing the monomials in order. matrix_terms[i] is the array
containing the exponent for each variable in the ith multivariate
monomial
which : slice object
object to index into the matrix_terms for the monomials we want to multiply by var
var : int
Variable to multiply by: x_0, ..., x_(dim-1)
Returns
-------
arr : 1d integer ndarray
Array containing the indices of the lower-degree monomials after multiplication
by x_var
"""
mults = matrix_terms[which].copy()
mults[:, var] += 1
return np.argmin(np.abs(mults[:, np.newaxis] - matrix_terms[np.newaxis]
).sum(axis=-1), axis=1)
def indexarray_cheb(matrix_terms, which, var):
"""Compute the array mapping Chebyshev monomials under multiplication by x_var:
T_1*T_0 = T_1
T_1*T_n = .5(T_(n+1)+ T_(n-1))
Parameters
----------
matrix_terms : 2d integer ndarray
Array containing the monomials in order. matrix_terms[i] is the array
containing the degree for each univariate Chebyshev monomial in the ith
multivariate monomial
m : int
Number of monomials of highest degree, i.e. those that do not need to be
multiplied
var : int
Variable to multiply by: x_0, ..., x_(dim-1)
Returns
-------
arr1 : 1d integer ndarray
Array containing the indices of T_(n+1)
arr2 : 1d
Array containing the indices of T_(n-1)
"""
up = matrix_terms[which].copy()
up[:, var] += 1
down = matrix_terms[which].copy()
down[:, var] -= 1
down[down[:, var] == -1, var] += 2
arr1 = np.argmin(np.abs(up[:, np.newaxis] - matrix_terms[np.newaxis]).
sum(axis=-1), axis=1)
arr2 = np.argmin(np.abs(down[:, np.newaxis] - matrix_terms[np.newaxis])
.sum(axis=-1), axis=1)
return arr1, arr2
def ms_matrices(E, Q, matrix_terms, dim):
"""Compute the Möller-Stetter matrices in the monomial basis from a
reduced Macaulay matrix
Parameters
----------
E : (m, k) ndarray
Columns of the reduced Macaulay matrix corresponding to the quotient basis
Q : (l, n) 2d ndarray
Matrix whose columns give the quotient basis in terms of the monomial basis
matrix_terms : 2d ndarray
Array with ordered monomial basis
dim : int
Number of variables
Returns
-------
M : (n, n, dim) ndarray
Array containing the nxn Möller-Stetter matrices, where the matrix
corresponding to multiplication by x_i is M[..., i]
"""
n = Q.shape[1]
m = E.shape[0]
M = np.empty((n, n, dim), dtype=E.dtype)
A = np.vstack((-E, Q))
for i in range(dim):
arr = indexarray(matrix_terms, slice(m, None), i)
M[..., i] = Q.conj().T @ A[arr]
return M
def ms_matrices_cheb(E, Q, matrix_terms, dim):
"""Compute the Möller-Stetter matrices in the Chebyshev basis from a
reduced Macaulay matrix
Parameters
----------
E : (m, k) ndarray
Columns of the reduced Macaulay matrix corresponding to the quotient basis
Q : (l, n) 2d ndarray
Matrix whose columns give the quotient basis in terms of the Chebyshev basis
matrix_terms : 2d ndarray
Array with ordered Chebyshev basis
dim : int
Number of variables
Returns
-------
M : (n, n, dim) ndarray
Array containing the nxn Möller-Stetter matrices, where the matrix
corresponding to multiplication by x_i is M[..., i]
"""
n = Q.shape[1]
m = E.shape[0]
M = np.empty((n, n, dim), dtype=E.dtype)
A = np.vstack((-E, Q))
for i in range(dim):
arr1, arr2 = indexarray_cheb(matrix_terms, slice(m, None), i)
M[..., i] = 0.5 * Q.T.conj() @ (A[arr1] + A[arr2])
return M
def ms_matrices_p(E, P, matrix_terms, dim, cut):
"""Compute the Möller-Stetter matrices in the power basis from a
reduced Macaulay matrix (QRP method)
Parameters
----------
E : (m, k) ndarray
Columns of the reduced Macaulay matrix corresponding to the quotient basis
P : (, l) ndarray
Array of pivots returned in QR with pivoting, used to permute the columns.
matrix_terms : 2d ndarray
Array with ordered Chebyshev basis
dim : int
Number of variables
Returns
-------
M : (n, n, dim) ndarray
Array containing the nxn Möller-Stetter matrices, where the matrix
corresponding to multiplication by x_i is M[..., i]
"""
r, n = E.shape
matrix_terms[cut:] = matrix_terms[cut:][P]
M = np.empty((n, n, dim), dtype=E.dtype)
A = np.vstack((-E, np.eye(n)))
for i in range(dim):
arr = indexarray(matrix_terms, slice(r, None), i)
M[..., i] = A[arr]
return M
def ms_matrices_p_cheb(E, P, matrix_terms, dim, cut):
""" Compute the Möller-Stetter matrices in the Chebyshev basis from a
reduced Macaulay matrix (QRP method)
Parameters
----------
E : (m, k) ndarray
Columns of the reduced Macaulay matrix corresponding to the quotient basis
P : (, l) ndarray
Array of pivots returned in QR with pivoting, used to permute the columns.
matrix_terms : 2d ndarray
Array with ordered Chebyshev basis
dim : int
Number of variables
Returns
-------
M : (n, n, dim) ndarray
Array containing the nxn Möller-Stetter matrices, where the matrix
corresponding to multiplication by x_i is M[..., i]
"""
r, n = E.shape
matrix_terms[cut:] = matrix_terms[cut:][P]
M = np.empty((n, n, dim), dtype=E.dtype)
A = np.vstack((-E, np.eye(n)))
for i in range(dim):
arr1, arr2 = indexarray_cheb(matrix_terms, slice(r, None), i)
M[..., i] = 0.5 * (A[arr1] + A[arr2])
return M
def sort_eigs(eigs, diag):
"""Sorts the eigs array to match the order on the diagonal
of the Schur factorization
Parameters
----------
eigs : 1d ndarray
Array of unsorted eigenvalues
diag : 1d complex ndarray
Array containing the diagonal of the approximate Schur factorization
Returns
-------
w : 1d ndarray
Eigenvalues from eigs sorted to match the order in diag
"""
n = diag.shape[0]
lst = list(range(n))
arr = []
for eig in eigs:
i = lst[np.argmin(np.abs(diag[lst] - eig))]
arr.append(i)
lst.remove(i)
return np.argsort(arr)
<|reserved_special_token_0|>
def msroots(M):
"""Computes the roots to a system via the eigenvalues of the Möller-Stetter
matrices. Implicitly performs a random rotation of the coordinate system
to avoid repeated eigenvalues arising from special structure in the underlying
polynomial system. Approximates the joint eigenvalue problem using a Schur
factorization of a linear combination of the matrices.
Parameters
----------
M : (n, n, dim) ndarray
Array containing the nxn Möller-Stetter matrices, where the matrix
corresponding to multiplication by x_i is M[..., i]
Returns
-------
roots : (n, dim) ndarray
Array containing the approximate roots of the system, where each row
is a root.
"""
dim = M.shape[-1]
Q, c = get_Q_c(dim)
M = (Q @ M[..., np.newaxis])[..., 0]
eigs = np.empty((dim, M.shape[0]), dtype='complex')
U = schur((M * c).sum(axis=-1), output='complex')[1]
for i in range(0, dim):
T = U.T.conj() @ M[..., i] @ U
w = eig(M[..., i], right=False)
arr = sort_eigs(w, np.diag(T))
eigs[i] = w[arr]
return (Q.T @ eigs).T
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def indexarray(matrix_terms, which, var):
"""Compute the array mapping monomials under multiplication by x_var
Parameters
----------
matrix_terms : 2d integer ndarray
Array containing the monomials in order. matrix_terms[i] is the array
containing the exponent for each variable in the ith multivariate
monomial
which : slice object
object to index into the matrix_terms for the monomials we want to multiply by var
var : int
Variable to multiply by: x_0, ..., x_(dim-1)
Returns
-------
arr : 1d integer ndarray
Array containing the indices of the lower-degree monomials after multiplication
by x_var
"""
mults = matrix_terms[which].copy()
mults[:, var] += 1
return np.argmin(np.abs(mults[:, np.newaxis] - matrix_terms[np.newaxis]
).sum(axis=-1), axis=1)
def indexarray_cheb(matrix_terms, which, var):
"""Compute the array mapping Chebyshev monomials under multiplication by x_var:
T_1*T_0 = T_1
T_1*T_n = .5(T_(n+1)+ T_(n-1))
Parameters
----------
matrix_terms : 2d integer ndarray
Array containing the monomials in order. matrix_terms[i] is the array
containing the degree for each univariate Chebyshev monomial in the ith
multivariate monomial
m : int
Number of monomials of highest degree, i.e. those that do not need to be
multiplied
var : int
Variable to multiply by: x_0, ..., x_(dim-1)
Returns
-------
arr1 : 1d integer ndarray
Array containing the indices of T_(n+1)
arr2 : 1d
Array containing the indices of T_(n-1)
"""
up = matrix_terms[which].copy()
up[:, var] += 1
down = matrix_terms[which].copy()
down[:, var] -= 1
down[down[:, var] == -1, var] += 2
arr1 = np.argmin(np.abs(up[:, np.newaxis] - matrix_terms[np.newaxis]).
sum(axis=-1), axis=1)
arr2 = np.argmin(np.abs(down[:, np.newaxis] - matrix_terms[np.newaxis])
.sum(axis=-1), axis=1)
return arr1, arr2
def ms_matrices(E, Q, matrix_terms, dim):
"""Compute the Möller-Stetter matrices in the monomial basis from a
reduced Macaulay matrix
Parameters
----------
E : (m, k) ndarray
Columns of the reduced Macaulay matrix corresponding to the quotient basis
Q : (l, n) 2d ndarray
Matrix whose columns give the quotient basis in terms of the monomial basis
matrix_terms : 2d ndarray
Array with ordered monomial basis
dim : int
Number of variables
Returns
-------
M : (n, n, dim) ndarray
Array containing the nxn Möller-Stetter matrices, where the matrix
corresponding to multiplication by x_i is M[..., i]
"""
n = Q.shape[1]
m = E.shape[0]
M = np.empty((n, n, dim), dtype=E.dtype)
A = np.vstack((-E, Q))
for i in range(dim):
arr = indexarray(matrix_terms, slice(m, None), i)
M[..., i] = Q.conj().T @ A[arr]
return M
def ms_matrices_cheb(E, Q, matrix_terms, dim):
"""Compute the Möller-Stetter matrices in the Chebyshev basis from a
reduced Macaulay matrix
Parameters
----------
E : (m, k) ndarray
Columns of the reduced Macaulay matrix corresponding to the quotient basis
Q : (l, n) 2d ndarray
Matrix whose columns give the quotient basis in terms of the Chebyshev basis
matrix_terms : 2d ndarray
Array with ordered Chebyshev basis
dim : int
Number of variables
Returns
-------
M : (n, n, dim) ndarray
Array containing the nxn Möller-Stetter matrices, where the matrix
corresponding to multiplication by x_i is M[..., i]
"""
n = Q.shape[1]
m = E.shape[0]
M = np.empty((n, n, dim), dtype=E.dtype)
A = np.vstack((-E, Q))
for i in range(dim):
arr1, arr2 = indexarray_cheb(matrix_terms, slice(m, None), i)
M[..., i] = 0.5 * Q.T.conj() @ (A[arr1] + A[arr2])
return M
def ms_matrices_p(E, P, matrix_terms, dim, cut):
"""Compute the Möller-Stetter matrices in the power basis from a
reduced Macaulay matrix (QRP method)
Parameters
----------
E : (m, k) ndarray
Columns of the reduced Macaulay matrix corresponding to the quotient basis
P : (, l) ndarray
Array of pivots returned in QR with pivoting, used to permute the columns.
matrix_terms : 2d ndarray
Array with ordered Chebyshev basis
dim : int
Number of variables
Returns
-------
M : (n, n, dim) ndarray
Array containing the nxn Möller-Stetter matrices, where the matrix
corresponding to multiplication by x_i is M[..., i]
"""
r, n = E.shape
matrix_terms[cut:] = matrix_terms[cut:][P]
M = np.empty((n, n, dim), dtype=E.dtype)
A = np.vstack((-E, np.eye(n)))
for i in range(dim):
arr = indexarray(matrix_terms, slice(r, None), i)
M[..., i] = A[arr]
return M
def ms_matrices_p_cheb(E, P, matrix_terms, dim, cut):
""" Compute the Möller-Stetter matrices in the Chebyshev basis from a
reduced Macaulay matrix (QRP method)
Parameters
----------
E : (m, k) ndarray
Columns of the reduced Macaulay matrix corresponding to the quotient basis
P : (, l) ndarray
Array of pivots returned in QR with pivoting, used to permute the columns.
matrix_terms : 2d ndarray
Array with ordered Chebyshev basis
dim : int
Number of variables
Returns
-------
M : (n, n, dim) ndarray
Array containing the nxn Möller-Stetter matrices, where the matrix
corresponding to multiplication by x_i is M[..., i]
"""
r, n = E.shape
matrix_terms[cut:] = matrix_terms[cut:][P]
M = np.empty((n, n, dim), dtype=E.dtype)
A = np.vstack((-E, np.eye(n)))
for i in range(dim):
arr1, arr2 = indexarray_cheb(matrix_terms, slice(r, None), i)
M[..., i] = 0.5 * (A[arr1] + A[arr2])
return M
def sort_eigs(eigs, diag):
"""Sorts the eigs array to match the order on the diagonal
of the Schur factorization
Parameters
----------
eigs : 1d ndarray
Array of unsorted eigenvalues
diag : 1d complex ndarray
Array containing the diagonal of the approximate Schur factorization
Returns
-------
w : 1d ndarray
Eigenvalues from eigs sorted to match the order in diag
"""
n = diag.shape[0]
lst = list(range(n))
arr = []
for eig in eigs:
i = lst[np.argmin(np.abs(diag[lst] - eig))]
arr.append(i)
lst.remove(i)
return np.argsort(arr)
@memoize
def get_rand_combos_matrix(rows, cols, normal=False):
""" Generates a rows by cols random matrix with orthogonal rows or columns,
depending on if rows > cols or cols > rows.
Parameters
----------
rows : int
Number of rows
cols : int
Number of columns
normal : bool
Optional. Whether or not to create a matrix using entries drawn
from the standard normal distribution (N(0, 1)) or not. If it's
False, it will return an orthogonal matrix.
Returns
-------
C : (rows, cols) ndarray
Matrix with orthgonal rows or columns, depending on if rows > cols or
cols > rows if normal is False, otherwise a matrix with
coefficients drawn from the standard normal (N(0, 1)).
"""
np.random.seed(57)
if normal:
C = np.random.normal(loc=0, scale=1, size=(rows, cols))
return C
size = max(rows, cols)
C = ortho_group.rvs(size)
return C[:rows, :cols]
@memoize
def get_Q_c(dim):
""" Generates a once-chosen random orthogonal matrix and a random linear combination
for use in the simultaneous eigenvalue compution.
Parameters
----------
dim : int
Dimension of the system
Returns
-------
Q : (dim, dim) ndarray
Random orthogonal rotation
c : (dim, ) ndarray
Random linear combination
"""
np.random.seed(103)
Q = ortho_group.rvs(dim)
c = np.random.randn(dim)
return Q, c
def msroots(M):
"""Computes the roots to a system via the eigenvalues of the Möller-Stetter
matrices. Implicitly performs a random rotation of the coordinate system
to avoid repeated eigenvalues arising from special structure in the underlying
polynomial system. Approximates the joint eigenvalue problem using a Schur
factorization of a linear combination of the matrices.
Parameters
----------
M : (n, n, dim) ndarray
Array containing the nxn Möller-Stetter matrices, where the matrix
corresponding to multiplication by x_i is M[..., i]
Returns
-------
roots : (n, dim) ndarray
Array containing the approximate roots of the system, where each row
is a root.
"""
dim = M.shape[-1]
Q, c = get_Q_c(dim)
M = (Q @ M[..., np.newaxis])[..., 0]
eigs = np.empty((dim, M.shape[0]), dtype='complex')
U = schur((M * c).sum(axis=-1), output='complex')[1]
for i in range(0, dim):
T = U.T.conj() @ M[..., i] @ U
w = eig(M[..., i], right=False)
arr = sort_eigs(w, np.diag(T))
eigs[i] = w[arr]
return (Q.T @ eigs).T
<|reserved_special_token_1|>
import numpy as np
import itertools
from scipy.linalg import eig, schur
from eigen_rootfinding.polynomial import MultiCheb, MultiPower
from eigen_rootfinding.utils import memoize
from scipy.stats import ortho_group
def indexarray(matrix_terms, which, var):
"""Compute the array mapping monomials under multiplication by x_var
Parameters
----------
matrix_terms : 2d integer ndarray
Array containing the monomials in order. matrix_terms[i] is the array
containing the exponent for each variable in the ith multivariate
monomial
which : slice object
object to index into the matrix_terms for the monomials we want to multiply by var
var : int
Variable to multiply by: x_0, ..., x_(dim-1)
Returns
-------
arr : 1d integer ndarray
Array containing the indices of the lower-degree monomials after multiplication
by x_var
"""
mults = matrix_terms[which].copy()
mults[:, var] += 1
return np.argmin(np.abs(mults[:, np.newaxis] - matrix_terms[np.newaxis]).sum(axis=-1), axis=1)
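# Worked example (added for illustration; it is not part of the original module):
# with matrix_terms = np.array([[2, 0], [1, 1], [0, 2], [1, 0], [0, 1], [0, 0]])
# and which = slice(3, None) (the degree <= 1 monomials), multiplying by x_0 sends
# [1, 0] -> [2, 0], [0, 1] -> [1, 1] and [0, 0] -> [1, 0], so
# indexarray(matrix_terms, slice(3, None), 0) returns array([0, 1, 3]), i.e. the
# positions of those product monomials inside matrix_terms.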
def indexarray_cheb(matrix_terms, which, var):
"""Compute the array mapping Chebyshev monomials under multiplication by x_var:
T_1*T_0 = T_1
T_1*T_n = .5(T_(n+1)+ T_(n-1))
Parameters
----------
matrix_terms : 2d integer ndarray
Array containing the monomials in order. matrix_terms[i] is the array
containing the degree for each univariate Chebyshev monomial in the ith
multivariate monomial
    which : slice object
        object to index into the matrix_terms for the monomials we want to
        multiply by var
var : int
Variable to multiply by: x_0, ..., x_(dim-1)
Returns
-------
arr1 : 1d integer ndarray
Array containing the indices of T_(n+1)
    arr2 : 1d integer ndarray
Array containing the indices of T_(n-1)
"""
up = matrix_terms[which].copy()
up[:, var] += 1
down = matrix_terms[which].copy()
down[:, var] -= 1
down[down[:, var]==-1, var] += 2
arr1 = np.argmin(np.abs(up[:, np.newaxis] - matrix_terms[np.newaxis]).sum(axis=-1), axis=1)
arr2 = np.argmin(np.abs(down[:, np.newaxis] - matrix_terms[np.newaxis]).sum(axis=-1), axis=1)
return arr1, arr2
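# Note (added for illustration; not part of the original module): the two index
# arrays implement the product identity T_1 * T_n = 0.5*(T_(n+1) + T_(n-1)),
# e.g. T_1*T_3 = 0.5*(T_4 + T_2); a quick check with NumPy, assuming
# numpy.polynomial is available, is np.polynomial.chebyshev.chebmul([0, 1], [0, 0, 0, 1]),
# which returns [0, 0, 0.5, 0, 0.5]. The line `down[down[:, var] == -1, var] += 2`
# applies the reflection T_(-1) = T_1, so for n = 0 the formula collapses to
# T_1*T_0 = 0.5*(T_1 + T_1) = T_1, as stated in the docstring.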
def ms_matrices(E, Q, matrix_terms, dim):
"""Compute the Möller-Stetter matrices in the monomial basis from a
reduced Macaulay matrix
Parameters
----------
E : (m, k) ndarray
Columns of the reduced Macaulay matrix corresponding to the quotient basis
Q : (l, n) 2d ndarray
Matrix whose columns give the quotient basis in terms of the monomial basis
matrix_terms : 2d ndarray
Array with ordered monomial basis
dim : int
Number of variables
Returns
-------
M : (n, n, dim) ndarray
Array containing the nxn Möller-Stetter matrices, where the matrix
corresponding to multiplication by x_i is M[..., i]
"""
n = Q.shape[1]
m = E.shape[0]
M = np.empty((n, n, dim),dtype=E.dtype)
A = np.vstack((-E, Q))
for i in range(dim):
arr = indexarray(matrix_terms, slice(m,None), i)
M[..., i] = Q.conj().T@A[arr]
return M
def ms_matrices_cheb(E, Q, matrix_terms, dim):
"""Compute the Möller-Stetter matrices in the Chebyshev basis from a
reduced Macaulay matrix
Parameters
----------
E : (m, k) ndarray
Columns of the reduced Macaulay matrix corresponding to the quotient basis
Q : (l, n) 2d ndarray
Matrix whose columns give the quotient basis in terms of the Chebyshev basis
matrix_terms : 2d ndarray
Array with ordered Chebyshev basis
dim : int
Number of variables
Returns
-------
M : (n, n, dim) ndarray
Array containing the nxn Möller-Stetter matrices, where the matrix
corresponding to multiplication by x_i is M[..., i]
"""
n = Q.shape[1]
m = E.shape[0]
M = np.empty((n, n, dim),dtype=E.dtype)
A = np.vstack((-E, Q))
for i in range(dim):
arr1, arr2 = indexarray_cheb(matrix_terms, slice(m,None), i)
M[..., i] = .5*Q.T.conj()@(A[arr1]+A[arr2])
return M
def ms_matrices_p(E, P, matrix_terms, dim, cut):
"""Compute the Möller-Stetter matrices in the power basis from a
reduced Macaulay matrix (QRP method)
Parameters
----------
E : (m, k) ndarray
Columns of the reduced Macaulay matrix corresponding to the quotient basis
    P : (l, ) ndarray
Array of pivots returned in QR with pivoting, used to permute the columns.
matrix_terms : 2d ndarray
        Array with ordered monomial basis
dim : int
Number of variables
Returns
-------
M : (n, n, dim) ndarray
Array containing the nxn Möller-Stetter matrices, where the matrix
corresponding to multiplication by x_i is M[..., i]
"""
r, n = E.shape
matrix_terms[cut:] = matrix_terms[cut:][P]
M = np.empty((n, n, dim),dtype=E.dtype)
A = np.vstack((-E, np.eye(n)))
for i in range(dim):
arr = indexarray(matrix_terms, slice(r,None), i)
M[..., i] = A[arr]
return M
def ms_matrices_p_cheb(E, P, matrix_terms, dim, cut):
""" Compute the Möller-Stetter matrices in the Chebyshev basis from a
reduced Macaulay matrix (QRP method)
Parameters
----------
E : (m, k) ndarray
Columns of the reduced Macaulay matrix corresponding to the quotient basis
    P : (l, ) ndarray
Array of pivots returned in QR with pivoting, used to permute the columns.
matrix_terms : 2d ndarray
Array with ordered Chebyshev basis
dim : int
Number of variables
Returns
-------
M : (n, n, dim) ndarray
Array containing the nxn Möller-Stetter matrices, where the matrix
corresponding to multiplication by x_i is M[..., i]
"""
r, n = E.shape
matrix_terms[cut:] = matrix_terms[cut:][P]
M = np.empty((n, n, dim),dtype=E.dtype)
A = np.vstack((-E, np.eye(n)))
for i in range(dim):
arr1, arr2 = indexarray_cheb(matrix_terms, slice(r,None), i)
M[..., i] = .5*(A[arr1] + A[arr2])
return M
def sort_eigs(eigs, diag):
"""Sorts the eigs array to match the order on the diagonal
of the Schur factorization
Parameters
----------
eigs : 1d ndarray
Array of unsorted eigenvalues
diag : 1d complex ndarray
Array containing the diagonal of the approximate Schur factorization
Returns
-------
w : 1d ndarray
Eigenvalues from eigs sorted to match the order in diag
"""
n = diag.shape[0]
lst = list(range(n))
arr = []
for eig in eigs:
i = lst[np.argmin(np.abs(diag[lst]-eig))]
arr.append(i)
lst.remove(i)
return np.argsort(arr)
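# Worked example (added for illustration; not part of the original module):
# sort_eigs(np.array([5.0, 2.0, 9.0]), np.array([9.01, 4.98, 2.02])) returns
# array([2, 0, 1]); indexing the eigenvalues with it gives [9, 5, 2], which
# lines up with the diagonal entries 9.01, 4.98, 2.02 by nearest match.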
@memoize
def get_rand_combos_matrix(rows, cols, normal=False):
""" Generates a rows by cols random matrix with orthogonal rows or columns,
depending on if rows > cols or cols > rows.
Parameters
----------
rows : int
Number of rows
cols : int
Number of columns
normal : bool
Optional. Whether or not to create a matrix using entries drawn
from the standard normal distribution (N(0, 1)) or not. If it's
False, it will return an orthogonal matrix.
Returns
-------
C : (rows, cols) ndarray
        Matrix with orthogonal rows or columns, depending on whether rows > cols or
cols > rows if normal is False, otherwise a matrix with
coefficients drawn from the standard normal (N(0, 1)).
"""
np.random.seed(57)
# TODO perhaps explore different types of random matrices?
# randn was giving me conditioning problems
if normal:
C = np.random.normal(loc=0, scale=1, size=(rows, cols))
return C
size = max(rows, cols)
C = ortho_group.rvs(size)
return C[:rows, :cols]
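# Example (added for illustration; not part of the original module): a call such
# as get_rand_combos_matrix(3, 5) slices the first 3 rows of a 5x5 orthogonal
# matrix, so the returned rows are orthonormal while the columns are not.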
@memoize
def get_Q_c(dim):
""" Generates a once-chosen random orthogonal matrix and a random linear combination
    for use in the simultaneous eigenvalue computation.
Parameters
----------
dim : int
Dimension of the system
Returns
-------
Q : (dim, dim) ndarray
Random orthogonal rotation
c : (dim, ) ndarray
Random linear combination
"""
np.random.seed(103)
Q = ortho_group.rvs(dim)
c = np.random.randn(dim)
return Q, c
def msroots(M):
"""Computes the roots to a system via the eigenvalues of the Möller-Stetter
matrices. Implicitly performs a random rotation of the coordinate system
to avoid repeated eigenvalues arising from special structure in the underlying
polynomial system. Approximates the joint eigenvalue problem using a Schur
factorization of a linear combination of the matrices.
Parameters
----------
M : (n, n, dim) ndarray
Array containing the nxn Möller-Stetter matrices, where the matrix
corresponding to multiplication by x_i is M[..., i]
Returns
-------
roots : (n, dim) ndarray
Array containing the approximate roots of the system, where each row
is a root.
"""
dim = M.shape[-1]
# perform a random rotation with a random orthogonal Q
Q, c = get_Q_c(dim)
M = (Q@M[..., np.newaxis])[..., 0]
eigs = np.empty((dim, M.shape[0]), dtype='complex')
# Compute the matrix U that triangularizes a random linear combination
U = schur((M*c).sum(axis=-1), output='complex')[1]
for i in range(0, dim):
T = (U.T.conj())@(M[..., i])@U
w = eig(M[..., i], right=False)
arr = sort_eigs(w, np.diag(T))
eigs[i] = w[arr]
# Rotate back before returning, transposing to match expected shape
return (Q.T@eigs).T
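# ---------------------------------------------------------------------------
# Minimal sanity-check sketch (added for illustration; it is not part of the
# original module and assumes the eigen_rootfinding imports at the top of this
# file resolve). For commuting *diagonal* Möller-Stetter matrices, the i-th
# diagonal entries of M[..., 0], ..., M[..., dim-1] are exactly the coordinates
# of the i-th root, so msroots should recover them up to the ordering of rows.
if __name__ == "__main__":
    dim, n = 2, 2
    M_demo = np.zeros((n, n, dim))
    # Encode two "roots", (1, 3) and (2, 4):
    M_demo[..., 0] = np.diag([1.0, 2.0])   # multiplication by x_0
    M_demo[..., 1] = np.diag([3.0, 4.0])   # multiplication by x_1
    roots = msroots(M_demo)
    # Rows may come back in either order; sort each column for a stable print
    print(np.sort(roots.real, axis=0))     # expected: approximately [[1. 3.] [2. 4.]]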
|
flexible
|
{
"blob_id": "14fb6776ac30802edf43c43acbee64263c6bdd7b",
"index": 2777,
"step-1": "<mask token>\n\n\ndef ms_matrices(E, Q, matrix_terms, dim):\n \"\"\"Compute the Möller-Stetter matrices in the monomial basis from a\n reduced Macaulay matrix\n\n Parameters\n ----------\n E : (m, k) ndarray\n Columns of the reduced Macaulay matrix corresponding to the quotient basis\n Q : (l, n) 2d ndarray\n Matrix whose columns give the quotient basis in terms of the monomial basis\n matrix_terms : 2d ndarray\n Array with ordered monomial basis\n dim : int\n Number of variables\n\n Returns\n -------\n M : (n, n, dim) ndarray\n Array containing the nxn Möller-Stetter matrices, where the matrix\n corresponding to multiplication by x_i is M[..., i]\n \"\"\"\n n = Q.shape[1]\n m = E.shape[0]\n M = np.empty((n, n, dim), dtype=E.dtype)\n A = np.vstack((-E, Q))\n for i in range(dim):\n arr = indexarray(matrix_terms, slice(m, None), i)\n M[..., i] = Q.conj().T @ A[arr]\n return M\n\n\ndef ms_matrices_cheb(E, Q, matrix_terms, dim):\n \"\"\"Compute the Möller-Stetter matrices in the Chebyshev basis from a\n reduced Macaulay matrix\n\n Parameters\n ----------\n E : (m, k) ndarray\n Columns of the reduced Macaulay matrix corresponding to the quotient basis\n Q : (l, n) 2d ndarray\n Matrix whose columns give the quotient basis in terms of the Chebyshev basis\n matrix_terms : 2d ndarray\n Array with ordered Chebyshev basis\n dim : int\n Number of variables\n\n Returns\n -------\n M : (n, n, dim) ndarray\n Array containing the nxn Möller-Stetter matrices, where the matrix\n corresponding to multiplication by x_i is M[..., i]\n \"\"\"\n n = Q.shape[1]\n m = E.shape[0]\n M = np.empty((n, n, dim), dtype=E.dtype)\n A = np.vstack((-E, Q))\n for i in range(dim):\n arr1, arr2 = indexarray_cheb(matrix_terms, slice(m, None), i)\n M[..., i] = 0.5 * Q.T.conj() @ (A[arr1] + A[arr2])\n return M\n\n\n<mask token>\n\n\ndef ms_matrices_p_cheb(E, P, matrix_terms, dim, cut):\n \"\"\" Compute the Möller-Stetter matrices in the Chebyshev basis from a\n reduced Macaulay matrix (QRP method)\n\n Parameters\n ----------\n E : (m, k) ndarray\n Columns of the reduced Macaulay matrix corresponding to the quotient basis\n P : (, l) ndarray\n Array of pivots returned in QR with pivoting, used to permute the columns.\n matrix_terms : 2d ndarray\n Array with ordered Chebyshev basis\n dim : int\n Number of variables\n\n Returns\n -------\n M : (n, n, dim) ndarray\n Array containing the nxn Möller-Stetter matrices, where the matrix\n corresponding to multiplication by x_i is M[..., i]\n \"\"\"\n r, n = E.shape\n matrix_terms[cut:] = matrix_terms[cut:][P]\n M = np.empty((n, n, dim), dtype=E.dtype)\n A = np.vstack((-E, np.eye(n)))\n for i in range(dim):\n arr1, arr2 = indexarray_cheb(matrix_terms, slice(r, None), i)\n M[..., i] = 0.5 * (A[arr1] + A[arr2])\n return M\n\n\ndef sort_eigs(eigs, diag):\n \"\"\"Sorts the eigs array to match the order on the diagonal\n of the Schur factorization\n\n Parameters\n ----------\n eigs : 1d ndarray\n Array of unsorted eigenvalues\n diag : 1d complex ndarray\n Array containing the diagonal of the approximate Schur factorization\n\n Returns\n -------\n w : 1d ndarray\n Eigenvalues from eigs sorted to match the order in diag\n \"\"\"\n n = diag.shape[0]\n lst = list(range(n))\n arr = []\n for eig in eigs:\n i = lst[np.argmin(np.abs(diag[lst] - eig))]\n arr.append(i)\n lst.remove(i)\n return np.argsort(arr)\n\n\n<mask token>\n\n\ndef msroots(M):\n \"\"\"Computes the roots to a system via the eigenvalues of the Möller-Stetter\n matrices. 
Implicitly performs a random rotation of the coordinate system\n to avoid repeated eigenvalues arising from special structure in the underlying\n polynomial system. Approximates the joint eigenvalue problem using a Schur\n factorization of a linear combination of the matrices.\n\n Parameters\n ----------\n M : (n, n, dim) ndarray\n Array containing the nxn Möller-Stetter matrices, where the matrix\n corresponding to multiplication by x_i is M[..., i]\n\n Returns\n -------\n roots : (n, dim) ndarray\n Array containing the approximate roots of the system, where each row\n is a root.\n \"\"\"\n dim = M.shape[-1]\n Q, c = get_Q_c(dim)\n M = (Q @ M[..., np.newaxis])[..., 0]\n eigs = np.empty((dim, M.shape[0]), dtype='complex')\n U = schur((M * c).sum(axis=-1), output='complex')[1]\n for i in range(0, dim):\n T = U.T.conj() @ M[..., i] @ U\n w = eig(M[..., i], right=False)\n arr = sort_eigs(w, np.diag(T))\n eigs[i] = w[arr]\n return (Q.T @ eigs).T\n",
"step-2": "<mask token>\n\n\ndef indexarray(matrix_terms, which, var):\n \"\"\"Compute the array mapping monomials under multiplication by x_var\n\n Parameters\n ----------\n matrix_terms : 2d integer ndarray\n Array containing the monomials in order. matrix_terms[i] is the array\n containing the exponent for each variable in the ith multivariate\n monomial\n which : slice object\n object to index into the matrix_terms for the monomials we want to multiply by var\n var : int\n Variable to multiply by: x_0, ..., x_(dim-1)\n\n Returns\n -------\n arr : 1d integer ndarray\n Array containing the indices of the lower-degree monomials after multiplication\n by x_var\n \"\"\"\n mults = matrix_terms[which].copy()\n mults[:, var] += 1\n return np.argmin(np.abs(mults[:, np.newaxis] - matrix_terms[np.newaxis]\n ).sum(axis=-1), axis=1)\n\n\n<mask token>\n\n\ndef ms_matrices(E, Q, matrix_terms, dim):\n \"\"\"Compute the Möller-Stetter matrices in the monomial basis from a\n reduced Macaulay matrix\n\n Parameters\n ----------\n E : (m, k) ndarray\n Columns of the reduced Macaulay matrix corresponding to the quotient basis\n Q : (l, n) 2d ndarray\n Matrix whose columns give the quotient basis in terms of the monomial basis\n matrix_terms : 2d ndarray\n Array with ordered monomial basis\n dim : int\n Number of variables\n\n Returns\n -------\n M : (n, n, dim) ndarray\n Array containing the nxn Möller-Stetter matrices, where the matrix\n corresponding to multiplication by x_i is M[..., i]\n \"\"\"\n n = Q.shape[1]\n m = E.shape[0]\n M = np.empty((n, n, dim), dtype=E.dtype)\n A = np.vstack((-E, Q))\n for i in range(dim):\n arr = indexarray(matrix_terms, slice(m, None), i)\n M[..., i] = Q.conj().T @ A[arr]\n return M\n\n\ndef ms_matrices_cheb(E, Q, matrix_terms, dim):\n \"\"\"Compute the Möller-Stetter matrices in the Chebyshev basis from a\n reduced Macaulay matrix\n\n Parameters\n ----------\n E : (m, k) ndarray\n Columns of the reduced Macaulay matrix corresponding to the quotient basis\n Q : (l, n) 2d ndarray\n Matrix whose columns give the quotient basis in terms of the Chebyshev basis\n matrix_terms : 2d ndarray\n Array with ordered Chebyshev basis\n dim : int\n Number of variables\n\n Returns\n -------\n M : (n, n, dim) ndarray\n Array containing the nxn Möller-Stetter matrices, where the matrix\n corresponding to multiplication by x_i is M[..., i]\n \"\"\"\n n = Q.shape[1]\n m = E.shape[0]\n M = np.empty((n, n, dim), dtype=E.dtype)\n A = np.vstack((-E, Q))\n for i in range(dim):\n arr1, arr2 = indexarray_cheb(matrix_terms, slice(m, None), i)\n M[..., i] = 0.5 * Q.T.conj() @ (A[arr1] + A[arr2])\n return M\n\n\n<mask token>\n\n\ndef ms_matrices_p_cheb(E, P, matrix_terms, dim, cut):\n \"\"\" Compute the Möller-Stetter matrices in the Chebyshev basis from a\n reduced Macaulay matrix (QRP method)\n\n Parameters\n ----------\n E : (m, k) ndarray\n Columns of the reduced Macaulay matrix corresponding to the quotient basis\n P : (, l) ndarray\n Array of pivots returned in QR with pivoting, used to permute the columns.\n matrix_terms : 2d ndarray\n Array with ordered Chebyshev basis\n dim : int\n Number of variables\n\n Returns\n -------\n M : (n, n, dim) ndarray\n Array containing the nxn Möller-Stetter matrices, where the matrix\n corresponding to multiplication by x_i is M[..., i]\n \"\"\"\n r, n = E.shape\n matrix_terms[cut:] = matrix_terms[cut:][P]\n M = np.empty((n, n, dim), dtype=E.dtype)\n A = np.vstack((-E, np.eye(n)))\n for i in range(dim):\n arr1, arr2 = indexarray_cheb(matrix_terms, slice(r, 
None), i)\n M[..., i] = 0.5 * (A[arr1] + A[arr2])\n return M\n\n\ndef sort_eigs(eigs, diag):\n \"\"\"Sorts the eigs array to match the order on the diagonal\n of the Schur factorization\n\n Parameters\n ----------\n eigs : 1d ndarray\n Array of unsorted eigenvalues\n diag : 1d complex ndarray\n Array containing the diagonal of the approximate Schur factorization\n\n Returns\n -------\n w : 1d ndarray\n Eigenvalues from eigs sorted to match the order in diag\n \"\"\"\n n = diag.shape[0]\n lst = list(range(n))\n arr = []\n for eig in eigs:\n i = lst[np.argmin(np.abs(diag[lst] - eig))]\n arr.append(i)\n lst.remove(i)\n return np.argsort(arr)\n\n\n<mask token>\n\n\ndef msroots(M):\n \"\"\"Computes the roots to a system via the eigenvalues of the Möller-Stetter\n matrices. Implicitly performs a random rotation of the coordinate system\n to avoid repeated eigenvalues arising from special structure in the underlying\n polynomial system. Approximates the joint eigenvalue problem using a Schur\n factorization of a linear combination of the matrices.\n\n Parameters\n ----------\n M : (n, n, dim) ndarray\n Array containing the nxn Möller-Stetter matrices, where the matrix\n corresponding to multiplication by x_i is M[..., i]\n\n Returns\n -------\n roots : (n, dim) ndarray\n Array containing the approximate roots of the system, where each row\n is a root.\n \"\"\"\n dim = M.shape[-1]\n Q, c = get_Q_c(dim)\n M = (Q @ M[..., np.newaxis])[..., 0]\n eigs = np.empty((dim, M.shape[0]), dtype='complex')\n U = schur((M * c).sum(axis=-1), output='complex')[1]\n for i in range(0, dim):\n T = U.T.conj() @ M[..., i] @ U\n w = eig(M[..., i], right=False)\n arr = sort_eigs(w, np.diag(T))\n eigs[i] = w[arr]\n return (Q.T @ eigs).T\n",
"step-3": "<mask token>\n\n\ndef indexarray(matrix_terms, which, var):\n \"\"\"Compute the array mapping monomials under multiplication by x_var\n\n Parameters\n ----------\n matrix_terms : 2d integer ndarray\n Array containing the monomials in order. matrix_terms[i] is the array\n containing the exponent for each variable in the ith multivariate\n monomial\n which : slice object\n object to index into the matrix_terms for the monomials we want to multiply by var\n var : int\n Variable to multiply by: x_0, ..., x_(dim-1)\n\n Returns\n -------\n arr : 1d integer ndarray\n Array containing the indices of the lower-degree monomials after multiplication\n by x_var\n \"\"\"\n mults = matrix_terms[which].copy()\n mults[:, var] += 1\n return np.argmin(np.abs(mults[:, np.newaxis] - matrix_terms[np.newaxis]\n ).sum(axis=-1), axis=1)\n\n\ndef indexarray_cheb(matrix_terms, which, var):\n \"\"\"Compute the array mapping Chebyshev monomials under multiplication by x_var:\n\n T_1*T_0 = T_1\n T_1*T_n = .5(T_(n+1)+ T_(n-1))\n\n Parameters\n ----------\n matrix_terms : 2d integer ndarray\n Array containing the monomials in order. matrix_terms[i] is the array\n containing the degree for each univariate Chebyshev monomial in the ith\n multivariate monomial\n m : int\n Number of monomials of highest degree, i.e. those that do not need to be\n multiplied\n var : int\n Variable to multiply by: x_0, ..., x_(dim-1)\n\n Returns\n -------\n arr1 : 1d integer ndarray\n Array containing the indices of T_(n+1)\n arr2 : 1d\n Array containing the indices of T_(n-1)\n \"\"\"\n up = matrix_terms[which].copy()\n up[:, var] += 1\n down = matrix_terms[which].copy()\n down[:, var] -= 1\n down[down[:, var] == -1, var] += 2\n arr1 = np.argmin(np.abs(up[:, np.newaxis] - matrix_terms[np.newaxis]).\n sum(axis=-1), axis=1)\n arr2 = np.argmin(np.abs(down[:, np.newaxis] - matrix_terms[np.newaxis])\n .sum(axis=-1), axis=1)\n return arr1, arr2\n\n\ndef ms_matrices(E, Q, matrix_terms, dim):\n \"\"\"Compute the Möller-Stetter matrices in the monomial basis from a\n reduced Macaulay matrix\n\n Parameters\n ----------\n E : (m, k) ndarray\n Columns of the reduced Macaulay matrix corresponding to the quotient basis\n Q : (l, n) 2d ndarray\n Matrix whose columns give the quotient basis in terms of the monomial basis\n matrix_terms : 2d ndarray\n Array with ordered monomial basis\n dim : int\n Number of variables\n\n Returns\n -------\n M : (n, n, dim) ndarray\n Array containing the nxn Möller-Stetter matrices, where the matrix\n corresponding to multiplication by x_i is M[..., i]\n \"\"\"\n n = Q.shape[1]\n m = E.shape[0]\n M = np.empty((n, n, dim), dtype=E.dtype)\n A = np.vstack((-E, Q))\n for i in range(dim):\n arr = indexarray(matrix_terms, slice(m, None), i)\n M[..., i] = Q.conj().T @ A[arr]\n return M\n\n\ndef ms_matrices_cheb(E, Q, matrix_terms, dim):\n \"\"\"Compute the Möller-Stetter matrices in the Chebyshev basis from a\n reduced Macaulay matrix\n\n Parameters\n ----------\n E : (m, k) ndarray\n Columns of the reduced Macaulay matrix corresponding to the quotient basis\n Q : (l, n) 2d ndarray\n Matrix whose columns give the quotient basis in terms of the Chebyshev basis\n matrix_terms : 2d ndarray\n Array with ordered Chebyshev basis\n dim : int\n Number of variables\n\n Returns\n -------\n M : (n, n, dim) ndarray\n Array containing the nxn Möller-Stetter matrices, where the matrix\n corresponding to multiplication by x_i is M[..., i]\n \"\"\"\n n = Q.shape[1]\n m = E.shape[0]\n M = np.empty((n, n, dim), dtype=E.dtype)\n A = 
np.vstack((-E, Q))\n for i in range(dim):\n arr1, arr2 = indexarray_cheb(matrix_terms, slice(m, None), i)\n M[..., i] = 0.5 * Q.T.conj() @ (A[arr1] + A[arr2])\n return M\n\n\ndef ms_matrices_p(E, P, matrix_terms, dim, cut):\n \"\"\"Compute the Möller-Stetter matrices in the power basis from a\n reduced Macaulay matrix (QRP method)\n\n Parameters\n ----------\n E : (m, k) ndarray\n Columns of the reduced Macaulay matrix corresponding to the quotient basis\n P : (, l) ndarray\n Array of pivots returned in QR with pivoting, used to permute the columns.\n matrix_terms : 2d ndarray\n Array with ordered Chebyshev basis\n dim : int\n Number of variables\n\n Returns\n -------\n M : (n, n, dim) ndarray\n Array containing the nxn Möller-Stetter matrices, where the matrix\n corresponding to multiplication by x_i is M[..., i]\n \"\"\"\n r, n = E.shape\n matrix_terms[cut:] = matrix_terms[cut:][P]\n M = np.empty((n, n, dim), dtype=E.dtype)\n A = np.vstack((-E, np.eye(n)))\n for i in range(dim):\n arr = indexarray(matrix_terms, slice(r, None), i)\n M[..., i] = A[arr]\n return M\n\n\ndef ms_matrices_p_cheb(E, P, matrix_terms, dim, cut):\n \"\"\" Compute the Möller-Stetter matrices in the Chebyshev basis from a\n reduced Macaulay matrix (QRP method)\n\n Parameters\n ----------\n E : (m, k) ndarray\n Columns of the reduced Macaulay matrix corresponding to the quotient basis\n P : (, l) ndarray\n Array of pivots returned in QR with pivoting, used to permute the columns.\n matrix_terms : 2d ndarray\n Array with ordered Chebyshev basis\n dim : int\n Number of variables\n\n Returns\n -------\n M : (n, n, dim) ndarray\n Array containing the nxn Möller-Stetter matrices, where the matrix\n corresponding to multiplication by x_i is M[..., i]\n \"\"\"\n r, n = E.shape\n matrix_terms[cut:] = matrix_terms[cut:][P]\n M = np.empty((n, n, dim), dtype=E.dtype)\n A = np.vstack((-E, np.eye(n)))\n for i in range(dim):\n arr1, arr2 = indexarray_cheb(matrix_terms, slice(r, None), i)\n M[..., i] = 0.5 * (A[arr1] + A[arr2])\n return M\n\n\ndef sort_eigs(eigs, diag):\n \"\"\"Sorts the eigs array to match the order on the diagonal\n of the Schur factorization\n\n Parameters\n ----------\n eigs : 1d ndarray\n Array of unsorted eigenvalues\n diag : 1d complex ndarray\n Array containing the diagonal of the approximate Schur factorization\n\n Returns\n -------\n w : 1d ndarray\n Eigenvalues from eigs sorted to match the order in diag\n \"\"\"\n n = diag.shape[0]\n lst = list(range(n))\n arr = []\n for eig in eigs:\n i = lst[np.argmin(np.abs(diag[lst] - eig))]\n arr.append(i)\n lst.remove(i)\n return np.argsort(arr)\n\n\n<mask token>\n\n\ndef msroots(M):\n \"\"\"Computes the roots to a system via the eigenvalues of the Möller-Stetter\n matrices. Implicitly performs a random rotation of the coordinate system\n to avoid repeated eigenvalues arising from special structure in the underlying\n polynomial system. 
Approximates the joint eigenvalue problem using a Schur\n factorization of a linear combination of the matrices.\n\n Parameters\n ----------\n M : (n, n, dim) ndarray\n Array containing the nxn Möller-Stetter matrices, where the matrix\n corresponding to multiplication by x_i is M[..., i]\n\n Returns\n -------\n roots : (n, dim) ndarray\n Array containing the approximate roots of the system, where each row\n is a root.\n \"\"\"\n dim = M.shape[-1]\n Q, c = get_Q_c(dim)\n M = (Q @ M[..., np.newaxis])[..., 0]\n eigs = np.empty((dim, M.shape[0]), dtype='complex')\n U = schur((M * c).sum(axis=-1), output='complex')[1]\n for i in range(0, dim):\n T = U.T.conj() @ M[..., i] @ U\n w = eig(M[..., i], right=False)\n arr = sort_eigs(w, np.diag(T))\n eigs[i] = w[arr]\n return (Q.T @ eigs).T\n",
"step-4": "<mask token>\n\n\ndef indexarray(matrix_terms, which, var):\n \"\"\"Compute the array mapping monomials under multiplication by x_var\n\n Parameters\n ----------\n matrix_terms : 2d integer ndarray\n Array containing the monomials in order. matrix_terms[i] is the array\n containing the exponent for each variable in the ith multivariate\n monomial\n which : slice object\n object to index into the matrix_terms for the monomials we want to multiply by var\n var : int\n Variable to multiply by: x_0, ..., x_(dim-1)\n\n Returns\n -------\n arr : 1d integer ndarray\n Array containing the indices of the lower-degree monomials after multiplication\n by x_var\n \"\"\"\n mults = matrix_terms[which].copy()\n mults[:, var] += 1\n return np.argmin(np.abs(mults[:, np.newaxis] - matrix_terms[np.newaxis]\n ).sum(axis=-1), axis=1)\n\n\ndef indexarray_cheb(matrix_terms, which, var):\n \"\"\"Compute the array mapping Chebyshev monomials under multiplication by x_var:\n\n T_1*T_0 = T_1\n T_1*T_n = .5(T_(n+1)+ T_(n-1))\n\n Parameters\n ----------\n matrix_terms : 2d integer ndarray\n Array containing the monomials in order. matrix_terms[i] is the array\n containing the degree for each univariate Chebyshev monomial in the ith\n multivariate monomial\n m : int\n Number of monomials of highest degree, i.e. those that do not need to be\n multiplied\n var : int\n Variable to multiply by: x_0, ..., x_(dim-1)\n\n Returns\n -------\n arr1 : 1d integer ndarray\n Array containing the indices of T_(n+1)\n arr2 : 1d\n Array containing the indices of T_(n-1)\n \"\"\"\n up = matrix_terms[which].copy()\n up[:, var] += 1\n down = matrix_terms[which].copy()\n down[:, var] -= 1\n down[down[:, var] == -1, var] += 2\n arr1 = np.argmin(np.abs(up[:, np.newaxis] - matrix_terms[np.newaxis]).\n sum(axis=-1), axis=1)\n arr2 = np.argmin(np.abs(down[:, np.newaxis] - matrix_terms[np.newaxis])\n .sum(axis=-1), axis=1)\n return arr1, arr2\n\n\ndef ms_matrices(E, Q, matrix_terms, dim):\n \"\"\"Compute the Möller-Stetter matrices in the monomial basis from a\n reduced Macaulay matrix\n\n Parameters\n ----------\n E : (m, k) ndarray\n Columns of the reduced Macaulay matrix corresponding to the quotient basis\n Q : (l, n) 2d ndarray\n Matrix whose columns give the quotient basis in terms of the monomial basis\n matrix_terms : 2d ndarray\n Array with ordered monomial basis\n dim : int\n Number of variables\n\n Returns\n -------\n M : (n, n, dim) ndarray\n Array containing the nxn Möller-Stetter matrices, where the matrix\n corresponding to multiplication by x_i is M[..., i]\n \"\"\"\n n = Q.shape[1]\n m = E.shape[0]\n M = np.empty((n, n, dim), dtype=E.dtype)\n A = np.vstack((-E, Q))\n for i in range(dim):\n arr = indexarray(matrix_terms, slice(m, None), i)\n M[..., i] = Q.conj().T @ A[arr]\n return M\n\n\ndef ms_matrices_cheb(E, Q, matrix_terms, dim):\n \"\"\"Compute the Möller-Stetter matrices in the Chebyshev basis from a\n reduced Macaulay matrix\n\n Parameters\n ----------\n E : (m, k) ndarray\n Columns of the reduced Macaulay matrix corresponding to the quotient basis\n Q : (l, n) 2d ndarray\n Matrix whose columns give the quotient basis in terms of the Chebyshev basis\n matrix_terms : 2d ndarray\n Array with ordered Chebyshev basis\n dim : int\n Number of variables\n\n Returns\n -------\n M : (n, n, dim) ndarray\n Array containing the nxn Möller-Stetter matrices, where the matrix\n corresponding to multiplication by x_i is M[..., i]\n \"\"\"\n n = Q.shape[1]\n m = E.shape[0]\n M = np.empty((n, n, dim), dtype=E.dtype)\n A = 
np.vstack((-E, Q))\n for i in range(dim):\n arr1, arr2 = indexarray_cheb(matrix_terms, slice(m, None), i)\n M[..., i] = 0.5 * Q.T.conj() @ (A[arr1] + A[arr2])\n return M\n\n\ndef ms_matrices_p(E, P, matrix_terms, dim, cut):\n \"\"\"Compute the Möller-Stetter matrices in the power basis from a\n reduced Macaulay matrix (QRP method)\n\n Parameters\n ----------\n E : (m, k) ndarray\n Columns of the reduced Macaulay matrix corresponding to the quotient basis\n P : (, l) ndarray\n Array of pivots returned in QR with pivoting, used to permute the columns.\n matrix_terms : 2d ndarray\n Array with ordered Chebyshev basis\n dim : int\n Number of variables\n\n Returns\n -------\n M : (n, n, dim) ndarray\n Array containing the nxn Möller-Stetter matrices, where the matrix\n corresponding to multiplication by x_i is M[..., i]\n \"\"\"\n r, n = E.shape\n matrix_terms[cut:] = matrix_terms[cut:][P]\n M = np.empty((n, n, dim), dtype=E.dtype)\n A = np.vstack((-E, np.eye(n)))\n for i in range(dim):\n arr = indexarray(matrix_terms, slice(r, None), i)\n M[..., i] = A[arr]\n return M\n\n\ndef ms_matrices_p_cheb(E, P, matrix_terms, dim, cut):\n \"\"\" Compute the Möller-Stetter matrices in the Chebyshev basis from a\n reduced Macaulay matrix (QRP method)\n\n Parameters\n ----------\n E : (m, k) ndarray\n Columns of the reduced Macaulay matrix corresponding to the quotient basis\n P : (, l) ndarray\n Array of pivots returned in QR with pivoting, used to permute the columns.\n matrix_terms : 2d ndarray\n Array with ordered Chebyshev basis\n dim : int\n Number of variables\n\n Returns\n -------\n M : (n, n, dim) ndarray\n Array containing the nxn Möller-Stetter matrices, where the matrix\n corresponding to multiplication by x_i is M[..., i]\n \"\"\"\n r, n = E.shape\n matrix_terms[cut:] = matrix_terms[cut:][P]\n M = np.empty((n, n, dim), dtype=E.dtype)\n A = np.vstack((-E, np.eye(n)))\n for i in range(dim):\n arr1, arr2 = indexarray_cheb(matrix_terms, slice(r, None), i)\n M[..., i] = 0.5 * (A[arr1] + A[arr2])\n return M\n\n\ndef sort_eigs(eigs, diag):\n \"\"\"Sorts the eigs array to match the order on the diagonal\n of the Schur factorization\n\n Parameters\n ----------\n eigs : 1d ndarray\n Array of unsorted eigenvalues\n diag : 1d complex ndarray\n Array containing the diagonal of the approximate Schur factorization\n\n Returns\n -------\n w : 1d ndarray\n Eigenvalues from eigs sorted to match the order in diag\n \"\"\"\n n = diag.shape[0]\n lst = list(range(n))\n arr = []\n for eig in eigs:\n i = lst[np.argmin(np.abs(diag[lst] - eig))]\n arr.append(i)\n lst.remove(i)\n return np.argsort(arr)\n\n\n@memoize\ndef get_rand_combos_matrix(rows, cols, normal=False):\n \"\"\" Generates a rows by cols random matrix with orthogonal rows or columns,\n depending on if rows > cols or cols > rows.\n\n Parameters\n ----------\n rows : int\n Number of rows\n cols : int\n Number of columns\n normal : bool\n Optional. Whether or not to create a matrix using entries drawn\n from the standard normal distribution (N(0, 1)) or not. 
If it's\n False, it will return an orthogonal matrix.\n\n Returns\n -------\n C : (rows, cols) ndarray\n Matrix with orthgonal rows or columns, depending on if rows > cols or\n cols > rows if normal is False, otherwise a matrix with\n coefficients drawn from the standard normal (N(0, 1)).\n \"\"\"\n np.random.seed(57)\n if normal:\n C = np.random.normal(loc=0, scale=1, size=(rows, cols))\n return C\n size = max(rows, cols)\n C = ortho_group.rvs(size)\n return C[:rows, :cols]\n\n\n@memoize\ndef get_Q_c(dim):\n \"\"\" Generates a once-chosen random orthogonal matrix and a random linear combination\n for use in the simultaneous eigenvalue compution.\n\n Parameters\n ----------\n dim : int\n Dimension of the system\n\n Returns\n -------\n Q : (dim, dim) ndarray\n Random orthogonal rotation\n c : (dim, ) ndarray\n Random linear combination\n \"\"\"\n np.random.seed(103)\n Q = ortho_group.rvs(dim)\n c = np.random.randn(dim)\n return Q, c\n\n\ndef msroots(M):\n \"\"\"Computes the roots to a system via the eigenvalues of the Möller-Stetter\n matrices. Implicitly performs a random rotation of the coordinate system\n to avoid repeated eigenvalues arising from special structure in the underlying\n polynomial system. Approximates the joint eigenvalue problem using a Schur\n factorization of a linear combination of the matrices.\n\n Parameters\n ----------\n M : (n, n, dim) ndarray\n Array containing the nxn Möller-Stetter matrices, where the matrix\n corresponding to multiplication by x_i is M[..., i]\n\n Returns\n -------\n roots : (n, dim) ndarray\n Array containing the approximate roots of the system, where each row\n is a root.\n \"\"\"\n dim = M.shape[-1]\n Q, c = get_Q_c(dim)\n M = (Q @ M[..., np.newaxis])[..., 0]\n eigs = np.empty((dim, M.shape[0]), dtype='complex')\n U = schur((M * c).sum(axis=-1), output='complex')[1]\n for i in range(0, dim):\n T = U.T.conj() @ M[..., i] @ U\n w = eig(M[..., i], right=False)\n arr = sort_eigs(w, np.diag(T))\n eigs[i] = w[arr]\n return (Q.T @ eigs).T\n",
"step-5": "import numpy as np\nimport itertools\nfrom scipy.linalg import eig, schur\nfrom eigen_rootfinding.polynomial import MultiCheb, MultiPower\nfrom eigen_rootfinding.utils import memoize\nfrom scipy.stats import ortho_group\n\ndef indexarray(matrix_terms, which, var):\n \"\"\"Compute the array mapping monomials under multiplication by x_var\n\n Parameters\n ----------\n matrix_terms : 2d integer ndarray\n Array containing the monomials in order. matrix_terms[i] is the array\n containing the exponent for each variable in the ith multivariate\n monomial\n which : slice object\n object to index into the matrix_terms for the monomials we want to multiply by var\n var : int\n Variable to multiply by: x_0, ..., x_(dim-1)\n\n Returns\n -------\n arr : 1d integer ndarray\n Array containing the indices of the lower-degree monomials after multiplication\n by x_var\n \"\"\"\n mults = matrix_terms[which].copy()\n mults[:, var] += 1\n return np.argmin(np.abs(mults[:, np.newaxis] - matrix_terms[np.newaxis]).sum(axis=-1), axis=1)\n\ndef indexarray_cheb(matrix_terms, which, var):\n \"\"\"Compute the array mapping Chebyshev monomials under multiplication by x_var:\n\n T_1*T_0 = T_1\n T_1*T_n = .5(T_(n+1)+ T_(n-1))\n\n Parameters\n ----------\n matrix_terms : 2d integer ndarray\n Array containing the monomials in order. matrix_terms[i] is the array\n containing the degree for each univariate Chebyshev monomial in the ith\n multivariate monomial\n m : int\n Number of monomials of highest degree, i.e. those that do not need to be\n multiplied\n var : int\n Variable to multiply by: x_0, ..., x_(dim-1)\n\n Returns\n -------\n arr1 : 1d integer ndarray\n Array containing the indices of T_(n+1)\n arr2 : 1d\n Array containing the indices of T_(n-1)\n \"\"\"\n up = matrix_terms[which].copy()\n up[:, var] += 1\n down = matrix_terms[which].copy()\n down[:, var] -= 1\n down[down[:, var]==-1, var] += 2\n arr1 = np.argmin(np.abs(up[:, np.newaxis] - matrix_terms[np.newaxis]).sum(axis=-1), axis=1)\n arr2 = np.argmin(np.abs(down[:, np.newaxis] - matrix_terms[np.newaxis]).sum(axis=-1), axis=1)\n return arr1, arr2\n\ndef ms_matrices(E, Q, matrix_terms, dim):\n \"\"\"Compute the Möller-Stetter matrices in the monomial basis from a\n reduced Macaulay matrix\n\n Parameters\n ----------\n E : (m, k) ndarray\n Columns of the reduced Macaulay matrix corresponding to the quotient basis\n Q : (l, n) 2d ndarray\n Matrix whose columns give the quotient basis in terms of the monomial basis\n matrix_terms : 2d ndarray\n Array with ordered monomial basis\n dim : int\n Number of variables\n\n Returns\n -------\n M : (n, n, dim) ndarray\n Array containing the nxn Möller-Stetter matrices, where the matrix\n corresponding to multiplication by x_i is M[..., i]\n \"\"\"\n n = Q.shape[1]\n m = E.shape[0]\n M = np.empty((n, n, dim),dtype=E.dtype)\n A = np.vstack((-E, Q))\n for i in range(dim):\n arr = indexarray(matrix_terms, slice(m,None), i)\n M[..., i] = Q.conj().T@A[arr]\n return M\n\ndef ms_matrices_cheb(E, Q, matrix_terms, dim):\n \"\"\"Compute the Möller-Stetter matrices in the Chebyshev basis from a\n reduced Macaulay matrix\n\n Parameters\n ----------\n E : (m, k) ndarray\n Columns of the reduced Macaulay matrix corresponding to the quotient basis\n Q : (l, n) 2d ndarray\n Matrix whose columns give the quotient basis in terms of the Chebyshev basis\n matrix_terms : 2d ndarray\n Array with ordered Chebyshev basis\n dim : int\n Number of variables\n\n Returns\n -------\n M : (n, n, dim) ndarray\n Array containing the nxn 
Möller-Stetter matrices, where the matrix\n corresponding to multiplication by x_i is M[..., i]\n \"\"\"\n n = Q.shape[1]\n m = E.shape[0]\n M = np.empty((n, n, dim),dtype=E.dtype)\n A = np.vstack((-E, Q))\n for i in range(dim):\n arr1, arr2 = indexarray_cheb(matrix_terms, slice(m,None), i)\n M[..., i] = .5*Q.T.conj()@(A[arr1]+A[arr2])\n return M\n\ndef ms_matrices_p(E, P, matrix_terms, dim, cut):\n \"\"\"Compute the Möller-Stetter matrices in the power basis from a\n reduced Macaulay matrix (QRP method)\n\n Parameters\n ----------\n E : (m, k) ndarray\n Columns of the reduced Macaulay matrix corresponding to the quotient basis\n P : (, l) ndarray\n Array of pivots returned in QR with pivoting, used to permute the columns.\n matrix_terms : 2d ndarray\n Array with ordered Chebyshev basis\n dim : int\n Number of variables\n\n Returns\n -------\n M : (n, n, dim) ndarray\n Array containing the nxn Möller-Stetter matrices, where the matrix\n corresponding to multiplication by x_i is M[..., i]\n \"\"\"\n r, n = E.shape\n matrix_terms[cut:] = matrix_terms[cut:][P]\n M = np.empty((n, n, dim),dtype=E.dtype)\n A = np.vstack((-E, np.eye(n)))\n for i in range(dim):\n arr = indexarray(matrix_terms, slice(r,None), i)\n M[..., i] = A[arr]\n return M\n\ndef ms_matrices_p_cheb(E, P, matrix_terms, dim, cut):\n \"\"\" Compute the Möller-Stetter matrices in the Chebyshev basis from a\n reduced Macaulay matrix (QRP method)\n\n Parameters\n ----------\n E : (m, k) ndarray\n Columns of the reduced Macaulay matrix corresponding to the quotient basis\n P : (, l) ndarray\n Array of pivots returned in QR with pivoting, used to permute the columns.\n matrix_terms : 2d ndarray\n Array with ordered Chebyshev basis\n dim : int\n Number of variables\n\n Returns\n -------\n M : (n, n, dim) ndarray\n Array containing the nxn Möller-Stetter matrices, where the matrix\n corresponding to multiplication by x_i is M[..., i]\n \"\"\"\n r, n = E.shape\n matrix_terms[cut:] = matrix_terms[cut:][P]\n M = np.empty((n, n, dim),dtype=E.dtype)\n A = np.vstack((-E, np.eye(n)))\n for i in range(dim):\n arr1, arr2 = indexarray_cheb(matrix_terms, slice(r,None), i)\n M[..., i] = .5*(A[arr1] + A[arr2])\n return M\n\ndef sort_eigs(eigs, diag):\n \"\"\"Sorts the eigs array to match the order on the diagonal\n of the Schur factorization\n\n Parameters\n ----------\n eigs : 1d ndarray\n Array of unsorted eigenvalues\n diag : 1d complex ndarray\n Array containing the diagonal of the approximate Schur factorization\n\n Returns\n -------\n w : 1d ndarray\n Eigenvalues from eigs sorted to match the order in diag\n \"\"\"\n n = diag.shape[0]\n lst = list(range(n))\n arr = []\n for eig in eigs:\n i = lst[np.argmin(np.abs(diag[lst]-eig))]\n arr.append(i)\n lst.remove(i)\n return np.argsort(arr)\n\n@memoize\ndef get_rand_combos_matrix(rows, cols, normal=False):\n \"\"\" Generates a rows by cols random matrix with orthogonal rows or columns,\n depending on if rows > cols or cols > rows.\n\n Parameters\n ----------\n rows : int\n Number of rows\n cols : int\n Number of columns\n normal : bool\n Optional. Whether or not to create a matrix using entries drawn\n from the standard normal distribution (N(0, 1)) or not. 
If it's\n False, it will return an orthogonal matrix.\n\n Returns\n -------\n C : (rows, cols) ndarray\n Matrix with orthgonal rows or columns, depending on if rows > cols or\n cols > rows if normal is False, otherwise a matrix with\n coefficients drawn from the standard normal (N(0, 1)).\n \"\"\"\n np.random.seed(57)\n # TODO perhaps explore different types of random matrices?\n # randn was giving me conditioning problems\n if normal:\n C = np.random.normal(loc=0, scale=1, size=(rows, cols))\n return C\n size = max(rows, cols)\n C = ortho_group.rvs(size)\n return C[:rows, :cols]\n\n@memoize\ndef get_Q_c(dim):\n \"\"\" Generates a once-chosen random orthogonal matrix and a random linear combination\n for use in the simultaneous eigenvalue compution.\n\n Parameters\n ----------\n dim : int\n Dimension of the system\n\n Returns\n -------\n Q : (dim, dim) ndarray\n Random orthogonal rotation\n c : (dim, ) ndarray\n Random linear combination\n \"\"\"\n np.random.seed(103)\n Q = ortho_group.rvs(dim)\n c = np.random.randn(dim)\n return Q, c\n\ndef msroots(M):\n \"\"\"Computes the roots to a system via the eigenvalues of the Möller-Stetter\n matrices. Implicitly performs a random rotation of the coordinate system\n to avoid repeated eigenvalues arising from special structure in the underlying\n polynomial system. Approximates the joint eigenvalue problem using a Schur\n factorization of a linear combination of the matrices.\n\n Parameters\n ----------\n M : (n, n, dim) ndarray\n Array containing the nxn Möller-Stetter matrices, where the matrix\n corresponding to multiplication by x_i is M[..., i]\n\n Returns\n -------\n roots : (n, dim) ndarray\n Array containing the approximate roots of the system, where each row\n is a root.\n \"\"\"\n dim = M.shape[-1]\n\n # perform a random rotation with a random orthogonal Q\n Q, c = get_Q_c(dim)\n M = (Q@M[..., np.newaxis])[..., 0]\n\n eigs = np.empty((dim, M.shape[0]), dtype='complex')\n # Compute the matrix U that triangularizes a random linear combination\n U = schur((M*c).sum(axis=-1), output='complex')[1]\n\n for i in range(0, dim):\n T = (U.T.conj())@(M[..., i])@U\n w = eig(M[..., i], right=False)\n arr = sort_eigs(w, np.diag(T))\n eigs[i] = w[arr]\n\n # Rotate back before returning, transposing to match expected shape\n return (Q.T@eigs).T\n",
"step-ids": [
5,
6,
8,
10,
12
]
}
|
[
5,
6,
8,
10,
12
] |
#
# @lc app=leetcode id=267 lang=python3
#
# [267] Palindrome Permutation II
#
# https://leetcode.com/problems/palindrome-permutation-ii/description/
#
# algorithms
# Medium (33.28%)
# Total Accepted: 24.8K
# Total Submissions: 74.4K
# Testcase Example: '"aabb"'
#
# Given a string s, return all the palindromic permutations (without
# duplicates) of it. Return an empty list if no palindromic permutation could
# be formed.
#
# Example 1:
#
#
# Input: "aabb"
# Output: ["abba", "baab"]
#
# Example 2:
#
#
# Input: "abc"
# Output: []
#
#
class Solution:
def generatePalindromes(self, s: str) -> List[str]:
|
normal
|
{
"blob_id": "4e538251dedfe0b9ffb68de2de7dc50681320f1f",
"index": 8619,
"step-1": "#\n# @lc app=leetcode id=267 lang=python3\n#\n# [267] Palindrome Permutation II\n#\n# https://leetcode.com/problems/palindrome-permutation-ii/description/\n#\n# algorithms\n# Medium (33.28%)\n# Total Accepted: 24.8K\n# Total Submissions: 74.4K\n# Testcase Example: '\"aabb\"'\n#\n# Given a string s, return all the palindromic permutations (without\n# duplicates) of it. Return an empty list if no palindromic permutation could\n# be form.\n# \n# Example 1:\n# \n# \n# Input: \"aabb\"\n# Output: [\"abba\", \"baab\"]\n# \n# Example 2:\n# \n# \n# Input: \"abc\"\n# Output: []\n# \n#\nclass Solution:\n def generatePalindromes(self, s: str) -> List[str]:\n \n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
from flask import Flask
import os
app = Flask(__name__)
@app.route("/healthz")
def healthz():
return "ok"
@app.route("/alive")
def alive():
return "ok"
@app.route("/hello")
# def healthz(): # introduces application crash bug
def hello():
myhost = os.uname()[1]
body = ("V1 - Hello World! - %s" % myhost)
# body = ("V2 - Hello World! - %s" % myhost)
return body
if __name__ == "__main__":
from waitress import serve
serve(app, host="0.0.0.0", port=80)
|
normal
|
{
"blob_id": "0259fddbe3ce030030a508ce7118a6a03930aa51",
"index": 7375,
"step-1": "<mask token>\n\n\[email protected]('/healthz')\ndef healthz():\n return 'ok'\n\n\[email protected]('/alive')\ndef alive():\n return 'ok'\n\n\[email protected]('/hello')\ndef hello():\n myhost = os.uname()[1]\n body = 'V1 - Hello World! - %s' % myhost\n return body\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\[email protected]('/healthz')\ndef healthz():\n return 'ok'\n\n\[email protected]('/alive')\ndef alive():\n return 'ok'\n\n\[email protected]('/hello')\ndef hello():\n myhost = os.uname()[1]\n body = 'V1 - Hello World! - %s' % myhost\n return body\n\n\nif __name__ == '__main__':\n from waitress import serve\n serve(app, host='0.0.0.0', port=80)\n",
"step-3": "<mask token>\napp = Flask(__name__)\n\n\[email protected]('/healthz')\ndef healthz():\n return 'ok'\n\n\[email protected]('/alive')\ndef alive():\n return 'ok'\n\n\[email protected]('/hello')\ndef hello():\n myhost = os.uname()[1]\n body = 'V1 - Hello World! - %s' % myhost\n return body\n\n\nif __name__ == '__main__':\n from waitress import serve\n serve(app, host='0.0.0.0', port=80)\n",
"step-4": "from flask import Flask\nimport os\napp = Flask(__name__)\n\n\[email protected]('/healthz')\ndef healthz():\n return 'ok'\n\n\[email protected]('/alive')\ndef alive():\n return 'ok'\n\n\[email protected]('/hello')\ndef hello():\n myhost = os.uname()[1]\n body = 'V1 - Hello World! - %s' % myhost\n return body\n\n\nif __name__ == '__main__':\n from waitress import serve\n serve(app, host='0.0.0.0', port=80)\n",
"step-5": "from flask import Flask\nimport os\n\napp = Flask(__name__)\n\n\[email protected](\"/healthz\")\ndef healthz():\n return \"ok\"\n\n\[email protected](\"/alive\")\ndef alive():\n return \"ok\"\n\n\[email protected](\"/hello\")\n# def healthz(): # introduces application crash bug\ndef hello():\n myhost = os.uname()[1]\n body = (\"V1 - Hello World! - %s\" % myhost)\n # body = (\"V2 - Hello World! - %s\" % myhost)\n return body\n\n\nif __name__ == \"__main__\":\n from waitress import serve\n serve(app, host=\"0.0.0.0\", port=80)\n",
"step-ids": [
3,
4,
5,
6,
7
]
}
|
[
3,
4,
5,
6,
7
] |
class Solution(object):
def restoreIpAddresses(self, s):
"""
:type s: str
:rtype: List[str]
"""
def helper(sb, string, level):
if len(string) == 0:
if level == 4:
ans.append(sb[:-1])
return
if level == 4: return
for i in range(3):
if i < len(string):
part = string[:i + 1]
if valid(part):
helper(sb + part + '.', string[i + 1:], level + 1)
def valid(num):
if len(num) > 1 and num[0] == '0':
return False
if 0 <= int(num) <= 255:
return True
else:
return False
ans = []
sb = ''
helper(sb, s, 0)
return ans
solution = Solution()
print(solution.restoreIpAddresses("010010"))
|
normal
|
{
"blob_id": "ec4348c61cd1c9130543bb20f9ca199399e1caff",
"index": 226,
"step-1": "class Solution(object):\n def restoreIpAddresses(self, s):\n \"\"\"\n :type s: str\n :rtype: List[str]\n \"\"\"\n\n def helper(sb, string, level):\n if len(string) == 0:\n if level == 4:\n ans.append(sb[:-1])\n return\n if level == 4: return\n for i in range(3):\n if i < len(string):\n part = string[:i + 1]\n if valid(part):\n helper(sb + part + '.', string[i + 1:], level + 1)\n\n def valid(num):\n if len(num) > 1 and num[0] == '0':\n return False\n if 0 <= int(num) <= 255:\n return True\n else:\n return False\n\n ans = []\n sb = ''\n helper(sb, s, 0)\n return ans\n\nsolution = Solution()\nprint solution.restoreIpAddresses(\"010010\")",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
from typing import Union, Tuple
import numpy as np
from UQpy.utilities.kernels.baseclass.GrassmannianKernel import GrassmannianKernel
class ProjectionKernel(GrassmannianKernel):
def __init__(self, kernel_parameter: Union[int, float] = None):
"""
:param kernel_parameter: Number of independent p-planes of each Grassmann point.
"""
super().__init__(kernel_parameter)
def element_wise_operation(self, xi_j: Tuple) -> float:
"""
Compute the Projection kernel entry for a tuple of points on the Grassmann manifold.
:param xi_j: Tuple of orthonormal matrices representing the grassmann points.
"""
xi, xj = xi_j
r = np.dot(xi.T, xj)
n = np.linalg.norm(r, "fro")
return n * n
|
normal
|
{
"blob_id": "14ce803e3deb529b489c150c7ecc702118448acb",
"index": 9022,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass ProjectionKernel(GrassmannianKernel):\n <mask token>\n\n def element_wise_operation(self, xi_j: Tuple) ->float:\n \"\"\"\n Compute the Projection kernel entry for a tuple of points on the Grassmann manifold.\n\n :param xi_j: Tuple of orthonormal matrices representing the grassmann points.\n \"\"\"\n xi, xj = xi_j\n r = np.dot(xi.T, xj)\n n = np.linalg.norm(r, 'fro')\n return n * n\n",
"step-3": "<mask token>\n\n\nclass ProjectionKernel(GrassmannianKernel):\n\n def __init__(self, kernel_parameter: Union[int, float]=None):\n \"\"\"\n :param kernel_parameter: Number of independent p-planes of each Grassmann point.\n \"\"\"\n super().__init__(kernel_parameter)\n\n def element_wise_operation(self, xi_j: Tuple) ->float:\n \"\"\"\n Compute the Projection kernel entry for a tuple of points on the Grassmann manifold.\n\n :param xi_j: Tuple of orthonormal matrices representing the grassmann points.\n \"\"\"\n xi, xj = xi_j\n r = np.dot(xi.T, xj)\n n = np.linalg.norm(r, 'fro')\n return n * n\n",
"step-4": "from typing import Union, Tuple\nimport numpy as np\nfrom UQpy.utilities.kernels.baseclass.GrassmannianKernel import GrassmannianKernel\n\n\nclass ProjectionKernel(GrassmannianKernel):\n\n def __init__(self, kernel_parameter: Union[int, float]=None):\n \"\"\"\n :param kernel_parameter: Number of independent p-planes of each Grassmann point.\n \"\"\"\n super().__init__(kernel_parameter)\n\n def element_wise_operation(self, xi_j: Tuple) ->float:\n \"\"\"\n Compute the Projection kernel entry for a tuple of points on the Grassmann manifold.\n\n :param xi_j: Tuple of orthonormal matrices representing the grassmann points.\n \"\"\"\n xi, xj = xi_j\n r = np.dot(xi.T, xj)\n n = np.linalg.norm(r, 'fro')\n return n * n\n",
"step-5": "from typing import Union, Tuple\n\nimport numpy as np\n\nfrom UQpy.utilities.kernels.baseclass.GrassmannianKernel import GrassmannianKernel\n\n\nclass ProjectionKernel(GrassmannianKernel):\n\n def __init__(self, kernel_parameter: Union[int, float] = None):\n \"\"\"\n :param kernel_parameter: Number of independent p-planes of each Grassmann point.\n \"\"\"\n super().__init__(kernel_parameter)\n\n def element_wise_operation(self, xi_j: Tuple) -> float:\n \"\"\"\n Compute the Projection kernel entry for a tuple of points on the Grassmann manifold.\n\n :param xi_j: Tuple of orthonormal matrices representing the grassmann points.\n \"\"\"\n xi, xj = xi_j\n r = np.dot(xi.T, xj)\n n = np.linalg.norm(r, \"fro\")\n return n * n\n",
"step-ids": [
0,
2,
3,
4,
5
]
}
|
[
0,
2,
3,
4,
5
] |
<|reserved_special_token_0|>
class EndpointsUnitTests(unittest.TestCase):
<|reserved_special_token_0|>
@patch('github_approval_checker.utils.util.verify_signature')
@patch('github_approval_checker.api.endpoints.connexion')
@patch('github_approval_checker.api.endpoints.GithubHandler')
@patch('github_approval_checker.utils.util.validate_config')
def test_post_pull_request_review(self, validate_config, handler_class,
conn, verify_signature):
"""
Test endpoints.post_pull_request_review
"""
conn.request.data.return_value = ''
conn.request.headers.get.return_value = 'sha1=signature'
verify_signature.return_value = None
handler = handler_class.return_value
handler.get_config.return_value = {'context1': ['whitelist1'],
'context2': ['whitelist2']}
handler.get_statuses.return_value = [{'state': 'error', 'context':
'context2', 'target_url': 'fake://status_target_2',
'description': 'Status Check 2'}, {'state': 'pending',
'context': 'context3', 'target_url': 'fake://status_target_3',
'description': 'Status Check 3'}, {'state': 'failure',
'context': 'context1', 'target_url': 'fake://status_target_1',
'description': 'Status Check 1'}]
handler.is_authorized.return_value = True
validate_config.return_value = None
data = {'repository': {'name': 'repo-name', 'full_name':
'repo-full-name', 'owner': {'login': 'repo-owner'}}, 'review':
{'state': 'approved', 'commit_id': 'review-commit-id', 'user':
{'login': 'review-user-login'}}}
handler.post_status.side_effect = [201, 400]
response = endpoints.post_pull_request_review(data)
handler.get_statuses.assert_called_once_with('repo-full-name',
'review-commit-id')
self.assertEqual(handler.is_authorized.call_count, 2)
handler.post_status.assert_has_calls([call('repo-full-name',
'review-commit-id', 'context2', 'fake://status_target_2',
'review-user-login', 'Status Check 2'), call('repo-full-name',
'review-commit-id', 'context1', 'fake://status_target_1',
'review-user-login', 'Status Check 1')])
self.assertEqual(response, util.STATUS_OK)
<|reserved_special_token_0|>
@patch('github_approval_checker.utils.util.verify_signature')
@patch('github_approval_checker.api.endpoints.connexion')
@patch('github_approval_checker.api.endpoints.GithubHandler')
def test_post_pull_request_review_missing(self, handler_class, conn,
verify_signature):
"""
Test endpoints.post_pull_request_review with a missing config file
"""
conn.request.data.return_value = ''
conn.request.headers.get.return_value = 'sha1=signature'
verify_signature.return_value = None
handler = handler_class.return_value
handler.get_config.side_effect = APIError('config-error',
"{'message': 'bad-config'}")
data = {'repository': {'name': 'repo-name', 'full_name':
'repo-full-name', 'owner': {'login': 'repo-owner'}}, 'review':
{'state': 'changes-requested', 'commit_id': 'review-commit-id',
'user': {'login': 'review-user-login'}}}
response = endpoints.post_pull_request_review(data)
handler.get_statuses.assert_not_called()
handler.is_authorized.assert_not_called()
handler.post_status.assert_not_called()
self.assertEqual(response, "{'message': 'bad-config'}")
@patch('github_approval_checker.utils.util.verify_signature')
@patch('github_approval_checker.api.endpoints.connexion')
@patch('github_approval_checker.api.endpoints.GithubHandler')
@patch('github_approval_checker.utils.util.validate_config')
def test_post_pull_request_review_bad_config(self, validate_config,
handler_class, conn, verify_signature):
"""
Test endpoints.post_pull_request_review with a bad config file
"""
conn.request.data.return_value = ''
conn.request.headers.get.return_value = 'sha1=signature'
verify_signature.return_value = None
handler = handler_class.return_value
handler.get_config.return_value = 'config-data'
validate_config.side_effect = ConfigError('Config Validation Error',
({'status': 'Config Validation Error', 'message':
'Bad config data'}, 500))
data = {'repository': {'name': 'repo-name', 'full_name':
'repo-full-name', 'owner': {'login': 'repo-owner'}}, 'review':
{'state': 'changes-requested', 'commit_id': 'review-commit-id',
'user': {'login': 'review-user-login'}}}
response = endpoints.post_pull_request_review(data)
handler.get_statuses.assert_not_called()
handler.is_authorized.assert_not_called()
handler.post_status.assert_not_called()
handler.get_config.assert_called_once_with('repo-full-name', None)
validate_config.assert_called_once_with('config-data')
self.assertEqual(response, ({'status': 'Config Validation Error',
'message': 'Bad config data'}, 500))
@patch('github_approval_checker.utils.util.verify_signature')
@patch('github_approval_checker.api.endpoints.connexion')
@patch('github_approval_checker.api.endpoints.GithubHandler')
@patch('github_approval_checker.utils.util.validate_config')
def test_post_pull_request_review_bad_sign(self, validate_config,
handler_class, conn, verify_signature):
"""
Test endpoints.post_pull_request_review with an incorrect signature
"""
conn.request.data.return_value = ''
conn.request.headers.get.return_value = 'sha1=signature'
verify_signature.side_effect = SignatureError(
'Error validating signature')
response = endpoints.post_pull_request_review({})
handler = handler_class.return_value
handler.get_config.return_value = 'config-data'
handler.get_statuses.assert_not_called()
handler.is_authorized.assert_not_called()
handler.post_status.assert_not_called()
handler.get_config.assert_not_called()
validate_config.assert_not_called()
self.assertEqual(response, ({'status': 'Signature Validation Error',
'message': 'Error validating signature'}, 400))
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class EndpointsUnitTests(unittest.TestCase):
<|reserved_special_token_0|>
@patch('github_approval_checker.utils.util.verify_signature')
@patch('github_approval_checker.api.endpoints.connexion')
@patch('github_approval_checker.api.endpoints.GithubHandler')
@patch('github_approval_checker.utils.util.validate_config')
def test_post_pull_request_review(self, validate_config, handler_class,
conn, verify_signature):
"""
Test endpoints.post_pull_request_review
"""
conn.request.data.return_value = ''
conn.request.headers.get.return_value = 'sha1=signature'
verify_signature.return_value = None
handler = handler_class.return_value
handler.get_config.return_value = {'context1': ['whitelist1'],
'context2': ['whitelist2']}
handler.get_statuses.return_value = [{'state': 'error', 'context':
'context2', 'target_url': 'fake://status_target_2',
'description': 'Status Check 2'}, {'state': 'pending',
'context': 'context3', 'target_url': 'fake://status_target_3',
'description': 'Status Check 3'}, {'state': 'failure',
'context': 'context1', 'target_url': 'fake://status_target_1',
'description': 'Status Check 1'}]
handler.is_authorized.return_value = True
validate_config.return_value = None
data = {'repository': {'name': 'repo-name', 'full_name':
'repo-full-name', 'owner': {'login': 'repo-owner'}}, 'review':
{'state': 'approved', 'commit_id': 'review-commit-id', 'user':
{'login': 'review-user-login'}}}
handler.post_status.side_effect = [201, 400]
response = endpoints.post_pull_request_review(data)
handler.get_statuses.assert_called_once_with('repo-full-name',
'review-commit-id')
self.assertEqual(handler.is_authorized.call_count, 2)
handler.post_status.assert_has_calls([call('repo-full-name',
'review-commit-id', 'context2', 'fake://status_target_2',
'review-user-login', 'Status Check 2'), call('repo-full-name',
'review-commit-id', 'context1', 'fake://status_target_1',
'review-user-login', 'Status Check 1')])
self.assertEqual(response, util.STATUS_OK)
@patch('github_approval_checker.utils.util.verify_signature')
@patch('github_approval_checker.api.endpoints.connexion')
@patch('github_approval_checker.api.endpoints.GithubHandler')
@patch('github_approval_checker.utils.util.validate_config')
def test_post_pull_request_review_unapproved(self, validate_config,
handler_class, conn, verify_signature):
"""
Test endpoints.post_pull_request_review with a review where the status is not approved.
"""
conn.request.data.return_value = ''
conn.request.headers.get.return_value = 'sha1=signature'
verify_signature.return_value = None
handler = handler_class.return_value
handler.get_config.return_value = {'context1': ['whitelist1'],
'context2': ['whitelist2']}
validate_config.return_value = None
data = {'repository': {'name': 'repo-name', 'full_name':
'repo-full-name', 'owner': {'login': 'repo-owner'}}, 'review':
{'state': 'changes-requested', 'commit_id': 'review-commit-id',
'user': {'login': 'review-user-login'}}}
response = endpoints.post_pull_request_review(data)
handler.get_statuses.assert_not_called()
handler.is_authorized.assert_not_called()
handler.post_status.assert_not_called()
self.assertEqual(response, ({'status': 'OK', 'message':
'Review state is not approved'}, 200))
@patch('github_approval_checker.utils.util.verify_signature')
@patch('github_approval_checker.api.endpoints.connexion')
@patch('github_approval_checker.api.endpoints.GithubHandler')
def test_post_pull_request_review_missing(self, handler_class, conn,
verify_signature):
"""
Test endpoints.post_pull_request_review with a missing config file
"""
conn.request.data.return_value = ''
conn.request.headers.get.return_value = 'sha1=signature'
verify_signature.return_value = None
handler = handler_class.return_value
handler.get_config.side_effect = APIError('config-error',
"{'message': 'bad-config'}")
data = {'repository': {'name': 'repo-name', 'full_name':
'repo-full-name', 'owner': {'login': 'repo-owner'}}, 'review':
{'state': 'changes-requested', 'commit_id': 'review-commit-id',
'user': {'login': 'review-user-login'}}}
response = endpoints.post_pull_request_review(data)
handler.get_statuses.assert_not_called()
handler.is_authorized.assert_not_called()
handler.post_status.assert_not_called()
self.assertEqual(response, "{'message': 'bad-config'}")
@patch('github_approval_checker.utils.util.verify_signature')
@patch('github_approval_checker.api.endpoints.connexion')
@patch('github_approval_checker.api.endpoints.GithubHandler')
@patch('github_approval_checker.utils.util.validate_config')
def test_post_pull_request_review_bad_config(self, validate_config,
handler_class, conn, verify_signature):
"""
Test endpoints.post_pull_request_review with a bad config file
"""
conn.request.data.return_value = ''
conn.request.headers.get.return_value = 'sha1=signature'
verify_signature.return_value = None
handler = handler_class.return_value
handler.get_config.return_value = 'config-data'
validate_config.side_effect = ConfigError('Config Validation Error',
({'status': 'Config Validation Error', 'message':
'Bad config data'}, 500))
data = {'repository': {'name': 'repo-name', 'full_name':
'repo-full-name', 'owner': {'login': 'repo-owner'}}, 'review':
{'state': 'changes-requested', 'commit_id': 'review-commit-id',
'user': {'login': 'review-user-login'}}}
response = endpoints.post_pull_request_review(data)
handler.get_statuses.assert_not_called()
handler.is_authorized.assert_not_called()
handler.post_status.assert_not_called()
handler.get_config.assert_called_once_with('repo-full-name', None)
validate_config.assert_called_once_with('config-data')
self.assertEqual(response, ({'status': 'Config Validation Error',
'message': 'Bad config data'}, 500))
@patch('github_approval_checker.utils.util.verify_signature')
@patch('github_approval_checker.api.endpoints.connexion')
@patch('github_approval_checker.api.endpoints.GithubHandler')
@patch('github_approval_checker.utils.util.validate_config')
def test_post_pull_request_review_bad_sign(self, validate_config,
handler_class, conn, verify_signature):
"""
Test endpoints.post_pull_request_review with an incorrect signature
"""
conn.request.data.return_value = ''
conn.request.headers.get.return_value = 'sha1=signature'
verify_signature.side_effect = SignatureError(
'Error validating signature')
response = endpoints.post_pull_request_review({})
handler = handler_class.return_value
handler.get_config.return_value = 'config-data'
handler.get_statuses.assert_not_called()
handler.is_authorized.assert_not_called()
handler.post_status.assert_not_called()
handler.get_config.assert_not_called()
validate_config.assert_not_called()
self.assertEqual(response, ({'status': 'Signature Validation Error',
'message': 'Error validating signature'}, 400))
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class EndpointsUnitTests(unittest.TestCase):
"""
Test endpoints.py
"""
@patch('github_approval_checker.utils.util.verify_signature')
@patch('github_approval_checker.api.endpoints.connexion')
@patch('github_approval_checker.api.endpoints.GithubHandler')
@patch('github_approval_checker.utils.util.validate_config')
def test_post_pull_request_review(self, validate_config, handler_class,
conn, verify_signature):
"""
Test endpoints.post_pull_request_review
"""
conn.request.data.return_value = ''
conn.request.headers.get.return_value = 'sha1=signature'
verify_signature.return_value = None
handler = handler_class.return_value
handler.get_config.return_value = {'context1': ['whitelist1'],
'context2': ['whitelist2']}
handler.get_statuses.return_value = [{'state': 'error', 'context':
'context2', 'target_url': 'fake://status_target_2',
'description': 'Status Check 2'}, {'state': 'pending',
'context': 'context3', 'target_url': 'fake://status_target_3',
'description': 'Status Check 3'}, {'state': 'failure',
'context': 'context1', 'target_url': 'fake://status_target_1',
'description': 'Status Check 1'}]
handler.is_authorized.return_value = True
validate_config.return_value = None
data = {'repository': {'name': 'repo-name', 'full_name':
'repo-full-name', 'owner': {'login': 'repo-owner'}}, 'review':
{'state': 'approved', 'commit_id': 'review-commit-id', 'user':
{'login': 'review-user-login'}}}
handler.post_status.side_effect = [201, 400]
response = endpoints.post_pull_request_review(data)
handler.get_statuses.assert_called_once_with('repo-full-name',
'review-commit-id')
self.assertEqual(handler.is_authorized.call_count, 2)
handler.post_status.assert_has_calls([call('repo-full-name',
'review-commit-id', 'context2', 'fake://status_target_2',
'review-user-login', 'Status Check 2'), call('repo-full-name',
'review-commit-id', 'context1', 'fake://status_target_1',
'review-user-login', 'Status Check 1')])
self.assertEqual(response, util.STATUS_OK)
@patch('github_approval_checker.utils.util.verify_signature')
@patch('github_approval_checker.api.endpoints.connexion')
@patch('github_approval_checker.api.endpoints.GithubHandler')
@patch('github_approval_checker.utils.util.validate_config')
def test_post_pull_request_review_unapproved(self, validate_config,
handler_class, conn, verify_signature):
"""
Test endpoints.post_pull_request_review with a review where the status is not approved.
"""
conn.request.data.return_value = ''
conn.request.headers.get.return_value = 'sha1=signature'
verify_signature.return_value = None
handler = handler_class.return_value
handler.get_config.return_value = {'context1': ['whitelist1'],
'context2': ['whitelist2']}
validate_config.return_value = None
data = {'repository': {'name': 'repo-name', 'full_name':
'repo-full-name', 'owner': {'login': 'repo-owner'}}, 'review':
{'state': 'changes-requested', 'commit_id': 'review-commit-id',
'user': {'login': 'review-user-login'}}}
response = endpoints.post_pull_request_review(data)
handler.get_statuses.assert_not_called()
handler.is_authorized.assert_not_called()
handler.post_status.assert_not_called()
self.assertEqual(response, ({'status': 'OK', 'message':
'Review state is not approved'}, 200))
@patch('github_approval_checker.utils.util.verify_signature')
@patch('github_approval_checker.api.endpoints.connexion')
@patch('github_approval_checker.api.endpoints.GithubHandler')
def test_post_pull_request_review_missing(self, handler_class, conn,
verify_signature):
"""
Test endpoints.post_pull_request_review with a missing config file
"""
conn.request.data.return_value = ''
conn.request.headers.get.return_value = 'sha1=signature'
verify_signature.return_value = None
handler = handler_class.return_value
handler.get_config.side_effect = APIError('config-error',
"{'message': 'bad-config'}")
data = {'repository': {'name': 'repo-name', 'full_name':
'repo-full-name', 'owner': {'login': 'repo-owner'}}, 'review':
{'state': 'changes-requested', 'commit_id': 'review-commit-id',
'user': {'login': 'review-user-login'}}}
response = endpoints.post_pull_request_review(data)
handler.get_statuses.assert_not_called()
handler.is_authorized.assert_not_called()
handler.post_status.assert_not_called()
self.assertEqual(response, "{'message': 'bad-config'}")
@patch('github_approval_checker.utils.util.verify_signature')
@patch('github_approval_checker.api.endpoints.connexion')
@patch('github_approval_checker.api.endpoints.GithubHandler')
@patch('github_approval_checker.utils.util.validate_config')
def test_post_pull_request_review_bad_config(self, validate_config,
handler_class, conn, verify_signature):
"""
Test endpoints.post_pull_request_review with a bad config file
"""
conn.request.data.return_value = ''
conn.request.headers.get.return_value = 'sha1=signature'
verify_signature.return_value = None
handler = handler_class.return_value
handler.get_config.return_value = 'config-data'
validate_config.side_effect = ConfigError('Config Validation Error',
({'status': 'Config Validation Error', 'message':
'Bad config data'}, 500))
data = {'repository': {'name': 'repo-name', 'full_name':
'repo-full-name', 'owner': {'login': 'repo-owner'}}, 'review':
{'state': 'changes-requested', 'commit_id': 'review-commit-id',
'user': {'login': 'review-user-login'}}}
response = endpoints.post_pull_request_review(data)
handler.get_statuses.assert_not_called()
handler.is_authorized.assert_not_called()
handler.post_status.assert_not_called()
handler.get_config.assert_called_once_with('repo-full-name', None)
validate_config.assert_called_once_with('config-data')
self.assertEqual(response, ({'status': 'Config Validation Error',
'message': 'Bad config data'}, 500))
@patch('github_approval_checker.utils.util.verify_signature')
@patch('github_approval_checker.api.endpoints.connexion')
@patch('github_approval_checker.api.endpoints.GithubHandler')
@patch('github_approval_checker.utils.util.validate_config')
def test_post_pull_request_review_bad_sign(self, validate_config,
handler_class, conn, verify_signature):
"""
Test endpoints.post_pull_request_review with an incorrect signature
"""
conn.request.data.return_value = ''
conn.request.headers.get.return_value = 'sha1=signature'
verify_signature.side_effect = SignatureError(
'Error validating signature')
response = endpoints.post_pull_request_review({})
handler = handler_class.return_value
handler.get_config.return_value = 'config-data'
handler.get_statuses.assert_not_called()
handler.is_authorized.assert_not_called()
handler.post_status.assert_not_called()
handler.get_config.assert_not_called()
validate_config.assert_not_called()
self.assertEqual(response, ({'status': 'Signature Validation Error',
'message': 'Error validating signature'}, 400))
<|reserved_special_token_1|>
<|reserved_special_token_0|>
import unittest
import os
from mock import patch, call
from github_approval_checker.utils import util
from github_approval_checker.utils.github_handler import GithubHandler
from github_approval_checker.utils.exceptions import ConfigError, APIError, SignatureError
from github_approval_checker.api import endpoints
class EndpointsUnitTests(unittest.TestCase):
"""
Test endpoints.py
"""
@patch('github_approval_checker.utils.util.verify_signature')
@patch('github_approval_checker.api.endpoints.connexion')
@patch('github_approval_checker.api.endpoints.GithubHandler')
@patch('github_approval_checker.utils.util.validate_config')
def test_post_pull_request_review(self, validate_config, handler_class,
conn, verify_signature):
"""
Test endpoints.post_pull_request_review
"""
conn.request.data.return_value = ''
conn.request.headers.get.return_value = 'sha1=signature'
verify_signature.return_value = None
handler = handler_class.return_value
handler.get_config.return_value = {'context1': ['whitelist1'],
'context2': ['whitelist2']}
handler.get_statuses.return_value = [{'state': 'error', 'context':
'context2', 'target_url': 'fake://status_target_2',
'description': 'Status Check 2'}, {'state': 'pending',
'context': 'context3', 'target_url': 'fake://status_target_3',
'description': 'Status Check 3'}, {'state': 'failure',
'context': 'context1', 'target_url': 'fake://status_target_1',
'description': 'Status Check 1'}]
handler.is_authorized.return_value = True
validate_config.return_value = None
data = {'repository': {'name': 'repo-name', 'full_name':
'repo-full-name', 'owner': {'login': 'repo-owner'}}, 'review':
{'state': 'approved', 'commit_id': 'review-commit-id', 'user':
{'login': 'review-user-login'}}}
handler.post_status.side_effect = [201, 400]
response = endpoints.post_pull_request_review(data)
handler.get_statuses.assert_called_once_with('repo-full-name',
'review-commit-id')
self.assertEqual(handler.is_authorized.call_count, 2)
handler.post_status.assert_has_calls([call('repo-full-name',
'review-commit-id', 'context2', 'fake://status_target_2',
'review-user-login', 'Status Check 2'), call('repo-full-name',
'review-commit-id', 'context1', 'fake://status_target_1',
'review-user-login', 'Status Check 1')])
self.assertEqual(response, util.STATUS_OK)
@patch('github_approval_checker.utils.util.verify_signature')
@patch('github_approval_checker.api.endpoints.connexion')
@patch('github_approval_checker.api.endpoints.GithubHandler')
@patch('github_approval_checker.utils.util.validate_config')
def test_post_pull_request_review_unapproved(self, validate_config,
handler_class, conn, verify_signature):
"""
Test endpoints.post_pull_request_review with a review where the status is not approved.
"""
conn.request.data.return_value = ''
conn.request.headers.get.return_value = 'sha1=signature'
verify_signature.return_value = None
handler = handler_class.return_value
handler.get_config.return_value = {'context1': ['whitelist1'],
'context2': ['whitelist2']}
validate_config.return_value = None
data = {'repository': {'name': 'repo-name', 'full_name':
'repo-full-name', 'owner': {'login': 'repo-owner'}}, 'review':
{'state': 'changes-requested', 'commit_id': 'review-commit-id',
'user': {'login': 'review-user-login'}}}
response = endpoints.post_pull_request_review(data)
handler.get_statuses.assert_not_called()
handler.is_authorized.assert_not_called()
handler.post_status.assert_not_called()
self.assertEqual(response, ({'status': 'OK', 'message':
'Review state is not approved'}, 200))
@patch('github_approval_checker.utils.util.verify_signature')
@patch('github_approval_checker.api.endpoints.connexion')
@patch('github_approval_checker.api.endpoints.GithubHandler')
def test_post_pull_request_review_missing(self, handler_class, conn,
verify_signature):
"""
Test endpoints.post_pull_request_review with a missing config file
"""
conn.request.data.return_value = ''
conn.request.headers.get.return_value = 'sha1=signature'
verify_signature.return_value = None
handler = handler_class.return_value
handler.get_config.side_effect = APIError('config-error',
"{'message': 'bad-config'}")
data = {'repository': {'name': 'repo-name', 'full_name':
'repo-full-name', 'owner': {'login': 'repo-owner'}}, 'review':
{'state': 'changes-requested', 'commit_id': 'review-commit-id',
'user': {'login': 'review-user-login'}}}
response = endpoints.post_pull_request_review(data)
handler.get_statuses.assert_not_called()
handler.is_authorized.assert_not_called()
handler.post_status.assert_not_called()
self.assertEqual(response, "{'message': 'bad-config'}")
@patch('github_approval_checker.utils.util.verify_signature')
@patch('github_approval_checker.api.endpoints.connexion')
@patch('github_approval_checker.api.endpoints.GithubHandler')
@patch('github_approval_checker.utils.util.validate_config')
def test_post_pull_request_review_bad_config(self, validate_config,
handler_class, conn, verify_signature):
"""
Test endpoints.post_pull_request_review with a bad config file
"""
conn.request.data.return_value = ''
conn.request.headers.get.return_value = 'sha1=signature'
verify_signature.return_value = None
handler = handler_class.return_value
handler.get_config.return_value = 'config-data'
validate_config.side_effect = ConfigError('Config Validation Error',
({'status': 'Config Validation Error', 'message':
'Bad config data'}, 500))
data = {'repository': {'name': 'repo-name', 'full_name':
'repo-full-name', 'owner': {'login': 'repo-owner'}}, 'review':
{'state': 'changes-requested', 'commit_id': 'review-commit-id',
'user': {'login': 'review-user-login'}}}
response = endpoints.post_pull_request_review(data)
handler.get_statuses.assert_not_called()
handler.is_authorized.assert_not_called()
handler.post_status.assert_not_called()
handler.get_config.assert_called_once_with('repo-full-name', None)
validate_config.assert_called_once_with('config-data')
self.assertEqual(response, ({'status': 'Config Validation Error',
'message': 'Bad config data'}, 500))
@patch('github_approval_checker.utils.util.verify_signature')
@patch('github_approval_checker.api.endpoints.connexion')
@patch('github_approval_checker.api.endpoints.GithubHandler')
@patch('github_approval_checker.utils.util.validate_config')
def test_post_pull_request_review_bad_sign(self, validate_config,
handler_class, conn, verify_signature):
"""
Test endpoints.post_pull_request_review with an incorrect signature
"""
conn.request.data.return_value = ''
conn.request.headers.get.return_value = 'sha1=signature'
verify_signature.side_effect = SignatureError(
'Error validating signature')
response = endpoints.post_pull_request_review({})
handler = handler_class.return_value
handler.get_config.return_value = 'config-data'
handler.get_statuses.assert_not_called()
handler.is_authorized.assert_not_called()
handler.post_status.assert_not_called()
handler.get_config.assert_not_called()
validate_config.assert_not_called()
self.assertEqual(response, ({'status': 'Signature Validation Error',
'message': 'Error validating signature'}, 400))
<|reserved_special_token_1|>
"""
Unit Tests for endpoints.py
"""
import unittest
import os # pylint: disable=unused-import
from mock import patch, call
from github_approval_checker.utils import util # pylint: disable=unused-import
from github_approval_checker.utils.github_handler import GithubHandler # pylint: disable=unused-import
from github_approval_checker.utils.exceptions import ConfigError, APIError, SignatureError # noqa pylint: disable=unused-import
from github_approval_checker.api import endpoints # pylint: disable=unused-import
class EndpointsUnitTests(unittest.TestCase):
"""
Test endpoints.py
"""
@patch("github_approval_checker.utils.util.verify_signature")
@patch("github_approval_checker.api.endpoints.connexion")
@patch("github_approval_checker.api.endpoints.GithubHandler")
@patch("github_approval_checker.utils.util.validate_config")
def test_post_pull_request_review(
self,
validate_config,
handler_class,
conn,
verify_signature
):
"""
Test endpoints.post_pull_request_review
"""
conn.request.data.return_value = ''
conn.request.headers.get.return_value = 'sha1=signature'
verify_signature.return_value = None
handler = handler_class.return_value
handler.get_config.return_value = {
"context1": [
"whitelist1"
],
"context2": [
"whitelist2"
]
}
handler.get_statuses.return_value = [
{
"state": "error",
"context": "context2",
"target_url": "fake://status_target_2",
"description": "Status Check 2"
},
{
"state": "pending",
"context": "context3",
"target_url": "fake://status_target_3",
"description": "Status Check 3"
},
{
"state": "failure",
"context": "context1",
"target_url": "fake://status_target_1",
"description": "Status Check 1"
}
]
handler.is_authorized.return_value = True
validate_config.return_value = None
data = {
"repository": {
"name": "repo-name",
"full_name": "repo-full-name",
"owner": {
"login": "repo-owner"
}
},
"review": {
"state": "approved",
"commit_id": "review-commit-id",
"user": {
"login": "review-user-login"
}
}
}
handler.post_status.side_effect = [
201,
400
]
response = endpoints.post_pull_request_review(data)
handler.get_statuses.assert_called_once_with("repo-full-name", "review-commit-id")
self.assertEqual(handler.is_authorized.call_count, 2)
handler.post_status.assert_has_calls([
call(
"repo-full-name",
"review-commit-id",
"context2",
"fake://status_target_2",
"review-user-login",
"Status Check 2"
),
call(
"repo-full-name",
"review-commit-id",
"context1",
"fake://status_target_1",
"review-user-login",
"Status Check 1"
)
])
self.assertEqual(response, util.STATUS_OK)
@patch("github_approval_checker.utils.util.verify_signature")
@patch("github_approval_checker.api.endpoints.connexion")
@patch("github_approval_checker.api.endpoints.GithubHandler")
@patch("github_approval_checker.utils.util.validate_config")
def test_post_pull_request_review_unapproved(
self,
validate_config,
handler_class,
conn,
verify_signature
):
"""
Test endpoints.post_pull_request_review with a review where the status is not approved.
"""
conn.request.data.return_value = ''
conn.request.headers.get.return_value = 'sha1=signature'
verify_signature.return_value = None
handler = handler_class.return_value
handler.get_config.return_value = {
"context1": [
"whitelist1"
],
"context2": [
"whitelist2"
]
}
validate_config.return_value = None
data = {
"repository": {
"name": "repo-name",
"full_name": "repo-full-name",
"owner": {
"login": "repo-owner"
}
},
"review": {
"state": "changes-requested",
"commit_id": "review-commit-id",
"user": {
"login": "review-user-login"
}
}
}
response = endpoints.post_pull_request_review(data)
handler.get_statuses.assert_not_called()
handler.is_authorized.assert_not_called()
handler.post_status.assert_not_called()
self.assertEqual(response, ({'status': 'OK', 'message': 'Review state is not approved'}, 200))
@patch("github_approval_checker.utils.util.verify_signature")
@patch("github_approval_checker.api.endpoints.connexion")
@patch("github_approval_checker.api.endpoints.GithubHandler")
def test_post_pull_request_review_missing(
self,
handler_class,
conn,
verify_signature
):
"""
Test endpoints.post_pull_request_review with a missing config file
"""
conn.request.data.return_value = ''
conn.request.headers.get.return_value = 'sha1=signature'
verify_signature.return_value = None
handler = handler_class.return_value
handler.get_config.side_effect = APIError("config-error", "{'message': 'bad-config'}")
data = {
"repository": {
"name": "repo-name",
"full_name": "repo-full-name",
"owner": {
"login": "repo-owner"
}
},
"review": {
"state": "changes-requested",
"commit_id": "review-commit-id",
"user": {
"login": "review-user-login"
}
}
}
response = endpoints.post_pull_request_review(data)
handler.get_statuses.assert_not_called()
handler.is_authorized.assert_not_called()
handler.post_status.assert_not_called()
self.assertEqual(response, "{'message': 'bad-config'}")
@patch("github_approval_checker.utils.util.verify_signature")
@patch("github_approval_checker.api.endpoints.connexion")
@patch("github_approval_checker.api.endpoints.GithubHandler")
@patch("github_approval_checker.utils.util.validate_config")
def test_post_pull_request_review_bad_config(
self,
validate_config,
handler_class,
conn,
verify_signature
):
"""
Test endpoints.post_pull_request_review with a bad config file
"""
conn.request.data.return_value = ''
conn.request.headers.get.return_value = 'sha1=signature'
verify_signature.return_value = None
handler = handler_class.return_value
handler.get_config.return_value = "config-data"
validate_config.side_effect = ConfigError(
'Config Validation Error',
({'status': 'Config Validation Error', 'message': 'Bad config data'}, 500)
)
data = {
"repository": {
"name": "repo-name",
"full_name": "repo-full-name",
"owner": {
"login": "repo-owner"
}
},
"review": {
"state": "changes-requested",
"commit_id": "review-commit-id",
"user": {
"login": "review-user-login"
}
}
}
response = endpoints.post_pull_request_review(data)
handler.get_statuses.assert_not_called()
handler.is_authorized.assert_not_called()
handler.post_status.assert_not_called()
handler.get_config.assert_called_once_with("repo-full-name", None)
validate_config.assert_called_once_with("config-data")
self.assertEqual(
response,
(
{
'status': 'Config Validation Error',
'message': 'Bad config data'
},
500
)
)
@patch("github_approval_checker.utils.util.verify_signature")
@patch("github_approval_checker.api.endpoints.connexion")
@patch("github_approval_checker.api.endpoints.GithubHandler")
@patch("github_approval_checker.utils.util.validate_config")
def test_post_pull_request_review_bad_sign(
self,
validate_config,
handler_class,
conn,
verify_signature
):
"""
Test endpoints.post_pull_request_review with an incorrect signature
"""
conn.request.data.return_value = ''
conn.request.headers.get.return_value = 'sha1=signature'
verify_signature.side_effect = SignatureError("Error validating signature")
response = endpoints.post_pull_request_review({})
handler = handler_class.return_value
handler.get_config.return_value = "config-data"
handler.get_statuses.assert_not_called()
handler.is_authorized.assert_not_called()
handler.post_status.assert_not_called()
handler.get_config.assert_not_called()
validate_config.assert_not_called()
self.assertEqual(
response,
(
{
'status': 'Signature Validation Error',
'message': 'Error validating signature'
},
400
)
)
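# Hypothetical sketch: the module under test, github_approval_checker.api.endpoints, is not part
# of this file. The following reconstructs what post_pull_request_review might look like,
# inferred only from what the mocks above assert: signature check first, config load and
# validation, an early exit for non-approved reviews, then re-posting statuses for authorized
# reviewers. The `response` attribute on the exceptions, the is_authorized signature,
# util.STATUS_OK, and the GithubHandler constructor arguments are assumptions, not the real code.
import connexion

from github_approval_checker.utils import util
from github_approval_checker.utils.github_handler import GithubHandler
from github_approval_checker.utils.exceptions import APIError, ConfigError, SignatureError


def post_pull_request_review(data):
    try:
        # Assumed: the webhook payload is verified against the X-Hub-Signature header.
        util.verify_signature(connexion.request.data,
                              connexion.request.headers.get('X-Hub-Signature'))
    except SignatureError as err:
        return {'status': 'Signature Validation Error', 'message': str(err)}, 400

    handler = GithubHandler()  # constructor arguments omitted; the tests never assert them
    repo = data['repository']['full_name']

    try:
        config = handler.get_config(repo, None)
        util.validate_config(config)
    except (APIError, ConfigError) as err:
        # Assumed: both exception types carry the HTTP response they should produce.
        return err.response

    if data['review']['state'] != 'approved':
        return {'status': 'OK', 'message': 'Review state is not approved'}, 200

    commit = data['review']['commit_id']
    reviewer = data['review']['user']['login']
    for status in handler.get_statuses(repo, commit):
        if status['state'] == 'pending':
            continue
        # Assumed signature: is_authorized(reviewer, whitelist for this status context).
        if handler.is_authorized(reviewer, config.get(status['context'], [])):
            handler.post_status(repo, commit, status['context'], status['target_url'],
                                reviewer, status['description'])
    return util.STATUS_OK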
<|reserved_special_token_0|>
def show_im(dataset):
data = np.uint8(dataset[0]).reshape((30, 96)) * 255
im = Image.fromarray(data)
im.show()
def test_model(captcha):
im = Image.open(os.path.join(basedir, 'downloader', 'captchas', captcha))
im = im.convert('L')
im = grey_to_binary(im)
im = clear_paper_noise(im, 5)
model = load_model_nn()
x = model['x']
keep_prob = model['keep_prob']
saver = model['saver']
prediction = model['prediction']
graph = model['graph']
    model_ckpt_path, _ = find_model_ckpt(os.path.join('trainer', '.checkpoint'))
with tf.Session(graph=graph) as session:
tf.global_variables_initializer().run()
saver.restore(session, model_ckpt_path)
dataset = []
dataset.append(np.asarray(im.convert('L')).reshape([30 * 96]) / 255)
label = prediction.eval(feed_dict={x: dataset, keep_prob: 1.0},
session=session)[0]
string = ''
for i in range(4):
string += chr(label[i] + ord('0'))
print(string)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
os.chdir(os.path.dirname(os.path.abspath(__file__)))
sys.path.append('trainer')
sys.path.append('downloader')
<|reserved_special_token_0|>
def show_im(dataset):
data = np.uint8(dataset[0]).reshape((30, 96)) * 255
im = Image.fromarray(data)
im.show()
def test_model(captcha):
im = Image.open(os.path.join(basedir, 'downloader', 'captchas', captcha))
im = im.convert('L')
im = grey_to_binary(im)
im = clear_paper_noise(im, 5)
model = load_model_nn()
x = model['x']
keep_prob = model['keep_prob']
saver = model['saver']
prediction = model['prediction']
graph = model['graph']
    model_ckpt_path, _ = find_model_ckpt(os.path.join('trainer', '.checkpoint'))
with tf.Session(graph=graph) as session:
tf.global_variables_initializer().run()
saver.restore(session, model_ckpt_path)
dataset = []
dataset.append(np.asarray(im.convert('L')).reshape([30 * 96]) / 255)
label = prediction.eval(feed_dict={x: dataset, keep_prob: 1.0},
session=session)[0]
string = ''
for i in range(4):
string += chr(label[i] + ord('0'))
print(string)
if __name__ == '__main__':
if len(sys.argv) <= 1:
captcha = download(1)[0]
else:
captcha = sys.argv[1]
test_model(captcha)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
basedir = os.getcwd()
os.chdir(os.path.dirname(os.path.abspath(__file__)))
sys.path.append('trainer')
sys.path.append('downloader')
<|reserved_special_token_0|>
def show_im(dataset):
data = np.uint8(dataset[0]).reshape((30, 96)) * 255
im = Image.fromarray(data)
im.show()
def test_model(captcha):
im = Image.open(os.path.join(basedir, 'downloader', 'captchas', captcha))
im = im.convert('L')
im = grey_to_binary(im)
im = clear_paper_noise(im, 5)
model = load_model_nn()
x = model['x']
keep_prob = model['keep_prob']
saver = model['saver']
prediction = model['prediction']
graph = model['graph']
    model_ckpt_path, _ = find_model_ckpt(os.path.join('trainer', '.checkpoint'))
with tf.Session(graph=graph) as session:
tf.global_variables_initializer().run()
saver.restore(session, model_ckpt_path)
dataset = []
dataset.append(np.asarray(im.convert('L')).reshape([30 * 96]) / 255)
label = prediction.eval(feed_dict={x: dataset, keep_prob: 1.0},
session=session)[0]
string = ''
for i in range(4):
string += chr(label[i] + ord('0'))
print(string)
if __name__ == '__main__':
if len(sys.argv) <= 1:
captcha = download(1)[0]
else:
captcha = sys.argv[1]
test_model(captcha)
<|reserved_special_token_1|>
from __future__ import print_function
import os
import sys
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
basedir = os.getcwd()
os.chdir(os.path.dirname(os.path.abspath(__file__)))
sys.path.append('trainer')
sys.path.append('downloader')
from gen.gen_captcha import gen_dataset, load_templates, candidates
from gen.img_process import grey_to_binary, clear_paper_noise
from model.nn import load_model_nn
from model.common import find_model_ckpt
import tensorflow as tf
from gen.utils import vec2str
import numpy as np
from PIL import Image
from downloader import download
def show_im(dataset):
data = np.uint8(dataset[0]).reshape((30, 96)) * 255
im = Image.fromarray(data)
im.show()
def test_model(captcha):
im = Image.open(os.path.join(basedir, 'downloader', 'captchas', captcha))
im = im.convert('L')
im = grey_to_binary(im)
im = clear_paper_noise(im, 5)
model = load_model_nn()
x = model['x']
keep_prob = model['keep_prob']
saver = model['saver']
prediction = model['prediction']
graph = model['graph']
    model_ckpt_path, _ = find_model_ckpt(os.path.join('trainer', '.checkpoint'))
with tf.Session(graph=graph) as session:
tf.global_variables_initializer().run()
saver.restore(session, model_ckpt_path)
dataset = []
dataset.append(np.asarray(im.convert('L')).reshape([30 * 96]) / 255)
label = prediction.eval(feed_dict={x: dataset, keep_prob: 1.0},
session=session)[0]
string = ''
for i in range(4):
string += chr(label[i] + ord('0'))
print(string)
if __name__ == '__main__':
if len(sys.argv) <= 1:
captcha = download(1)[0]
else:
captcha = sys.argv[1]
test_model(captcha)
<|reserved_special_token_1|>
# coding=utf-8
from __future__ import print_function
import os
import sys
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
basedir = os.getcwd()
os.chdir(os.path.dirname(os.path.abspath(__file__)))
sys.path.append('trainer')
sys.path.append('downloader')
from gen.gen_captcha import gen_dataset, load_templates, candidates
from gen.img_process import grey_to_binary, clear_paper_noise
from model.nn import load_model_nn
from model.common import find_model_ckpt
import tensorflow as tf
from gen.utils import vec2str
import numpy as np
from PIL import Image
from downloader import download
def show_im(dataset):
data = np.uint8(dataset[0]).reshape((30, 96)) * 255
im = Image.fromarray(data)
im.show()
def test_model(captcha):
im = Image.open(os.path.join(basedir, 'downloader', 'captchas', captcha))
im = im.convert('L')
im = grey_to_binary(im)
im = clear_paper_noise(im, 5)
# im.show()
# templates = load_templates(os.path.join('trainer', 'templates'))
model = load_model_nn()
x = model['x']
keep_prob = model['keep_prob']
saver = model['saver']
prediction = model['prediction']
graph = model['graph']
model_ckpt_path, _ = find_model_ckpt(os.path.join('trainer', '.checkpoint'))
# print("Used the model:", model_ckpt_path)
with tf.Session(graph=graph) as session:
tf.global_variables_initializer().run()
saver.restore(session, model_ckpt_path)
# dataset, labels = gen_dataset(1, templates) # generate one image
dataset = []
dataset.append(np.asarray(im.convert("L")).reshape([30 * 96]) / 255)
label = prediction.eval(feed_dict={x: dataset, keep_prob: 1.0}, session=session)[0]
string = ''
for i in range(4):
string += chr(label[i] + ord('0'))
print(string)
if __name__ == "__main__":
if len(sys.argv) <= 1:
captcha = download(1)[0]
else:
captcha = sys.argv[1]
test_model(captcha)
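# Usage sketch (illustrative, not part of the original script): classify several captcha files
# in a single TensorFlow session instead of rebuilding the graph per image. All names and the
# 'downloader/captchas' layout are taken from test_model above.
def test_model_batch(captchas):
    model = load_model_nn()
    model_ckpt_path, _ = find_model_ckpt(os.path.join('trainer', '.checkpoint'))
    with tf.Session(graph=model['graph']) as session:
        tf.global_variables_initializer().run()
        model['saver'].restore(session, model_ckpt_path)
        for name in captchas:
            im = Image.open(os.path.join(basedir, 'downloader', 'captchas', name)).convert('L')
            im = clear_paper_noise(grey_to_binary(im), 5)
            dataset = [np.asarray(im).reshape([30 * 96]) / 255]
            label = model['prediction'].eval(
                feed_dict={model['x']: dataset, model['keep_prob']: 1.0}, session=session)[0]
            print(name, ''.join(chr(c + ord('0')) for c in label))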
import logging
from utils import Utils
from block import Block
from message import Message
from transaction import Transaction
class Response:
def __init__(self, node, data):
self.node = node
self.data = data
self.selector()
    def selector(self):
        # Dispatch on the message flag: 1 = chain size query, 2 = full chain sync,
        # 3 = block handling (a boolean content marks an append confirmation),
        # anything else = new transaction.
        if self.data['flag'] == 1:
            self.chain_size()
        elif self.data['flag'] == 2:
            self.chain_sync()
        elif self.data['flag'] == 3:
            if isinstance(self.data['content'], bool):
                self.append_new_block()
            else:
                self.new_block()
        else:
            self.new_transaction()
def chain_size(self):
server_chain_size = self.node.get_ledger_size()
self.return_response(1, server_chain_size)
def chain_sync(self):
u = Utils()
blocks = [u.dict_to_json(block) for block in self.node.get_server_ledger()]
self.return_response(2, blocks)
def new_block(self):
b = Block()
block = self.data['content'][0]
if not self.node.get_server_ledger():
# Server has no chain, cannot validate previous hash
logging.error('{0}Peer #{1}: cannot validate blocks! Authorizing!'.format(self.node.type,self.node.index))
self.return_response(3, block)
else:
if b.validate(block):
self.node.server.write_message('announce', 1, block['index'])
self.node.add_block(block)
self.return_response(3, block)
else:
self.node.server.write_message('announce', 2, block['index'])
self.return_response(3)
def new_transaction(self):
t = Transaction()
tx = self.data['content'][0][0]
if t.validate(tx):
self.node.server.shared_tx.append(tx)
self.return_response(4, tx)
else:
self.return_response(4)
def return_response(self, flag, content=None):
m = Message()
response = m.create('response', flag, [content])
self.node.send(response)
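# --- Hypothetical usage sketch (not part of the original module): FakeNode below stubs
# --- only what the flag == 1 path touches, to show how the 'flag' field of an incoming
# --- message selects a handler inside Response.selector().
class FakeNode:
    def get_ledger_size(self):
        return 3  # pretend the local chain currently holds 3 blocks
    def send(self, message):
        print('outgoing:', message)  # Response.return_response() delivers the reply here
# flag 1 routes to chain_size(), which replies with the ledger size via Message.create:
# Response(FakeNode(), {'flag': 1, 'content': None})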
|
normal
|
{
"blob_id": "55b8590410bfe8f12ce3b52710238a79d27189a7",
"index": 5125,
"step-1": "<mask token>\n\n\nclass Response:\n <mask token>\n <mask token>\n\n def chain_size(self):\n server_chain_size = self.node.get_ledger_size()\n self.return_response(1, server_chain_size)\n\n def chain_sync(self):\n u = Utils()\n blocks = [u.dict_to_json(block) for block in self.node.\n get_server_ledger()]\n self.return_response(2, blocks)\n <mask token>\n\n def new_transaction(self):\n t = Transaction()\n tx = self.data['content'][0][0]\n if t.validate(tx):\n self.node.server.shared_tx.append(tx)\n self.return_response(4, tx)\n else:\n self.return_response(4)\n\n def return_response(self, flag, content=None):\n m = Message()\n response = m.create('response', flag, [content])\n self.node.send(response)\n",
"step-2": "<mask token>\n\n\nclass Response:\n <mask token>\n <mask token>\n\n def chain_size(self):\n server_chain_size = self.node.get_ledger_size()\n self.return_response(1, server_chain_size)\n\n def chain_sync(self):\n u = Utils()\n blocks = [u.dict_to_json(block) for block in self.node.\n get_server_ledger()]\n self.return_response(2, blocks)\n\n def new_block(self):\n b = Block()\n block = self.data['content'][0]\n if not self.node.get_server_ledger():\n logging.error('{0}Peer #{1}: cannot validate blocks! Authorizing!'\n .format(self.node.type, self.node.index))\n self.return_response(3, block)\n elif b.validate(block):\n self.node.server.write_message('announce', 1, block['index'])\n self.node.add_block(block)\n self.return_response(3, block)\n else:\n self.node.server.write_message('announce', 2, block['index'])\n self.return_response(3)\n\n def new_transaction(self):\n t = Transaction()\n tx = self.data['content'][0][0]\n if t.validate(tx):\n self.node.server.shared_tx.append(tx)\n self.return_response(4, tx)\n else:\n self.return_response(4)\n\n def return_response(self, flag, content=None):\n m = Message()\n response = m.create('response', flag, [content])\n self.node.send(response)\n",
"step-3": "<mask token>\n\n\nclass Response:\n\n def __init__(self, node, data):\n self.node = node\n self.data = data\n self.selector()\n\n def selector(self):\n if self.data['flag'] == 1:\n self.chain_size()\n elif self.data['flag'] == 2:\n self.chain_sync()\n elif self.data['flag'] == 3:\n if isinstance(self.data['content'], bool):\n self.append_new_block()\n else:\n self.new_block()\n else:\n self.new_transaction()\n\n def chain_size(self):\n server_chain_size = self.node.get_ledger_size()\n self.return_response(1, server_chain_size)\n\n def chain_sync(self):\n u = Utils()\n blocks = [u.dict_to_json(block) for block in self.node.\n get_server_ledger()]\n self.return_response(2, blocks)\n\n def new_block(self):\n b = Block()\n block = self.data['content'][0]\n if not self.node.get_server_ledger():\n logging.error('{0}Peer #{1}: cannot validate blocks! Authorizing!'\n .format(self.node.type, self.node.index))\n self.return_response(3, block)\n elif b.validate(block):\n self.node.server.write_message('announce', 1, block['index'])\n self.node.add_block(block)\n self.return_response(3, block)\n else:\n self.node.server.write_message('announce', 2, block['index'])\n self.return_response(3)\n\n def new_transaction(self):\n t = Transaction()\n tx = self.data['content'][0][0]\n if t.validate(tx):\n self.node.server.shared_tx.append(tx)\n self.return_response(4, tx)\n else:\n self.return_response(4)\n\n def return_response(self, flag, content=None):\n m = Message()\n response = m.create('response', flag, [content])\n self.node.send(response)\n",
"step-4": "import logging\nfrom utils import Utils\nfrom block import Block\nfrom message import Message\nfrom transaction import Transaction\n\n\nclass Response:\n\n def __init__(self, node, data):\n self.node = node\n self.data = data\n self.selector()\n\n def selector(self):\n if self.data['flag'] == 1:\n self.chain_size()\n elif self.data['flag'] == 2:\n self.chain_sync()\n elif self.data['flag'] == 3:\n if isinstance(self.data['content'], bool):\n self.append_new_block()\n else:\n self.new_block()\n else:\n self.new_transaction()\n\n def chain_size(self):\n server_chain_size = self.node.get_ledger_size()\n self.return_response(1, server_chain_size)\n\n def chain_sync(self):\n u = Utils()\n blocks = [u.dict_to_json(block) for block in self.node.\n get_server_ledger()]\n self.return_response(2, blocks)\n\n def new_block(self):\n b = Block()\n block = self.data['content'][0]\n if not self.node.get_server_ledger():\n logging.error('{0}Peer #{1}: cannot validate blocks! Authorizing!'\n .format(self.node.type, self.node.index))\n self.return_response(3, block)\n elif b.validate(block):\n self.node.server.write_message('announce', 1, block['index'])\n self.node.add_block(block)\n self.return_response(3, block)\n else:\n self.node.server.write_message('announce', 2, block['index'])\n self.return_response(3)\n\n def new_transaction(self):\n t = Transaction()\n tx = self.data['content'][0][0]\n if t.validate(tx):\n self.node.server.shared_tx.append(tx)\n self.return_response(4, tx)\n else:\n self.return_response(4)\n\n def return_response(self, flag, content=None):\n m = Message()\n response = m.create('response', flag, [content])\n self.node.send(response)\n",
"step-5": "import logging\n\nfrom utils import Utils\nfrom block import Block\nfrom message import Message\nfrom transaction import Transaction\n\nclass Response:\n def __init__(self, node, data):\n self.node = node\n self.data = data\n self.selector()\n\n def selector(self):\n if self.data['flag'] == 1:\n self.chain_size()\n elif self.data['flag'] == 2:\n self.chain_sync()\n elif self.data['flag'] == 3:\n if isinstance(self.data['content'], bool):\n self.append_new_block()\n else:\n self.new_block()\n else:\n self.new_transaction()\n\n def chain_size(self):\n server_chain_size = self.node.get_ledger_size()\n self.return_response(1, server_chain_size)\n\n def chain_sync(self):\n u = Utils()\n blocks = [u.dict_to_json(block) for block in self.node.get_server_ledger()]\n self.return_response(2, blocks)\n\n def new_block(self):\n b = Block()\n block = self.data['content'][0]\n if not self.node.get_server_ledger():\n # Server has no chain, cannot validate previous hash\n logging.error('{0}Peer #{1}: cannot validate blocks! Authorizing!'.format(self.node.type,self.node.index))\n self.return_response(3, block)\n else:\n if b.validate(block):\n self.node.server.write_message('announce', 1, block['index'])\n self.node.add_block(block)\n self.return_response(3, block)\n else:\n self.node.server.write_message('announce', 2, block['index'])\n self.return_response(3)\n\n def new_transaction(self):\n t = Transaction()\n tx = self.data['content'][0][0]\n if t.validate(tx):\n self.node.server.shared_tx.append(tx)\n self.return_response(4, tx)\n else:\n self.return_response(4)\n\n def return_response(self, flag, content=None):\n m = Message()\n response = m.create('response', flag, [content])\n self.node.send(response)\n",
"step-ids": [
5,
6,
8,
9,
10
]
}
|
[
5,
6,
8,
9,
10
] |
'''
!pip install wget
from zipfile import ZipFile
import wget
print('Beginning file download with wget module')
url = 'https://download.microsoft.com/download/3/E/1/3E1C3F21-ECDB-4869-8368-6DEBA77B919F/kagglecatsanddogs_3367a.zip'
wget.download(url, 'sample_data/')
print('2. Extract all files in ZIP to different directory')
# Create a ZipFile Object and load sample.zip in it
with ZipFile('sample_data/kagglecatsanddogs_3367a.zip', 'r') as zipObj:
# Extract all the contents of zip file in different directory
zipObj.extractall('content/')
'''
import numpy as np
import matplotlib.pyplot as plt
import os
import cv2
import pickle
import random
import datetime
import time
import tensorflow as tf
from tensorflow.python.keras.datasets import cifar10
from tensorflow.python.keras.preprocessing.image import ImageDataGenerator
from tensorflow.python.keras.models import Sequential
from tensorflow.python.keras.layers import Activation, Dense, Flatten, Dropout
from tensorflow.python.keras.layers import Conv2D, MaxPooling2D
from tensorflow.python.keras.optimizers import Adam
from tensorflow.python.keras.callbacks import TensorBoard
DATADIR = 'content/PetImages'
CATEGORIES = ['Cat', 'Dog'] #'''categories that we have to deal with'''
img_array= []
for category in CATEGORIES:
path = os.path.join(DATADIR, category) # path to cats and dogs dir
for img in os.listdir(path):
img_array = cv2.imread(os.path.join(path, img), cv2.IMREAD_COLOR)
plt.imshow(img_array, cmap='gray')
plt.show()
print(img_array)
print(img_array.shape)
break
break
IMG_SIZE = 299 #every image of 299x299
resized_img_array = cv2.resize(img_array, (IMG_SIZE, IMG_SIZE))
plt.imshow(resized_img_array, cmap='gray') # cmap = hot, plasma, cool,
plt.show()
training_data = []
def create_training_data(): # creating training datasets
for category in CATEGORIES:
path = os.path.join(DATADIR, category) # path to cats and dogs dir
classIndex = CATEGORIES.index(category) # 0 for dog and 1 for cat
for img in os.listdir(path):
try:
img_array = cv2.imread(os.path.join(path, img), cv2.IMREAD_COLOR)
resized_img_array = cv2.resize(img_array, (IMG_SIZE, IMG_SIZE))
training_data.append([resized_img_array, classIndex])
except Exception as e:
pass
create_training_data()
print(len(training_data))
'''shuffle training data'''
random.shuffle(training_data)
# for sample in training_data[:10]:
# print(sample[1])
x=[]
y=[]
for features, label in training_data:
x.append(features)
y.append(label)
x = np.array(x).reshape(-1, IMG_SIZE, IMG_SIZE, 3) #we can't pass a list to keras for training
#'''we have to pass here a numpy array '''
# print(x[0].reshape(-1, IMG_SIZE, IMG_SIZE, 1))
pickle_out = open("x.pickle", 'wb')
pickle.dump(x, pickle_out)
pickle_out.close()
pickle_out= open('y.pickle', 'wb')
pickle.dump(y, pickle_out)
pickle_out.close()
pickle_in = open('x.pickle', 'rb')
x = pickle.load(pickle_in)
pickle_in = open('y.pickle', 'rb')
y = pickle.load(pickle_in)
x = x / 255.0
INPUT_SHAPE = x.shape[1:]  # (299, 299, 3)
DROPOUT=0.2
NB_CLASSES=10
NB_EPOCHS=10
BATCH_SIZE=128
VALIDATION_SPLIT=0.2
OPTIMIZER = Adam()
max, min, accIndex, lossIndex = 70.0, 4.0, 1, 1
date = datetime.datetime.now()
dense_layers = [2, 1, 0] # 0, 1,2
layer_sizes = [512, 256, 128, 64] #32, 64, 128, 256, 512
conv_layers = [3, 2, 1] # 1, 2,3
for dense_layer in dense_layers:
for layer_size in layer_sizes:
for conv_layer in conv_layers:
            NAME = "{}-conv-{}-nodes-{}-dense-{}".format(conv_layer, layer_size, dense_layer, int(time.time()))
            # CBP was used below but never defined; assumed here to be the current
            # (conv, nodes, dense) combination used to label the saved checkpoints.
            CBP = (conv_layer, layer_size, dense_layer)
print(NAME)
model = Sequential()
model.add(Conv2D(layer_size, (3, 3), input_shape=INPUT_SHAPE))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
for l in range(conv_layer-1):
model.add(Conv2D(layer_size, (5, 5)))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))
model.add(Dropout(DROPOUT))
model.add(Flatten())
for _ in range(dense_layer):
model.add(Dense(layer_size))
model.add(Activation('relu'))
model.add(Dropout(DROPOUT))
model.add(Dense(NB_CLASSES))
model.add(Activation('softmax'))
tensorboard = TensorBoard(log_dir="logs/{}".format(NAME))
model.compile(loss='categorical_crossentropy',
optimizer=OPTIMIZER,
metrics=['accuracy'],
)
history = model.fit(x, y,
batch_size=BATCH_SIZE,
epochs=NB_EPOCHS,
validation_split=VALIDATION_SPLIT,
verbose=1,
callbacks=[tensorboard])
if history.history.get('val_acc')[-1] > max:
max = history.history.get('val_acc')[-1]
if accIndex >= 2:
os.remove('{}_{}_{}_{}_{}_{}'.format(accIndex-1, round(max, 4), CBP[0], CBP[1], CBP[2], f":{date:%Y-%m-%d-%Hh%Mm%Ss}"))
val_acc_out = open('{}_{}_{}_{}_{}_{}'.format(accIndex, round(max, 4), CBP[0], CBP[1], CBP[2], f":{date:%Y-%m-%d-%Hh%Mm%Ss}"), "wb")
pickle.dump(model.save('{}_{}_{}_{}_{}_{}'.format(accIndex, round(max, 4), CBP[0], CBP[1], CBP[2], f":{date:%Y-%m-%d-%Hh%Mm%Ss}")),
val_acc_out)
val_acc_out.close()
accIndex += 1
pickle_upload = open('{}_pickle'.format(accIndex - 1), 'rb')
p_upload = pickle.load(pickle_upload)
print(p_upload)
if history.history.get('val_loss')[-1] < min:
min = history.history.get('val_loss')[-1]
if lossIndex>=2:
os.remove('{}_{}_{}_{}_{}_{}'.format(lossIndex-1, round(min, 4), CBP[0], CBP[1], CBP[2], f":{date:%Y-%m-%d-%Hh%Mm%Ss}"))
                val_loss_out = open('{}_{}_{}_{}_{}_{}'.format(lossIndex, round(min, 4), CBP[0], CBP[1], CBP[2], f":{date:%Y-%m-%d-%Hh%Mm%Ss}"), "wb")
pickle.dump(model.save('{}_{}_{}_{}_{}_{}'.format(lossIndex, round(min, 4), CBP[0], CBP[1], CBP[2], f":{date:%Y-%m-%d-%Hh%Mm%Ss}")),
val_loss_out)
val_loss_out.close()
lossIndex += 1
model.save('64x3-CNN.model')
CATEGORIES = ["Cat", "Dog"] # same order as the training labels, used to convert the predicted index to a string value
def prepare(filepath):
IMG_SIZE = 299 # 50 in txt-based
    img_array = cv2.imread(filepath, cv2.IMREAD_COLOR)  # read in the image in color (BGR)
resized_img_array = cv2.resize(img_array, (IMG_SIZE, IMG_SIZE)) # resize image to match model's expected sizing
return resized_img_array.reshape(-1, IMG_SIZE, IMG_SIZE, 3) # return the image with shaping that TF wants.
model = tf.keras.models.load_model("64x3-CNN.model")
prediction = model.predict([prepare('dog.jpg')]) # REMEMBER YOU'RE PASSING A LIST OF THINGS YOU WISH TO PREDICT
print(prediction)
print(prediction[0][0])
print(CATEGORIES[int(prediction[0][0])])
#We can also test our cat example:
prediction = model.predict([prepare('cat.jpg')])
print(prediction) # will be a list in a list.
print(CATEGORIES[int(prediction[0][0])])
'''
alpha. Also referred to as the learning rate or step size. The proportion that weights are updated (e.g. 0.001). Larger values (e.g. 0.3) result in faster initial learning before the rate is updated. Smaller values (e.g. 1.0E-5) slow learning right down during training.
beta1. The exponential decay rate for the first moment estimates (e.g. 0.9).
beta2. The exponential decay rate for the second-moment estimates (e.g. 0.999). This value should be set close to 1.0 on problems with a sparse gradient (e.g. NLP and computer vision problems).
epsilon. Is a very small number to prevent any division by zero in the implementation (e.g. 10E-8).
We can see that the popular deep learning libraries generally use the default parameters recommended by the paper.
TensorFlow: learning_rate=0.001, beta1=0.9, beta2=0.999, epsilon=1e-08.
Keras: lr=0.001, beta_1=0.9, beta_2=0.999, epsilon=1e-08, decay=0.0.
Blocks: learning_rate=0.002, beta1=0.9, beta2=0.999, epsilon=1e-08, decay_factor=1.
Lasagne: learning_rate=0.001, beta1=0.9, beta2=0.999, epsilon=1e-08
Caffe: learning_rate=0.001, beta1=0.9, beta2=0.999, epsilon=1e-08
MxNet: learning_rate=0.001, beta1=0.9, beta2=0.999, epsilon=1e-8
Torch: learning_rate=0.001, beta1=0.9, beta2=0.999, epsilon=1e-8
'''
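# Minimal sketch tying the defaults quoted above to the Adam import already used in this
# script; these are the documented Keras defaults, not values tuned for this model.
OPTIMIZER_WITH_DEFAULTS = Adam(lr=0.001, beta_1=0.9, beta_2=0.999, epsilon=1e-08, decay=0.0)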
|
normal
|
{
"blob_id": "13c9f0f58ec6da317c3802f594bb0db7c275dee9",
"index": 21,
"step-1": "<mask token>\n\n\ndef create_training_data():\n for category in CATEGORIES:\n path = os.path.join(DATADIR, category)\n classIndex = CATEGORIES.index(category)\n for img in os.listdir(path):\n try:\n img_array = cv2.imread(os.path.join(path, img), cv2.\n IMREAD_COLOR)\n resized_img_array = cv2.resize(img_array, (IMG_SIZE, IMG_SIZE))\n training_data.append([resized_img_array, classIndex])\n except Exception as e:\n pass\n\n\n<mask token>\n",
"step-2": "<mask token>\nfor category in CATEGORIES:\n path = os.path.join(DATADIR, category)\n for img in os.listdir(path):\n img_array = cv2.imread(os.path.join(path, img), cv2.IMREAD_COLOR)\n plt.imshow(img_array, cmap='gray')\n plt.show()\n print(img_array)\n print(img_array.shape)\n break\n break\n<mask token>\nplt.imshow(resized_img_array, cmap='gray')\nplt.show()\n<mask token>\n\n\ndef create_training_data():\n for category in CATEGORIES:\n path = os.path.join(DATADIR, category)\n classIndex = CATEGORIES.index(category)\n for img in os.listdir(path):\n try:\n img_array = cv2.imread(os.path.join(path, img), cv2.\n IMREAD_COLOR)\n resized_img_array = cv2.resize(img_array, (IMG_SIZE, IMG_SIZE))\n training_data.append([resized_img_array, classIndex])\n except Exception as e:\n pass\n\n\ncreate_training_data()\nprint(len(training_data))\n<mask token>\nrandom.shuffle(training_data)\n<mask token>\nfor features, label in training_data:\n x.append(features)\n y.append(label)\n<mask token>\npickle.dump(x, pickle_out)\npickle_out.close()\n<mask token>\npickle.dump(y, pickle_out)\npickle_out.close()\n<mask token>\nfor dense_layer in dense_layers:\n for layer_size in layer_sizes:\n for conv_layer in conv_layers:\n NAME = '{}-conv-{}-nodes-{}-dense-{}'.format(conv_layer,\n layer_size, dense_layer, int(time.time()))\n print(NAME)\n model = Sequential()\n model.add(Conv2D(layer_size, (3, 3), input_shape=INPUT_SHAPE))\n model.add(Activation('relu'))\n model.add(MaxPooling2D(pool_size=(2, 2)))\n for l in range(conv_layer - 1):\n model.add(Conv2D(layer_size, (5, 5)))\n model.add(Activation('relu'))\n model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))\n model.add(Dropout(DROPOUT))\n model.add(Flatten())\n for _ in range(dense_layer):\n model.add(Dense(layer_size))\n model.add(Activation('relu'))\n model.add(Dropout(DROPOUT))\n model.add(Dense(NB_CLASSES))\n model.add(Activation('softmax'))\n tensorboard = TensorBoard(log_dir='logs/{}'.format(NAME))\n model.compile(loss='categorical_crossentropy', optimizer=\n OPTIMIZER, metrics=['accuracy'])\n history = model.fit(x, y, batch_size=BATCH_SIZE, epochs=\n NB_EPOCHS, validation_split=VALIDATION_SPLIT, verbose=1,\n callbacks=[tensorboard])\n if history.history.get('val_acc')[-1] > max:\n max = history.history.get('val_acc')[-1]\n if accIndex >= 2:\n os.remove('{}_{}_{}_{}_{}_{}'.format(accIndex - 1,\n round(max, 4), CBP[0], CBP[1], CBP[2],\n f':{date:%Y-%m-%d-%Hh%Mm%Ss}'))\n val_acc_out = open('{}_{}_{}_{}_{}_{}'.format(accIndex,\n round(max, 4), CBP[0], CBP[1], CBP[2],\n f':{date:%Y-%m-%d-%Hh%Mm%Ss}'), 'wb')\n pickle.dump(model.save('{}_{}_{}_{}_{}_{}'.format(accIndex,\n round(max, 4), CBP[0], CBP[1], CBP[2],\n f':{date:%Y-%m-%d-%Hh%Mm%Ss}')), val_acc_out)\n val_acc_out.close()\n accIndex += 1\n pickle_upload = open('{}_pickle'.format(accIndex - 1), 'rb')\n p_upload = pickle.load(pickle_upload)\n print(p_upload)\n if history.history.get('val_loss')[-1] < min:\n min = history.history.get('val_loss')[-1]\n if lossIndex >= 2:\n os.remove('{}_{}_{}_{}_{}_{}'.format(lossIndex - 1,\n round(min, 4), CBP[0], CBP[1], CBP[2],\n f':{date:%Y-%m-%d-%Hh%Mm%Ss}'))\n val_loss_out = open('{}_{}_{}_{}_{}_{}'.format(lossIndex,\n round(min, 4), CBP[0], CBP[1], CBP[2],\n f':{date:%Y-%m-%d-%Hh%Mm%Ss}'))\n pickle.dump(model.save('{}_{}_{}_{}_{}_{}'.format(lossIndex,\n round(min, 4), CBP[0], CBP[1], CBP[2],\n f':{date:%Y-%m-%d-%Hh%Mm%Ss}')), val_loss_out)\n val_loss_out.close()\n lossIndex += 1\nmodel.save('64x3-CNN.model')\n<mask token>\n\n\ndef prepare(filepath):\n 
IMG_SIZE = 299\n img_array = cv2.imread(filepath, cv2.IMREAD_COLOR)\n resized_img_array = cv2.resize(img_array, (IMG_SIZE, IMG_SIZE))\n return resized_img_array.reshape(-1, IMG_SIZE, IMG_SIZE, 3)\n\n\n<mask token>\nprint(prediction)\nprint(prediction[0][0])\nprint(CATEGORIES[int(prediction[0][0])])\n<mask token>\nprint(prediction)\nprint(CATEGORIES[int(prediction[0][0])])\n<mask token>\n",
"step-3": "<mask token>\nDATADIR = 'content/PetImages'\nCATEGORIES = ['Cat', 'Dog']\nimg_array = []\nfor category in CATEGORIES:\n path = os.path.join(DATADIR, category)\n for img in os.listdir(path):\n img_array = cv2.imread(os.path.join(path, img), cv2.IMREAD_COLOR)\n plt.imshow(img_array, cmap='gray')\n plt.show()\n print(img_array)\n print(img_array.shape)\n break\n break\nIMG_SIZE = 299\nresized_img_array = cv2.resize(img_array, (IMG_SIZE, IMG_SIZE))\nplt.imshow(resized_img_array, cmap='gray')\nplt.show()\ntraining_data = []\n\n\ndef create_training_data():\n for category in CATEGORIES:\n path = os.path.join(DATADIR, category)\n classIndex = CATEGORIES.index(category)\n for img in os.listdir(path):\n try:\n img_array = cv2.imread(os.path.join(path, img), cv2.\n IMREAD_COLOR)\n resized_img_array = cv2.resize(img_array, (IMG_SIZE, IMG_SIZE))\n training_data.append([resized_img_array, classIndex])\n except Exception as e:\n pass\n\n\ncreate_training_data()\nprint(len(training_data))\n<mask token>\nrandom.shuffle(training_data)\nx = []\ny = []\nfor features, label in training_data:\n x.append(features)\n y.append(label)\nx = np.array(x).reshape(-1, IMG_SIZE, IMG_SIZE, 3)\npickle_out = open('x.pickle', 'wb')\npickle.dump(x, pickle_out)\npickle_out.close()\npickle_out = open('y.pickle', 'wb')\npickle.dump(y, pickle_out)\npickle_out.close()\npickle_in = open('x.pickle', 'rb')\nx = pickle.load(pickle_in)\npickle_in = open('y.pickle', 'rb')\ny = pickle.load(pickle_in)\nx = x / 255.0\nINPUT_SHAPE = x.shape[1:]\nDROPOUT = 0.2\nNB_CLASSES = 10\nNB_EPOCHS = 10\nBATCH_SIZE = 128\nVALIDATION_SPLIT = 0.2\nOPTIMIZER = Adam()\nmax, min, accIndex, lossIndex = 70.0, 4.0, 1, 1\ndate = datetime.datetime.now()\ndense_layers = [2, 1, 0]\nlayer_sizes = [512, 256, 128, 64]\nconv_layers = [3, 2, 1]\nfor dense_layer in dense_layers:\n for layer_size in layer_sizes:\n for conv_layer in conv_layers:\n NAME = '{}-conv-{}-nodes-{}-dense-{}'.format(conv_layer,\n layer_size, dense_layer, int(time.time()))\n print(NAME)\n model = Sequential()\n model.add(Conv2D(layer_size, (3, 3), input_shape=INPUT_SHAPE))\n model.add(Activation('relu'))\n model.add(MaxPooling2D(pool_size=(2, 2)))\n for l in range(conv_layer - 1):\n model.add(Conv2D(layer_size, (5, 5)))\n model.add(Activation('relu'))\n model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))\n model.add(Dropout(DROPOUT))\n model.add(Flatten())\n for _ in range(dense_layer):\n model.add(Dense(layer_size))\n model.add(Activation('relu'))\n model.add(Dropout(DROPOUT))\n model.add(Dense(NB_CLASSES))\n model.add(Activation('softmax'))\n tensorboard = TensorBoard(log_dir='logs/{}'.format(NAME))\n model.compile(loss='categorical_crossentropy', optimizer=\n OPTIMIZER, metrics=['accuracy'])\n history = model.fit(x, y, batch_size=BATCH_SIZE, epochs=\n NB_EPOCHS, validation_split=VALIDATION_SPLIT, verbose=1,\n callbacks=[tensorboard])\n if history.history.get('val_acc')[-1] > max:\n max = history.history.get('val_acc')[-1]\n if accIndex >= 2:\n os.remove('{}_{}_{}_{}_{}_{}'.format(accIndex - 1,\n round(max, 4), CBP[0], CBP[1], CBP[2],\n f':{date:%Y-%m-%d-%Hh%Mm%Ss}'))\n val_acc_out = open('{}_{}_{}_{}_{}_{}'.format(accIndex,\n round(max, 4), CBP[0], CBP[1], CBP[2],\n f':{date:%Y-%m-%d-%Hh%Mm%Ss}'), 'wb')\n pickle.dump(model.save('{}_{}_{}_{}_{}_{}'.format(accIndex,\n round(max, 4), CBP[0], CBP[1], CBP[2],\n f':{date:%Y-%m-%d-%Hh%Mm%Ss}')), val_acc_out)\n val_acc_out.close()\n accIndex += 1\n pickle_upload = open('{}_pickle'.format(accIndex - 1), 'rb')\n p_upload = 
pickle.load(pickle_upload)\n print(p_upload)\n if history.history.get('val_loss')[-1] < min:\n min = history.history.get('val_loss')[-1]\n if lossIndex >= 2:\n os.remove('{}_{}_{}_{}_{}_{}'.format(lossIndex - 1,\n round(min, 4), CBP[0], CBP[1], CBP[2],\n f':{date:%Y-%m-%d-%Hh%Mm%Ss}'))\n val_loss_out = open('{}_{}_{}_{}_{}_{}'.format(lossIndex,\n round(min, 4), CBP[0], CBP[1], CBP[2],\n f':{date:%Y-%m-%d-%Hh%Mm%Ss}'))\n pickle.dump(model.save('{}_{}_{}_{}_{}_{}'.format(lossIndex,\n round(min, 4), CBP[0], CBP[1], CBP[2],\n f':{date:%Y-%m-%d-%Hh%Mm%Ss}')), val_loss_out)\n val_loss_out.close()\n lossIndex += 1\nmodel.save('64x3-CNN.model')\nCATEGORIES = ['Dog', 'Cat']\n\n\ndef prepare(filepath):\n IMG_SIZE = 299\n img_array = cv2.imread(filepath, cv2.IMREAD_COLOR)\n resized_img_array = cv2.resize(img_array, (IMG_SIZE, IMG_SIZE))\n return resized_img_array.reshape(-1, IMG_SIZE, IMG_SIZE, 3)\n\n\nmodel = tf.keras.models.load_model('64x3-CNN.model')\nprediction = model.predict([prepare('dog.jpg')])\nprint(prediction)\nprint(prediction[0][0])\nprint(CATEGORIES[int(prediction[0][0])])\nprediction = model.predict([prepare('cat.jpg')])\nprint(prediction)\nprint(CATEGORIES[int(prediction[0][0])])\n<mask token>\n",
"step-4": "<mask token>\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport os\nimport cv2\nimport pickle\nimport random\nimport datetime\nimport tensorflow as tf\nfrom tensorflow.python.keras.datasets import cifar10\nfrom tensorflow.python.keras.preprocessing.image import ImageDataGenerator\nfrom tensorflow.python.keras.models import Sequential\nfrom tensorflow.python.keras.layers import Activation, Dense, Flatten, Dropout\nfrom tensorflow.python.keras.layers import Conv2D, MaxPooling2D\nfrom tensorflow.python.keras.optimizers import Adam\nfrom tensorflow.python.keras.callbacks import TensorBoard\nDATADIR = 'content/PetImages'\nCATEGORIES = ['Cat', 'Dog']\nimg_array = []\nfor category in CATEGORIES:\n path = os.path.join(DATADIR, category)\n for img in os.listdir(path):\n img_array = cv2.imread(os.path.join(path, img), cv2.IMREAD_COLOR)\n plt.imshow(img_array, cmap='gray')\n plt.show()\n print(img_array)\n print(img_array.shape)\n break\n break\nIMG_SIZE = 299\nresized_img_array = cv2.resize(img_array, (IMG_SIZE, IMG_SIZE))\nplt.imshow(resized_img_array, cmap='gray')\nplt.show()\ntraining_data = []\n\n\ndef create_training_data():\n for category in CATEGORIES:\n path = os.path.join(DATADIR, category)\n classIndex = CATEGORIES.index(category)\n for img in os.listdir(path):\n try:\n img_array = cv2.imread(os.path.join(path, img), cv2.\n IMREAD_COLOR)\n resized_img_array = cv2.resize(img_array, (IMG_SIZE, IMG_SIZE))\n training_data.append([resized_img_array, classIndex])\n except Exception as e:\n pass\n\n\ncreate_training_data()\nprint(len(training_data))\n<mask token>\nrandom.shuffle(training_data)\nx = []\ny = []\nfor features, label in training_data:\n x.append(features)\n y.append(label)\nx = np.array(x).reshape(-1, IMG_SIZE, IMG_SIZE, 3)\npickle_out = open('x.pickle', 'wb')\npickle.dump(x, pickle_out)\npickle_out.close()\npickle_out = open('y.pickle', 'wb')\npickle.dump(y, pickle_out)\npickle_out.close()\npickle_in = open('x.pickle', 'rb')\nx = pickle.load(pickle_in)\npickle_in = open('y.pickle', 'rb')\ny = pickle.load(pickle_in)\nx = x / 255.0\nINPUT_SHAPE = x.shape[1:]\nDROPOUT = 0.2\nNB_CLASSES = 10\nNB_EPOCHS = 10\nBATCH_SIZE = 128\nVALIDATION_SPLIT = 0.2\nOPTIMIZER = Adam()\nmax, min, accIndex, lossIndex = 70.0, 4.0, 1, 1\ndate = datetime.datetime.now()\ndense_layers = [2, 1, 0]\nlayer_sizes = [512, 256, 128, 64]\nconv_layers = [3, 2, 1]\nfor dense_layer in dense_layers:\n for layer_size in layer_sizes:\n for conv_layer in conv_layers:\n NAME = '{}-conv-{}-nodes-{}-dense-{}'.format(conv_layer,\n layer_size, dense_layer, int(time.time()))\n print(NAME)\n model = Sequential()\n model.add(Conv2D(layer_size, (3, 3), input_shape=INPUT_SHAPE))\n model.add(Activation('relu'))\n model.add(MaxPooling2D(pool_size=(2, 2)))\n for l in range(conv_layer - 1):\n model.add(Conv2D(layer_size, (5, 5)))\n model.add(Activation('relu'))\n model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))\n model.add(Dropout(DROPOUT))\n model.add(Flatten())\n for _ in range(dense_layer):\n model.add(Dense(layer_size))\n model.add(Activation('relu'))\n model.add(Dropout(DROPOUT))\n model.add(Dense(NB_CLASSES))\n model.add(Activation('softmax'))\n tensorboard = TensorBoard(log_dir='logs/{}'.format(NAME))\n model.compile(loss='categorical_crossentropy', optimizer=\n OPTIMIZER, metrics=['accuracy'])\n history = model.fit(x, y, batch_size=BATCH_SIZE, epochs=\n NB_EPOCHS, validation_split=VALIDATION_SPLIT, verbose=1,\n callbacks=[tensorboard])\n if history.history.get('val_acc')[-1] > max:\n max = 
history.history.get('val_acc')[-1]\n if accIndex >= 2:\n os.remove('{}_{}_{}_{}_{}_{}'.format(accIndex - 1,\n round(max, 4), CBP[0], CBP[1], CBP[2],\n f':{date:%Y-%m-%d-%Hh%Mm%Ss}'))\n val_acc_out = open('{}_{}_{}_{}_{}_{}'.format(accIndex,\n round(max, 4), CBP[0], CBP[1], CBP[2],\n f':{date:%Y-%m-%d-%Hh%Mm%Ss}'), 'wb')\n pickle.dump(model.save('{}_{}_{}_{}_{}_{}'.format(accIndex,\n round(max, 4), CBP[0], CBP[1], CBP[2],\n f':{date:%Y-%m-%d-%Hh%Mm%Ss}')), val_acc_out)\n val_acc_out.close()\n accIndex += 1\n pickle_upload = open('{}_pickle'.format(accIndex - 1), 'rb')\n p_upload = pickle.load(pickle_upload)\n print(p_upload)\n if history.history.get('val_loss')[-1] < min:\n min = history.history.get('val_loss')[-1]\n if lossIndex >= 2:\n os.remove('{}_{}_{}_{}_{}_{}'.format(lossIndex - 1,\n round(min, 4), CBP[0], CBP[1], CBP[2],\n f':{date:%Y-%m-%d-%Hh%Mm%Ss}'))\n val_loss_out = open('{}_{}_{}_{}_{}_{}'.format(lossIndex,\n round(min, 4), CBP[0], CBP[1], CBP[2],\n f':{date:%Y-%m-%d-%Hh%Mm%Ss}'))\n pickle.dump(model.save('{}_{}_{}_{}_{}_{}'.format(lossIndex,\n round(min, 4), CBP[0], CBP[1], CBP[2],\n f':{date:%Y-%m-%d-%Hh%Mm%Ss}')), val_loss_out)\n val_loss_out.close()\n lossIndex += 1\nmodel.save('64x3-CNN.model')\nCATEGORIES = ['Dog', 'Cat']\n\n\ndef prepare(filepath):\n IMG_SIZE = 299\n img_array = cv2.imread(filepath, cv2.IMREAD_COLOR)\n resized_img_array = cv2.resize(img_array, (IMG_SIZE, IMG_SIZE))\n return resized_img_array.reshape(-1, IMG_SIZE, IMG_SIZE, 3)\n\n\nmodel = tf.keras.models.load_model('64x3-CNN.model')\nprediction = model.predict([prepare('dog.jpg')])\nprint(prediction)\nprint(prediction[0][0])\nprint(CATEGORIES[int(prediction[0][0])])\nprediction = model.predict([prepare('cat.jpg')])\nprint(prediction)\nprint(CATEGORIES[int(prediction[0][0])])\n<mask token>\n",
"step-5": "'''\n!pip install wget\nfrom zipfile import ZipFile\nimport wget\nprint('Beginning file downlaod with wget module')\n\nurl = 'https://download.microsoft.com/download/3/E/1/3E1C3F21-ECDB-4869-8368-6DEBA77B919F/kagglecatsanddogs_3367a.zip'\nwget.download(url, 'sample_data/')\n\n\nprint('2. Extract all files in ZIP to different directory')\n\n # Create a ZipFile Object and load sample.zip in it\nwith ZipFile('sample_data/kagglecatsanddogs_3367a.zip', 'r') as zipObj:\n # Extract all the contents of zip file in different directory\n zipObj.extractall('content/')\n\n'''\n\n\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport os\nimport cv2\nimport pickle\nimport random\nimport datetime\nimport tensorflow as tf\nfrom tensorflow.python.keras.datasets import cifar10\nfrom tensorflow.python.keras.preprocessing.image import ImageDataGenerator\n\n\nfrom tensorflow.python.keras.models import Sequential\nfrom tensorflow.python.keras.layers import Activation, Dense, Flatten, Dropout\nfrom tensorflow.python.keras.layers import Conv2D, MaxPooling2D\nfrom tensorflow.python.keras.optimizers import Adam\n\nfrom tensorflow.python.keras.callbacks import TensorBoard\n\n\nDATADIR = 'content/PetImages'\nCATEGORIES = ['Cat', 'Dog'] #'''categories that we have to deal with'''\nimg_array= []\n\nfor category in CATEGORIES:\n path = os.path.join(DATADIR, category) # path to cats and dogs dir\n for img in os.listdir(path):\n img_array = cv2.imread(os.path.join(path, img), cv2.IMREAD_COLOR)\n plt.imshow(img_array, cmap='gray')\n plt.show()\n\n print(img_array)\n print(img_array.shape)\n\n break\n break\n\n\nIMG_SIZE = 299 #every image of 299x299\nresized_img_array = cv2.resize(img_array, (IMG_SIZE, IMG_SIZE))\nplt.imshow(resized_img_array, cmap='gray') # cmap = hot, plasma, cool,\nplt.show()\n\n\ntraining_data = []\ndef create_training_data(): # creating training datasets\n for category in CATEGORIES:\n path = os.path.join(DATADIR, category) # path to cats and dogs dir\n\n classIndex = CATEGORIES.index(category) # 0 for dog and 1 for cat\n\n for img in os.listdir(path):\n try:\n img_array = cv2.imread(os.path.join(path, img), cv2.IMREAD_COLOR)\n\n resized_img_array = cv2.resize(img_array, (IMG_SIZE, IMG_SIZE))\n training_data.append([resized_img_array, classIndex])\n except Exception as e:\n pass\n\ncreate_training_data()\n\nprint(len(training_data))\n\n\n\n'''shuffle training data'''\nrandom.shuffle(training_data)\n\n\n\n# for sample in training_data[:10]:\n# print(sample[1])\n\n\n\nx=[]\ny=[]\nfor features, label in training_data:\n x.append(features)\n y.append(label)\nx = np.array(x).reshape(-1, IMG_SIZE, IMG_SIZE, 3) #we can't pass a list to keras for training\n #'''we have to pass here a numpy array '''\n\n# print(x[0].reshape(-1, IMG_SIZE, IMG_SIZE, 1))\n\n\npickle_out = open(\"x.pickle\", 'wb')\npickle.dump(x, pickle_out)\npickle_out.close()\n\npickle_out= open('y.pickle', 'wb')\npickle.dump(y, pickle_out)\npickle_out.close()\n\npickle_in = open('x.pickle', 'rb')\nx = pickle.load(pickle_in)\npickle_in = open('y.pickle', 'rb')\ny = pickle.load(pickle_in)\n\n\nx = x / 255.0\nINPUT_SHAPE = x.shape[1:]#(224, 224, 3)\nDROPOUT=0.2\nNB_CLASSES=10\nNB_EPOCHS=10\nBATCH_SIZE=128\nVALIDATION_SPLIT=0.2\nOPTIMIZER = Adam()\n\n\nmax, min, accIndex , lossIndex=70.0 , 4.0, 1, 1\ndate = datetime.datetime.now()\n\ndense_layers = [2, 1, 0] # 0, 1,2\nlayer_sizes = [512, 256, 128, 64] #32, 64, 128, 256, 512\nconv_layers = [3, 2, 1] # 1, 2,3\n\nfor dense_layer in dense_layers:\n for layer_size in layer_sizes:\n 
for conv_layer in conv_layers:\n NAME = \"{}-conv-{}-nodes-{}-dense-{}\".format(conv_layer, layer_size, dense_layer, int(time.time()))\n print(NAME)\n\n model = Sequential()\n\n model.add(Conv2D(layer_size, (3, 3), input_shape=INPUT_SHAPE))\n model.add(Activation('relu'))\n model.add(MaxPooling2D(pool_size=(2, 2)))\n\n for l in range(conv_layer-1):\n model.add(Conv2D(layer_size, (5, 5)))\n model.add(Activation('relu'))\n model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))\n model.add(Dropout(DROPOUT))\n\n model.add(Flatten())\n\n for _ in range(dense_layer):\n model.add(Dense(layer_size))\n model.add(Activation('relu'))\n model.add(Dropout(DROPOUT))\n\n model.add(Dense(NB_CLASSES))\n model.add(Activation('softmax'))\n\n tensorboard = TensorBoard(log_dir=\"logs/{}\".format(NAME))\n\n model.compile(loss='categorical_crossentropy',\n optimizer=OPTIMIZER,\n metrics=['accuracy'],\n )\n\n history = model.fit(x, y,\n batch_size=BATCH_SIZE,\n epochs=NB_EPOCHS,\n validation_split=VALIDATION_SPLIT,\n verbose=1,\n callbacks=[tensorboard])\n if history.history.get('val_acc')[-1] > max:\n max = history.history.get('val_acc')[-1]\n if accIndex >= 2:\n os.remove('{}_{}_{}_{}_{}_{}'.format(accIndex-1, round(max, 4), CBP[0], CBP[1], CBP[2], f\":{date:%Y-%m-%d-%Hh%Mm%Ss}\"))\n val_acc_out = open('{}_{}_{}_{}_{}_{}'.format(accIndex, round(max, 4), CBP[0], CBP[1], CBP[2], f\":{date:%Y-%m-%d-%Hh%Mm%Ss}\"), \"wb\")\n pickle.dump(model.save('{}_{}_{}_{}_{}_{}'.format(accIndex, round(max, 4), CBP[0], CBP[1], CBP[2], f\":{date:%Y-%m-%d-%Hh%Mm%Ss}\")),\n val_acc_out)\n val_acc_out.close()\n accIndex += 1\n\n pickle_upload = open('{}_pickle'.format(accIndex - 1), 'rb')\n p_upload = pickle.load(pickle_upload)\n print(p_upload)\n\n\n if history.history.get('val_loss')[-1] < min:\n min = history.history.get('val_loss')[-1]\n if lossIndex>=2:\n os.remove('{}_{}_{}_{}_{}_{}'.format(lossIndex-1, round(min, 4), CBP[0], CBP[1], CBP[2], f\":{date:%Y-%m-%d-%Hh%Mm%Ss}\"))\n val_loss_out = open('{}_{}_{}_{}_{}_{}'.format(lossIndex, round(min, 4), CBP[0], CBP[1], CBP[2], f\":{date:%Y-%m-%d-%Hh%Mm%Ss}\"))\n pickle.dump(model.save('{}_{}_{}_{}_{}_{}'.format(lossIndex, round(min, 4), CBP[0], CBP[1], CBP[2], f\":{date:%Y-%m-%d-%Hh%Mm%Ss}\")),\n val_loss_out)\n val_loss_out.close()\n lossIndex += 1\n\n\n\n\nmodel.save('64x3-CNN.model')\n\n\nCATEGORIES = [\"Dog\", \"Cat\"] # will use this to convert prediction num to string value\n\n\ndef prepare(filepath):\n IMG_SIZE = 299 # 50 in txt-based\n img_array = cv2.imread(filepath, cv2.IMREAD_COLOR) # read in the image, convert to grayscale\n resized_img_array = cv2.resize(img_array, (IMG_SIZE, IMG_SIZE)) # resize image to match model's expected sizing\n return resized_img_array.reshape(-1, IMG_SIZE, IMG_SIZE, 3) # return the image with shaping that TF wants.\n\n\nmodel = tf.keras.models.load_model(\"64x3-CNN.model\")\nprediction = model.predict([prepare('dog.jpg')]) # REMEMBER YOU'RE PASSING A LIST OF THINGS YOU WISH TO PREDICT\nprint(prediction)\nprint(prediction[0][0])\n\nprint(CATEGORIES[int(prediction[0][0])])\n\n\n#We can also test our cat example:\n\nprediction = model.predict([prepare('cat.jpg')])\nprint(prediction) # will be a list in a list.\nprint(CATEGORIES[int(prediction[0][0])])\n\n\n\n'''\nalpha. Also referred to as the learning rate or step size. The proportion that weights are updated (e.g. 0.001). Larger values (e.g. 0.3) results in faster initial learning before the rate is updated. Smaller values (e.g. 1.0E-5) slow learning right down during training\nbeta1. 
The exponential decay rate for the first moment estimates (e.g. 0.9).\nbeta2. The exponential decay rate for the second-moment estimates (e.g. 0.999). This value should be set close to 1.0 on problems with a sparse gradient (e.g. NLP and computer vision problems).\nepsilon. Is a very small number to prevent any division by zero in the implementation (e.g. 10E-8).\n\nWe can see that the popular deep learning libraries generally use the default parameters recommended by the paper.\n\nTensorFlow: learning_rate=0.001, beta1=0.9, beta2=0.999, epsilon=1e-08.\nKeras: lr=0.001, beta_1=0.9, beta_2=0.999, epsilon=1e-08, decay=0.0.\nBlocks: learning_rate=0.002, beta1=0.9, beta2=0.999, epsilon=1e-08, decay_factor=1.\nLasagne: learning_rate=0.001, beta1=0.9, beta2=0.999, epsilon=1e-08\nCaffe: learning_rate=0.001, beta1=0.9, beta2=0.999, epsilon=1e-08\nMxNet: learning_rate=0.001, beta1=0.9, beta2=0.999, epsilon=1e-8\nTorch: learning_rate=0.001, beta1=0.9, beta2=0.999, epsilon=1e-8\n\n\n\n\n'''",
"step-ids": [
1,
3,
4,
5,
6
]
}
|
[
1,
3,
4,
5,
6
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
if __name__ == '__main__':
fix_extra_refs(currentAddress)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
from utils.references import *
if __name__ == '__main__':
fix_extra_refs(currentAddress)
<|reserved_special_token_1|>
# Fix a method's vtable calls + reference making
#@author simo
#@category iOS.kernel
#@keybinding R
#@toolbar logos/refs.png
#@description Resolve references for better CFG
# -*- coding: utf-8 -*-
"""
script which does the following:
- adds references to virtual method calls
- Identifies methods belong to a specific namespace
- Handles multi value vtable reference (multi-nodes)
"""
from utils.references import *
if __name__ == "__main__":
fix_extra_refs(currentAddress)
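# Hypothetical sketch (not the project's utils.references implementation): the kind of
# Ghidra API call such a script relies on to register a resolved vtable call as a
# reference, so the edge shows up in the CFG. The addresses passed in are placeholders.
from ghidra.program.model.symbol import RefType, SourceType

def add_call_ref(from_addr, to_addr):
    ref_mgr = currentProgram.getReferenceManager()
    # operand index 0; mark the edge as a user-defined computed call
    ref_mgr.addMemoryReference(from_addr, to_addr,
                               RefType.COMPUTED_CALL,
                               SourceType.USER_DEFINED, 0)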
|
flexible
|
{
"blob_id": "30a57197e3156023ac9a7c4a5218bfe825e143d9",
"index": 5978,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nif __name__ == '__main__':\n fix_extra_refs(currentAddress)\n",
"step-3": "<mask token>\nfrom utils.references import *\nif __name__ == '__main__':\n fix_extra_refs(currentAddress)\n",
"step-4": "# Fix a method's vtable calls + reference making\n\n#@author simo\n#@category iOS.kernel\n#@keybinding R\n#@toolbar logos/refs.png\n#@description Resolve references for better CFG\n# -*- coding: utf-8 -*-\n\n\"\"\"\nscript which does the following:\n- adds references to virtual method calls\n- Identifies methods belong to a specific namespace\n- Handles multi value vtable reference (multi-nodes)\n\"\"\"\n\nfrom utils.references import *\n\nif __name__ == \"__main__\":\n fix_extra_refs(currentAddress)\n \n \n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Migration(migrations.Migration):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Migration(migrations.Migration):
dependencies = [('travels', '0011_auto_20210505_2230')]
operations = [migrations.RenameField(model_name='trip', old_name=
'hotel_decription', new_name='hotel_description'), migrations.
AlterField(model_name='trip', name='hotelstars', field=models.
IntegerField(blank=True, choices=[(1, 1), (2, 2), (3, 3), (4, 4), (
5, 5), (6, 6)], verbose_name='Gwiazdki hotelu'))]
<|reserved_special_token_1|>
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [('travels', '0011_auto_20210505_2230')]
operations = [migrations.RenameField(model_name='trip', old_name=
'hotel_decription', new_name='hotel_description'), migrations.
AlterField(model_name='trip', name='hotelstars', field=models.
IntegerField(blank=True, choices=[(1, 1), (2, 2), (3, 3), (4, 4), (
5, 5), (6, 6)], verbose_name='Gwiazdki hotelu'))]
<|reserved_special_token_1|>
# Generated by Django 3.1.7 on 2021-05-05 23:28
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('travels', '0011_auto_20210505_2230'),
]
operations = [
migrations.RenameField(
model_name='trip',
old_name='hotel_decription',
new_name='hotel_description',
),
migrations.AlterField(
model_name='trip',
name='hotelstars',
field=models.IntegerField(blank=True, choices=[(1, 1), (2, 2), (3, 3), (4, 4), (5, 5), (6, 6)], verbose_name='Gwiazdki hotelu'),
),
]
|
flexible
|
{
"blob_id": "1e853d58c2066f3fbd381d0d603cd2fcece0cf15",
"index": 7933,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass Migration(migrations.Migration):\n <mask token>\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass Migration(migrations.Migration):\n dependencies = [('travels', '0011_auto_20210505_2230')]\n operations = [migrations.RenameField(model_name='trip', old_name=\n 'hotel_decription', new_name='hotel_description'), migrations.\n AlterField(model_name='trip', name='hotelstars', field=models.\n IntegerField(blank=True, choices=[(1, 1), (2, 2), (3, 3), (4, 4), (\n 5, 5), (6, 6)], verbose_name='Gwiazdki hotelu'))]\n",
"step-4": "from django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n dependencies = [('travels', '0011_auto_20210505_2230')]\n operations = [migrations.RenameField(model_name='trip', old_name=\n 'hotel_decription', new_name='hotel_description'), migrations.\n AlterField(model_name='trip', name='hotelstars', field=models.\n IntegerField(blank=True, choices=[(1, 1), (2, 2), (3, 3), (4, 4), (\n 5, 5), (6, 6)], verbose_name='Gwiazdki hotelu'))]\n",
"step-5": "# Generated by Django 3.1.7 on 2021-05-05 23:28\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('travels', '0011_auto_20210505_2230'),\n ]\n\n operations = [\n migrations.RenameField(\n model_name='trip',\n old_name='hotel_decription',\n new_name='hotel_description',\n ),\n migrations.AlterField(\n model_name='trip',\n name='hotelstars',\n field=models.IntegerField(blank=True, choices=[(1, 1), (2, 2), (3, 3), (4, 4), (5, 5), (6, 6)], verbose_name='Gwiazdki hotelu'),\n ),\n ]\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
import pandas as pd
import numpy as np
import inspect
from script.data_handler.Base.df_plotterMixIn import df_plotterMixIn
from script.util.MixIn import LoggerMixIn
from script.util.PlotTools import PlotTools
DF = pd.DataFrame
Series = pd.Series
class null_clean_methodMixIn:
@staticmethod
def drop_col(df: DF, key):
return df.drop(key, axis=1)
@staticmethod
def fill_major_value_cate(df: DF, key) -> DF:
major_value = df[key].astype(str).describe()['top']
df[key] = df[key].fillna(major_value)
return df
@staticmethod
def fill_random_value_cate(df: DF, key) -> DF:
values = df[key].value_counts().keys()
df[key] = df[key].transform(lambda x: x.fillna(np.random.choice(values)))
# df[key] = df[key].fillna()
return df
@staticmethod
def fill_rate_value_cate(df: DF, key) -> DF:
values, count = zip(*list(df[key].value_counts().items()))
p = np.array(count) / np.sum(count)
df[key] = df[key].transform(lambda x: x.fillna(np.random.choice(values, p=p)))
return df
class Base_dfCleaner(LoggerMixIn, null_clean_methodMixIn, df_plotterMixIn):
import_code = """
import pandas as pd
import numpy as np
import random
from script.data_handler.Base_dfCleaner import Base_dfCleaner
DF = pd.DataFrame
Series = pd.Series
"""
class_template = """
class dfCleaner(Base_dfCleaner):
"""
def __init__(self, df: DF, df_Xs_keys, df_Ys_key, silent=False, verbose=0):
LoggerMixIn.__init__(self, verbose)
null_clean_methodMixIn.__init__(self)
df_plotterMixIn.__init__(self)
self.df = df
self.silent = silent
self.df_Xs_keys = df_Xs_keys
self.df_Ys_key = df_Ys_key
self.plot = PlotTools()
def __method_template(self, df: DF, col_key: str, col: DF, series: Series, Xs_key: list, Ys_key: list):
return df
@property
def method_template(self):
method_template = inspect.getsource(self.__method_template)
method_template = method_template.replace('__method_template', '{col_name}')
return method_template
def boilerplate_maker(self, path=None, encoding='UTF8'):
code = [self.import_code]
code += [self.class_template]
df_only_null = self._df_null_include(self.df)
for key in df_only_null.keys():
code += [self.method_template.format(col_name=key)]
code = "\n".join(code)
if path is not None:
with open(path, mode='w', encoding=encoding) as f:
f.write(code)
return code
def clean(self) -> DF:
for key, val in self.__class__.__dict__.items():
if key in self.df.keys():
col = self.df[[key]]
series = self.df[key]
self.df = val(self, self.df, key, col, series, self.df_Xs_keys, self.df_Ys_key)
return self.df
def null_cols_info(self) -> str:
ret = []
for key, val in list(self.__class__.__dict__.items()):
if key in self.df.keys():
info = self._str_null_col_info(self.df, key)
ret += [info]
return "\n\n".join(ret)
def null_cols_plot(self):
df_only_null = self._df_null_include(self.df)
self._df_cols_plot(df_only_null, list(df_only_null.keys()), self.df_Ys_key)
@staticmethod
def _df_null_include(df: DF) -> DF:
null_column = df.columns[df.isna().any()].tolist()
return df.loc[:, null_column]
def _str_null_col_info(self, df: DF, key) -> str:
ret = []
col = df[[key]]
series = df[key]
na_count = series.isna().sum()
total = len(col)
ret += [f'column : "{key}", null ratio:{float(na_count)/float(total):.4f}%, {na_count}/{total}(null/total)']
ret += [col.describe()]
ret += ['value_counts']
ret += [series.value_counts()]
groupby = df[[key, self.df_Ys_key]].groupby(key).agg(['mean', 'std', 'min', 'max', 'count'])
ret += [groupby]
return "\n".join(map(str, ret))
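# Hypothetical usage sketch (the 'Age' column and train_df are made up for illustration):
# clean() dispatches on method *names* that match DataFrame columns, so a concrete cleaner
# adds one method per null column, usually starting from boilerplate_maker() output.
class dfCleaner(Base_dfCleaner):
    def Age(self, df: DF, col_key: str, col: DF, series: Series, Xs_key: list, Ys_key: list):
        # fill missing ages with the column's most frequent value via the mixin helper
        return self.fill_major_value_cate(df, col_key)

# cleaner = dfCleaner(train_df, df_Xs_keys=['Age', 'Sex'], df_Ys_key='Survived')
# cleaned_df = cleaner.clean()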
|
normal
|
{
"blob_id": "198beb5a17575d781f7bce1ab36a6213ad7331b3",
"index": 5853,
"step-1": "<mask token>\n\n\nclass Base_dfCleaner(LoggerMixIn, null_clean_methodMixIn, df_plotterMixIn):\n <mask token>\n <mask token>\n\n def __init__(self, df: DF, df_Xs_keys, df_Ys_key, silent=False, verbose=0):\n LoggerMixIn.__init__(self, verbose)\n null_clean_methodMixIn.__init__(self)\n df_plotterMixIn.__init__(self)\n self.df = df\n self.silent = silent\n self.df_Xs_keys = df_Xs_keys\n self.df_Ys_key = df_Ys_key\n self.plot = PlotTools()\n\n def __method_template(self, df: DF, col_key: str, col: DF, series:\n Series, Xs_key: list, Ys_key: list):\n return df\n\n @property\n def method_template(self):\n method_template = inspect.getsource(self.__method_template)\n method_template = method_template.replace('__method_template',\n '{col_name}')\n return method_template\n <mask token>\n\n def clean(self) ->DF:\n for key, val in self.__class__.__dict__.items():\n if key in self.df.keys():\n col = self.df[[key]]\n series = self.df[key]\n self.df = val(self, self.df, key, col, series, self.\n df_Xs_keys, self.df_Ys_key)\n return self.df\n <mask token>\n\n def null_cols_plot(self):\n df_only_null = self._df_null_include(self.df)\n self._df_cols_plot(df_only_null, list(df_only_null.keys()), self.\n df_Ys_key)\n\n @staticmethod\n def _df_null_include(df: DF) ->DF:\n null_column = df.columns[df.isna().any()].tolist()\n return df.loc[:, null_column]\n\n def _str_null_col_info(self, df: DF, key) ->str:\n ret = []\n col = df[[key]]\n series = df[key]\n na_count = series.isna().sum()\n total = len(col)\n ret += [\n f'column : \"{key}\", null ratio:{float(na_count) / float(total):.4f}%, {na_count}/{total}(null/total)'\n ]\n ret += [col.describe()]\n ret += ['value_counts']\n ret += [series.value_counts()]\n groupby = df[[key, self.df_Ys_key]].groupby(key).agg(['mean', 'std',\n 'min', 'max', 'count'])\n ret += [groupby]\n return '\\n'.join(map(str, ret))\n",
"step-2": "<mask token>\n\n\nclass null_clean_methodMixIn:\n\n @staticmethod\n def drop_col(df: DF, key):\n return df.drop(key, axis=1)\n <mask token>\n\n @staticmethod\n def fill_random_value_cate(df: DF, key) ->DF:\n values = df[key].value_counts().keys()\n df[key] = df[key].transform(lambda x: x.fillna(np.random.choice(\n values)))\n return df\n\n @staticmethod\n def fill_rate_value_cate(df: DF, key) ->DF:\n values, count = zip(*list(df[key].value_counts().items()))\n p = np.array(count) / np.sum(count)\n df[key] = df[key].transform(lambda x: x.fillna(np.random.choice(\n values, p=p)))\n return df\n\n\nclass Base_dfCleaner(LoggerMixIn, null_clean_methodMixIn, df_plotterMixIn):\n import_code = \"\"\"\n import pandas as pd\n import numpy as np\n import random\n from script.data_handler.Base_dfCleaner import Base_dfCleaner \n\n DF = pd.DataFrame\n Series = pd.Series\n \n\"\"\"\n class_template = '\\nclass dfCleaner(Base_dfCleaner):\\n'\n\n def __init__(self, df: DF, df_Xs_keys, df_Ys_key, silent=False, verbose=0):\n LoggerMixIn.__init__(self, verbose)\n null_clean_methodMixIn.__init__(self)\n df_plotterMixIn.__init__(self)\n self.df = df\n self.silent = silent\n self.df_Xs_keys = df_Xs_keys\n self.df_Ys_key = df_Ys_key\n self.plot = PlotTools()\n\n def __method_template(self, df: DF, col_key: str, col: DF, series:\n Series, Xs_key: list, Ys_key: list):\n return df\n\n @property\n def method_template(self):\n method_template = inspect.getsource(self.__method_template)\n method_template = method_template.replace('__method_template',\n '{col_name}')\n return method_template\n\n def boilerplate_maker(self, path=None, encoding='UTF8'):\n code = [self.import_code]\n code += [self.class_template]\n df_only_null = self._df_null_include(self.df)\n for key in df_only_null.keys():\n code += [self.method_template.format(col_name=key)]\n code = '\\n'.join(code)\n if path is not None:\n with open(path, mode='w', encoding=encoding) as f:\n f.write(code)\n return code\n\n def clean(self) ->DF:\n for key, val in self.__class__.__dict__.items():\n if key in self.df.keys():\n col = self.df[[key]]\n series = self.df[key]\n self.df = val(self, self.df, key, col, series, self.\n df_Xs_keys, self.df_Ys_key)\n return self.df\n\n def null_cols_info(self) ->str:\n ret = []\n for key, val in list(self.__class__.__dict__.items()):\n if key in self.df.keys():\n info = self._str_null_col_info(self.df, key)\n ret += [info]\n return '\\n\\n'.join(ret)\n\n def null_cols_plot(self):\n df_only_null = self._df_null_include(self.df)\n self._df_cols_plot(df_only_null, list(df_only_null.keys()), self.\n df_Ys_key)\n\n @staticmethod\n def _df_null_include(df: DF) ->DF:\n null_column = df.columns[df.isna().any()].tolist()\n return df.loc[:, null_column]\n\n def _str_null_col_info(self, df: DF, key) ->str:\n ret = []\n col = df[[key]]\n series = df[key]\n na_count = series.isna().sum()\n total = len(col)\n ret += [\n f'column : \"{key}\", null ratio:{float(na_count) / float(total):.4f}%, {na_count}/{total}(null/total)'\n ]\n ret += [col.describe()]\n ret += ['value_counts']\n ret += [series.value_counts()]\n groupby = df[[key, self.df_Ys_key]].groupby(key).agg(['mean', 'std',\n 'min', 'max', 'count'])\n ret += [groupby]\n return '\\n'.join(map(str, ret))\n",
"step-3": "<mask token>\n\n\nclass null_clean_methodMixIn:\n\n @staticmethod\n def drop_col(df: DF, key):\n return df.drop(key, axis=1)\n\n @staticmethod\n def fill_major_value_cate(df: DF, key) ->DF:\n major_value = df[key].astype(str).describe()['top']\n df[key] = df[key].fillna(major_value)\n return df\n\n @staticmethod\n def fill_random_value_cate(df: DF, key) ->DF:\n values = df[key].value_counts().keys()\n df[key] = df[key].transform(lambda x: x.fillna(np.random.choice(\n values)))\n return df\n\n @staticmethod\n def fill_rate_value_cate(df: DF, key) ->DF:\n values, count = zip(*list(df[key].value_counts().items()))\n p = np.array(count) / np.sum(count)\n df[key] = df[key].transform(lambda x: x.fillna(np.random.choice(\n values, p=p)))\n return df\n\n\nclass Base_dfCleaner(LoggerMixIn, null_clean_methodMixIn, df_plotterMixIn):\n import_code = \"\"\"\n import pandas as pd\n import numpy as np\n import random\n from script.data_handler.Base_dfCleaner import Base_dfCleaner \n\n DF = pd.DataFrame\n Series = pd.Series\n \n\"\"\"\n class_template = '\\nclass dfCleaner(Base_dfCleaner):\\n'\n\n def __init__(self, df: DF, df_Xs_keys, df_Ys_key, silent=False, verbose=0):\n LoggerMixIn.__init__(self, verbose)\n null_clean_methodMixIn.__init__(self)\n df_plotterMixIn.__init__(self)\n self.df = df\n self.silent = silent\n self.df_Xs_keys = df_Xs_keys\n self.df_Ys_key = df_Ys_key\n self.plot = PlotTools()\n\n def __method_template(self, df: DF, col_key: str, col: DF, series:\n Series, Xs_key: list, Ys_key: list):\n return df\n\n @property\n def method_template(self):\n method_template = inspect.getsource(self.__method_template)\n method_template = method_template.replace('__method_template',\n '{col_name}')\n return method_template\n\n def boilerplate_maker(self, path=None, encoding='UTF8'):\n code = [self.import_code]\n code += [self.class_template]\n df_only_null = self._df_null_include(self.df)\n for key in df_only_null.keys():\n code += [self.method_template.format(col_name=key)]\n code = '\\n'.join(code)\n if path is not None:\n with open(path, mode='w', encoding=encoding) as f:\n f.write(code)\n return code\n\n def clean(self) ->DF:\n for key, val in self.__class__.__dict__.items():\n if key in self.df.keys():\n col = self.df[[key]]\n series = self.df[key]\n self.df = val(self, self.df, key, col, series, self.\n df_Xs_keys, self.df_Ys_key)\n return self.df\n\n def null_cols_info(self) ->str:\n ret = []\n for key, val in list(self.__class__.__dict__.items()):\n if key in self.df.keys():\n info = self._str_null_col_info(self.df, key)\n ret += [info]\n return '\\n\\n'.join(ret)\n\n def null_cols_plot(self):\n df_only_null = self._df_null_include(self.df)\n self._df_cols_plot(df_only_null, list(df_only_null.keys()), self.\n df_Ys_key)\n\n @staticmethod\n def _df_null_include(df: DF) ->DF:\n null_column = df.columns[df.isna().any()].tolist()\n return df.loc[:, null_column]\n\n def _str_null_col_info(self, df: DF, key) ->str:\n ret = []\n col = df[[key]]\n series = df[key]\n na_count = series.isna().sum()\n total = len(col)\n ret += [\n f'column : \"{key}\", null ratio:{float(na_count) / float(total):.4f}%, {na_count}/{total}(null/total)'\n ]\n ret += [col.describe()]\n ret += ['value_counts']\n ret += [series.value_counts()]\n groupby = df[[key, self.df_Ys_key]].groupby(key).agg(['mean', 'std',\n 'min', 'max', 'count'])\n ret += [groupby]\n return '\\n'.join(map(str, ret))\n",
"step-4": "<mask token>\nDF = pd.DataFrame\nSeries = pd.Series\n\n\nclass null_clean_methodMixIn:\n\n @staticmethod\n def drop_col(df: DF, key):\n return df.drop(key, axis=1)\n\n @staticmethod\n def fill_major_value_cate(df: DF, key) ->DF:\n major_value = df[key].astype(str).describe()['top']\n df[key] = df[key].fillna(major_value)\n return df\n\n @staticmethod\n def fill_random_value_cate(df: DF, key) ->DF:\n values = df[key].value_counts().keys()\n df[key] = df[key].transform(lambda x: x.fillna(np.random.choice(\n values)))\n return df\n\n @staticmethod\n def fill_rate_value_cate(df: DF, key) ->DF:\n values, count = zip(*list(df[key].value_counts().items()))\n p = np.array(count) / np.sum(count)\n df[key] = df[key].transform(lambda x: x.fillna(np.random.choice(\n values, p=p)))\n return df\n\n\nclass Base_dfCleaner(LoggerMixIn, null_clean_methodMixIn, df_plotterMixIn):\n import_code = \"\"\"\n import pandas as pd\n import numpy as np\n import random\n from script.data_handler.Base_dfCleaner import Base_dfCleaner \n\n DF = pd.DataFrame\n Series = pd.Series\n \n\"\"\"\n class_template = '\\nclass dfCleaner(Base_dfCleaner):\\n'\n\n def __init__(self, df: DF, df_Xs_keys, df_Ys_key, silent=False, verbose=0):\n LoggerMixIn.__init__(self, verbose)\n null_clean_methodMixIn.__init__(self)\n df_plotterMixIn.__init__(self)\n self.df = df\n self.silent = silent\n self.df_Xs_keys = df_Xs_keys\n self.df_Ys_key = df_Ys_key\n self.plot = PlotTools()\n\n def __method_template(self, df: DF, col_key: str, col: DF, series:\n Series, Xs_key: list, Ys_key: list):\n return df\n\n @property\n def method_template(self):\n method_template = inspect.getsource(self.__method_template)\n method_template = method_template.replace('__method_template',\n '{col_name}')\n return method_template\n\n def boilerplate_maker(self, path=None, encoding='UTF8'):\n code = [self.import_code]\n code += [self.class_template]\n df_only_null = self._df_null_include(self.df)\n for key in df_only_null.keys():\n code += [self.method_template.format(col_name=key)]\n code = '\\n'.join(code)\n if path is not None:\n with open(path, mode='w', encoding=encoding) as f:\n f.write(code)\n return code\n\n def clean(self) ->DF:\n for key, val in self.__class__.__dict__.items():\n if key in self.df.keys():\n col = self.df[[key]]\n series = self.df[key]\n self.df = val(self, self.df, key, col, series, self.\n df_Xs_keys, self.df_Ys_key)\n return self.df\n\n def null_cols_info(self) ->str:\n ret = []\n for key, val in list(self.__class__.__dict__.items()):\n if key in self.df.keys():\n info = self._str_null_col_info(self.df, key)\n ret += [info]\n return '\\n\\n'.join(ret)\n\n def null_cols_plot(self):\n df_only_null = self._df_null_include(self.df)\n self._df_cols_plot(df_only_null, list(df_only_null.keys()), self.\n df_Ys_key)\n\n @staticmethod\n def _df_null_include(df: DF) ->DF:\n null_column = df.columns[df.isna().any()].tolist()\n return df.loc[:, null_column]\n\n def _str_null_col_info(self, df: DF, key) ->str:\n ret = []\n col = df[[key]]\n series = df[key]\n na_count = series.isna().sum()\n total = len(col)\n ret += [\n f'column : \"{key}\", null ratio:{float(na_count) / float(total):.4f}%, {na_count}/{total}(null/total)'\n ]\n ret += [col.describe()]\n ret += ['value_counts']\n ret += [series.value_counts()]\n groupby = df[[key, self.df_Ys_key]].groupby(key).agg(['mean', 'std',\n 'min', 'max', 'count'])\n ret += [groupby]\n return '\\n'.join(map(str, ret))\n",
"step-5": "import pandas as pd\nimport numpy as np\nimport inspect\n\nfrom script.data_handler.Base.df_plotterMixIn import df_plotterMixIn\nfrom script.util.MixIn import LoggerMixIn\nfrom script.util.PlotTools import PlotTools\n\nDF = pd.DataFrame\nSeries = pd.Series\n\n\nclass null_clean_methodMixIn:\n @staticmethod\n def drop_col(df: DF, key):\n return df.drop(key, axis=1)\n\n @staticmethod\n def fill_major_value_cate(df: DF, key) -> DF:\n major_value = df[key].astype(str).describe()['top']\n df[key] = df[key].fillna(major_value)\n return df\n\n @staticmethod\n def fill_random_value_cate(df: DF, key) -> DF:\n values = df[key].value_counts().keys()\n df[key] = df[key].transform(lambda x: x.fillna(np.random.choice(values)))\n # df[key] = df[key].fillna()\n return df\n\n @staticmethod\n def fill_rate_value_cate(df: DF, key) -> DF:\n values, count = zip(*list(df[key].value_counts().items()))\n p = np.array(count) / np.sum(count)\n df[key] = df[key].transform(lambda x: x.fillna(np.random.choice(values, p=p)))\n return df\n\n\nclass Base_dfCleaner(LoggerMixIn, null_clean_methodMixIn, df_plotterMixIn):\n import_code = \"\"\"\n import pandas as pd\n import numpy as np\n import random\n from script.data_handler.Base_dfCleaner import Base_dfCleaner \n\n DF = pd.DataFrame\n Series = pd.Series\n \n\"\"\"\n\n class_template = \"\"\"\nclass dfCleaner(Base_dfCleaner):\n\"\"\"\n\n def __init__(self, df: DF, df_Xs_keys, df_Ys_key, silent=False, verbose=0):\n LoggerMixIn.__init__(self, verbose)\n null_clean_methodMixIn.__init__(self)\n df_plotterMixIn.__init__(self)\n\n self.df = df\n self.silent = silent\n self.df_Xs_keys = df_Xs_keys\n self.df_Ys_key = df_Ys_key\n self.plot = PlotTools()\n\n def __method_template(self, df: DF, col_key: str, col: DF, series: Series, Xs_key: list, Ys_key: list):\n return df\n\n @property\n def method_template(self):\n method_template = inspect.getsource(self.__method_template)\n method_template = method_template.replace('__method_template', '{col_name}')\n return method_template\n\n def boilerplate_maker(self, path=None, encoding='UTF8'):\n code = [self.import_code]\n code += [self.class_template]\n\n df_only_null = self._df_null_include(self.df)\n for key in df_only_null.keys():\n code += [self.method_template.format(col_name=key)]\n\n code = \"\\n\".join(code)\n if path is not None:\n with open(path, mode='w', encoding=encoding) as f:\n f.write(code)\n\n return code\n\n def clean(self) -> DF:\n for key, val in self.__class__.__dict__.items():\n if key in self.df.keys():\n col = self.df[[key]]\n series = self.df[key]\n\n self.df = val(self, self.df, key, col, series, self.df_Xs_keys, self.df_Ys_key)\n\n return self.df\n\n def null_cols_info(self) -> str:\n ret = []\n for key, val in list(self.__class__.__dict__.items()):\n if key in self.df.keys():\n info = self._str_null_col_info(self.df, key)\n ret += [info]\n\n return \"\\n\\n\".join(ret)\n\n def null_cols_plot(self):\n df_only_null = self._df_null_include(self.df)\n self._df_cols_plot(df_only_null, list(df_only_null.keys()), self.df_Ys_key)\n\n @staticmethod\n def _df_null_include(df: DF) -> DF:\n null_column = df.columns[df.isna().any()].tolist()\n return df.loc[:, null_column]\n\n def _str_null_col_info(self, df: DF, key) -> str:\n ret = []\n col = df[[key]]\n series = df[key]\n\n na_count = series.isna().sum()\n total = len(col)\n ret += [f'column : \"{key}\", null ratio:{float(na_count)/float(total):.4f}%, {na_count}/{total}(null/total)']\n ret += [col.describe()]\n ret += ['value_counts']\n ret += 
[series.value_counts()]\n groupby = df[[key, self.df_Ys_key]].groupby(key).agg(['mean', 'std', 'min', 'max', 'count'])\n ret += [groupby]\n\n return \"\\n\".join(map(str, ret))\n",
"step-ids": [
8,
15,
16,
17,
19
]
}
|
[
8,
15,
16,
17,
19
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
while True:
ret, square = result.read()
area = square[100:200, 100:200]
cv2.imshow('video', square)
cv2.imshow('video2', area)
print(square)
if cv2.waitKey(25) & 255 == ord('q'):
break
result.release()
cv2.destroyAllWindows()
<|reserved_special_token_1|>
<|reserved_special_token_0|>
result = cv2.VideoCapture(0)
while True:
ret, square = result.read()
area = square[100:200, 100:200]
cv2.imshow('video', square)
cv2.imshow('video2', area)
print(square)
if cv2.waitKey(25) & 255 == ord('q'):
break
result.release()
cv2.destroyAllWindows()
<|reserved_special_token_1|>
import cv2
import numpy as np
result = cv2.VideoCapture(0)
while True:
ret, square = result.read()
area = square[100:200, 100:200]
cv2.imshow('video', square)
cv2.imshow('video2', area)
print(square)
if cv2.waitKey(25) & 255 == ord('q'):
break
result.release()
cv2.destroyAllWindows()
<|reserved_special_token_1|>
import cv2
import numpy as np
result=cv2.VideoCapture(0)
while True:
ret,square=result.read()
area=square[100:200,100:200]
cv2.imshow("video",square)
cv2.imshow("video2",area)
print(square)
if cv2.waitKey(25) & 0xff == ord('q'):
break
result.release()
cv2.destroyAllWindows()
|
flexible
|
{
"blob_id": "934921b22d036bd611134ce74f6eba3a2710018e",
"index": 529,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nwhile True:\n ret, square = result.read()\n area = square[100:200, 100:200]\n cv2.imshow('video', square)\n cv2.imshow('video2', area)\n print(square)\n if cv2.waitKey(25) & 255 == ord('q'):\n break\nresult.release()\ncv2.destroyAllWindows()\n",
"step-3": "<mask token>\nresult = cv2.VideoCapture(0)\nwhile True:\n ret, square = result.read()\n area = square[100:200, 100:200]\n cv2.imshow('video', square)\n cv2.imshow('video2', area)\n print(square)\n if cv2.waitKey(25) & 255 == ord('q'):\n break\nresult.release()\ncv2.destroyAllWindows()\n",
"step-4": "import cv2\nimport numpy as np\nresult = cv2.VideoCapture(0)\nwhile True:\n ret, square = result.read()\n area = square[100:200, 100:200]\n cv2.imshow('video', square)\n cv2.imshow('video2', area)\n print(square)\n if cv2.waitKey(25) & 255 == ord('q'):\n break\nresult.release()\ncv2.destroyAllWindows()\n",
"step-5": "import cv2\nimport numpy as np\n\n\nresult=cv2.VideoCapture(0)\n\nwhile True:\n ret,square=result.read()\n area=square[100:200,100:200]\n cv2.imshow(\"video\",square)\n cv2.imshow(\"video2\",area)\n print(square)\n\n if cv2.waitKey(25) & 0xff == ord('q'):\n break\nresult.release()\ncv2.destroyAllWindows()\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
"""Note: AWS Glue split from spark since it requires different test dependencies."""
from tests.integration.backend_dependencies import BackendDependencies
from tests.integration.integration_test_fixture import IntegrationTestFixture
aws_glue_integration_tests = []
deployment_patterns = [
# TODO: The AWS_GLUE dependency is only being marked and not run at this time.
IntegrationTestFixture(
name="how_to_use_great_expectations_in_aws_glue",
user_flow_script="tests/integration/docusaurus/deployment_patterns/aws_glue_deployment_patterns.py",
backend_dependencies=[
BackendDependencies.SPARK,
BackendDependencies.AWS,
BackendDependencies.AWS_GLUE,
],
),
]
aws_glue_integration_tests += deployment_patterns
|
normal
|
{
"blob_id": "e288403cb310bb7241b25e74d1b5bcc63967128c",
"index": 1031,
"step-1": "<mask token>\n",
"step-2": "<mask token>\naws_glue_integration_tests += deployment_patterns\n",
"step-3": "<mask token>\naws_glue_integration_tests = []\ndeployment_patterns = [IntegrationTestFixture(name=\n 'how_to_use_great_expectations_in_aws_glue', user_flow_script=\n 'tests/integration/docusaurus/deployment_patterns/aws_glue_deployment_patterns.py'\n , backend_dependencies=[BackendDependencies.SPARK, BackendDependencies.\n AWS, BackendDependencies.AWS_GLUE])]\naws_glue_integration_tests += deployment_patterns\n",
"step-4": "<mask token>\nfrom tests.integration.backend_dependencies import BackendDependencies\nfrom tests.integration.integration_test_fixture import IntegrationTestFixture\naws_glue_integration_tests = []\ndeployment_patterns = [IntegrationTestFixture(name=\n 'how_to_use_great_expectations_in_aws_glue', user_flow_script=\n 'tests/integration/docusaurus/deployment_patterns/aws_glue_deployment_patterns.py'\n , backend_dependencies=[BackendDependencies.SPARK, BackendDependencies.\n AWS, BackendDependencies.AWS_GLUE])]\naws_glue_integration_tests += deployment_patterns\n",
"step-5": "\"\"\"Note: AWS Glue split from spark since it requires different test dependencies.\"\"\"\nfrom tests.integration.backend_dependencies import BackendDependencies\nfrom tests.integration.integration_test_fixture import IntegrationTestFixture\n\naws_glue_integration_tests = []\n\ndeployment_patterns = [\n # TODO: The AWS_GLUE dependency is only being marked and not run at this time.\n IntegrationTestFixture(\n name=\"how_to_use_great_expectations_in_aws_glue\",\n user_flow_script=\"tests/integration/docusaurus/deployment_patterns/aws_glue_deployment_patterns.py\",\n backend_dependencies=[\n BackendDependencies.SPARK,\n BackendDependencies.AWS,\n BackendDependencies.AWS_GLUE,\n ],\n ),\n]\n\naws_glue_integration_tests += deployment_patterns\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
'''OpenGL extension EXT.YUV_target
This module customises the behaviour of the
OpenGL.raw.GLES2.EXT.YUV_target to provide a more
Python-friendly API
Overview (from the spec)
This extension adds support for three new YUV related items: first
rendering to YUV images, second sampling from YUV images while keeping the
data in YUV space, third it defines a new built in function that does
conversion from RGB to YUV with controls to choose ITU-R BT.601-7,
ITU-R BT.601-7 Full range (JFIF images), or ITU-R BT.709-5 standard.
This new functionality is layered on top of the OES_EGL_image_external
extension.
To perform the YUV rendering capability in this extension an application
will attach a texture to the framebuffer object as the color attachment.
If the texture has a target type of TEXTURE_EXTERNAL_OES with YUV color
format then the GL driver can use this framebuffer object as the render
target, TEXTURE_EXTERNAL_OES target with RGB color format are not allowed
with this extension.
The official definition of this extension is available here:
http://www.opengl.org/registry/specs/EXT/YUV_target.txt
'''
from OpenGL import platform, constant, arrays
from OpenGL import extensions, wrapper
import ctypes
from OpenGL.raw.GLES2 import _types, _glgets
from OpenGL.raw.GLES2.EXT.YUV_target import *
from OpenGL.raw.GLES2.EXT.YUV_target import _EXTENSION_NAME
def glInitYuvTargetEXT():
'''Return boolean indicating whether this extension is available'''
from OpenGL import extensions
return extensions.hasGLExtension( _EXTENSION_NAME )
### END AUTOGENERATED SECTION
|
normal
|
{
"blob_id": "08420d31713859946b2f19cebf68c333331cb80e",
"index": 1494,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef glInitYuvTargetEXT():\n \"\"\"Return boolean indicating whether this extension is available\"\"\"\n from OpenGL import extensions\n return extensions.hasGLExtension(_EXTENSION_NAME)\n",
"step-3": "<mask token>\nfrom OpenGL import platform, constant, arrays\nfrom OpenGL import extensions, wrapper\nimport ctypes\nfrom OpenGL.raw.GLES2 import _types, _glgets\nfrom OpenGL.raw.GLES2.EXT.YUV_target import *\nfrom OpenGL.raw.GLES2.EXT.YUV_target import _EXTENSION_NAME\n\n\ndef glInitYuvTargetEXT():\n \"\"\"Return boolean indicating whether this extension is available\"\"\"\n from OpenGL import extensions\n return extensions.hasGLExtension(_EXTENSION_NAME)\n",
"step-4": "'''OpenGL extension EXT.YUV_target\n\nThis module customises the behaviour of the \nOpenGL.raw.GLES2.EXT.YUV_target to provide a more \nPython-friendly API\n\nOverview (from the spec)\n\t\n\tThis extension adds support for three new YUV related items: first\n\trendering to YUV images, second sampling from YUV images while keeping the\n\tdata in YUV space, third it defines a new built in function that does\n\tconversion from RGB to YUV with controls to choose ITU-R BT.601-7,\n\tITU-R BT.601-7 Full range (JFIF images), or ITU-R BT.709-5 standard.\n\t\n\tThis new functionality is layered on top of the OES_EGL_image_external\n\textension.\n\t\n\tTo perform the YUV rendering capability in this extension an application\n\twill attach a texture to the framebuffer object as the color attachment.\n\tIf the texture has a target type of TEXTURE_EXTERNAL_OES with YUV color\n\tformat then the GL driver can use this framebuffer object as the render\n\ttarget, TEXTURE_EXTERNAL_OES target with RGB color format are not allowed\n\twith this extension.\n\nThe official definition of this extension is available here:\nhttp://www.opengl.org/registry/specs/EXT/YUV_target.txt\n'''\nfrom OpenGL import platform, constant, arrays\nfrom OpenGL import extensions, wrapper\nimport ctypes\nfrom OpenGL.raw.GLES2 import _types, _glgets\nfrom OpenGL.raw.GLES2.EXT.YUV_target import *\nfrom OpenGL.raw.GLES2.EXT.YUV_target import _EXTENSION_NAME\n\ndef glInitYuvTargetEXT():\n '''Return boolean indicating whether this extension is available'''\n from OpenGL import extensions\n return extensions.hasGLExtension( _EXTENSION_NAME )\n\n\n### END AUTOGENERATED SECTION",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
print(anios * dias_por_anio * horas_por_dia * segundos_por_hora)
<|reserved_special_token_1|>
anios = 30
dias_por_anio = 365
horas_por_dia = 24
segundos_por_hora = 60
print(anios * dias_por_anio * horas_por_dia * segundos_por_hora)
<|reserved_special_token_1|>
#!/usr/bin/env python
# coding: utf-8
# # Cabecera
# In[1]:
# -*- coding: utf-8 -*-
# ------------- Cantidad de segundos que has vivido -------------
# # Definición de variables
# In[2]:
# Definición de variables
anios = 30
dias_por_anio = 365
horas_por_dia = 24
segundos_por_hora = 60
# # Operación
# In[3]:
# Operación
print (anios * dias_por_anio * horas_por_dia * segundos_por_hora)
# In[ ]:
|
flexible
|
{
"blob_id": "f153da7e4537f807f6c9d9d268a00443933d8315",
"index": 4167,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nprint(anios * dias_por_anio * horas_por_dia * segundos_por_hora)\n",
"step-3": "anios = 30\ndias_por_anio = 365\nhoras_por_dia = 24\nsegundos_por_hora = 60\nprint(anios * dias_por_anio * horas_por_dia * segundos_por_hora)\n",
"step-4": "#!/usr/bin/env python\n# coding: utf-8\n\n# # Cabecera\n\n# In[1]:\n\n\n# -*- coding: utf-8 -*-\n\n# ------------- Cantidad de segundos que has vivido -------------\n\n\n# # Definición de variables\n\n# In[2]:\n\n\n# Definición de variables\nanios = 30\ndias_por_anio = 365\nhoras_por_dia = 24\nsegundos_por_hora = 60\n\n\n# # Operación\n\n# In[3]:\n\n\n# Operación\nprint (anios * dias_por_anio * horas_por_dia * segundos_por_hora)\n\n\n# In[ ]:\n\n\n\n\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
import numpy as np
from sklearn.base import BaseEstimator
class movingAverage(BaseEstimator):
'''Implements a moving average.'''
def __init__(self, lag):
self.lag = lag
def movingAverage(self, periods=5):
'''Implements a naiveLV forecast.'''
try:
# sets data
x = self.data['Values']
d = self.data.index
# sets variables
N = x.size
f = np.zeros((N,))
p = int(periods)
# check history
if self.length < p:
print('Moving Average: Not enough data for %s periods' % str(p))
return False
# forecast
f[0:p] = x[0:p]
for i in range(p, N):
# ex-post
if d[i] <= self.maxDate:
f[i] = x[i - p:i].mean()
# ex-ante
else:
f[i] = f[i - 1]
# truncate 0s
if self.truncate:
f[f < 0] = 0
# set name
colName = 'Moving Average %s' % p
# add to data
self.data[colName] = f
return True
        except Exception as e:
            self.raiseError('Error in Moving Average: ' + str(e))
|
normal
|
{
"blob_id": "f4e45c19105d4ee1520acc0cd61dadfe27904d0f",
"index": 8134,
"step-1": "from sklearn.base import BaseEstimator\n\n\nclass movingAverage(BaseEstimator):\n '''Implements a moving average.'''\n\n def __init__(self, lag):\n self.lag = lag\n\ndef movingAverage(self, periods=5):\n '''Implements a naiveLV forecast.'''\n try:\n # sets data\n x = self.data['Values']\n d = self.data.index\n # sets variables\n N = x.size\n f = np.zeros((N,))\n p = int(periods)\n # check history\n if self.length < p:\n print('Moving Average: Not enough data for %s periods' % str(p))\n return False\n # forecast\n f[0:p] = x[0:p]\n for i in range(p, N):\n # ex-post\n if d[i] <= self.maxDate:\n f[i] = x[i - p:i].mean()\n # ex-ante\n else:\n f[i] = f[i - 1]\n # truncate 0s\n if self.truncate:\n f[f < 0] = 0\n # set name\n colName = 'Moving Average %s' % p\n # add to data\n self.data[colName] = f\n return True\n\n except Exception, e:\n self.raiseError('Error in Moving Average: ' + str(e))",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
import numpy as np
import numdifftools as nd
from scipy import stats
from scipy import optimize
from functools import partial
class TCRPowerCalculator:
def __init__(self, pcmodel):
self.pcmodel = pcmodel
self.predict_variance = self.pcmodel.predict_variance
self.predict_mean = self.pcmodel.predict_mean
self.get_prediction_interval = self.pcmodel.get_prediction_interval
self.predict_detection_probability = self.pcmodel.predict_detection_probability
#possivle TODO: Parse this method out into a new 2-step model class
def predict_detection_probability_2step(self, tcr_frequency, num_reads, num_cells, detect_thresh = 1):
"""
2-step detection probability model where
1) Num_cells_TCR is sampled first from the blood (Poisson model)
2) The RNA detection probability is calculated (Negbin model).
The num_cells_TCR is marginalized with the num_cells parameter as the upper limit
on the number of cells that could be sampled for a given TCR.
"""
mu_cells = tcr_frequency*num_cells
p0_poisson = stats.poisson.pmf(0, mu_cells)
num_cells_TCR = np.arange(1, num_cells + 1)[:,np.newaxis]
#Step 1 Poisson
p1 = stats.poisson.pmf(num_cells_TCR, mu_cells)
#Get rid of 0 probability cell counts
num_cells_TCR = num_cells_TCR[p1 >0]
p1 = p1[p1 >0]
#Step 2 Negbin
mu_reads = self.pcmodel.predict_mean(num_cells_TCR/num_cells, num_reads)
p2 = np.zeros(p1.shape)
for i in np.arange(detect_thresh):
p2 += self.pcmodel.pmf(mu_reads, count = i)
p0_2step = np.dot(p1.squeeze(), p2.squeeze())
#If 0 cells from Poisson model then automatically get 0 reads
return 1.0 - p0_poisson - p0_2step
def get_limit_of_detection_tcrfreq(self, num_reads, conf_level = 0.95):
opt_f = partial(self.pcmodel.predict_detection_probability, num_reads = num_reads)
opt_res = optimize.root_scalar(lambda freq: opt_f(freq) - conf_level,
method = "brentq",
bracket = [1.0e-16, 1])
return opt_res.root
def get_limit_of_detection_nreads(self, tcr_freq, conf_level = 0.95):
opt_nreads = partial(self.pcmodel.predict_detection_probability, tcr_frequencies = tcr_freq)
opt_res = optimize.root_scalar(lambda nreads: opt_nreads(num_reads = nreads) - conf_level,
method = "secant",
x0 = 1.0e-16,
x1 = 1)
return int(np.around(opt_res.root))
|
normal
|
{
"blob_id": "d327151c9659078e12e4aca46631de33e7ca4dcf",
"index": 167,
"step-1": "<mask token>\n\n\nclass TCRPowerCalculator:\n <mask token>\n\n def predict_detection_probability_2step(self, tcr_frequency, num_reads,\n num_cells, detect_thresh=1):\n \"\"\"\n\t\t2-step detection probability model where \n\t\t\n\t\t1) Num_cells_TCR is sampled first from the blood (Poisson model)\n\t\t2) The RNA detection probability is calculated (Negbin model).\n\t\t\n\t\tThe num_cells_TCR is marginalized with the num_cells parameter as the upper limit \n\t\ton the number of cells that could be sampled for a given TCR.\n\t\t\"\"\"\n mu_cells = tcr_frequency * num_cells\n p0_poisson = stats.poisson.pmf(0, mu_cells)\n num_cells_TCR = np.arange(1, num_cells + 1)[:, np.newaxis]\n p1 = stats.poisson.pmf(num_cells_TCR, mu_cells)\n num_cells_TCR = num_cells_TCR[p1 > 0]\n p1 = p1[p1 > 0]\n mu_reads = self.pcmodel.predict_mean(num_cells_TCR / num_cells,\n num_reads)\n p2 = np.zeros(p1.shape)\n for i in np.arange(detect_thresh):\n p2 += self.pcmodel.pmf(mu_reads, count=i)\n p0_2step = np.dot(p1.squeeze(), p2.squeeze())\n return 1.0 - p0_poisson - p0_2step\n <mask token>\n\n def get_limit_of_detection_nreads(self, tcr_freq, conf_level=0.95):\n opt_nreads = partial(self.pcmodel.predict_detection_probability,\n tcr_frequencies=tcr_freq)\n opt_res = optimize.root_scalar(lambda nreads: opt_nreads(num_reads=\n nreads) - conf_level, method='secant', x0=1e-16, x1=1)\n return int(np.around(opt_res.root))\n",
"step-2": "<mask token>\n\n\nclass TCRPowerCalculator:\n\n def __init__(self, pcmodel):\n self.pcmodel = pcmodel\n self.predict_variance = self.pcmodel.predict_variance\n self.predict_mean = self.pcmodel.predict_mean\n self.get_prediction_interval = self.pcmodel.get_prediction_interval\n self.predict_detection_probability = (self.pcmodel.\n predict_detection_probability)\n\n def predict_detection_probability_2step(self, tcr_frequency, num_reads,\n num_cells, detect_thresh=1):\n \"\"\"\n\t\t2-step detection probability model where \n\t\t\n\t\t1) Num_cells_TCR is sampled first from the blood (Poisson model)\n\t\t2) The RNA detection probability is calculated (Negbin model).\n\t\t\n\t\tThe num_cells_TCR is marginalized with the num_cells parameter as the upper limit \n\t\ton the number of cells that could be sampled for a given TCR.\n\t\t\"\"\"\n mu_cells = tcr_frequency * num_cells\n p0_poisson = stats.poisson.pmf(0, mu_cells)\n num_cells_TCR = np.arange(1, num_cells + 1)[:, np.newaxis]\n p1 = stats.poisson.pmf(num_cells_TCR, mu_cells)\n num_cells_TCR = num_cells_TCR[p1 > 0]\n p1 = p1[p1 > 0]\n mu_reads = self.pcmodel.predict_mean(num_cells_TCR / num_cells,\n num_reads)\n p2 = np.zeros(p1.shape)\n for i in np.arange(detect_thresh):\n p2 += self.pcmodel.pmf(mu_reads, count=i)\n p0_2step = np.dot(p1.squeeze(), p2.squeeze())\n return 1.0 - p0_poisson - p0_2step\n <mask token>\n\n def get_limit_of_detection_nreads(self, tcr_freq, conf_level=0.95):\n opt_nreads = partial(self.pcmodel.predict_detection_probability,\n tcr_frequencies=tcr_freq)\n opt_res = optimize.root_scalar(lambda nreads: opt_nreads(num_reads=\n nreads) - conf_level, method='secant', x0=1e-16, x1=1)\n return int(np.around(opt_res.root))\n",
"step-3": "<mask token>\n\n\nclass TCRPowerCalculator:\n\n def __init__(self, pcmodel):\n self.pcmodel = pcmodel\n self.predict_variance = self.pcmodel.predict_variance\n self.predict_mean = self.pcmodel.predict_mean\n self.get_prediction_interval = self.pcmodel.get_prediction_interval\n self.predict_detection_probability = (self.pcmodel.\n predict_detection_probability)\n\n def predict_detection_probability_2step(self, tcr_frequency, num_reads,\n num_cells, detect_thresh=1):\n \"\"\"\n\t\t2-step detection probability model where \n\t\t\n\t\t1) Num_cells_TCR is sampled first from the blood (Poisson model)\n\t\t2) The RNA detection probability is calculated (Negbin model).\n\t\t\n\t\tThe num_cells_TCR is marginalized with the num_cells parameter as the upper limit \n\t\ton the number of cells that could be sampled for a given TCR.\n\t\t\"\"\"\n mu_cells = tcr_frequency * num_cells\n p0_poisson = stats.poisson.pmf(0, mu_cells)\n num_cells_TCR = np.arange(1, num_cells + 1)[:, np.newaxis]\n p1 = stats.poisson.pmf(num_cells_TCR, mu_cells)\n num_cells_TCR = num_cells_TCR[p1 > 0]\n p1 = p1[p1 > 0]\n mu_reads = self.pcmodel.predict_mean(num_cells_TCR / num_cells,\n num_reads)\n p2 = np.zeros(p1.shape)\n for i in np.arange(detect_thresh):\n p2 += self.pcmodel.pmf(mu_reads, count=i)\n p0_2step = np.dot(p1.squeeze(), p2.squeeze())\n return 1.0 - p0_poisson - p0_2step\n\n def get_limit_of_detection_tcrfreq(self, num_reads, conf_level=0.95):\n opt_f = partial(self.pcmodel.predict_detection_probability,\n num_reads=num_reads)\n opt_res = optimize.root_scalar(lambda freq: opt_f(freq) -\n conf_level, method='brentq', bracket=[1e-16, 1])\n return opt_res.root\n\n def get_limit_of_detection_nreads(self, tcr_freq, conf_level=0.95):\n opt_nreads = partial(self.pcmodel.predict_detection_probability,\n tcr_frequencies=tcr_freq)\n opt_res = optimize.root_scalar(lambda nreads: opt_nreads(num_reads=\n nreads) - conf_level, method='secant', x0=1e-16, x1=1)\n return int(np.around(opt_res.root))\n",
"step-4": "import numpy as np\nimport numdifftools as nd\nfrom scipy import stats\nfrom scipy import optimize\nfrom functools import partial\n\n\nclass TCRPowerCalculator:\n\n def __init__(self, pcmodel):\n self.pcmodel = pcmodel\n self.predict_variance = self.pcmodel.predict_variance\n self.predict_mean = self.pcmodel.predict_mean\n self.get_prediction_interval = self.pcmodel.get_prediction_interval\n self.predict_detection_probability = (self.pcmodel.\n predict_detection_probability)\n\n def predict_detection_probability_2step(self, tcr_frequency, num_reads,\n num_cells, detect_thresh=1):\n \"\"\"\n\t\t2-step detection probability model where \n\t\t\n\t\t1) Num_cells_TCR is sampled first from the blood (Poisson model)\n\t\t2) The RNA detection probability is calculated (Negbin model).\n\t\t\n\t\tThe num_cells_TCR is marginalized with the num_cells parameter as the upper limit \n\t\ton the number of cells that could be sampled for a given TCR.\n\t\t\"\"\"\n mu_cells = tcr_frequency * num_cells\n p0_poisson = stats.poisson.pmf(0, mu_cells)\n num_cells_TCR = np.arange(1, num_cells + 1)[:, np.newaxis]\n p1 = stats.poisson.pmf(num_cells_TCR, mu_cells)\n num_cells_TCR = num_cells_TCR[p1 > 0]\n p1 = p1[p1 > 0]\n mu_reads = self.pcmodel.predict_mean(num_cells_TCR / num_cells,\n num_reads)\n p2 = np.zeros(p1.shape)\n for i in np.arange(detect_thresh):\n p2 += self.pcmodel.pmf(mu_reads, count=i)\n p0_2step = np.dot(p1.squeeze(), p2.squeeze())\n return 1.0 - p0_poisson - p0_2step\n\n def get_limit_of_detection_tcrfreq(self, num_reads, conf_level=0.95):\n opt_f = partial(self.pcmodel.predict_detection_probability,\n num_reads=num_reads)\n opt_res = optimize.root_scalar(lambda freq: opt_f(freq) -\n conf_level, method='brentq', bracket=[1e-16, 1])\n return opt_res.root\n\n def get_limit_of_detection_nreads(self, tcr_freq, conf_level=0.95):\n opt_nreads = partial(self.pcmodel.predict_detection_probability,\n tcr_frequencies=tcr_freq)\n opt_res = optimize.root_scalar(lambda nreads: opt_nreads(num_reads=\n nreads) - conf_level, method='secant', x0=1e-16, x1=1)\n return int(np.around(opt_res.root))\n",
"step-5": "import numpy as np \nimport numdifftools as nd\nfrom scipy import stats\nfrom scipy import optimize\nfrom functools import partial\n\nclass TCRPowerCalculator:\n\tdef __init__(self, pcmodel):\n\t\tself.pcmodel = pcmodel\n\t\tself.predict_variance = self.pcmodel.predict_variance\n\t\tself.predict_mean = self.pcmodel.predict_mean\n\t\tself.get_prediction_interval = self.pcmodel.get_prediction_interval\n\t\tself.predict_detection_probability = self.pcmodel.predict_detection_probability\n\n\t#possivle TODO: Parse this method out into a new 2-step model class\n\tdef predict_detection_probability_2step(self, tcr_frequency, num_reads, num_cells, detect_thresh = 1):\t\t\n\t\t\"\"\"\n\t\t2-step detection probability model where \n\t\t\n\t\t1) Num_cells_TCR is sampled first from the blood (Poisson model)\n\t\t2) The RNA detection probability is calculated (Negbin model).\n\t\t\n\t\tThe num_cells_TCR is marginalized with the num_cells parameter as the upper limit \n\t\ton the number of cells that could be sampled for a given TCR.\n\t\t\"\"\"\n\n\t\tmu_cells = tcr_frequency*num_cells\n\t\tp0_poisson = stats.poisson.pmf(0, mu_cells)\n\t\t\n\t\tnum_cells_TCR = np.arange(1, num_cells + 1)[:,np.newaxis]\n\t\t\n\t\t#Step 1 Poisson\n\t\tp1 = stats.poisson.pmf(num_cells_TCR, mu_cells)\n\n\t\t#Get rid of 0 probability cell counts\n\t\tnum_cells_TCR = num_cells_TCR[p1 >0]\n\t\tp1 = p1[p1 >0]\n\n\t\t#Step 2 Negbin\n\t\tmu_reads = self.pcmodel.predict_mean(num_cells_TCR/num_cells, num_reads)\n\t\t\t\t\n\t\tp2 = np.zeros(p1.shape)\n\t\tfor i in np.arange(detect_thresh):\n\t\t\tp2 += self.pcmodel.pmf(mu_reads, count = i)\n\n\t\tp0_2step = np.dot(p1.squeeze(), p2.squeeze())\n\n\t\t#If 0 cells from Poisson model then automatically get 0 reads\n\t\treturn 1.0 - p0_poisson - p0_2step\n\t\n\tdef get_limit_of_detection_tcrfreq(self, num_reads, conf_level = 0.95):\n\t\topt_f = partial(self.pcmodel.predict_detection_probability, num_reads = num_reads) \n\n\t\topt_res = optimize.root_scalar(lambda freq: opt_f(freq) - conf_level,\n\t\t \t\t\t\t\t\t\t\tmethod = \"brentq\",\n\t\t \t\t\t\t\t\t\t\tbracket = [1.0e-16, 1])\n\t\treturn opt_res.root\n\n\tdef get_limit_of_detection_nreads(self, tcr_freq, conf_level = 0.95):\n\t\topt_nreads = partial(self.pcmodel.predict_detection_probability, tcr_frequencies = tcr_freq) \n\n\t\topt_res = optimize.root_scalar(lambda nreads: opt_nreads(num_reads = nreads) - conf_level,\n\t\t\t\t\t\t\t\t\t\tmethod = \"secant\",\n\t\t\t\t\t\t\t\t\t\tx0 = 1.0e-16,\n\t\t\t\t\t\t\t\t\t\tx1 = 1)\n\t\t\n\t\treturn int(np.around(opt_res.root))",
"step-ids": [
3,
4,
5,
6,
7
]
}
|
[
3,
4,
5,
6,
7
] |
s = input()
if len(s) < 26:
for i in range(26):
c = chr(ord("a")+i)
if c not in s:
print(s+c)
exit()
else:
for i in reversed(range(1,26)):
if s[i-1] < s[i]:
s1 = s[0:i-1]
for j in range(26):
c = chr(ord("a")+j)
if c > s[i-1] and c not in s1:
print(s1+c)
exit()
print(-1)
|
normal
|
{
"blob_id": "9931fc25118981bcce80cffd3fda9dc99d951bf5",
"index": 180,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nif len(s) < 26:\n for i in range(26):\n c = chr(ord('a') + i)\n if c not in s:\n print(s + c)\n exit()\nelse:\n for i in reversed(range(1, 26)):\n if s[i - 1] < s[i]:\n s1 = s[0:i - 1]\n for j in range(26):\n c = chr(ord('a') + j)\n if c > s[i - 1] and c not in s1:\n print(s1 + c)\n exit()\n print(-1)\n",
"step-3": "s = input()\nif len(s) < 26:\n for i in range(26):\n c = chr(ord('a') + i)\n if c not in s:\n print(s + c)\n exit()\nelse:\n for i in reversed(range(1, 26)):\n if s[i - 1] < s[i]:\n s1 = s[0:i - 1]\n for j in range(26):\n c = chr(ord('a') + j)\n if c > s[i - 1] and c not in s1:\n print(s1 + c)\n exit()\n print(-1)\n",
"step-4": "s = input()\nif len(s) < 26:\n for i in range(26):\n c = chr(ord(\"a\")+i)\n if c not in s:\n print(s+c)\n exit()\nelse:\n for i in reversed(range(1,26)):\n if s[i-1] < s[i]:\n s1 = s[0:i-1]\n for j in range(26):\n c = chr(ord(\"a\")+j)\n if c > s[i-1] and c not in s1:\n print(s1+c)\n exit()\n print(-1)",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
loops(loop, phoneNumber, message)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
phoneNumber = 'fill the number'
message = 'fill with ur message'
loop = 1
loops(loop, phoneNumber, message)
<|reserved_special_token_1|>
from theMachine import loops
phoneNumber = 'fill the number'
message = 'fill with ur message'
loop = 1
loops(loop, phoneNumber, message)
<|reserved_special_token_1|>
# required !!!
# pip install selenium
# pip install webdriver-manager
from theMachine import loops
# fill the number and message
# you can fill the number with array
phoneNumber = "fill the number"
message = "fill with ur message"
loop = 1 # this how many u want to loop
loops(loop, phoneNumber, message) # input how many u want to loop
|
flexible
|
{
"blob_id": "81dfdf0479fc1f136fa5153840d8c7015f9db676",
"index": 32,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nloops(loop, phoneNumber, message)\n",
"step-3": "<mask token>\nphoneNumber = 'fill the number'\nmessage = 'fill with ur message'\nloop = 1\nloops(loop, phoneNumber, message)\n",
"step-4": "from theMachine import loops\nphoneNumber = 'fill the number'\nmessage = 'fill with ur message'\nloop = 1\nloops(loop, phoneNumber, message)\n",
"step-5": "# required !!!\r\n# pip install selenium\r\n# pip install webdriver-manager\r\n\r\nfrom theMachine import loops\r\n\r\n# fill the number and message\r\n# you can fill the number with array\r\nphoneNumber = \"fill the number\"\r\nmessage = \"fill with ur message\"\r\nloop = 1 # this how many u want to loop\r\n\r\nloops(loop, phoneNumber, message) # input how many u want to loop\r\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
# All Rights Reserved.
#
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from rest_framework import viewsets
from rest_framework.exceptions import APIException
from NSDManagement.serializers import *
from rest_framework.response import Response
from rest_framework import status
from rest_framework.utils import json
from rest_framework.decorators import action
from VnfPackageManagement.models import VnfPkgInfo
from utils.file_manipulation import remove_file, decompress_zip
from utils.format_tools import set_request_parameter_to_string
from utils.process_package.base_package import on_boarded, disabled, enabled, not_in_use, created
from utils.process_package.ns_descriptor import NetworkServiceDescriptor
class NSDescriptorsViewSet(viewsets.ModelViewSet):
queryset = NsdInfo.objects.all()
serializer_class = NsdInfoSerializer
def create(self, request, *args, **kwargs):
set_request_parameter_to_string(request, 'userDefinedData')
request.data['_links'] = {'self': request.build_absolute_uri(),
'nsd_content': request.build_absolute_uri()}
return super().create(request)
def get_success_headers(self, data):
return {'Location': data['_links']['self']}
def list(self, request, *args, **kwargs):
if self.get_queryset().__len__() < 1:
raise APIException(detail='One or more individual NS descriptor resource have been created')
return super().list(request)
def update(self, request, *args, **kwargs):
instance = self.get_object()
if on_boarded != instance.nsdOnboardingState:
raise APIException(detail='NSD nsdOnboardingState is not {}'.format(on_boarded))
if disabled != request.data['nsdOperationalState'] and enabled != request.data['nsdOperationalState']:
raise APIException(detail='ValueError: invalid operationalState',
code=status.HTTP_409_CONFLICT)
response = request.data.copy()
set_request_parameter_to_string(request, 'userDefinedData')
super().update(request)
return Response(response, status=status.HTTP_200_OK)
def destroy(self, request, *args, **kwargs):
instance = self.get_object()
if disabled != instance.nsdOperationalState:
raise APIException(detail='NSD nsdOperationalState is not {}'.format(disabled),
code=status.HTTP_409_CONFLICT)
if not_in_use != instance.nsdUsageState:
raise APIException(detail='NSD nsdUsageState is not {}'.format(not_in_use),
code=status.HTTP_409_CONFLICT)
remove_file('{}{}'.format(nsd_base_path, instance.id))
super().destroy(request)
return Response(status=status.HTTP_204_NO_CONTENT)
@action(detail=True, methods=['PUT'], url_path='nsd_content')
def upload_content(self, request, **kwargs):
instance = self.get_object()
if created != instance.nsdOnboardingState:
raise APIException(detail='NSD nsdOnboardingState is not {}'.format(created),
code=status.HTTP_409_CONFLICT)
if 'application/zip' not in request.META['HTTP_ACCEPT']:
raise APIException(detail='HEAD need to have application/zip value')
network_service_path = decompress_zip(
request.data["file"], '{}{}'.format(nsd_base_path, instance.id) + '/nsd_content/')
network_service_descriptor = NetworkServiceDescriptor(path=network_service_path)
nsd_content = network_service_descriptor.processing_data()
vnf_pkg_ids_list = list()
for vnfd in network_service_descriptor.get_constituent_vnfd():
vnf_pkg_ids_list.append(str(VnfPkgInfo.objects.filter(vnfdId__iexact=vnfd['vnfd_id']).last().id))
nsd_content['vnfPkgIds'] = json.dumps(vnf_pkg_ids_list)
serializer = self.get_serializer(instance, data=nsd_content)
serializer.is_valid(raise_exception=True)
serializer.save()
return Response(status=status.HTTP_202_ACCEPTED)
|
normal
|
{
"blob_id": "5e2fcc6379a8ecee0378d26108e4deab9d17dba6",
"index": 7483,
"step-1": "<mask token>\n\n\nclass NSDescriptorsViewSet(viewsets.ModelViewSet):\n <mask token>\n <mask token>\n <mask token>\n\n def get_success_headers(self, data):\n return {'Location': data['_links']['self']}\n\n def list(self, request, *args, **kwargs):\n if self.get_queryset().__len__() < 1:\n raise APIException(detail=\n 'One or more individual NS descriptor resource have been created'\n )\n return super().list(request)\n\n def update(self, request, *args, **kwargs):\n instance = self.get_object()\n if on_boarded != instance.nsdOnboardingState:\n raise APIException(detail='NSD nsdOnboardingState is not {}'.\n format(on_boarded))\n if disabled != request.data['nsdOperationalState'\n ] and enabled != request.data['nsdOperationalState']:\n raise APIException(detail=\n 'ValueError: invalid operationalState', code=status.\n HTTP_409_CONFLICT)\n response = request.data.copy()\n set_request_parameter_to_string(request, 'userDefinedData')\n super().update(request)\n return Response(response, status=status.HTTP_200_OK)\n\n def destroy(self, request, *args, **kwargs):\n instance = self.get_object()\n if disabled != instance.nsdOperationalState:\n raise APIException(detail='NSD nsdOperationalState is not {}'.\n format(disabled), code=status.HTTP_409_CONFLICT)\n if not_in_use != instance.nsdUsageState:\n raise APIException(detail='NSD nsdUsageState is not {}'.format(\n not_in_use), code=status.HTTP_409_CONFLICT)\n remove_file('{}{}'.format(nsd_base_path, instance.id))\n super().destroy(request)\n return Response(status=status.HTTP_204_NO_CONTENT)\n\n @action(detail=True, methods=['PUT'], url_path='nsd_content')\n def upload_content(self, request, **kwargs):\n instance = self.get_object()\n if created != instance.nsdOnboardingState:\n raise APIException(detail='NSD nsdOnboardingState is not {}'.\n format(created), code=status.HTTP_409_CONFLICT)\n if 'application/zip' not in request.META['HTTP_ACCEPT']:\n raise APIException(detail='HEAD need to have application/zip value'\n )\n network_service_path = decompress_zip(request.data['file'], '{}{}'.\n format(nsd_base_path, instance.id) + '/nsd_content/')\n network_service_descriptor = NetworkServiceDescriptor(path=\n network_service_path)\n nsd_content = network_service_descriptor.processing_data()\n vnf_pkg_ids_list = list()\n for vnfd in network_service_descriptor.get_constituent_vnfd():\n vnf_pkg_ids_list.append(str(VnfPkgInfo.objects.filter(\n vnfdId__iexact=vnfd['vnfd_id']).last().id))\n nsd_content['vnfPkgIds'] = json.dumps(vnf_pkg_ids_list)\n serializer = self.get_serializer(instance, data=nsd_content)\n serializer.is_valid(raise_exception=True)\n serializer.save()\n return Response(status=status.HTTP_202_ACCEPTED)\n",
"step-2": "<mask token>\n\n\nclass NSDescriptorsViewSet(viewsets.ModelViewSet):\n <mask token>\n <mask token>\n\n def create(self, request, *args, **kwargs):\n set_request_parameter_to_string(request, 'userDefinedData')\n request.data['_links'] = {'self': request.build_absolute_uri(),\n 'nsd_content': request.build_absolute_uri()}\n return super().create(request)\n\n def get_success_headers(self, data):\n return {'Location': data['_links']['self']}\n\n def list(self, request, *args, **kwargs):\n if self.get_queryset().__len__() < 1:\n raise APIException(detail=\n 'One or more individual NS descriptor resource have been created'\n )\n return super().list(request)\n\n def update(self, request, *args, **kwargs):\n instance = self.get_object()\n if on_boarded != instance.nsdOnboardingState:\n raise APIException(detail='NSD nsdOnboardingState is not {}'.\n format(on_boarded))\n if disabled != request.data['nsdOperationalState'\n ] and enabled != request.data['nsdOperationalState']:\n raise APIException(detail=\n 'ValueError: invalid operationalState', code=status.\n HTTP_409_CONFLICT)\n response = request.data.copy()\n set_request_parameter_to_string(request, 'userDefinedData')\n super().update(request)\n return Response(response, status=status.HTTP_200_OK)\n\n def destroy(self, request, *args, **kwargs):\n instance = self.get_object()\n if disabled != instance.nsdOperationalState:\n raise APIException(detail='NSD nsdOperationalState is not {}'.\n format(disabled), code=status.HTTP_409_CONFLICT)\n if not_in_use != instance.nsdUsageState:\n raise APIException(detail='NSD nsdUsageState is not {}'.format(\n not_in_use), code=status.HTTP_409_CONFLICT)\n remove_file('{}{}'.format(nsd_base_path, instance.id))\n super().destroy(request)\n return Response(status=status.HTTP_204_NO_CONTENT)\n\n @action(detail=True, methods=['PUT'], url_path='nsd_content')\n def upload_content(self, request, **kwargs):\n instance = self.get_object()\n if created != instance.nsdOnboardingState:\n raise APIException(detail='NSD nsdOnboardingState is not {}'.\n format(created), code=status.HTTP_409_CONFLICT)\n if 'application/zip' not in request.META['HTTP_ACCEPT']:\n raise APIException(detail='HEAD need to have application/zip value'\n )\n network_service_path = decompress_zip(request.data['file'], '{}{}'.\n format(nsd_base_path, instance.id) + '/nsd_content/')\n network_service_descriptor = NetworkServiceDescriptor(path=\n network_service_path)\n nsd_content = network_service_descriptor.processing_data()\n vnf_pkg_ids_list = list()\n for vnfd in network_service_descriptor.get_constituent_vnfd():\n vnf_pkg_ids_list.append(str(VnfPkgInfo.objects.filter(\n vnfdId__iexact=vnfd['vnfd_id']).last().id))\n nsd_content['vnfPkgIds'] = json.dumps(vnf_pkg_ids_list)\n serializer = self.get_serializer(instance, data=nsd_content)\n serializer.is_valid(raise_exception=True)\n serializer.save()\n return Response(status=status.HTTP_202_ACCEPTED)\n",
"step-3": "<mask token>\n\n\nclass NSDescriptorsViewSet(viewsets.ModelViewSet):\n queryset = NsdInfo.objects.all()\n serializer_class = NsdInfoSerializer\n\n def create(self, request, *args, **kwargs):\n set_request_parameter_to_string(request, 'userDefinedData')\n request.data['_links'] = {'self': request.build_absolute_uri(),\n 'nsd_content': request.build_absolute_uri()}\n return super().create(request)\n\n def get_success_headers(self, data):\n return {'Location': data['_links']['self']}\n\n def list(self, request, *args, **kwargs):\n if self.get_queryset().__len__() < 1:\n raise APIException(detail=\n 'One or more individual NS descriptor resource have been created'\n )\n return super().list(request)\n\n def update(self, request, *args, **kwargs):\n instance = self.get_object()\n if on_boarded != instance.nsdOnboardingState:\n raise APIException(detail='NSD nsdOnboardingState is not {}'.\n format(on_boarded))\n if disabled != request.data['nsdOperationalState'\n ] and enabled != request.data['nsdOperationalState']:\n raise APIException(detail=\n 'ValueError: invalid operationalState', code=status.\n HTTP_409_CONFLICT)\n response = request.data.copy()\n set_request_parameter_to_string(request, 'userDefinedData')\n super().update(request)\n return Response(response, status=status.HTTP_200_OK)\n\n def destroy(self, request, *args, **kwargs):\n instance = self.get_object()\n if disabled != instance.nsdOperationalState:\n raise APIException(detail='NSD nsdOperationalState is not {}'.\n format(disabled), code=status.HTTP_409_CONFLICT)\n if not_in_use != instance.nsdUsageState:\n raise APIException(detail='NSD nsdUsageState is not {}'.format(\n not_in_use), code=status.HTTP_409_CONFLICT)\n remove_file('{}{}'.format(nsd_base_path, instance.id))\n super().destroy(request)\n return Response(status=status.HTTP_204_NO_CONTENT)\n\n @action(detail=True, methods=['PUT'], url_path='nsd_content')\n def upload_content(self, request, **kwargs):\n instance = self.get_object()\n if created != instance.nsdOnboardingState:\n raise APIException(detail='NSD nsdOnboardingState is not {}'.\n format(created), code=status.HTTP_409_CONFLICT)\n if 'application/zip' not in request.META['HTTP_ACCEPT']:\n raise APIException(detail='HEAD need to have application/zip value'\n )\n network_service_path = decompress_zip(request.data['file'], '{}{}'.\n format(nsd_base_path, instance.id) + '/nsd_content/')\n network_service_descriptor = NetworkServiceDescriptor(path=\n network_service_path)\n nsd_content = network_service_descriptor.processing_data()\n vnf_pkg_ids_list = list()\n for vnfd in network_service_descriptor.get_constituent_vnfd():\n vnf_pkg_ids_list.append(str(VnfPkgInfo.objects.filter(\n vnfdId__iexact=vnfd['vnfd_id']).last().id))\n nsd_content['vnfPkgIds'] = json.dumps(vnf_pkg_ids_list)\n serializer = self.get_serializer(instance, data=nsd_content)\n serializer.is_valid(raise_exception=True)\n serializer.save()\n return Response(status=status.HTTP_202_ACCEPTED)\n",
"step-4": "from rest_framework import viewsets\nfrom rest_framework.exceptions import APIException\nfrom NSDManagement.serializers import *\nfrom rest_framework.response import Response\nfrom rest_framework import status\nfrom rest_framework.utils import json\nfrom rest_framework.decorators import action\nfrom VnfPackageManagement.models import VnfPkgInfo\nfrom utils.file_manipulation import remove_file, decompress_zip\nfrom utils.format_tools import set_request_parameter_to_string\nfrom utils.process_package.base_package import on_boarded, disabled, enabled, not_in_use, created\nfrom utils.process_package.ns_descriptor import NetworkServiceDescriptor\n\n\nclass NSDescriptorsViewSet(viewsets.ModelViewSet):\n queryset = NsdInfo.objects.all()\n serializer_class = NsdInfoSerializer\n\n def create(self, request, *args, **kwargs):\n set_request_parameter_to_string(request, 'userDefinedData')\n request.data['_links'] = {'self': request.build_absolute_uri(),\n 'nsd_content': request.build_absolute_uri()}\n return super().create(request)\n\n def get_success_headers(self, data):\n return {'Location': data['_links']['self']}\n\n def list(self, request, *args, **kwargs):\n if self.get_queryset().__len__() < 1:\n raise APIException(detail=\n 'One or more individual NS descriptor resource have been created'\n )\n return super().list(request)\n\n def update(self, request, *args, **kwargs):\n instance = self.get_object()\n if on_boarded != instance.nsdOnboardingState:\n raise APIException(detail='NSD nsdOnboardingState is not {}'.\n format(on_boarded))\n if disabled != request.data['nsdOperationalState'\n ] and enabled != request.data['nsdOperationalState']:\n raise APIException(detail=\n 'ValueError: invalid operationalState', code=status.\n HTTP_409_CONFLICT)\n response = request.data.copy()\n set_request_parameter_to_string(request, 'userDefinedData')\n super().update(request)\n return Response(response, status=status.HTTP_200_OK)\n\n def destroy(self, request, *args, **kwargs):\n instance = self.get_object()\n if disabled != instance.nsdOperationalState:\n raise APIException(detail='NSD nsdOperationalState is not {}'.\n format(disabled), code=status.HTTP_409_CONFLICT)\n if not_in_use != instance.nsdUsageState:\n raise APIException(detail='NSD nsdUsageState is not {}'.format(\n not_in_use), code=status.HTTP_409_CONFLICT)\n remove_file('{}{}'.format(nsd_base_path, instance.id))\n super().destroy(request)\n return Response(status=status.HTTP_204_NO_CONTENT)\n\n @action(detail=True, methods=['PUT'], url_path='nsd_content')\n def upload_content(self, request, **kwargs):\n instance = self.get_object()\n if created != instance.nsdOnboardingState:\n raise APIException(detail='NSD nsdOnboardingState is not {}'.\n format(created), code=status.HTTP_409_CONFLICT)\n if 'application/zip' not in request.META['HTTP_ACCEPT']:\n raise APIException(detail='HEAD need to have application/zip value'\n )\n network_service_path = decompress_zip(request.data['file'], '{}{}'.\n format(nsd_base_path, instance.id) + '/nsd_content/')\n network_service_descriptor = NetworkServiceDescriptor(path=\n network_service_path)\n nsd_content = network_service_descriptor.processing_data()\n vnf_pkg_ids_list = list()\n for vnfd in network_service_descriptor.get_constituent_vnfd():\n vnf_pkg_ids_list.append(str(VnfPkgInfo.objects.filter(\n vnfdId__iexact=vnfd['vnfd_id']).last().id))\n nsd_content['vnfPkgIds'] = json.dumps(vnf_pkg_ids_list)\n serializer = self.get_serializer(instance, data=nsd_content)\n 
serializer.is_valid(raise_exception=True)\n serializer.save()\n return Response(status=status.HTTP_202_ACCEPTED)\n",
"step-5": "# All Rights Reserved.\n#\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nfrom rest_framework import viewsets\nfrom rest_framework.exceptions import APIException\nfrom NSDManagement.serializers import *\nfrom rest_framework.response import Response\nfrom rest_framework import status\nfrom rest_framework.utils import json\nfrom rest_framework.decorators import action\nfrom VnfPackageManagement.models import VnfPkgInfo\nfrom utils.file_manipulation import remove_file, decompress_zip\nfrom utils.format_tools import set_request_parameter_to_string\nfrom utils.process_package.base_package import on_boarded, disabled, enabled, not_in_use, created\nfrom utils.process_package.ns_descriptor import NetworkServiceDescriptor\n\n\nclass NSDescriptorsViewSet(viewsets.ModelViewSet):\n queryset = NsdInfo.objects.all()\n serializer_class = NsdInfoSerializer\n\n def create(self, request, *args, **kwargs):\n set_request_parameter_to_string(request, 'userDefinedData')\n request.data['_links'] = {'self': request.build_absolute_uri(),\n 'nsd_content': request.build_absolute_uri()}\n return super().create(request)\n\n def get_success_headers(self, data):\n return {'Location': data['_links']['self']}\n\n def list(self, request, *args, **kwargs):\n if self.get_queryset().__len__() < 1:\n raise APIException(detail='One or more individual NS descriptor resource have been created')\n\n return super().list(request)\n\n def update(self, request, *args, **kwargs):\n instance = self.get_object()\n if on_boarded != instance.nsdOnboardingState:\n raise APIException(detail='NSD nsdOnboardingState is not {}'.format(on_boarded))\n\n if disabled != request.data['nsdOperationalState'] and enabled != request.data['nsdOperationalState']:\n raise APIException(detail='ValueError: invalid operationalState',\n code=status.HTTP_409_CONFLICT)\n\n response = request.data.copy()\n set_request_parameter_to_string(request, 'userDefinedData')\n\n super().update(request)\n return Response(response, status=status.HTTP_200_OK)\n\n def destroy(self, request, *args, **kwargs):\n instance = self.get_object()\n if disabled != instance.nsdOperationalState:\n raise APIException(detail='NSD nsdOperationalState is not {}'.format(disabled),\n code=status.HTTP_409_CONFLICT)\n\n if not_in_use != instance.nsdUsageState:\n raise APIException(detail='NSD nsdUsageState is not {}'.format(not_in_use),\n code=status.HTTP_409_CONFLICT)\n\n remove_file('{}{}'.format(nsd_base_path, instance.id))\n super().destroy(request)\n return Response(status=status.HTTP_204_NO_CONTENT)\n\n @action(detail=True, methods=['PUT'], url_path='nsd_content')\n def upload_content(self, request, **kwargs):\n instance = self.get_object()\n if created != instance.nsdOnboardingState:\n raise APIException(detail='NSD nsdOnboardingState is not {}'.format(created),\n code=status.HTTP_409_CONFLICT)\n\n if 'application/zip' not in request.META['HTTP_ACCEPT']:\n raise APIException(detail='HEAD need to have application/zip value')\n\n network_service_path = decompress_zip(\n 
request.data[\"file\"], '{}{}'.format(nsd_base_path, instance.id) + '/nsd_content/')\n network_service_descriptor = NetworkServiceDescriptor(path=network_service_path)\n nsd_content = network_service_descriptor.processing_data()\n vnf_pkg_ids_list = list()\n for vnfd in network_service_descriptor.get_constituent_vnfd():\n vnf_pkg_ids_list.append(str(VnfPkgInfo.objects.filter(vnfdId__iexact=vnfd['vnfd_id']).last().id))\n\n nsd_content['vnfPkgIds'] = json.dumps(vnf_pkg_ids_list)\n serializer = self.get_serializer(instance, data=nsd_content)\n serializer.is_valid(raise_exception=True)\n serializer.save()\n\n return Response(status=status.HTTP_202_ACCEPTED)\n",
"step-ids": [
6,
7,
8,
9,
10
]
}
|
[
6,
7,
8,
9,
10
] |
class Reader:
@staticmethod
def read_file(file_path):
return ''
|
normal
|
{
"blob_id": "8c51b2c06f971c92e30d6b2d668fdd2fd75142d2",
"index": 4846,
"step-1": "<mask token>\n",
"step-2": "class Reader:\n <mask token>\n",
"step-3": "class Reader:\n\n @staticmethod\n def read_file(file_path):\n return ''\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
import tensorflow as tf
import blood_model
import os
import numpy as np
FLAGS = tf.app.flags.FLAGS
RUN = 'new_test_hm'
tf.app.flags.DEFINE_string('checkpoint_dir', RUN+'/checkpoints',
"""Directory where to write event logs and checkpoint.""")
tf.app.flags.DEFINE_string('summaries_dir', RUN+'/summaries',
"""Summaries directory""")
tf.app.flags.DEFINE_integer('max_steps', 20000,
                            """Maximum steps to train the model""")
tf.app.flags.DEFINE_boolean('continue_run', True,
                            """Continue from when training stopped?""")
def train():
"""Train blood_model for a number of steps. Periodically evaluate training and validation accuracies """
global_step = tf.Variable(0, name='global_step', trainable=False)
# Get images and labels for blood_model.
blood_datasets = blood_model.inputs(eval_data=False)
# randomize the inputs look
x, y_, data, keep_prob = blood_model.prepare_input()
# build the convolution network
conv_output, _, _, _, _ = blood_model.inference(data, keep_prob)
# Calculate loss.
loss = blood_model.loss(conv_output, y_)
accuracy = blood_model.accuracy(conv_output, y_)
train_op = blood_model.train(loss, global_step)
sess = tf.InteractiveSession()
sess.run(tf.initialize_all_variables())
# Build the summary operation based on the TF collection of Summaries.
summary_op = tf.merge_all_summaries()
saver = tf.train.Saver()
check_filesystem()
train_writer = tf.train.SummaryWriter(FLAGS.summaries_dir + '/train', sess.graph)
validation_writer = tf.train.SummaryWriter(FLAGS.summaries_dir + '/validation', sess.graph)
test_writer = tf.train.SummaryWriter(FLAGS.summaries_dir + '/test', sess.graph)
_ = reload_checkpoint_if_exists(sess, saver, train_writer, validation_writer, test_writer)
for step in range(tf.train.global_step(sess, global_step)+1, FLAGS.max_steps):
batch = blood_datasets.train.next_batch()
_, loss_output = sess.run([train_op, loss], feed_dict={x: batch[0], y_: batch[1], keep_prob: 0.5})
assert not np.isnan(loss_output)
if step % 100 == 0:
summary, train_accuracy = sess.run([summary_op, accuracy], feed_dict={
x: batch[0], y_: batch[1], keep_prob: 1.0})
train_writer.add_summary(summary, step)
print("step %d, training accuracy %g, loss %g" % (step, train_accuracy, loss_output))
if (step % 1000 == 0 or (step + 1) == FLAGS.max_steps) and not step == 0:
batch = blood_datasets.validation.next_batch()
summary_validation, accuracy_validation = sess.run([summary_op, accuracy], feed_dict={
x: batch[0], y_: batch[1], keep_prob: 1.0})
validation_writer.add_summary(summary_validation, step)
print("validation accuracy %g" % accuracy_validation)
# save checkpoint
checkpoint_path = os.path.join(FLAGS.checkpoint_dir, 'model.ckpt')
saver.save(sess, checkpoint_path, global_step=step)
print("saving checkpoint")
def check_filesystem():
"""
either start a new checkpoint or continue from existing checkpoint folder
"""
if FLAGS.continue_run:
# start a new run, set flag to continue, so there is nothing
# check if something there, if not, create, but don't delete
if not tf.gfile.Exists(FLAGS.summaries_dir):
tf.gfile.MakeDirs(FLAGS.summaries_dir)
tf.gfile.MakeDirs(os.path.join(FLAGS.summaries_dir, 'train'))
tf.gfile.MakeDirs(os.path.join(FLAGS.summaries_dir, 'validation'))
tf.gfile.MakeDirs(os.path.join(FLAGS.summaries_dir, 'test'))
if not tf.gfile.Exists(FLAGS.checkpoint_dir):
tf.gfile.MakeDirs(FLAGS.checkpoint_dir)
else:
# delete checkpoints and event summaries because training restarted
if tf.gfile.Exists(FLAGS.summaries_dir):
tf.gfile.DeleteRecursively(FLAGS.summaries_dir)
tf.gfile.MakeDirs(FLAGS.summaries_dir)
tf.gfile.MakeDirs(os.path.join(FLAGS.summaries_dir, 'train'))
tf.gfile.MakeDirs(os.path.join(FLAGS.summaries_dir, 'validation'))
tf.gfile.MakeDirs(os.path.join(FLAGS.summaries_dir, 'test'))
if tf.gfile.Exists(FLAGS.checkpoint_dir):
tf.gfile.DeleteRecursively(FLAGS.checkpoint_dir)
tf.gfile.MakeDirs(FLAGS.checkpoint_dir)
def reload_checkpoint_if_exists(sess, saver, train_writer, validation_writer, test_writer):
"""
restore existing model from checkpoint data
"""
global_step = -1
if FLAGS.continue_run:
ckpt = tf.train.get_checkpoint_state(FLAGS.checkpoint_dir)
if ckpt and ckpt.model_checkpoint_path:
# Restores from checkpoint
saver.restore(sess, ckpt.model_checkpoint_path)
# extract global_step from it.
global_step = int(ckpt.model_checkpoint_path.split('/')[-1].split('-')[-1])
print("checkpoint found at step %d", global_step)
# ensure that the writers ignore saved summaries that occurred after the last checkpoint but before a crash
train_writer.add_session_log(tf.SessionLog(status=tf.SessionLog.START), global_step)
validation_writer.add_session_log(tf.SessionLog(status=tf.SessionLog.START), global_step)
test_writer.add_session_log(tf.SessionLog(status=tf.SessionLog.START), global_step)
else:
print('No checkpoint file found')
return global_step
def main(argv=None):
train()
if __name__ == '__main__':
tf.app.run()
|
normal
|
{
"blob_id": "f653e906d3026de4bb1e705162f4321bb75e8705",
"index": 4166,
"step-1": "<mask token>\n\n\ndef check_filesystem():\n \"\"\"\n either start a new checkpoint or continue from existing checkpoint folder\n \"\"\"\n if FLAGS.continue_run:\n if not tf.gfile.Exists(FLAGS.summaries_dir):\n tf.gfile.MakeDirs(FLAGS.summaries_dir)\n tf.gfile.MakeDirs(os.path.join(FLAGS.summaries_dir, 'train'))\n tf.gfile.MakeDirs(os.path.join(FLAGS.summaries_dir, 'validation'))\n tf.gfile.MakeDirs(os.path.join(FLAGS.summaries_dir, 'test'))\n if not tf.gfile.Exists(FLAGS.checkpoint_dir):\n tf.gfile.MakeDirs(FLAGS.checkpoint_dir)\n else:\n if tf.gfile.Exists(FLAGS.summaries_dir):\n tf.gfile.DeleteRecursively(FLAGS.summaries_dir)\n tf.gfile.MakeDirs(FLAGS.summaries_dir)\n tf.gfile.MakeDirs(os.path.join(FLAGS.summaries_dir, 'train'))\n tf.gfile.MakeDirs(os.path.join(FLAGS.summaries_dir, 'validation'))\n tf.gfile.MakeDirs(os.path.join(FLAGS.summaries_dir, 'test'))\n if tf.gfile.Exists(FLAGS.checkpoint_dir):\n tf.gfile.DeleteRecursively(FLAGS.checkpoint_dir)\n tf.gfile.MakeDirs(FLAGS.checkpoint_dir)\n\n\ndef reload_checkpoint_if_exists(sess, saver, train_writer,\n validation_writer, test_writer):\n \"\"\"\n restore existing model from checkpoint data\n \"\"\"\n global_step = -1\n if FLAGS.continue_run:\n ckpt = tf.train.get_checkpoint_state(FLAGS.checkpoint_dir)\n if ckpt and ckpt.model_checkpoint_path:\n saver.restore(sess, ckpt.model_checkpoint_path)\n global_step = int(ckpt.model_checkpoint_path.split('/')[-1].\n split('-')[-1])\n print('checkpoint found at step %d', global_step)\n train_writer.add_session_log(tf.SessionLog(status=tf.SessionLog\n .START), global_step)\n validation_writer.add_session_log(tf.SessionLog(status=tf.\n SessionLog.START), global_step)\n test_writer.add_session_log(tf.SessionLog(status=tf.SessionLog.\n START), global_step)\n else:\n print('No checkpoint file found')\n return global_step\n\n\ndef main(argv=None):\n train()\n\n\n<mask token>\n",
"step-2": "<mask token>\ntf.app.flags.DEFINE_string('checkpoint_dir', RUN + '/checkpoints',\n 'Directory where to write event logs and checkpoint.')\ntf.app.flags.DEFINE_string('summaries_dir', RUN + '/summaries',\n 'Summaries directory')\ntf.app.flags.DEFINE_string('max_steps', 20000,\n 'Maximum steps to train the model')\ntf.app.flags.DEFINE_string('continue_run', True,\n 'Continue from when training stopped?')\n\n\ndef train():\n \"\"\"Train blood_model for a number of steps. Periodically evaluate training and validation accuracies \"\"\"\n global_step = tf.Variable(0, name='global_step', trainable=False)\n blood_datasets = blood_model.inputs(eval_data=False)\n x, y_, data, keep_prob = blood_model.prepare_input()\n conv_output, _, _, _, _ = blood_model.inference(data, keep_prob)\n loss = blood_model.loss(conv_output, y_)\n accuracy = blood_model.accuracy(conv_output, y_)\n train_op = blood_model.train(loss, global_step)\n sess = tf.InteractiveSession()\n sess.run(tf.initialize_all_variables())\n summary_op = tf.merge_all_summaries()\n saver = tf.train.Saver()\n check_filesystem()\n train_writer = tf.train.SummaryWriter(FLAGS.summaries_dir + '/train',\n sess.graph)\n validation_writer = tf.train.SummaryWriter(FLAGS.summaries_dir +\n '/validation', sess.graph)\n test_writer = tf.train.SummaryWriter(FLAGS.summaries_dir + '/test',\n sess.graph)\n _ = reload_checkpoint_if_exists(sess, saver, train_writer,\n validation_writer, test_writer)\n for step in range(tf.train.global_step(sess, global_step) + 1, FLAGS.\n max_steps):\n batch = blood_datasets.train.next_batch()\n _, loss_output = sess.run([train_op, loss], feed_dict={x: batch[0],\n y_: batch[1], keep_prob: 0.5})\n assert not np.isnan(loss_output)\n if step % 100 == 0:\n summary, train_accuracy = sess.run([summary_op, accuracy],\n feed_dict={x: batch[0], y_: batch[1], keep_prob: 1.0})\n train_writer.add_summary(summary, step)\n print('step %d, training accuracy %g, loss %g' % (step,\n train_accuracy, loss_output))\n if (step % 1000 == 0 or step + 1 == FLAGS.max_steps) and not step == 0:\n batch = blood_datasets.validation.next_batch()\n summary_validation, accuracy_validation = sess.run([summary_op,\n accuracy], feed_dict={x: batch[0], y_: batch[1], keep_prob:\n 1.0})\n validation_writer.add_summary(summary_validation, step)\n print('validation accuracy %g' % accuracy_validation)\n checkpoint_path = os.path.join(FLAGS.checkpoint_dir, 'model.ckpt')\n saver.save(sess, checkpoint_path, global_step=step)\n print('saving checkpoint')\n\n\ndef check_filesystem():\n \"\"\"\n either start a new checkpoint or continue from existing checkpoint folder\n \"\"\"\n if FLAGS.continue_run:\n if not tf.gfile.Exists(FLAGS.summaries_dir):\n tf.gfile.MakeDirs(FLAGS.summaries_dir)\n tf.gfile.MakeDirs(os.path.join(FLAGS.summaries_dir, 'train'))\n tf.gfile.MakeDirs(os.path.join(FLAGS.summaries_dir, 'validation'))\n tf.gfile.MakeDirs(os.path.join(FLAGS.summaries_dir, 'test'))\n if not tf.gfile.Exists(FLAGS.checkpoint_dir):\n tf.gfile.MakeDirs(FLAGS.checkpoint_dir)\n else:\n if tf.gfile.Exists(FLAGS.summaries_dir):\n tf.gfile.DeleteRecursively(FLAGS.summaries_dir)\n tf.gfile.MakeDirs(FLAGS.summaries_dir)\n tf.gfile.MakeDirs(os.path.join(FLAGS.summaries_dir, 'train'))\n tf.gfile.MakeDirs(os.path.join(FLAGS.summaries_dir, 'validation'))\n tf.gfile.MakeDirs(os.path.join(FLAGS.summaries_dir, 'test'))\n if tf.gfile.Exists(FLAGS.checkpoint_dir):\n tf.gfile.DeleteRecursively(FLAGS.checkpoint_dir)\n tf.gfile.MakeDirs(FLAGS.checkpoint_dir)\n\n\ndef 
reload_checkpoint_if_exists(sess, saver, train_writer,\n validation_writer, test_writer):\n \"\"\"\n restore existing model from checkpoint data\n \"\"\"\n global_step = -1\n if FLAGS.continue_run:\n ckpt = tf.train.get_checkpoint_state(FLAGS.checkpoint_dir)\n if ckpt and ckpt.model_checkpoint_path:\n saver.restore(sess, ckpt.model_checkpoint_path)\n global_step = int(ckpt.model_checkpoint_path.split('/')[-1].\n split('-')[-1])\n print('checkpoint found at step %d', global_step)\n train_writer.add_session_log(tf.SessionLog(status=tf.SessionLog\n .START), global_step)\n validation_writer.add_session_log(tf.SessionLog(status=tf.\n SessionLog.START), global_step)\n test_writer.add_session_log(tf.SessionLog(status=tf.SessionLog.\n START), global_step)\n else:\n print('No checkpoint file found')\n return global_step\n\n\ndef main(argv=None):\n train()\n\n\nif __name__ == '__main__':\n tf.app.run()\n",
"step-3": "<mask token>\nFLAGS = tf.app.flags.FLAGS\nRUN = 'new_test_hm'\ntf.app.flags.DEFINE_string('checkpoint_dir', RUN + '/checkpoints',\n 'Directory where to write event logs and checkpoint.')\ntf.app.flags.DEFINE_string('summaries_dir', RUN + '/summaries',\n 'Summaries directory')\ntf.app.flags.DEFINE_string('max_steps', 20000,\n 'Maximum steps to train the model')\ntf.app.flags.DEFINE_string('continue_run', True,\n 'Continue from when training stopped?')\n\n\ndef train():\n \"\"\"Train blood_model for a number of steps. Periodically evaluate training and validation accuracies \"\"\"\n global_step = tf.Variable(0, name='global_step', trainable=False)\n blood_datasets = blood_model.inputs(eval_data=False)\n x, y_, data, keep_prob = blood_model.prepare_input()\n conv_output, _, _, _, _ = blood_model.inference(data, keep_prob)\n loss = blood_model.loss(conv_output, y_)\n accuracy = blood_model.accuracy(conv_output, y_)\n train_op = blood_model.train(loss, global_step)\n sess = tf.InteractiveSession()\n sess.run(tf.initialize_all_variables())\n summary_op = tf.merge_all_summaries()\n saver = tf.train.Saver()\n check_filesystem()\n train_writer = tf.train.SummaryWriter(FLAGS.summaries_dir + '/train',\n sess.graph)\n validation_writer = tf.train.SummaryWriter(FLAGS.summaries_dir +\n '/validation', sess.graph)\n test_writer = tf.train.SummaryWriter(FLAGS.summaries_dir + '/test',\n sess.graph)\n _ = reload_checkpoint_if_exists(sess, saver, train_writer,\n validation_writer, test_writer)\n for step in range(tf.train.global_step(sess, global_step) + 1, FLAGS.\n max_steps):\n batch = blood_datasets.train.next_batch()\n _, loss_output = sess.run([train_op, loss], feed_dict={x: batch[0],\n y_: batch[1], keep_prob: 0.5})\n assert not np.isnan(loss_output)\n if step % 100 == 0:\n summary, train_accuracy = sess.run([summary_op, accuracy],\n feed_dict={x: batch[0], y_: batch[1], keep_prob: 1.0})\n train_writer.add_summary(summary, step)\n print('step %d, training accuracy %g, loss %g' % (step,\n train_accuracy, loss_output))\n if (step % 1000 == 0 or step + 1 == FLAGS.max_steps) and not step == 0:\n batch = blood_datasets.validation.next_batch()\n summary_validation, accuracy_validation = sess.run([summary_op,\n accuracy], feed_dict={x: batch[0], y_: batch[1], keep_prob:\n 1.0})\n validation_writer.add_summary(summary_validation, step)\n print('validation accuracy %g' % accuracy_validation)\n checkpoint_path = os.path.join(FLAGS.checkpoint_dir, 'model.ckpt')\n saver.save(sess, checkpoint_path, global_step=step)\n print('saving checkpoint')\n\n\ndef check_filesystem():\n \"\"\"\n either start a new checkpoint or continue from existing checkpoint folder\n \"\"\"\n if FLAGS.continue_run:\n if not tf.gfile.Exists(FLAGS.summaries_dir):\n tf.gfile.MakeDirs(FLAGS.summaries_dir)\n tf.gfile.MakeDirs(os.path.join(FLAGS.summaries_dir, 'train'))\n tf.gfile.MakeDirs(os.path.join(FLAGS.summaries_dir, 'validation'))\n tf.gfile.MakeDirs(os.path.join(FLAGS.summaries_dir, 'test'))\n if not tf.gfile.Exists(FLAGS.checkpoint_dir):\n tf.gfile.MakeDirs(FLAGS.checkpoint_dir)\n else:\n if tf.gfile.Exists(FLAGS.summaries_dir):\n tf.gfile.DeleteRecursively(FLAGS.summaries_dir)\n tf.gfile.MakeDirs(FLAGS.summaries_dir)\n tf.gfile.MakeDirs(os.path.join(FLAGS.summaries_dir, 'train'))\n tf.gfile.MakeDirs(os.path.join(FLAGS.summaries_dir, 'validation'))\n tf.gfile.MakeDirs(os.path.join(FLAGS.summaries_dir, 'test'))\n if tf.gfile.Exists(FLAGS.checkpoint_dir):\n tf.gfile.DeleteRecursively(FLAGS.checkpoint_dir)\n 
tf.gfile.MakeDirs(FLAGS.checkpoint_dir)\n\n\ndef reload_checkpoint_if_exists(sess, saver, train_writer,\n validation_writer, test_writer):\n \"\"\"\n restore existing model from checkpoint data\n \"\"\"\n global_step = -1\n if FLAGS.continue_run:\n ckpt = tf.train.get_checkpoint_state(FLAGS.checkpoint_dir)\n if ckpt and ckpt.model_checkpoint_path:\n saver.restore(sess, ckpt.model_checkpoint_path)\n global_step = int(ckpt.model_checkpoint_path.split('/')[-1].\n split('-')[-1])\n print('checkpoint found at step %d', global_step)\n train_writer.add_session_log(tf.SessionLog(status=tf.SessionLog\n .START), global_step)\n validation_writer.add_session_log(tf.SessionLog(status=tf.\n SessionLog.START), global_step)\n test_writer.add_session_log(tf.SessionLog(status=tf.SessionLog.\n START), global_step)\n else:\n print('No checkpoint file found')\n return global_step\n\n\ndef main(argv=None):\n train()\n\n\nif __name__ == '__main__':\n tf.app.run()\n",
"step-4": "import tensorflow as tf\nimport blood_model\nimport os\nimport numpy as np\nFLAGS = tf.app.flags.FLAGS\nRUN = 'new_test_hm'\ntf.app.flags.DEFINE_string('checkpoint_dir', RUN + '/checkpoints',\n 'Directory where to write event logs and checkpoint.')\ntf.app.flags.DEFINE_string('summaries_dir', RUN + '/summaries',\n 'Summaries directory')\ntf.app.flags.DEFINE_string('max_steps', 20000,\n 'Maximum steps to train the model')\ntf.app.flags.DEFINE_string('continue_run', True,\n 'Continue from when training stopped?')\n\n\ndef train():\n \"\"\"Train blood_model for a number of steps. Periodically evaluate training and validation accuracies \"\"\"\n global_step = tf.Variable(0, name='global_step', trainable=False)\n blood_datasets = blood_model.inputs(eval_data=False)\n x, y_, data, keep_prob = blood_model.prepare_input()\n conv_output, _, _, _, _ = blood_model.inference(data, keep_prob)\n loss = blood_model.loss(conv_output, y_)\n accuracy = blood_model.accuracy(conv_output, y_)\n train_op = blood_model.train(loss, global_step)\n sess = tf.InteractiveSession()\n sess.run(tf.initialize_all_variables())\n summary_op = tf.merge_all_summaries()\n saver = tf.train.Saver()\n check_filesystem()\n train_writer = tf.train.SummaryWriter(FLAGS.summaries_dir + '/train',\n sess.graph)\n validation_writer = tf.train.SummaryWriter(FLAGS.summaries_dir +\n '/validation', sess.graph)\n test_writer = tf.train.SummaryWriter(FLAGS.summaries_dir + '/test',\n sess.graph)\n _ = reload_checkpoint_if_exists(sess, saver, train_writer,\n validation_writer, test_writer)\n for step in range(tf.train.global_step(sess, global_step) + 1, FLAGS.\n max_steps):\n batch = blood_datasets.train.next_batch()\n _, loss_output = sess.run([train_op, loss], feed_dict={x: batch[0],\n y_: batch[1], keep_prob: 0.5})\n assert not np.isnan(loss_output)\n if step % 100 == 0:\n summary, train_accuracy = sess.run([summary_op, accuracy],\n feed_dict={x: batch[0], y_: batch[1], keep_prob: 1.0})\n train_writer.add_summary(summary, step)\n print('step %d, training accuracy %g, loss %g' % (step,\n train_accuracy, loss_output))\n if (step % 1000 == 0 or step + 1 == FLAGS.max_steps) and not step == 0:\n batch = blood_datasets.validation.next_batch()\n summary_validation, accuracy_validation = sess.run([summary_op,\n accuracy], feed_dict={x: batch[0], y_: batch[1], keep_prob:\n 1.0})\n validation_writer.add_summary(summary_validation, step)\n print('validation accuracy %g' % accuracy_validation)\n checkpoint_path = os.path.join(FLAGS.checkpoint_dir, 'model.ckpt')\n saver.save(sess, checkpoint_path, global_step=step)\n print('saving checkpoint')\n\n\ndef check_filesystem():\n \"\"\"\n either start a new checkpoint or continue from existing checkpoint folder\n \"\"\"\n if FLAGS.continue_run:\n if not tf.gfile.Exists(FLAGS.summaries_dir):\n tf.gfile.MakeDirs(FLAGS.summaries_dir)\n tf.gfile.MakeDirs(os.path.join(FLAGS.summaries_dir, 'train'))\n tf.gfile.MakeDirs(os.path.join(FLAGS.summaries_dir, 'validation'))\n tf.gfile.MakeDirs(os.path.join(FLAGS.summaries_dir, 'test'))\n if not tf.gfile.Exists(FLAGS.checkpoint_dir):\n tf.gfile.MakeDirs(FLAGS.checkpoint_dir)\n else:\n if tf.gfile.Exists(FLAGS.summaries_dir):\n tf.gfile.DeleteRecursively(FLAGS.summaries_dir)\n tf.gfile.MakeDirs(FLAGS.summaries_dir)\n tf.gfile.MakeDirs(os.path.join(FLAGS.summaries_dir, 'train'))\n tf.gfile.MakeDirs(os.path.join(FLAGS.summaries_dir, 'validation'))\n tf.gfile.MakeDirs(os.path.join(FLAGS.summaries_dir, 'test'))\n if tf.gfile.Exists(FLAGS.checkpoint_dir):\n 
tf.gfile.DeleteRecursively(FLAGS.checkpoint_dir)\n tf.gfile.MakeDirs(FLAGS.checkpoint_dir)\n\n\ndef reload_checkpoint_if_exists(sess, saver, train_writer,\n validation_writer, test_writer):\n \"\"\"\n restore existing model from checkpoint data\n \"\"\"\n global_step = -1\n if FLAGS.continue_run:\n ckpt = tf.train.get_checkpoint_state(FLAGS.checkpoint_dir)\n if ckpt and ckpt.model_checkpoint_path:\n saver.restore(sess, ckpt.model_checkpoint_path)\n global_step = int(ckpt.model_checkpoint_path.split('/')[-1].\n split('-')[-1])\n print('checkpoint found at step %d', global_step)\n train_writer.add_session_log(tf.SessionLog(status=tf.SessionLog\n .START), global_step)\n validation_writer.add_session_log(tf.SessionLog(status=tf.\n SessionLog.START), global_step)\n test_writer.add_session_log(tf.SessionLog(status=tf.SessionLog.\n START), global_step)\n else:\n print('No checkpoint file found')\n return global_step\n\n\ndef main(argv=None):\n train()\n\n\nif __name__ == '__main__':\n tf.app.run()\n",
"step-5": "import tensorflow as tf\nimport blood_model\nimport os\nimport numpy as np\n\n\nFLAGS = tf.app.flags.FLAGS\nRUN = 'new_test_hm'\ntf.app.flags.DEFINE_string('checkpoint_dir', RUN+'/checkpoints',\n \"\"\"Directory where to write event logs and checkpoint.\"\"\")\ntf.app.flags.DEFINE_string('summaries_dir', RUN+'/summaries',\n \"\"\"Summaries directory\"\"\")\ntf.app.flags.DEFINE_string('max_steps', 20000,\n \"\"\"Maximum steps to train the model\"\"\")\ntf.app.flags.DEFINE_string('continue_run', True,\n \"\"\"Continue from when training stopped?\"\"\")\n\n\ndef train():\n \"\"\"Train blood_model for a number of steps. Periodically evaluate training and validation accuracies \"\"\"\n\n global_step = tf.Variable(0, name='global_step', trainable=False)\n\n # Get images and labels for blood_model.\n blood_datasets = blood_model.inputs(eval_data=False)\n\n # randomize the inputs look\n x, y_, data, keep_prob = blood_model.prepare_input()\n\n # build the convolution network\n conv_output, _, _, _, _ = blood_model.inference(data, keep_prob)\n # Calculate loss.\n loss = blood_model.loss(conv_output, y_)\n accuracy = blood_model.accuracy(conv_output, y_)\n\n train_op = blood_model.train(loss, global_step)\n\n sess = tf.InteractiveSession()\n\n sess.run(tf.initialize_all_variables())\n\n # Build the summary operation based on the TF collection of Summaries.\n summary_op = tf.merge_all_summaries()\n\n saver = tf.train.Saver()\n\n check_filesystem()\n\n train_writer = tf.train.SummaryWriter(FLAGS.summaries_dir + '/train', sess.graph)\n validation_writer = tf.train.SummaryWriter(FLAGS.summaries_dir + '/validation', sess.graph)\n test_writer = tf.train.SummaryWriter(FLAGS.summaries_dir + '/test', sess.graph)\n\n _ = reload_checkpoint_if_exists(sess, saver, train_writer, validation_writer, test_writer)\n for step in range(tf.train.global_step(sess, global_step)+1, FLAGS.max_steps):\n batch = blood_datasets.train.next_batch()\n _, loss_output = sess.run([train_op, loss], feed_dict={x: batch[0], y_: batch[1], keep_prob: 0.5})\n assert not np.isnan(loss_output)\n if step % 100 == 0:\n summary, train_accuracy = sess.run([summary_op, accuracy], feed_dict={\n x: batch[0], y_: batch[1], keep_prob: 1.0})\n train_writer.add_summary(summary, step)\n print(\"step %d, training accuracy %g, loss %g\" % (step, train_accuracy, loss_output))\n\n if (step % 1000 == 0 or (step + 1) == FLAGS.max_steps) and not step == 0:\n batch = blood_datasets.validation.next_batch()\n summary_validation, accuracy_validation = sess.run([summary_op, accuracy], feed_dict={\n x: batch[0], y_: batch[1], keep_prob: 1.0})\n validation_writer.add_summary(summary_validation, step)\n print(\"validation accuracy %g\" % accuracy_validation)\n\n # save checkpoint\n checkpoint_path = os.path.join(FLAGS.checkpoint_dir, 'model.ckpt')\n saver.save(sess, checkpoint_path, global_step=step)\n print(\"saving checkpoint\")\n\n\ndef check_filesystem():\n \"\"\"\n either start a new checkpoint or continue from existing checkpoint folder\n \"\"\"\n if FLAGS.continue_run:\n # start a new run, set flag to continue, so there is nothing\n # check if something there, if not, create, but don't delete\n if not tf.gfile.Exists(FLAGS.summaries_dir):\n tf.gfile.MakeDirs(FLAGS.summaries_dir)\n tf.gfile.MakeDirs(os.path.join(FLAGS.summaries_dir, 'train'))\n tf.gfile.MakeDirs(os.path.join(FLAGS.summaries_dir, 'validation'))\n tf.gfile.MakeDirs(os.path.join(FLAGS.summaries_dir, 'test'))\n if not tf.gfile.Exists(FLAGS.checkpoint_dir):\n 
tf.gfile.MakeDirs(FLAGS.checkpoint_dir)\n else:\n # delete checkpoints and event summaries because training restarted\n if tf.gfile.Exists(FLAGS.summaries_dir):\n tf.gfile.DeleteRecursively(FLAGS.summaries_dir)\n tf.gfile.MakeDirs(FLAGS.summaries_dir)\n tf.gfile.MakeDirs(os.path.join(FLAGS.summaries_dir, 'train'))\n tf.gfile.MakeDirs(os.path.join(FLAGS.summaries_dir, 'validation'))\n tf.gfile.MakeDirs(os.path.join(FLAGS.summaries_dir, 'test'))\n if tf.gfile.Exists(FLAGS.checkpoint_dir):\n tf.gfile.DeleteRecursively(FLAGS.checkpoint_dir)\n tf.gfile.MakeDirs(FLAGS.checkpoint_dir)\n\n\ndef reload_checkpoint_if_exists(sess, saver, train_writer, validation_writer, test_writer):\n \"\"\"\n restore existing model from checkpoint data\n \"\"\"\n global_step = -1\n if FLAGS.continue_run:\n ckpt = tf.train.get_checkpoint_state(FLAGS.checkpoint_dir)\n if ckpt and ckpt.model_checkpoint_path:\n # Restores from checkpoint\n saver.restore(sess, ckpt.model_checkpoint_path)\n # extract global_step from it.\n global_step = int(ckpt.model_checkpoint_path.split('/')[-1].split('-')[-1])\n print(\"checkpoint found at step %d\", global_step)\n # ensure that the writers ignore saved summaries that occurred after the last checkpoint but before a crash\n train_writer.add_session_log(tf.SessionLog(status=tf.SessionLog.START), global_step)\n validation_writer.add_session_log(tf.SessionLog(status=tf.SessionLog.START), global_step)\n test_writer.add_session_log(tf.SessionLog(status=tf.SessionLog.START), global_step)\n else:\n print('No checkpoint file found')\n return global_step\n\n\ndef main(argv=None):\n train()\n\nif __name__ == '__main__':\n tf.app.run()\n",
"step-ids": [
3,
5,
6,
7,
8
]
}
|
[
3,
5,
6,
7,
8
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
while True:
    print('Light Levels:' + str(input.light_level()))
if input.light_level() < 6:
light.set_all(light.rgb(255, 0, 255))
elif input.light_level() < 13:
light.set_all(light.rgb(255, 0, 0))
else:
light.clear()
<|reserved_special_token_1|>
while True:
print("Light Levels:" + input.light_level())
if input.light_level() < 6:
light.set_all(light.rgb(255, 0, 255))
elif input.light_level() < 13:
light.set_all(light.rgb(255, 0, 0))
else:
light.clear()
|
flexible
|
{
"blob_id": "7277b045f85d58383f26ab0d3299feb166f45e36",
"index": 2575,
"step-1": "<mask token>\n",
"step-2": "while True:\n print('Light Levels:' + input.light_level())\n if input.light_level() < 6:\n light.set_all(light.rgb(255, 0, 255))\n elif input.light_level() < 13:\n light.set_all(light.rgb(255, 0, 0))\nelse:\n light.clear()\n",
"step-3": "while True:\n print(\"Light Levels:\" + input.light_level())\n if input.light_level() < 6:\n light.set_all(light.rgb(255, 0, 255))\n elif input.light_level() < 13:\n light.set_all(light.rgb(255, 0, 0))\nelse:\n light.clear()\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
parser.add_argument('data_dir', help='path to training images')
parser.add_argument('--save_dir', default='.', help=
'path where checkpoint is saved')
parser.add_argument('--arch', default='vgg11', help=
'which pre-trained model to use as a base. vgg11 or alexnet')
parser.add_argument('--learning_rate', type=float, default=0.003, help=
'learning rate of the model')
parser.add_argument('--hidden_units', type=int, default=1024, help=
'size of hidden layer')
parser.add_argument('--gpu', default=False, action='store_true', help=
    'use GPU for training')
parser.add_argument('--epochs', type=int, default=1, help=
'number of training epochs')
<|reserved_special_token_0|>
print(args)
def main():
f_class = FlowerClassifier(args.arch, args.hidden_units, args.gpu)
f_class.train(data_dir=args.data_dir, epochs=args.epochs, learning_rate
=args.learning_rate)
save_checkpoint(f_class, 'checkpoint.pth')
top_probs, top_classes = f_class.predict('flowers/valid/1/image_06765.jpg',
3, 'cat_to_name.json')
print(top_probs, top_classes)
if __name__ == '__main__':
main()
<|reserved_special_token_1|>
<|reserved_special_token_0|>
parser = argparse.ArgumentParser()
parser.add_argument('data_dir', help='path to training images')
parser.add_argument('--save_dir', default='.', help=
'path where checkpoint is saved')
parser.add_argument('--arch', default='vgg11', help=
'which pre-trained model to use as a base. vgg11 or alexnet')
parser.add_argument('--learning_rate', type=float, default=0.003, help=
'learning rate of the model')
parser.add_argument('--hidden_units', type=int, default=1024, help=
'size of hidden layer')
parser.add_argument('--gpu', default=False, action='store_true', help=
    'use GPU for training')
parser.add_argument('--epochs', type=int, default=1, help=
'number of training epochs')
args = parser.parse_args()
print(args)
def main():
f_class = FlowerClassifier(args.arch, args.hidden_units, args.gpu)
f_class.train(data_dir=args.data_dir, epochs=args.epochs, learning_rate
=args.learning_rate)
save_checkpoint(f_class, 'checkpoint.pth')
top_probs, top_classes = f_class.predict('flowers/valid/1/image_06765.jpg',
3, 'cat_to_name.json')
print(top_probs, top_classes)
if __name__ == '__main__':
main()
<|reserved_special_token_1|>
import argparse
from flower_classifier import FlowerClassifier
from util import *
parser = argparse.ArgumentParser()
parser.add_argument('data_dir', help='path to training images')
parser.add_argument('--save_dir', default='.', help=
'path where checkpoint is saved')
parser.add_argument('--arch', default='vgg11', help=
'which pre-trained model to use as a base. vgg11 or alexnet')
parser.add_argument('--learning_rate', type=float, default=0.003, help=
'learning rate of the model')
parser.add_argument('--hidden_units', type=int, default=1024, help=
'size of hidden layer')
parser.add_argument('--gpu', default=False, action='store_true', help=
    'use GPU for training')
parser.add_argument('--epochs', type=int, default=1, help=
'number of training epochs')
args = parser.parse_args()
print(args)
def main():
f_class = FlowerClassifier(args.arch, args.hidden_units, args.gpu)
f_class.train(data_dir=args.data_dir, epochs=args.epochs, learning_rate
=args.learning_rate)
save_checkpoint(f_class, 'checkpoint.pth')
top_probs, top_classes = f_class.predict('flowers/valid/1/image_06765.jpg',
3, 'cat_to_name.json')
print(top_probs, top_classes)
if __name__ == '__main__':
main()
<|reserved_special_token_1|>
import argparse
from flower_classifier import FlowerClassifier
from util import *
parser = argparse.ArgumentParser()
parser.add_argument("data_dir", help="path to training images")
parser.add_argument("--save_dir", default=".", help="path where checkpoint is saved")
parser.add_argument("--arch", default="vgg11", help="which pre-trained model to use as a base. vgg11 or alexnet")
parser.add_argument("--learning_rate", type=float, default=0.003, help="learning rate of the model")
parser.add_argument("--hidden_units", type=int, default=1024, help="size of hidden layer")
parser.add_argument("--gpu", default=False, action="store_true", help="size of hidden layer")
parser.add_argument("--epochs", type=int, default=1, help="number of training epochs")
args = parser.parse_args()
print(args)
def main():
f_class = FlowerClassifier(args.arch, args.hidden_units, args.gpu)
f_class.train(data_dir=args.data_dir, epochs=args.epochs, learning_rate=args.learning_rate)
save_checkpoint(f_class, 'checkpoint.pth')
#print(model.cat_to_name)
top_probs, top_classes = f_class.predict('flowers/valid/1/image_06765.jpg', 3, 'cat_to_name.json')
print(top_probs, top_classes)
if __name__ == "__main__": main()
|
flexible
|
{
"blob_id": "0c3947a1699c78080661a55bbaa9215774b4a18e",
"index": 4751,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nparser.add_argument('data_dir', help='path to training images')\nparser.add_argument('--save_dir', default='.', help=\n 'path where checkpoint is saved')\nparser.add_argument('--arch', default='vgg11', help=\n 'which pre-trained model to use as a base. vgg11 or alexnet')\nparser.add_argument('--learning_rate', type=float, default=0.003, help=\n 'learning rate of the model')\nparser.add_argument('--hidden_units', type=int, default=1024, help=\n 'size of hidden layer')\nparser.add_argument('--gpu', default=False, action='store_true', help=\n 'size of hidden layer')\nparser.add_argument('--epochs', type=int, default=1, help=\n 'number of training epochs')\n<mask token>\nprint(args)\n\n\ndef main():\n f_class = FlowerClassifier(args.arch, args.hidden_units, args.gpu)\n f_class.train(data_dir=args.data_dir, epochs=args.epochs, learning_rate\n =args.learning_rate)\n save_checkpoint(f_class, 'checkpoint.pth')\n top_probs, top_classes = f_class.predict('flowers/valid/1/image_06765.jpg',\n 3, 'cat_to_name.json')\n print(top_probs, top_classes)\n\n\nif __name__ == '__main__':\n main()\n",
"step-3": "<mask token>\nparser = argparse.ArgumentParser()\nparser.add_argument('data_dir', help='path to training images')\nparser.add_argument('--save_dir', default='.', help=\n 'path where checkpoint is saved')\nparser.add_argument('--arch', default='vgg11', help=\n 'which pre-trained model to use as a base. vgg11 or alexnet')\nparser.add_argument('--learning_rate', type=float, default=0.003, help=\n 'learning rate of the model')\nparser.add_argument('--hidden_units', type=int, default=1024, help=\n 'size of hidden layer')\nparser.add_argument('--gpu', default=False, action='store_true', help=\n 'size of hidden layer')\nparser.add_argument('--epochs', type=int, default=1, help=\n 'number of training epochs')\nargs = parser.parse_args()\nprint(args)\n\n\ndef main():\n f_class = FlowerClassifier(args.arch, args.hidden_units, args.gpu)\n f_class.train(data_dir=args.data_dir, epochs=args.epochs, learning_rate\n =args.learning_rate)\n save_checkpoint(f_class, 'checkpoint.pth')\n top_probs, top_classes = f_class.predict('flowers/valid/1/image_06765.jpg',\n 3, 'cat_to_name.json')\n print(top_probs, top_classes)\n\n\nif __name__ == '__main__':\n main()\n",
"step-4": "import argparse\nfrom flower_classifier import FlowerClassifier\nfrom util import *\nparser = argparse.ArgumentParser()\nparser.add_argument('data_dir', help='path to training images')\nparser.add_argument('--save_dir', default='.', help=\n 'path where checkpoint is saved')\nparser.add_argument('--arch', default='vgg11', help=\n 'which pre-trained model to use as a base. vgg11 or alexnet')\nparser.add_argument('--learning_rate', type=float, default=0.003, help=\n 'learning rate of the model')\nparser.add_argument('--hidden_units', type=int, default=1024, help=\n 'size of hidden layer')\nparser.add_argument('--gpu', default=False, action='store_true', help=\n 'size of hidden layer')\nparser.add_argument('--epochs', type=int, default=1, help=\n 'number of training epochs')\nargs = parser.parse_args()\nprint(args)\n\n\ndef main():\n f_class = FlowerClassifier(args.arch, args.hidden_units, args.gpu)\n f_class.train(data_dir=args.data_dir, epochs=args.epochs, learning_rate\n =args.learning_rate)\n save_checkpoint(f_class, 'checkpoint.pth')\n top_probs, top_classes = f_class.predict('flowers/valid/1/image_06765.jpg',\n 3, 'cat_to_name.json')\n print(top_probs, top_classes)\n\n\nif __name__ == '__main__':\n main()\n",
"step-5": "import argparse\nfrom flower_classifier import FlowerClassifier\nfrom util import *\n\nparser = argparse.ArgumentParser()\nparser.add_argument(\"data_dir\", help=\"path to training images\")\nparser.add_argument(\"--save_dir\", default=\".\", help=\"path where checkpoint is saved\")\nparser.add_argument(\"--arch\", default=\"vgg11\", help=\"which pre-trained model to use as a base. vgg11 or alexnet\")\nparser.add_argument(\"--learning_rate\", type=float, default=0.003, help=\"learning rate of the model\")\nparser.add_argument(\"--hidden_units\", type=int, default=1024, help=\"size of hidden layer\")\nparser.add_argument(\"--gpu\", default=False, action=\"store_true\", help=\"size of hidden layer\")\nparser.add_argument(\"--epochs\", type=int, default=1, help=\"number of training epochs\")\nargs = parser.parse_args()\nprint(args)\n\ndef main():\n f_class = FlowerClassifier(args.arch, args.hidden_units, args.gpu)\n f_class.train(data_dir=args.data_dir, epochs=args.epochs, learning_rate=args.learning_rate)\n save_checkpoint(f_class, 'checkpoint.pth')\n #print(model.cat_to_name)\n top_probs, top_classes = f_class.predict('flowers/valid/1/image_06765.jpg', 3, 'cat_to_name.json')\n print(top_probs, top_classes)\n\nif __name__ == \"__main__\": main()\n",
"step-ids": [
0,
2,
3,
4,
5
]
}
|
[
0,
2,
3,
4,
5
] |
<|reserved_special_token_0|>
def target2_text(first_input, *params):
return first_input
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def input2_text(first_input, *params):
return my_dataset.voc.idx2docs(first_input)
def target2_text(first_input, *params):
return first_input
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def input2_text(first_input, *params):
return my_dataset.voc.idx2docs(first_input)
def target2_text(first_input, *params):
return first_input
if __name__ == '__main__':
logging.basicConfig(level=logging.INFO)
BATCH_SIZE = 128
NUM_EPOCHS = 500
NUM_WORKERS = 0
PRINT_EVERY = 100
PREDICT_EVERY = 500
EVAL_EVERY = 500
PRE_TRAINED_MODEL = ''
my_dataset.bootstrap()
train_loader = my_dataset.get_dl_train(batch_size=BATCH_SIZE, size=None)
eval_loader = my_dataset.get_dl_eval(batch_size=BATCH_SIZE, size=None)
logging.info('There will be %s steps for training', NUM_EPOCHS * len(
train_loader))
model = LSTMAttention(vocab_size=len(my_dataset.voc.index2word), no_class=2
)
model.train()
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
model.to(device)
logging.info('Model architecture: \n%s', model)
logging.info('Total trainable parameters: %s', pytorch_utils.
count_parameters(model))
init_step = 0
if PRE_TRAINED_MODEL != '':
checkpoint = torch.load(PRE_TRAINED_MODEL, map_location=device)
model.load_state_dict(checkpoint['model_state_dict'])
model.optimizer.load_state_dict(checkpoint['optimizer'])
init_step = checkpoint.get('step', 0)
logging.info('Load pre-trained model from %s successfully',
PRE_TRAINED_MODEL)
root_dir = '/source/main/train/output/'
exp_id = datetime.strftime(datetime.now(), '%Y-%m-%dT%H:%M:%S')
path_checkpoints = os.path.join(root_dir, 'saved_models', model.
__class__.__name__, exp_id)
training_checker = TrainingChecker(model, root_dir=path_checkpoints,
init_score=-10000)
path_logging = os.path.join(root_dir, 'logging', model.__class__.
__name__, exp_id)
train_logger = TrainingLogger(model, measure_interval=PRINT_EVERY,
predict_interval=PREDICT_EVERY, path_to_file=path_logging +
'_train', input_transform=input2_text, output_transform=target2_text)
eval_logger = EvaluateLogger(path_logging + '_validate')
evaluator = Evaluator(model, eval_loader, device, EVAL_EVERY,
eval_logger, training_checker)
training_loop = TrainingLoop(model, train_loader, device, NUM_EPOCHS,
train_logger, evaluator)
training_loop.run()
<|reserved_special_token_1|>
import os
import logging
from datetime import datetime
import torch
from naruto_skills.training_checker import TrainingChecker
from data_for_train import is_question as my_dataset
from model_def.lstm_attention import LSTMAttention
from utils import pytorch_utils
from train.new_trainer import TrainingLoop, TrainingLogger, EvaluateLogger, Evaluator
def input2_text(first_input, *params):
return my_dataset.voc.idx2docs(first_input)
def target2_text(first_input, *params):
return first_input
if __name__ == '__main__':
logging.basicConfig(level=logging.INFO)
BATCH_SIZE = 128
NUM_EPOCHS = 500
NUM_WORKERS = 0
PRINT_EVERY = 100
PREDICT_EVERY = 500
EVAL_EVERY = 500
PRE_TRAINED_MODEL = ''
my_dataset.bootstrap()
train_loader = my_dataset.get_dl_train(batch_size=BATCH_SIZE, size=None)
eval_loader = my_dataset.get_dl_eval(batch_size=BATCH_SIZE, size=None)
logging.info('There will be %s steps for training', NUM_EPOCHS * len(
train_loader))
model = LSTMAttention(vocab_size=len(my_dataset.voc.index2word), no_class=2
)
model.train()
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
model.to(device)
logging.info('Model architecture: \n%s', model)
logging.info('Total trainable parameters: %s', pytorch_utils.
count_parameters(model))
init_step = 0
if PRE_TRAINED_MODEL != '':
checkpoint = torch.load(PRE_TRAINED_MODEL, map_location=device)
model.load_state_dict(checkpoint['model_state_dict'])
model.optimizer.load_state_dict(checkpoint['optimizer'])
init_step = checkpoint.get('step', 0)
logging.info('Load pre-trained model from %s successfully',
PRE_TRAINED_MODEL)
root_dir = '/source/main/train/output/'
exp_id = datetime.strftime(datetime.now(), '%Y-%m-%dT%H:%M:%S')
path_checkpoints = os.path.join(root_dir, 'saved_models', model.
__class__.__name__, exp_id)
training_checker = TrainingChecker(model, root_dir=path_checkpoints,
init_score=-10000)
path_logging = os.path.join(root_dir, 'logging', model.__class__.
__name__, exp_id)
train_logger = TrainingLogger(model, measure_interval=PRINT_EVERY,
predict_interval=PREDICT_EVERY, path_to_file=path_logging +
'_train', input_transform=input2_text, output_transform=target2_text)
eval_logger = EvaluateLogger(path_logging + '_validate')
evaluator = Evaluator(model, eval_loader, device, EVAL_EVERY,
eval_logger, training_checker)
training_loop = TrainingLoop(model, train_loader, device, NUM_EPOCHS,
train_logger, evaluator)
training_loop.run()
<|reserved_special_token_1|>
import os
import logging
from datetime import datetime
import torch
from naruto_skills.training_checker import TrainingChecker
from data_for_train import is_question as my_dataset
from model_def.lstm_attention import LSTMAttention
from utils import pytorch_utils
from train.new_trainer import TrainingLoop, TrainingLogger, EvaluateLogger, Evaluator
def input2_text(first_input, *params):
return my_dataset.voc.idx2docs(first_input)
def target2_text(first_input, *params):
return first_input
if __name__ == '__main__':
logging.basicConfig(level=logging.INFO)
BATCH_SIZE = 128
NUM_EPOCHS = 500
NUM_WORKERS = 0
PRINT_EVERY = 100
PREDICT_EVERY = 500
EVAL_EVERY = 500
PRE_TRAINED_MODEL = ''
my_dataset.bootstrap()
train_loader = my_dataset.get_dl_train(batch_size=BATCH_SIZE, size=None)
eval_loader = my_dataset.get_dl_eval(batch_size=BATCH_SIZE, size=None)
logging.info('There will be %s steps for training', NUM_EPOCHS * len(train_loader))
model = LSTMAttention(vocab_size=len(my_dataset.voc.index2word), no_class=2)
model.train()
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model.to(device)
logging.info('Model architecture: \n%s', model)
logging.info('Total trainable parameters: %s', pytorch_utils.count_parameters(model))
init_step = 0
# Restore model
if PRE_TRAINED_MODEL != '':
checkpoint = torch.load(PRE_TRAINED_MODEL, map_location=device)
model.load_state_dict(checkpoint['model_state_dict'])
model.optimizer.load_state_dict(checkpoint['optimizer'])
init_step = checkpoint.get('step', 0)
logging.info('Load pre-trained model from %s successfully', PRE_TRAINED_MODEL)
root_dir = '/source/main/train/output/'
exp_id = datetime.strftime(datetime.now(), '%Y-%m-%dT%H:%M:%S')
path_checkpoints = os.path.join(root_dir, 'saved_models', model.__class__.__name__, exp_id)
training_checker = TrainingChecker(model, root_dir=path_checkpoints, init_score=-10000)
path_logging = os.path.join(root_dir, 'logging', model.__class__.__name__, exp_id)
train_logger = TrainingLogger(model, measure_interval=PRINT_EVERY, predict_interval=PREDICT_EVERY,
path_to_file=path_logging + '_train', input_transform=input2_text,
output_transform=target2_text)
eval_logger = EvaluateLogger(path_logging + '_validate')
evaluator = Evaluator(model, eval_loader, device, EVAL_EVERY, eval_logger, training_checker)
training_loop = TrainingLoop(model, train_loader, device, NUM_EPOCHS, train_logger, evaluator)
training_loop.run()
|
flexible
|
{
"blob_id": "77884dd72f5efe91fccad27e6328c4ad34378be2",
"index": 6953,
"step-1": "<mask token>\n\n\ndef target2_text(first_input, *params):\n return first_input\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef input2_text(first_input, *params):\n return my_dataset.voc.idx2docs(first_input)\n\n\ndef target2_text(first_input, *params):\n return first_input\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef input2_text(first_input, *params):\n return my_dataset.voc.idx2docs(first_input)\n\n\ndef target2_text(first_input, *params):\n return first_input\n\n\nif __name__ == '__main__':\n logging.basicConfig(level=logging.INFO)\n BATCH_SIZE = 128\n NUM_EPOCHS = 500\n NUM_WORKERS = 0\n PRINT_EVERY = 100\n PREDICT_EVERY = 500\n EVAL_EVERY = 500\n PRE_TRAINED_MODEL = ''\n my_dataset.bootstrap()\n train_loader = my_dataset.get_dl_train(batch_size=BATCH_SIZE, size=None)\n eval_loader = my_dataset.get_dl_eval(batch_size=BATCH_SIZE, size=None)\n logging.info('There will be %s steps for training', NUM_EPOCHS * len(\n train_loader))\n model = LSTMAttention(vocab_size=len(my_dataset.voc.index2word), no_class=2\n )\n model.train()\n device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n model.to(device)\n logging.info('Model architecture: \\n%s', model)\n logging.info('Total trainable parameters: %s', pytorch_utils.\n count_parameters(model))\n init_step = 0\n if PRE_TRAINED_MODEL != '':\n checkpoint = torch.load(PRE_TRAINED_MODEL, map_location=device)\n model.load_state_dict(checkpoint['model_state_dict'])\n model.optimizer.load_state_dict(checkpoint['optimizer'])\n init_step = checkpoint.get('step', 0)\n logging.info('Load pre-trained model from %s successfully',\n PRE_TRAINED_MODEL)\n root_dir = '/source/main/train/output/'\n exp_id = datetime.strftime(datetime.now(), '%Y-%m-%dT%H:%M:%S')\n path_checkpoints = os.path.join(root_dir, 'saved_models', model.\n __class__.__name__, exp_id)\n training_checker = TrainingChecker(model, root_dir=path_checkpoints,\n init_score=-10000)\n path_logging = os.path.join(root_dir, 'logging', model.__class__.\n __name__, exp_id)\n train_logger = TrainingLogger(model, measure_interval=PRINT_EVERY,\n predict_interval=PREDICT_EVERY, path_to_file=path_logging +\n '_train', input_transform=input2_text, output_transform=target2_text)\n eval_logger = EvaluateLogger(path_logging + '_validate')\n evaluator = Evaluator(model, eval_loader, device, EVAL_EVERY,\n eval_logger, training_checker)\n training_loop = TrainingLoop(model, train_loader, device, NUM_EPOCHS,\n train_logger, evaluator)\n training_loop.run()\n",
"step-4": "import os\nimport logging\nfrom datetime import datetime\nimport torch\nfrom naruto_skills.training_checker import TrainingChecker\nfrom data_for_train import is_question as my_dataset\nfrom model_def.lstm_attention import LSTMAttention\nfrom utils import pytorch_utils\nfrom train.new_trainer import TrainingLoop, TrainingLogger, EvaluateLogger, Evaluator\n\n\ndef input2_text(first_input, *params):\n return my_dataset.voc.idx2docs(first_input)\n\n\ndef target2_text(first_input, *params):\n return first_input\n\n\nif __name__ == '__main__':\n logging.basicConfig(level=logging.INFO)\n BATCH_SIZE = 128\n NUM_EPOCHS = 500\n NUM_WORKERS = 0\n PRINT_EVERY = 100\n PREDICT_EVERY = 500\n EVAL_EVERY = 500\n PRE_TRAINED_MODEL = ''\n my_dataset.bootstrap()\n train_loader = my_dataset.get_dl_train(batch_size=BATCH_SIZE, size=None)\n eval_loader = my_dataset.get_dl_eval(batch_size=BATCH_SIZE, size=None)\n logging.info('There will be %s steps for training', NUM_EPOCHS * len(\n train_loader))\n model = LSTMAttention(vocab_size=len(my_dataset.voc.index2word), no_class=2\n )\n model.train()\n device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n model.to(device)\n logging.info('Model architecture: \\n%s', model)\n logging.info('Total trainable parameters: %s', pytorch_utils.\n count_parameters(model))\n init_step = 0\n if PRE_TRAINED_MODEL != '':\n checkpoint = torch.load(PRE_TRAINED_MODEL, map_location=device)\n model.load_state_dict(checkpoint['model_state_dict'])\n model.optimizer.load_state_dict(checkpoint['optimizer'])\n init_step = checkpoint.get('step', 0)\n logging.info('Load pre-trained model from %s successfully',\n PRE_TRAINED_MODEL)\n root_dir = '/source/main/train/output/'\n exp_id = datetime.strftime(datetime.now(), '%Y-%m-%dT%H:%M:%S')\n path_checkpoints = os.path.join(root_dir, 'saved_models', model.\n __class__.__name__, exp_id)\n training_checker = TrainingChecker(model, root_dir=path_checkpoints,\n init_score=-10000)\n path_logging = os.path.join(root_dir, 'logging', model.__class__.\n __name__, exp_id)\n train_logger = TrainingLogger(model, measure_interval=PRINT_EVERY,\n predict_interval=PREDICT_EVERY, path_to_file=path_logging +\n '_train', input_transform=input2_text, output_transform=target2_text)\n eval_logger = EvaluateLogger(path_logging + '_validate')\n evaluator = Evaluator(model, eval_loader, device, EVAL_EVERY,\n eval_logger, training_checker)\n training_loop = TrainingLoop(model, train_loader, device, NUM_EPOCHS,\n train_logger, evaluator)\n training_loop.run()\n",
"step-5": "import os\nimport logging\nfrom datetime import datetime\n\nimport torch\nfrom naruto_skills.training_checker import TrainingChecker\n\nfrom data_for_train import is_question as my_dataset\nfrom model_def.lstm_attention import LSTMAttention\nfrom utils import pytorch_utils\nfrom train.new_trainer import TrainingLoop, TrainingLogger, EvaluateLogger, Evaluator\n\n\ndef input2_text(first_input, *params):\n return my_dataset.voc.idx2docs(first_input)\n\n\ndef target2_text(first_input, *params):\n return first_input\n\n\nif __name__ == '__main__':\n logging.basicConfig(level=logging.INFO)\n BATCH_SIZE = 128\n NUM_EPOCHS = 500\n NUM_WORKERS = 0\n PRINT_EVERY = 100\n PREDICT_EVERY = 500\n EVAL_EVERY = 500\n PRE_TRAINED_MODEL = ''\n\n my_dataset.bootstrap()\n train_loader = my_dataset.get_dl_train(batch_size=BATCH_SIZE, size=None)\n eval_loader = my_dataset.get_dl_eval(batch_size=BATCH_SIZE, size=None)\n logging.info('There will be %s steps for training', NUM_EPOCHS * len(train_loader))\n model = LSTMAttention(vocab_size=len(my_dataset.voc.index2word), no_class=2)\n model.train()\n device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n model.to(device)\n logging.info('Model architecture: \\n%s', model)\n logging.info('Total trainable parameters: %s', pytorch_utils.count_parameters(model))\n\n init_step = 0\n # Restore model\n if PRE_TRAINED_MODEL != '':\n checkpoint = torch.load(PRE_TRAINED_MODEL, map_location=device)\n model.load_state_dict(checkpoint['model_state_dict'])\n model.optimizer.load_state_dict(checkpoint['optimizer'])\n init_step = checkpoint.get('step', 0)\n\n logging.info('Load pre-trained model from %s successfully', PRE_TRAINED_MODEL)\n\n root_dir = '/source/main/train/output/'\n exp_id = datetime.strftime(datetime.now(), '%Y-%m-%dT%H:%M:%S')\n\n path_checkpoints = os.path.join(root_dir, 'saved_models', model.__class__.__name__, exp_id)\n training_checker = TrainingChecker(model, root_dir=path_checkpoints, init_score=-10000)\n\n path_logging = os.path.join(root_dir, 'logging', model.__class__.__name__, exp_id)\n train_logger = TrainingLogger(model, measure_interval=PRINT_EVERY, predict_interval=PREDICT_EVERY,\n path_to_file=path_logging + '_train', input_transform=input2_text,\n output_transform=target2_text)\n\n eval_logger = EvaluateLogger(path_logging + '_validate')\n evaluator = Evaluator(model, eval_loader, device, EVAL_EVERY, eval_logger, training_checker)\n\n training_loop = TrainingLoop(model, train_loader, device, NUM_EPOCHS, train_logger, evaluator)\n training_loop.run()\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
<|reserved_special_token_0|>
class RSTTable(Table):
def run(self) ->List[Node]:
...
class CSVTable(Table):
option_spec: Dict[str, Callable[[str], Any]] = ...
class DocutilsDialect(csv.Dialect):
delimiter: str = ...
quotechar: str = ...
doublequote: bool = ...
skipinitialspace: bool = ...
strict: bool = ...
lineterminator: str = ...
quoting: Any = ...
escapechar: str = ...
def __init__(self, options: Dict[str, Any]) ->None:
...
class HeaderDialect(csv.Dialect):
delimiter: str = ...
quotechar: str = ...
escapechar: str = ...
doublequote: bool = ...
skipinitialspace: bool = ...
strict: bool = ...
lineterminator: str = ...
quoting: Any = ...
def check_requirements(self) ->None:
...
def run(self) ->List[Node]:
...
def get_csv_data(self) ->Tuple[List[str], str]:
...
decode_from_csv: Callable[[str], str] = ...
encode_for_csv: Callable[[str], str] = ...
def parse_csv_data_into_rows(self, csv_data: List[str], dialect: Any,
source: str) ->Tuple[List[Tuple[int, int, int, StringList]], int]:
...
class ListTable(Table):
option_spec: Dict[str, Callable[[str], Any]] = ...
def run(self) ->List[Node]:
...
def check_list_content(self, node: Node) ->Tuple[int, List[int]]:
...
def build_table_from_list(self, table_data: List[List[N_co]],
col_widths: List[int], header_rows: int, stub_columns: int) ->table:
...
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Table(Directive):
optional_arguments: int = ...
final_argument_whitespace: bool = ...
option_spec: Dict[str, Callable[[str], Any]] = ...
has_content: bool = ...
def make_title(self) ->Tuple[title, List[system_message]]:
...
def process_header_option(self) ->Tuple[List[Node], int]:
...
def check_table_dimensions(self, rows: List[List[N_co]], header_rows:
int, stub_columns: int) ->None:
...
def set_table_width(self, table_node: table) ->None:
...
@property
def widths(self) ->str:
...
def get_column_widths(self, max_cols: int) ->List[int]:
...
def extend_short_rows_with_empty_cells(self, columns: int, parts: Tuple
[List[N_co], List[N_co]]) ->None:
...
class RSTTable(Table):
def run(self) ->List[Node]:
...
class CSVTable(Table):
option_spec: Dict[str, Callable[[str], Any]] = ...
class DocutilsDialect(csv.Dialect):
delimiter: str = ...
quotechar: str = ...
doublequote: bool = ...
skipinitialspace: bool = ...
strict: bool = ...
lineterminator: str = ...
quoting: Any = ...
escapechar: str = ...
def __init__(self, options: Dict[str, Any]) ->None:
...
class HeaderDialect(csv.Dialect):
delimiter: str = ...
quotechar: str = ...
escapechar: str = ...
doublequote: bool = ...
skipinitialspace: bool = ...
strict: bool = ...
lineterminator: str = ...
quoting: Any = ...
def check_requirements(self) ->None:
...
def run(self) ->List[Node]:
...
def get_csv_data(self) ->Tuple[List[str], str]:
...
decode_from_csv: Callable[[str], str] = ...
encode_for_csv: Callable[[str], str] = ...
def parse_csv_data_into_rows(self, csv_data: List[str], dialect: Any,
source: str) ->Tuple[List[Tuple[int, int, int, StringList]], int]:
...
class ListTable(Table):
option_spec: Dict[str, Callable[[str], Any]] = ...
def run(self) ->List[Node]:
...
def check_list_content(self, node: Node) ->Tuple[int, List[int]]:
...
def build_table_from_list(self, table_data: List[List[N_co]],
col_widths: List[int], header_rows: int, stub_columns: int) ->table:
...
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def align(argument: str) ->str:
...
class Table(Directive):
optional_arguments: int = ...
final_argument_whitespace: bool = ...
option_spec: Dict[str, Callable[[str], Any]] = ...
has_content: bool = ...
def make_title(self) ->Tuple[title, List[system_message]]:
...
def process_header_option(self) ->Tuple[List[Node], int]:
...
def check_table_dimensions(self, rows: List[List[N_co]], header_rows:
int, stub_columns: int) ->None:
...
def set_table_width(self, table_node: table) ->None:
...
@property
def widths(self) ->str:
...
def get_column_widths(self, max_cols: int) ->List[int]:
...
def extend_short_rows_with_empty_cells(self, columns: int, parts: Tuple
[List[N_co], List[N_co]]) ->None:
...
class RSTTable(Table):
def run(self) ->List[Node]:
...
class CSVTable(Table):
option_spec: Dict[str, Callable[[str], Any]] = ...
class DocutilsDialect(csv.Dialect):
delimiter: str = ...
quotechar: str = ...
doublequote: bool = ...
skipinitialspace: bool = ...
strict: bool = ...
lineterminator: str = ...
quoting: Any = ...
escapechar: str = ...
def __init__(self, options: Dict[str, Any]) ->None:
...
class HeaderDialect(csv.Dialect):
delimiter: str = ...
quotechar: str = ...
escapechar: str = ...
doublequote: bool = ...
skipinitialspace: bool = ...
strict: bool = ...
lineterminator: str = ...
quoting: Any = ...
def check_requirements(self) ->None:
...
def run(self) ->List[Node]:
...
def get_csv_data(self) ->Tuple[List[str], str]:
...
decode_from_csv: Callable[[str], str] = ...
encode_for_csv: Callable[[str], str] = ...
def parse_csv_data_into_rows(self, csv_data: List[str], dialect: Any,
source: str) ->Tuple[List[Tuple[int, int, int, StringList]], int]:
...
class ListTable(Table):
option_spec: Dict[str, Callable[[str], Any]] = ...
def run(self) ->List[Node]:
...
def check_list_content(self, node: Node) ->Tuple[int, List[int]]:
...
def build_table_from_list(self, table_data: List[List[N_co]],
col_widths: List[int], header_rows: int, stub_columns: int) ->table:
...
<|reserved_special_token_1|>
<|reserved_special_token_0|>
N_co = TypeVar('N_co', bound=Node, covariant=True)
__docformat__: str
def align(argument: str) ->str:
...
class Table(Directive):
optional_arguments: int = ...
final_argument_whitespace: bool = ...
option_spec: Dict[str, Callable[[str], Any]] = ...
has_content: bool = ...
def make_title(self) ->Tuple[title, List[system_message]]:
...
def process_header_option(self) ->Tuple[List[Node], int]:
...
def check_table_dimensions(self, rows: List[List[N_co]], header_rows:
int, stub_columns: int) ->None:
...
def set_table_width(self, table_node: table) ->None:
...
@property
def widths(self) ->str:
...
def get_column_widths(self, max_cols: int) ->List[int]:
...
def extend_short_rows_with_empty_cells(self, columns: int, parts: Tuple
[List[N_co], List[N_co]]) ->None:
...
class RSTTable(Table):
def run(self) ->List[Node]:
...
class CSVTable(Table):
option_spec: Dict[str, Callable[[str], Any]] = ...
class DocutilsDialect(csv.Dialect):
delimiter: str = ...
quotechar: str = ...
doublequote: bool = ...
skipinitialspace: bool = ...
strict: bool = ...
lineterminator: str = ...
quoting: Any = ...
escapechar: str = ...
def __init__(self, options: Dict[str, Any]) ->None:
...
class HeaderDialect(csv.Dialect):
delimiter: str = ...
quotechar: str = ...
escapechar: str = ...
doublequote: bool = ...
skipinitialspace: bool = ...
strict: bool = ...
lineterminator: str = ...
quoting: Any = ...
def check_requirements(self) ->None:
...
def run(self) ->List[Node]:
...
def get_csv_data(self) ->Tuple[List[str], str]:
...
decode_from_csv: Callable[[str], str] = ...
encode_for_csv: Callable[[str], str] = ...
def parse_csv_data_into_rows(self, csv_data: List[str], dialect: Any,
source: str) ->Tuple[List[Tuple[int, int, int, StringList]], int]:
...
class ListTable(Table):
option_spec: Dict[str, Callable[[str], Any]] = ...
def run(self) ->List[Node]:
...
def check_list_content(self, node: Node) ->Tuple[int, List[int]]:
...
def build_table_from_list(self, table_data: List[List[N_co]],
col_widths: List[int], header_rows: int, stub_columns: int) ->table:
...
<|reserved_special_token_1|>
# Stubs for docutils.parsers.rst.directives.tables (Python 3.6)
#
# NOTE: This dynamically typed stub was automatically generated by stubgen.
import csv
from docutils.statemachine import StringList
from docutils.nodes import Node, system_message, table, title
from docutils.parsers.rst import Directive
from typing import Any, Callable, Dict, List, Tuple, TypeVar
N_co = TypeVar('N_co', bound=Node, covariant=True)
__docformat__: str
def align(argument: str) -> str: ...
class Table(Directive):
optional_arguments: int = ...
final_argument_whitespace: bool = ...
option_spec: Dict[str, Callable[[str], Any]] = ...
has_content: bool = ...
def make_title(self) -> Tuple[title, List[system_message]]: ...
def process_header_option(self) -> Tuple[List[Node], int]: ...
def check_table_dimensions(self, rows: List[List[N_co]], header_rows: int, stub_columns: int) -> None: ...
def set_table_width(self, table_node: table) -> None: ...
@property
def widths(self) -> str: ...
def get_column_widths(self, max_cols: int) -> List[int]: ...
def extend_short_rows_with_empty_cells(self, columns: int, parts: Tuple[List[N_co], List[N_co]]) -> None: ...
class RSTTable(Table):
def run(self) -> List[Node]: ...
class CSVTable(Table):
option_spec: Dict[str, Callable[[str], Any]] = ...
class DocutilsDialect(csv.Dialect):
delimiter: str = ...
quotechar: str = ...
doublequote: bool = ...
skipinitialspace: bool = ...
strict: bool = ...
lineterminator: str = ...
quoting: Any = ...
escapechar: str = ...
def __init__(self, options: Dict[str, Any]) -> None: ...
class HeaderDialect(csv.Dialect):
delimiter: str = ...
quotechar: str = ...
escapechar: str = ...
doublequote: bool = ...
skipinitialspace: bool = ...
strict: bool = ...
lineterminator: str = ...
quoting: Any = ...
def check_requirements(self) -> None: ...
def run(self) -> List[Node]: ...
def get_csv_data(self) -> Tuple[List[str], str]: ...
decode_from_csv: Callable[[str], str] = ...
encode_for_csv: Callable[[str], str] = ...
def parse_csv_data_into_rows(self, csv_data: List[str], dialect: Any, source: str) -> Tuple[List[Tuple[int, int, int, StringList]], int]: ...
class ListTable(Table):
option_spec: Dict[str, Callable[[str], Any]] = ...
def run(self) -> List[Node]: ...
def check_list_content(self, node: Node) -> Tuple[int, List[int]]: ...
def build_table_from_list(self, table_data: List[List[N_co]], col_widths: List[int], header_rows: int, stub_columns: int) -> table: ...
|
flexible
|
{
"blob_id": "9abf2b9b90d18332ede94cf1af778e0dda54330b",
"index": 949,
"step-1": "<mask token>\n\n\nclass RSTTable(Table):\n\n def run(self) ->List[Node]:\n ...\n\n\nclass CSVTable(Table):\n option_spec: Dict[str, Callable[[str], Any]] = ...\n\n\n class DocutilsDialect(csv.Dialect):\n delimiter: str = ...\n quotechar: str = ...\n doublequote: bool = ...\n skipinitialspace: bool = ...\n strict: bool = ...\n lineterminator: str = ...\n quoting: Any = ...\n escapechar: str = ...\n\n def __init__(self, options: Dict[str, Any]) ->None:\n ...\n\n\n class HeaderDialect(csv.Dialect):\n delimiter: str = ...\n quotechar: str = ...\n escapechar: str = ...\n doublequote: bool = ...\n skipinitialspace: bool = ...\n strict: bool = ...\n lineterminator: str = ...\n quoting: Any = ...\n\n def check_requirements(self) ->None:\n ...\n\n def run(self) ->List[Node]:\n ...\n\n def get_csv_data(self) ->Tuple[List[str], str]:\n ...\n decode_from_csv: Callable[[str], str] = ...\n encode_for_csv: Callable[[str], str] = ...\n\n def parse_csv_data_into_rows(self, csv_data: List[str], dialect: Any,\n source: str) ->Tuple[List[Tuple[int, int, int, StringList]], int]:\n ...\n\n\nclass ListTable(Table):\n option_spec: Dict[str, Callable[[str], Any]] = ...\n\n def run(self) ->List[Node]:\n ...\n\n def check_list_content(self, node: Node) ->Tuple[int, List[int]]:\n ...\n\n def build_table_from_list(self, table_data: List[List[N_co]],\n col_widths: List[int], header_rows: int, stub_columns: int) ->table:\n ...\n",
"step-2": "<mask token>\n\n\nclass Table(Directive):\n optional_arguments: int = ...\n final_argument_whitespace: bool = ...\n option_spec: Dict[str, Callable[[str], Any]] = ...\n has_content: bool = ...\n\n def make_title(self) ->Tuple[title, List[system_message]]:\n ...\n\n def process_header_option(self) ->Tuple[List[Node], int]:\n ...\n\n def check_table_dimensions(self, rows: List[List[N_co]], header_rows:\n int, stub_columns: int) ->None:\n ...\n\n def set_table_width(self, table_node: table) ->None:\n ...\n\n @property\n def widths(self) ->str:\n ...\n\n def get_column_widths(self, max_cols: int) ->List[int]:\n ...\n\n def extend_short_rows_with_empty_cells(self, columns: int, parts: Tuple\n [List[N_co], List[N_co]]) ->None:\n ...\n\n\nclass RSTTable(Table):\n\n def run(self) ->List[Node]:\n ...\n\n\nclass CSVTable(Table):\n option_spec: Dict[str, Callable[[str], Any]] = ...\n\n\n class DocutilsDialect(csv.Dialect):\n delimiter: str = ...\n quotechar: str = ...\n doublequote: bool = ...\n skipinitialspace: bool = ...\n strict: bool = ...\n lineterminator: str = ...\n quoting: Any = ...\n escapechar: str = ...\n\n def __init__(self, options: Dict[str, Any]) ->None:\n ...\n\n\n class HeaderDialect(csv.Dialect):\n delimiter: str = ...\n quotechar: str = ...\n escapechar: str = ...\n doublequote: bool = ...\n skipinitialspace: bool = ...\n strict: bool = ...\n lineterminator: str = ...\n quoting: Any = ...\n\n def check_requirements(self) ->None:\n ...\n\n def run(self) ->List[Node]:\n ...\n\n def get_csv_data(self) ->Tuple[List[str], str]:\n ...\n decode_from_csv: Callable[[str], str] = ...\n encode_for_csv: Callable[[str], str] = ...\n\n def parse_csv_data_into_rows(self, csv_data: List[str], dialect: Any,\n source: str) ->Tuple[List[Tuple[int, int, int, StringList]], int]:\n ...\n\n\nclass ListTable(Table):\n option_spec: Dict[str, Callable[[str], Any]] = ...\n\n def run(self) ->List[Node]:\n ...\n\n def check_list_content(self, node: Node) ->Tuple[int, List[int]]:\n ...\n\n def build_table_from_list(self, table_data: List[List[N_co]],\n col_widths: List[int], header_rows: int, stub_columns: int) ->table:\n ...\n",
"step-3": "<mask token>\n\n\ndef align(argument: str) ->str:\n ...\n\n\nclass Table(Directive):\n optional_arguments: int = ...\n final_argument_whitespace: bool = ...\n option_spec: Dict[str, Callable[[str], Any]] = ...\n has_content: bool = ...\n\n def make_title(self) ->Tuple[title, List[system_message]]:\n ...\n\n def process_header_option(self) ->Tuple[List[Node], int]:\n ...\n\n def check_table_dimensions(self, rows: List[List[N_co]], header_rows:\n int, stub_columns: int) ->None:\n ...\n\n def set_table_width(self, table_node: table) ->None:\n ...\n\n @property\n def widths(self) ->str:\n ...\n\n def get_column_widths(self, max_cols: int) ->List[int]:\n ...\n\n def extend_short_rows_with_empty_cells(self, columns: int, parts: Tuple\n [List[N_co], List[N_co]]) ->None:\n ...\n\n\nclass RSTTable(Table):\n\n def run(self) ->List[Node]:\n ...\n\n\nclass CSVTable(Table):\n option_spec: Dict[str, Callable[[str], Any]] = ...\n\n\n class DocutilsDialect(csv.Dialect):\n delimiter: str = ...\n quotechar: str = ...\n doublequote: bool = ...\n skipinitialspace: bool = ...\n strict: bool = ...\n lineterminator: str = ...\n quoting: Any = ...\n escapechar: str = ...\n\n def __init__(self, options: Dict[str, Any]) ->None:\n ...\n\n\n class HeaderDialect(csv.Dialect):\n delimiter: str = ...\n quotechar: str = ...\n escapechar: str = ...\n doublequote: bool = ...\n skipinitialspace: bool = ...\n strict: bool = ...\n lineterminator: str = ...\n quoting: Any = ...\n\n def check_requirements(self) ->None:\n ...\n\n def run(self) ->List[Node]:\n ...\n\n def get_csv_data(self) ->Tuple[List[str], str]:\n ...\n decode_from_csv: Callable[[str], str] = ...\n encode_for_csv: Callable[[str], str] = ...\n\n def parse_csv_data_into_rows(self, csv_data: List[str], dialect: Any,\n source: str) ->Tuple[List[Tuple[int, int, int, StringList]], int]:\n ...\n\n\nclass ListTable(Table):\n option_spec: Dict[str, Callable[[str], Any]] = ...\n\n def run(self) ->List[Node]:\n ...\n\n def check_list_content(self, node: Node) ->Tuple[int, List[int]]:\n ...\n\n def build_table_from_list(self, table_data: List[List[N_co]],\n col_widths: List[int], header_rows: int, stub_columns: int) ->table:\n ...\n",
"step-4": "<mask token>\nN_co = TypeVar('N_co', bound=Node, covariant=True)\n__docformat__: str\n\n\ndef align(argument: str) ->str:\n ...\n\n\nclass Table(Directive):\n optional_arguments: int = ...\n final_argument_whitespace: bool = ...\n option_spec: Dict[str, Callable[[str], Any]] = ...\n has_content: bool = ...\n\n def make_title(self) ->Tuple[title, List[system_message]]:\n ...\n\n def process_header_option(self) ->Tuple[List[Node], int]:\n ...\n\n def check_table_dimensions(self, rows: List[List[N_co]], header_rows:\n int, stub_columns: int) ->None:\n ...\n\n def set_table_width(self, table_node: table) ->None:\n ...\n\n @property\n def widths(self) ->str:\n ...\n\n def get_column_widths(self, max_cols: int) ->List[int]:\n ...\n\n def extend_short_rows_with_empty_cells(self, columns: int, parts: Tuple\n [List[N_co], List[N_co]]) ->None:\n ...\n\n\nclass RSTTable(Table):\n\n def run(self) ->List[Node]:\n ...\n\n\nclass CSVTable(Table):\n option_spec: Dict[str, Callable[[str], Any]] = ...\n\n\n class DocutilsDialect(csv.Dialect):\n delimiter: str = ...\n quotechar: str = ...\n doublequote: bool = ...\n skipinitialspace: bool = ...\n strict: bool = ...\n lineterminator: str = ...\n quoting: Any = ...\n escapechar: str = ...\n\n def __init__(self, options: Dict[str, Any]) ->None:\n ...\n\n\n class HeaderDialect(csv.Dialect):\n delimiter: str = ...\n quotechar: str = ...\n escapechar: str = ...\n doublequote: bool = ...\n skipinitialspace: bool = ...\n strict: bool = ...\n lineterminator: str = ...\n quoting: Any = ...\n\n def check_requirements(self) ->None:\n ...\n\n def run(self) ->List[Node]:\n ...\n\n def get_csv_data(self) ->Tuple[List[str], str]:\n ...\n decode_from_csv: Callable[[str], str] = ...\n encode_for_csv: Callable[[str], str] = ...\n\n def parse_csv_data_into_rows(self, csv_data: List[str], dialect: Any,\n source: str) ->Tuple[List[Tuple[int, int, int, StringList]], int]:\n ...\n\n\nclass ListTable(Table):\n option_spec: Dict[str, Callable[[str], Any]] = ...\n\n def run(self) ->List[Node]:\n ...\n\n def check_list_content(self, node: Node) ->Tuple[int, List[int]]:\n ...\n\n def build_table_from_list(self, table_data: List[List[N_co]],\n col_widths: List[int], header_rows: int, stub_columns: int) ->table:\n ...\n",
"step-5": "# Stubs for docutils.parsers.rst.directives.tables (Python 3.6)\n#\n# NOTE: This dynamically typed stub was automatically generated by stubgen.\n\nimport csv\nfrom docutils.statemachine import StringList\nfrom docutils.nodes import Node, system_message, table, title\nfrom docutils.parsers.rst import Directive\nfrom typing import Any, Callable, Dict, List, Tuple, TypeVar\n\nN_co = TypeVar('N_co', bound=Node, covariant=True)\n\n__docformat__: str\n\ndef align(argument: str) -> str: ...\n\nclass Table(Directive):\n optional_arguments: int = ...\n final_argument_whitespace: bool = ...\n option_spec: Dict[str, Callable[[str], Any]] = ...\n has_content: bool = ...\n def make_title(self) -> Tuple[title, List[system_message]]: ...\n def process_header_option(self) -> Tuple[List[Node], int]: ...\n def check_table_dimensions(self, rows: List[List[N_co]], header_rows: int, stub_columns: int) -> None: ...\n def set_table_width(self, table_node: table) -> None: ...\n @property\n def widths(self) -> str: ...\n def get_column_widths(self, max_cols: int) -> List[int]: ...\n def extend_short_rows_with_empty_cells(self, columns: int, parts: Tuple[List[N_co], List[N_co]]) -> None: ...\n\nclass RSTTable(Table):\n def run(self) -> List[Node]: ...\n\nclass CSVTable(Table):\n option_spec: Dict[str, Callable[[str], Any]] = ...\n class DocutilsDialect(csv.Dialect):\n delimiter: str = ...\n quotechar: str = ...\n doublequote: bool = ...\n skipinitialspace: bool = ...\n strict: bool = ...\n lineterminator: str = ...\n quoting: Any = ...\n escapechar: str = ...\n def __init__(self, options: Dict[str, Any]) -> None: ...\n class HeaderDialect(csv.Dialect):\n delimiter: str = ...\n quotechar: str = ...\n escapechar: str = ...\n doublequote: bool = ...\n skipinitialspace: bool = ...\n strict: bool = ...\n lineterminator: str = ...\n quoting: Any = ...\n def check_requirements(self) -> None: ...\n def run(self) -> List[Node]: ...\n def get_csv_data(self) -> Tuple[List[str], str]: ...\n decode_from_csv: Callable[[str], str] = ...\n encode_for_csv: Callable[[str], str] = ...\n def parse_csv_data_into_rows(self, csv_data: List[str], dialect: Any, source: str) -> Tuple[List[Tuple[int, int, int, StringList]], int]: ...\n\nclass ListTable(Table):\n option_spec: Dict[str, Callable[[str], Any]] = ...\n def run(self) -> List[Node]: ...\n def check_list_content(self, node: Node) -> Tuple[int, List[int]]: ...\n def build_table_from_list(self, table_data: List[List[N_co]], col_widths: List[int], header_rows: int, stub_columns: int) -> table: ...\n",
"step-ids": [
11,
19,
20,
22,
24
]
}
|
[
11,
19,
20,
22,
24
] |
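For a sense of what the stubbed table directives above do at runtime, the sketch below renders a small csv-table through docutils' public API. It is only an illustration: the reStructuredText source, the table contents, and the choice of the 'html' writer are placeholders, and docutils must be installed.

# Illustrative sketch: exercising the csv-table directive described by the CSVTable stub.
from docutils.core import publish_string

rst_source = """
.. csv-table:: Example
   :header: "name", "value"

   "alpha", 1
   "beta", 2
"""

html = publish_string(source=rst_source, writer_name='html')
print(html.decode('utf-8')[:200])  # beginning of the rendered document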
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
with open('sample.csv') as rf:
csv_reader = csv.DictReader(rf)
with open('sample1.csv', 'w') as wf:
csv_headers = ['fname', 'lname', 'email']
if os.path.isfile('sample1.csv'):
q = input('File already exists. Do you want to overwrite?')
if q.lower() == 'yes':
csv_writer = csv.DictWriter(wf, fieldnames=csv_headers,
delimiter=',')
csv_writer.writeheader()
for l in csv_reader:
csv_writer.writerow(l)
else:
print('Please try with a different file name')
<|reserved_special_token_1|>
import csv
import os
with open('sample.csv') as rf:
csv_reader = csv.DictReader(rf)
with open('sample1.csv', 'w') as wf:
csv_headers = ['fname', 'lname', 'email']
if os.path.isfile('sample1.csv'):
q = input('File already exists. Do you want to overwrite?')
if q.lower() == 'yes':
csv_writer = csv.DictWriter(wf, fieldnames=csv_headers,
delimiter=',')
csv_writer.writeheader()
for l in csv_reader:
csv_writer.writerow(l)
else:
print('Please try with a different file name')
<|reserved_special_token_1|>
import csv
import os
with open("sample.csv") as rf:
csv_reader=csv.DictReader(rf)
with open("sample1.csv","w") as wf:
csv_headers=['fname','lname','email']
if os.path.isfile('sample1.csv'):
q=input("File already exists. Do you want to overwrite?")
if q.lower()=='yes':
csv_writer=csv.DictWriter(wf,fieldnames=csv_headers,delimiter=',')
csv_writer.writeheader()
for l in csv_reader:
csv_writer.writerow(l)
else:
print("Please try with a different file name")
|
flexible
|
{
"blob_id": "43196258b61801799b8d6b7d23f5816d84cb5dff",
"index": 7294,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nwith open('sample.csv') as rf:\n csv_reader = csv.DictReader(rf)\n with open('sample1.csv', 'w') as wf:\n csv_headers = ['fname', 'lname', 'email']\n if os.path.isfile('sample1.csv'):\n q = input('File already exists. Do you want to overwrite?')\n if q.lower() == 'yes':\n csv_writer = csv.DictWriter(wf, fieldnames=csv_headers,\n delimiter=',')\n csv_writer.writeheader()\n for l in csv_reader:\n csv_writer.writerow(l)\n else:\n print('Please try with a different file name')\n",
"step-3": "import csv\nimport os\nwith open('sample.csv') as rf:\n csv_reader = csv.DictReader(rf)\n with open('sample1.csv', 'w') as wf:\n csv_headers = ['fname', 'lname', 'email']\n if os.path.isfile('sample1.csv'):\n q = input('File already exists. Do you want to overwrite?')\n if q.lower() == 'yes':\n csv_writer = csv.DictWriter(wf, fieldnames=csv_headers,\n delimiter=',')\n csv_writer.writeheader()\n for l in csv_reader:\n csv_writer.writerow(l)\n else:\n print('Please try with a different file name')\n",
"step-4": "import csv\r\nimport os\r\nwith open(\"sample.csv\") as rf:\r\n csv_reader=csv.DictReader(rf)\r\n with open(\"sample1.csv\",\"w\") as wf:\r\n csv_headers=['fname','lname','email']\r\n if os.path.isfile('sample1.csv'):\r\n q=input(\"File already exists. Do you want to overwrite?\")\r\n if q.lower()=='yes':\r\n csv_writer=csv.DictWriter(wf,fieldnames=csv_headers,delimiter=',')\r\n csv_writer.writeheader()\r\n for l in csv_reader:\r\n \r\n csv_writer.writerow(l)\r\n else:\r\n print(\"Please try with a different file name\")",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
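Note that every variant of the copy script above opens sample1.csv in write mode before checking whether it exists, so the overwrite prompt fires only after the file has already been truncated. Below is a small sketch of the same copy with the check moved ahead of the open; the file names and field names follow the example and are otherwise placeholders.

import csv
import os

src, dst = 'sample.csv', 'sample1.csv'
if os.path.isfile(dst) and input('File already exists. Do you want to overwrite?').lower() != 'yes':
    print('Please try with a different file name')
else:
    # Only open (and truncate) the destination once overwriting is confirmed.
    with open(src, newline='') as rf, open(dst, 'w', newline='') as wf:
        csv_reader = csv.DictReader(rf)
        csv_writer = csv.DictWriter(wf, fieldnames=['fname', 'lname', 'email'], delimiter=',')
        csv_writer.writeheader()
        for row in csv_reader:
            csv_writer.writerow(row)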
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This is a collection of monkey patches and workarounds for bugs in
earlier versions of Numpy.
"""
from ...utils import minversion
__all__ = ['NUMPY_LT_1_10_4', 'NUMPY_LT_1_11',
'NUMPY_LT_1_12', 'NUMPY_LT_1_13', 'NUMPY_LT_1_14',
'NUMPY_LT_1_14_1', 'NUMPY_LT_1_14_2']
# TODO: It might also be nice to have aliases to these named for specific
# features/bugs we're checking for (ex:
# astropy.table.table._BROKEN_UNICODE_TABLE_SORT)
NUMPY_LT_1_10_4 = not minversion('numpy', '1.10.4')
NUMPY_LT_1_11 = not minversion('numpy', '1.11.0')
NUMPY_LT_1_12 = not minversion('numpy', '1.12')
NUMPY_LT_1_13 = not minversion('numpy', '1.13')
NUMPY_LT_1_14 = not minversion('numpy', '1.14')
NUMPY_LT_1_14_1 = not minversion('numpy', '1.14.1')
NUMPY_LT_1_14_2 = not minversion('numpy', '1.14.2')
|
normal
|
{
"blob_id": "9376d697158faf91f066a88e87d317e79a4d9240",
"index": 6575,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n__all__ = ['NUMPY_LT_1_10_4', 'NUMPY_LT_1_11', 'NUMPY_LT_1_12',\n 'NUMPY_LT_1_13', 'NUMPY_LT_1_14', 'NUMPY_LT_1_14_1', 'NUMPY_LT_1_14_2']\nNUMPY_LT_1_10_4 = not minversion('numpy', '1.10.4')\nNUMPY_LT_1_11 = not minversion('numpy', '1.11.0')\nNUMPY_LT_1_12 = not minversion('numpy', '1.12')\nNUMPY_LT_1_13 = not minversion('numpy', '1.13')\nNUMPY_LT_1_14 = not minversion('numpy', '1.14')\nNUMPY_LT_1_14_1 = not minversion('numpy', '1.14.1')\nNUMPY_LT_1_14_2 = not minversion('numpy', '1.14.2')\n",
"step-3": "<mask token>\nfrom ...utils import minversion\n__all__ = ['NUMPY_LT_1_10_4', 'NUMPY_LT_1_11', 'NUMPY_LT_1_12',\n 'NUMPY_LT_1_13', 'NUMPY_LT_1_14', 'NUMPY_LT_1_14_1', 'NUMPY_LT_1_14_2']\nNUMPY_LT_1_10_4 = not minversion('numpy', '1.10.4')\nNUMPY_LT_1_11 = not minversion('numpy', '1.11.0')\nNUMPY_LT_1_12 = not minversion('numpy', '1.12')\nNUMPY_LT_1_13 = not minversion('numpy', '1.13')\nNUMPY_LT_1_14 = not minversion('numpy', '1.14')\nNUMPY_LT_1_14_1 = not minversion('numpy', '1.14.1')\nNUMPY_LT_1_14_2 = not minversion('numpy', '1.14.2')\n",
"step-4": "# Licensed under a 3-clause BSD style license - see LICENSE.rst\n\"\"\"\nThis is a collection of monkey patches and workarounds for bugs in\nearlier versions of Numpy.\n\"\"\"\nfrom ...utils import minversion\n\n\n__all__ = ['NUMPY_LT_1_10_4', 'NUMPY_LT_1_11',\n 'NUMPY_LT_1_12', 'NUMPY_LT_1_13', 'NUMPY_LT_1_14',\n 'NUMPY_LT_1_14_1', 'NUMPY_LT_1_14_2']\n\n# TODO: It might also be nice to have aliases to these named for specific\n# features/bugs we're checking for (ex:\n# astropy.table.table._BROKEN_UNICODE_TABLE_SORT)\nNUMPY_LT_1_10_4 = not minversion('numpy', '1.10.4')\nNUMPY_LT_1_11 = not minversion('numpy', '1.11.0')\nNUMPY_LT_1_12 = not minversion('numpy', '1.12')\nNUMPY_LT_1_13 = not minversion('numpy', '1.13')\nNUMPY_LT_1_14 = not minversion('numpy', '1.14')\nNUMPY_LT_1_14_1 = not minversion('numpy', '1.14.1')\nNUMPY_LT_1_14_2 = not minversion('numpy', '1.14.2')\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
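Version flags such as NUMPY_LT_1_14 are consumed at call sites to branch around features that older Numpy releases lack. Below is a standalone sketch of that pattern using the third-party packaging library rather than astropy's minversion helper; the branch body is illustrative.

import numpy as np
from packaging.version import Version  # assumption: the packaging library is installed

NUMPY_LT_1_14 = Version(np.__version__) < Version('1.14')

def pretty_float(x):
    # numpy.format_float_positional only exists from numpy 1.14 onwards.
    if NUMPY_LT_1_14:
        return repr(float(x))
    return np.format_float_positional(x)

print(pretty_float(0.1))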
<|reserved_special_token_0|>
class DummyWorker:
def echo(self, message='hello', delay=0, fail=False):
sleep(delay)
if fail:
raise Exception('failed')
self.message = message
return self.message
def test_default(working_path):
task = Task(DummyWorker(), 'echo').start()
while not task.done:
status = task.status()
assert status['result'] == 'hello'
assert status['instance'].message == 'hello'
<|reserved_special_token_0|>
def test_stop_running(working_path):
task = Task(DummyWorker(), 'echo', delay=5).start()
sleep(0.5)
assert task.done == False
task.stop()
assert task.done == True
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class DummyWorker:
def echo(self, message='hello', delay=0, fail=False):
sleep(delay)
if fail:
raise Exception('failed')
self.message = message
return self.message
def test_default(working_path):
task = Task(DummyWorker(), 'echo').start()
while not task.done:
status = task.status()
assert status['result'] == 'hello'
assert status['instance'].message == 'hello'
<|reserved_special_token_0|>
def test_exception(working_path):
task = Task(DummyWorker(), 'echo', fail=True).start()
while not task.done:
status = task.status()
assert status['success'] == False
assert status['exception'].args[0] == 'failed'
def test_stop_running(working_path):
task = Task(DummyWorker(), 'echo', delay=5).start()
sleep(0.5)
assert task.done == False
task.stop()
assert task.done == True
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class DummyWorker:
def echo(self, message='hello', delay=0, fail=False):
sleep(delay)
if fail:
raise Exception('failed')
self.message = message
return self.message
def test_default(working_path):
task = Task(DummyWorker(), 'echo').start()
while not task.done:
status = task.status()
assert status['result'] == 'hello'
assert status['instance'].message == 'hello'
<|reserved_special_token_0|>
def test_kwargs(working_path):
task = Task(DummyWorker(), 'echo', message='foobar').start()
while not task.done:
status = task.status()
assert status['result'] == 'foobar'
def test_exception(working_path):
task = Task(DummyWorker(), 'echo', fail=True).start()
while not task.done:
status = task.status()
assert status['success'] == False
assert status['exception'].args[0] == 'failed'
def test_stop_running(working_path):
task = Task(DummyWorker(), 'echo', delay=5).start()
sleep(0.5)
assert task.done == False
task.stop()
assert task.done == True
def test_stop_not_running(working_path):
task = Task(DummyWorker(), 'echo').start()
while not task.done:
status = task.status()
task.stop()
assert task.done == True
<|reserved_special_token_1|>
import pytest
from time import sleep
from timeflux.helpers.background import Task
class DummyWorker:
def echo(self, message='hello', delay=0, fail=False):
sleep(delay)
if fail:
raise Exception('failed')
self.message = message
return self.message
def test_default(working_path):
task = Task(DummyWorker(), 'echo').start()
while not task.done:
status = task.status()
assert status['result'] == 'hello'
assert status['instance'].message == 'hello'
def test_args(working_path):
task = Task(DummyWorker(), 'echo', 'foobar').start()
while not task.done:
status = task.status()
assert status['result'] == 'foobar'
def test_kwargs(working_path):
task = Task(DummyWorker(), 'echo', message='foobar').start()
while not task.done:
status = task.status()
assert status['result'] == 'foobar'
def test_exception(working_path):
task = Task(DummyWorker(), 'echo', fail=True).start()
while not task.done:
status = task.status()
assert status['success'] == False
assert status['exception'].args[0] == 'failed'
def test_stop_running(working_path):
task = Task(DummyWorker(), 'echo', delay=5).start()
sleep(0.5)
assert task.done == False
task.stop()
assert task.done == True
def test_stop_not_running(working_path):
task = Task(DummyWorker(), 'echo').start()
while not task.done:
status = task.status()
task.stop()
assert task.done == True
<|reserved_special_token_1|>
import pytest
from time import sleep
from timeflux.helpers.background import Task
class DummyWorker():
def echo(self, message='hello', delay=0, fail=False):
sleep(delay)
if fail: raise Exception('failed')
self.message = message
return(self.message)
def test_default(working_path):
task = Task(DummyWorker(), 'echo').start()
while not task.done:
status = task.status()
assert status['result'] == 'hello'
assert status['instance'].message == 'hello'
def test_args(working_path):
task = Task(DummyWorker(), 'echo', 'foobar').start()
while not task.done:
status = task.status()
assert status['result'] == 'foobar'
def test_kwargs(working_path):
task = Task(DummyWorker(), 'echo', message='foobar').start()
while not task.done:
status = task.status()
assert status['result'] == 'foobar'
def test_exception(working_path):
task = Task(DummyWorker(), 'echo', fail=True).start()
while not task.done:
status = task.status()
assert status['success'] == False
assert status['exception'].args[0] == 'failed'
def test_stop_running(working_path):
task = Task(DummyWorker(), 'echo', delay=5).start()
sleep(.5)
assert task.done == False
task.stop()
assert task.done == True
def test_stop_not_running(working_path):
task = Task(DummyWorker(), 'echo').start()
while not task.done:
status = task.status()
task.stop()
assert task.done == True
|
flexible
|
{
"blob_id": "d2e46944ab05c5e8c1979101728b7b25900be342",
"index": 415,
"step-1": "<mask token>\n\n\nclass DummyWorker:\n\n def echo(self, message='hello', delay=0, fail=False):\n sleep(delay)\n if fail:\n raise Exception('failed')\n self.message = message\n return self.message\n\n\ndef test_default(working_path):\n task = Task(DummyWorker(), 'echo').start()\n while not task.done:\n status = task.status()\n assert status['result'] == 'hello'\n assert status['instance'].message == 'hello'\n\n\n<mask token>\n\n\ndef test_stop_running(working_path):\n task = Task(DummyWorker(), 'echo', delay=5).start()\n sleep(0.5)\n assert task.done == False\n task.stop()\n assert task.done == True\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass DummyWorker:\n\n def echo(self, message='hello', delay=0, fail=False):\n sleep(delay)\n if fail:\n raise Exception('failed')\n self.message = message\n return self.message\n\n\ndef test_default(working_path):\n task = Task(DummyWorker(), 'echo').start()\n while not task.done:\n status = task.status()\n assert status['result'] == 'hello'\n assert status['instance'].message == 'hello'\n\n\n<mask token>\n\n\ndef test_exception(working_path):\n task = Task(DummyWorker(), 'echo', fail=True).start()\n while not task.done:\n status = task.status()\n assert status['success'] == False\n assert status['exception'].args[0] == 'failed'\n\n\ndef test_stop_running(working_path):\n task = Task(DummyWorker(), 'echo', delay=5).start()\n sleep(0.5)\n assert task.done == False\n task.stop()\n assert task.done == True\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass DummyWorker:\n\n def echo(self, message='hello', delay=0, fail=False):\n sleep(delay)\n if fail:\n raise Exception('failed')\n self.message = message\n return self.message\n\n\ndef test_default(working_path):\n task = Task(DummyWorker(), 'echo').start()\n while not task.done:\n status = task.status()\n assert status['result'] == 'hello'\n assert status['instance'].message == 'hello'\n\n\n<mask token>\n\n\ndef test_kwargs(working_path):\n task = Task(DummyWorker(), 'echo', message='foobar').start()\n while not task.done:\n status = task.status()\n assert status['result'] == 'foobar'\n\n\ndef test_exception(working_path):\n task = Task(DummyWorker(), 'echo', fail=True).start()\n while not task.done:\n status = task.status()\n assert status['success'] == False\n assert status['exception'].args[0] == 'failed'\n\n\ndef test_stop_running(working_path):\n task = Task(DummyWorker(), 'echo', delay=5).start()\n sleep(0.5)\n assert task.done == False\n task.stop()\n assert task.done == True\n\n\ndef test_stop_not_running(working_path):\n task = Task(DummyWorker(), 'echo').start()\n while not task.done:\n status = task.status()\n task.stop()\n assert task.done == True\n",
"step-4": "import pytest\nfrom time import sleep\nfrom timeflux.helpers.background import Task\n\n\nclass DummyWorker:\n\n def echo(self, message='hello', delay=0, fail=False):\n sleep(delay)\n if fail:\n raise Exception('failed')\n self.message = message\n return self.message\n\n\ndef test_default(working_path):\n task = Task(DummyWorker(), 'echo').start()\n while not task.done:\n status = task.status()\n assert status['result'] == 'hello'\n assert status['instance'].message == 'hello'\n\n\ndef test_args(working_path):\n task = Task(DummyWorker(), 'echo', 'foobar').start()\n while not task.done:\n status = task.status()\n assert status['result'] == 'foobar'\n\n\ndef test_kwargs(working_path):\n task = Task(DummyWorker(), 'echo', message='foobar').start()\n while not task.done:\n status = task.status()\n assert status['result'] == 'foobar'\n\n\ndef test_exception(working_path):\n task = Task(DummyWorker(), 'echo', fail=True).start()\n while not task.done:\n status = task.status()\n assert status['success'] == False\n assert status['exception'].args[0] == 'failed'\n\n\ndef test_stop_running(working_path):\n task = Task(DummyWorker(), 'echo', delay=5).start()\n sleep(0.5)\n assert task.done == False\n task.stop()\n assert task.done == True\n\n\ndef test_stop_not_running(working_path):\n task = Task(DummyWorker(), 'echo').start()\n while not task.done:\n status = task.status()\n task.stop()\n assert task.done == True\n",
"step-5": "import pytest\nfrom time import sleep\nfrom timeflux.helpers.background import Task\n\nclass DummyWorker():\n def echo(self, message='hello', delay=0, fail=False):\n sleep(delay)\n if fail: raise Exception('failed')\n self.message = message\n return(self.message)\n\ndef test_default(working_path):\n task = Task(DummyWorker(), 'echo').start()\n while not task.done:\n status = task.status()\n assert status['result'] == 'hello'\n assert status['instance'].message == 'hello'\n\ndef test_args(working_path):\n task = Task(DummyWorker(), 'echo', 'foobar').start()\n while not task.done:\n status = task.status()\n assert status['result'] == 'foobar'\n\ndef test_kwargs(working_path):\n task = Task(DummyWorker(), 'echo', message='foobar').start()\n while not task.done:\n status = task.status()\n assert status['result'] == 'foobar'\n\ndef test_exception(working_path):\n task = Task(DummyWorker(), 'echo', fail=True).start()\n while not task.done:\n status = task.status()\n assert status['success'] == False\n assert status['exception'].args[0] == 'failed'\n\ndef test_stop_running(working_path):\n task = Task(DummyWorker(), 'echo', delay=5).start()\n sleep(.5)\n assert task.done == False\n task.stop()\n assert task.done == True\n\ndef test_stop_not_running(working_path):\n task = Task(DummyWorker(), 'echo').start()\n while not task.done:\n status = task.status()\n task.stop()\n assert task.done == True\n",
"step-ids": [
4,
5,
7,
9,
10
]
}
|
[
4,
5,
7,
9,
10
] |
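The tests above exercise only the public surface of timeflux.helpers.background.Task: start() returns the task, done flips when the call finishes, and status() reports the result or exception together with the worker instance. Purely for illustration, a rough thread-based stand-in with a similar surface is sketched below; it is not timeflux's actual (process-based) implementation, and its stop() cannot really interrupt a running call.

import threading

class ThreadTask:
    """Illustrative stand-in for a background task runner (not timeflux's Task)."""

    def __init__(self, instance, method, *args, **kwargs):
        self.instance = instance
        self.done = False
        self._status = {}
        self._call = getattr(instance, method)
        self._args, self._kwargs = args, kwargs

    def start(self):
        # Run the worker method in a daemon thread and return immediately.
        self._thread = threading.Thread(target=self._run, daemon=True)
        self._thread.start()
        return self

    def _run(self):
        try:
            result = self._call(*self._args, **self._kwargs)
            self._status = {'success': True, 'result': result, 'instance': self.instance}
        except Exception as exception:
            self._status = {'success': False, 'exception': exception, 'instance': self.instance}
        self.done = True

    def stop(self):
        # Threads cannot be force-killed; a process-based runner can.
        self.done = True

    def status(self):
        return self._status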
"""
Sprites - animations for objects.
"""
import config
import os
import pygame
class Sheet(object):
""" An single large image composed of smaller images used for sprite
animations. All the sprites on the sheet must be the same size. The width x
height give the sprite dimensions in pixels. The rows x columns give the
sheet dimensions in images. """
def __init__(self, width, height, rows, columns, filename):
self.rows = rows
self.columns = columns
self.width = width
self.height = height
path = os.path.join(config.SPRITE_DIRECTORY, filename)
self.surface = pygame.image.load(path)
self.surface.convert_alpha()
def get_image(self, index):
x = (index % self.columns) * self.width
y = (index / self.columns) * self.height
rect = pygame.Rect(x, y, self.width, self.height)
return self.surface.subsurface(rect)
class Sprite(object):
""" Abstract base class for all sprites. """
def __init__(self, sheet):
self.sheet = sheet
@property
def height(self):
""" The height in pixels. """
return self.sheet.height
@property
def width(self):
""" The width in pixels. """
return self.sheet.width
class CompositeSprite(Sprite):
""" A sprite that is composed of multiples sprites layered on top of each
other. The first sprite goes on the bottom, the next above that, and so
on. The sprites should all be the same size (the first sprite sets the
size; they will all be anchored to the top left corner)."""
def __init__(self, sprites):
super(CompositeSprite, self).__init__(sprites[0].sheet)
self.sprites = sprites
self.surface = pygame.Surface((self.width, self.height))
self.surface.convert_alpha()
def get_image(self, frame):
""" Return the layered image for the given animation frame number. """
self.surface.fill((0, 0, 0, 0))
for sprite in self.sprites:
self.surface.blit(sprite.get_image(frame), (0, 0))
return self.surface
class AnimatedSprite(Sprite):
""" The animation for an object. Each sprite refers to a sheet, a starting
image and a number of sequential animation images. The facings bitmask
indicates the number and type of facings which the sprite supports. The
sprite's frames attribute is the image sequence."""
# XXX: facings not handled yet
def __init__(self, sheet, num_frames, start_frame, facings=0):
super(AnimatedSprite, self).__init__(sheet)
self.num_frames = num_frames
self.facings = facings
self.frames = []
for frame in range(num_frames):
index = start_frame + frame
self.frames.append(sheet.get_image(index))
def get_image(self, frame):
""" Return the image for the given animation frame number. """
msec = frame * config.MS_PER_FRAME
frame = msec // 250
return self.frames[frame % self.num_frames]
class WaveSprite(Sprite):
""" A sprite with a single frame that animates by "rolling". """
def __init__(self, sheet, frame):
super(WaveSprite, self).__init__(sheet)
self.num_frames = self.height
# pygame's surface scroll *almost* does what I want, but it doesn't
# wrap. So I double the image and scroll up the double.
self.double = pygame.Surface((self.width, self.height * 2))
self.double.convert_alpha()
image = sheet.get_image(frame)
self.double.blit(image, (0, 0))
self.double.blit(image, (0, self.height))
def get_image(self, frame):
""" Return the image for the given animation frame number. """
rect = pygame.Rect(0, 0, self.width, self.height)
msec = frame * config.MS_PER_FRAME
frame = msec // 100
rect.y = self.height - (frame % self.height)
return self.double.subsurface(rect)
class Fade(object):
""" A shaded semi-transparent surface. """
def __init__(self, width, height):
self.surf = pygame.Surface((width, height), flags=pygame.SRCALPHA)
self.surf.convert_alpha()
self.surf.fill(pygame.Color(0, 0, 0, 128))
|
normal
|
{
"blob_id": "080aa8b99cdded7a947880a1c3399f68b28ae44d",
"index": 6318,
"step-1": "<mask token>\n\n\nclass Sprite(object):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n\nclass CompositeSprite(Sprite):\n \"\"\" A sprite that is composed of multiples sprites layered on top of each\n other. The first sprite goes on the bottom, the next above that, and so\n on. The sprites should all be the same size (the first sprite sets the\n size; they will all be anchored to the top left corner).\"\"\"\n\n def __init__(self, sprites):\n super(CompositeSprite, self).__init__(sprites[0].sheet)\n self.sprites = sprites\n self.surface = pygame.Surface((self.width, self.height))\n self.surface.convert_alpha()\n\n def get_image(self, frame):\n \"\"\" Return the layered image for the given animation frame number. \"\"\"\n self.surface.fill((0, 0, 0, 0))\n for sprite in self.sprites:\n self.surface.blit(sprite.get_image(frame), (0, 0))\n return self.surface\n\n\nclass AnimatedSprite(Sprite):\n \"\"\" The animation for an object. Each sprite refers to a sheet, a starting\n image and a number of sequential animation images. The facings bitmask\n indicates the number and type of facings which the sprite supports. The\n sprite's frames attribute is the image sequence.\"\"\"\n\n def __init__(self, sheet, num_frames, start_frame, facings=0):\n super(AnimatedSprite, self).__init__(sheet)\n self.num_frames = num_frames\n self.facings = facings\n self.frames = []\n for frame in range(num_frames):\n index = start_frame + frame\n self.frames.append(sheet.get_image(index))\n\n def get_image(self, frame):\n \"\"\" Return the image for the given animation frame number. \"\"\"\n msec = frame * config.MS_PER_FRAME\n frame = msec // 250\n return self.frames[frame % self.num_frames]\n\n\nclass WaveSprite(Sprite):\n \"\"\" A sprite with a single frame that animates by \"rolling\". \"\"\"\n\n def __init__(self, sheet, frame):\n super(WaveSprite, self).__init__(sheet)\n self.num_frames = self.height\n self.double = pygame.Surface((self.width, self.height * 2))\n self.double.convert_alpha()\n image = sheet.get_image(frame)\n self.double.blit(image, (0, 0))\n self.double.blit(image, (0, self.height))\n\n def get_image(self, frame):\n \"\"\" Return the image for the given animation frame number. \"\"\"\n rect = pygame.Rect(0, 0, self.width, self.height)\n msec = frame * config.MS_PER_FRAME\n frame = msec // 100\n rect.y = self.height - frame % self.height\n return self.double.subsurface(rect)\n\n\nclass Fade(object):\n \"\"\" A shaded semi-transparent surface. \"\"\"\n\n def __init__(self, width, height):\n self.surf = pygame.Surface((width, height), flags=pygame.SRCALPHA)\n self.surf.convert_alpha()\n self.surf.fill(pygame.Color(0, 0, 0, 128))\n",
"step-2": "<mask token>\n\n\nclass Sprite(object):\n <mask token>\n\n def __init__(self, sheet):\n self.sheet = sheet\n <mask token>\n\n @property\n def width(self):\n \"\"\" The width in pixels. \"\"\"\n return self.sheet.width\n\n\nclass CompositeSprite(Sprite):\n \"\"\" A sprite that is composed of multiples sprites layered on top of each\n other. The first sprite goes on the bottom, the next above that, and so\n on. The sprites should all be the same size (the first sprite sets the\n size; they will all be anchored to the top left corner).\"\"\"\n\n def __init__(self, sprites):\n super(CompositeSprite, self).__init__(sprites[0].sheet)\n self.sprites = sprites\n self.surface = pygame.Surface((self.width, self.height))\n self.surface.convert_alpha()\n\n def get_image(self, frame):\n \"\"\" Return the layered image for the given animation frame number. \"\"\"\n self.surface.fill((0, 0, 0, 0))\n for sprite in self.sprites:\n self.surface.blit(sprite.get_image(frame), (0, 0))\n return self.surface\n\n\nclass AnimatedSprite(Sprite):\n \"\"\" The animation for an object. Each sprite refers to a sheet, a starting\n image and a number of sequential animation images. The facings bitmask\n indicates the number and type of facings which the sprite supports. The\n sprite's frames attribute is the image sequence.\"\"\"\n\n def __init__(self, sheet, num_frames, start_frame, facings=0):\n super(AnimatedSprite, self).__init__(sheet)\n self.num_frames = num_frames\n self.facings = facings\n self.frames = []\n for frame in range(num_frames):\n index = start_frame + frame\n self.frames.append(sheet.get_image(index))\n\n def get_image(self, frame):\n \"\"\" Return the image for the given animation frame number. \"\"\"\n msec = frame * config.MS_PER_FRAME\n frame = msec // 250\n return self.frames[frame % self.num_frames]\n\n\nclass WaveSprite(Sprite):\n \"\"\" A sprite with a single frame that animates by \"rolling\". \"\"\"\n\n def __init__(self, sheet, frame):\n super(WaveSprite, self).__init__(sheet)\n self.num_frames = self.height\n self.double = pygame.Surface((self.width, self.height * 2))\n self.double.convert_alpha()\n image = sheet.get_image(frame)\n self.double.blit(image, (0, 0))\n self.double.blit(image, (0, self.height))\n\n def get_image(self, frame):\n \"\"\" Return the image for the given animation frame number. \"\"\"\n rect = pygame.Rect(0, 0, self.width, self.height)\n msec = frame * config.MS_PER_FRAME\n frame = msec // 100\n rect.y = self.height - frame % self.height\n return self.double.subsurface(rect)\n\n\nclass Fade(object):\n \"\"\" A shaded semi-transparent surface. \"\"\"\n\n def __init__(self, width, height):\n self.surf = pygame.Surface((width, height), flags=pygame.SRCALPHA)\n self.surf.convert_alpha()\n self.surf.fill(pygame.Color(0, 0, 0, 128))\n",
"step-3": "<mask token>\n\n\nclass Sprite(object):\n <mask token>\n\n def __init__(self, sheet):\n self.sheet = sheet\n\n @property\n def height(self):\n \"\"\" The height in pixels. \"\"\"\n return self.sheet.height\n\n @property\n def width(self):\n \"\"\" The width in pixels. \"\"\"\n return self.sheet.width\n\n\nclass CompositeSprite(Sprite):\n \"\"\" A sprite that is composed of multiples sprites layered on top of each\n other. The first sprite goes on the bottom, the next above that, and so\n on. The sprites should all be the same size (the first sprite sets the\n size; they will all be anchored to the top left corner).\"\"\"\n\n def __init__(self, sprites):\n super(CompositeSprite, self).__init__(sprites[0].sheet)\n self.sprites = sprites\n self.surface = pygame.Surface((self.width, self.height))\n self.surface.convert_alpha()\n\n def get_image(self, frame):\n \"\"\" Return the layered image for the given animation frame number. \"\"\"\n self.surface.fill((0, 0, 0, 0))\n for sprite in self.sprites:\n self.surface.blit(sprite.get_image(frame), (0, 0))\n return self.surface\n\n\nclass AnimatedSprite(Sprite):\n \"\"\" The animation for an object. Each sprite refers to a sheet, a starting\n image and a number of sequential animation images. The facings bitmask\n indicates the number and type of facings which the sprite supports. The\n sprite's frames attribute is the image sequence.\"\"\"\n\n def __init__(self, sheet, num_frames, start_frame, facings=0):\n super(AnimatedSprite, self).__init__(sheet)\n self.num_frames = num_frames\n self.facings = facings\n self.frames = []\n for frame in range(num_frames):\n index = start_frame + frame\n self.frames.append(sheet.get_image(index))\n\n def get_image(self, frame):\n \"\"\" Return the image for the given animation frame number. \"\"\"\n msec = frame * config.MS_PER_FRAME\n frame = msec // 250\n return self.frames[frame % self.num_frames]\n\n\nclass WaveSprite(Sprite):\n \"\"\" A sprite with a single frame that animates by \"rolling\". \"\"\"\n\n def __init__(self, sheet, frame):\n super(WaveSprite, self).__init__(sheet)\n self.num_frames = self.height\n self.double = pygame.Surface((self.width, self.height * 2))\n self.double.convert_alpha()\n image = sheet.get_image(frame)\n self.double.blit(image, (0, 0))\n self.double.blit(image, (0, self.height))\n\n def get_image(self, frame):\n \"\"\" Return the image for the given animation frame number. \"\"\"\n rect = pygame.Rect(0, 0, self.width, self.height)\n msec = frame * config.MS_PER_FRAME\n frame = msec // 100\n rect.y = self.height - frame % self.height\n return self.double.subsurface(rect)\n\n\nclass Fade(object):\n \"\"\" A shaded semi-transparent surface. \"\"\"\n\n def __init__(self, width, height):\n self.surf = pygame.Surface((width, height), flags=pygame.SRCALPHA)\n self.surf.convert_alpha()\n self.surf.fill(pygame.Color(0, 0, 0, 128))\n",
"step-4": "<mask token>\n\n\nclass Sheet(object):\n <mask token>\n <mask token>\n <mask token>\n\n\nclass Sprite(object):\n \"\"\" Abstract base class for all sprites. \"\"\"\n\n def __init__(self, sheet):\n self.sheet = sheet\n\n @property\n def height(self):\n \"\"\" The height in pixels. \"\"\"\n return self.sheet.height\n\n @property\n def width(self):\n \"\"\" The width in pixels. \"\"\"\n return self.sheet.width\n\n\nclass CompositeSprite(Sprite):\n \"\"\" A sprite that is composed of multiples sprites layered on top of each\n other. The first sprite goes on the bottom, the next above that, and so\n on. The sprites should all be the same size (the first sprite sets the\n size; they will all be anchored to the top left corner).\"\"\"\n\n def __init__(self, sprites):\n super(CompositeSprite, self).__init__(sprites[0].sheet)\n self.sprites = sprites\n self.surface = pygame.Surface((self.width, self.height))\n self.surface.convert_alpha()\n\n def get_image(self, frame):\n \"\"\" Return the layered image for the given animation frame number. \"\"\"\n self.surface.fill((0, 0, 0, 0))\n for sprite in self.sprites:\n self.surface.blit(sprite.get_image(frame), (0, 0))\n return self.surface\n\n\nclass AnimatedSprite(Sprite):\n \"\"\" The animation for an object. Each sprite refers to a sheet, a starting\n image and a number of sequential animation images. The facings bitmask\n indicates the number and type of facings which the sprite supports. The\n sprite's frames attribute is the image sequence.\"\"\"\n\n def __init__(self, sheet, num_frames, start_frame, facings=0):\n super(AnimatedSprite, self).__init__(sheet)\n self.num_frames = num_frames\n self.facings = facings\n self.frames = []\n for frame in range(num_frames):\n index = start_frame + frame\n self.frames.append(sheet.get_image(index))\n\n def get_image(self, frame):\n \"\"\" Return the image for the given animation frame number. \"\"\"\n msec = frame * config.MS_PER_FRAME\n frame = msec // 250\n return self.frames[frame % self.num_frames]\n\n\nclass WaveSprite(Sprite):\n \"\"\" A sprite with a single frame that animates by \"rolling\". \"\"\"\n\n def __init__(self, sheet, frame):\n super(WaveSprite, self).__init__(sheet)\n self.num_frames = self.height\n self.double = pygame.Surface((self.width, self.height * 2))\n self.double.convert_alpha()\n image = sheet.get_image(frame)\n self.double.blit(image, (0, 0))\n self.double.blit(image, (0, self.height))\n\n def get_image(self, frame):\n \"\"\" Return the image for the given animation frame number. \"\"\"\n rect = pygame.Rect(0, 0, self.width, self.height)\n msec = frame * config.MS_PER_FRAME\n frame = msec // 100\n rect.y = self.height - frame % self.height\n return self.double.subsurface(rect)\n\n\nclass Fade(object):\n \"\"\" A shaded semi-transparent surface. \"\"\"\n\n def __init__(self, width, height):\n self.surf = pygame.Surface((width, height), flags=pygame.SRCALPHA)\n self.surf.convert_alpha()\n self.surf.fill(pygame.Color(0, 0, 0, 128))\n",
"step-5": "\"\"\"\nSprites - animations for objects.\n\"\"\"\nimport config\nimport os\nimport pygame\n\n\nclass Sheet(object):\n \"\"\" An single large image composed of smaller images used for sprite\n animations. All the sprites on the sheet must be the same size. The width x\n height give the sprite dimensions in pixels. The rows x columns give the\n sheet dimensions in images. \"\"\"\n\n def __init__(self, width, height, rows, columns, filename):\n self.rows = rows\n self.columns = columns\n self.width = width\n self.height = height\n path = os.path.join(config.SPRITE_DIRECTORY, filename)\n self.surface = pygame.image.load(path)\n self.surface.convert_alpha()\n\n def get_image(self, index):\n x = (index % self.columns) * self.width\n y = (index / self.columns) * self.height\n rect = pygame.Rect(x, y, self.width, self.height)\n return self.surface.subsurface(rect)\n\n\nclass Sprite(object):\n \"\"\" Abstract base class for all sprites. \"\"\"\n\n def __init__(self, sheet):\n self.sheet = sheet\n\n @property\n def height(self):\n \"\"\" The height in pixels. \"\"\"\n return self.sheet.height\n\n @property\n def width(self):\n \"\"\" The width in pixels. \"\"\"\n return self.sheet.width\n\nclass CompositeSprite(Sprite):\n \"\"\" A sprite that is composed of multiples sprites layered on top of each\n other. The first sprite goes on the bottom, the next above that, and so\n on. The sprites should all be the same size (the first sprite sets the\n size; they will all be anchored to the top left corner).\"\"\"\n\n def __init__(self, sprites):\n super(CompositeSprite, self).__init__(sprites[0].sheet)\n self.sprites = sprites\n self.surface = pygame.Surface((self.width, self.height))\n self.surface.convert_alpha()\n\n def get_image(self, frame):\n \"\"\" Return the layered image for the given animation frame number. \"\"\"\n self.surface.fill((0, 0, 0, 0))\n for sprite in self.sprites:\n self.surface.blit(sprite.get_image(frame), (0, 0))\n return self.surface\n \n\nclass AnimatedSprite(Sprite):\n \"\"\" The animation for an object. Each sprite refers to a sheet, a starting\n image and a number of sequential animation images. The facings bitmask\n indicates the number and type of facings which the sprite supports. The\n sprite's frames attribute is the image sequence.\"\"\"\n\n # XXX: facings not handled yet\n def __init__(self, sheet, num_frames, start_frame, facings=0):\n super(AnimatedSprite, self).__init__(sheet)\n self.num_frames = num_frames\n self.facings = facings\n self.frames = []\n for frame in range(num_frames):\n index = start_frame + frame\n self.frames.append(sheet.get_image(index))\n \n def get_image(self, frame):\n \"\"\" Return the image for the given animation frame number. \"\"\"\n msec = frame * config.MS_PER_FRAME\n frame = msec // 250\n return self.frames[frame % self.num_frames]\n\n\nclass WaveSprite(Sprite):\n \"\"\" A sprite with a single frame that animates by \"rolling\". \"\"\"\n\n def __init__(self, sheet, frame):\n super(WaveSprite, self).__init__(sheet)\n self.num_frames = self.height\n\n # pygame's surface scroll *almost* does what I want, but it doesn't\n # wrap. So I double the image and scroll up the double.\n self.double = pygame.Surface((self.width, self.height * 2))\n self.double.convert_alpha()\n image = sheet.get_image(frame)\n self.double.blit(image, (0, 0))\n self.double.blit(image, (0, self.height))\n\n def get_image(self, frame):\n \"\"\" Return the image for the given animation frame number. 
\"\"\"\n rect = pygame.Rect(0, 0, self.width, self.height)\n msec = frame * config.MS_PER_FRAME\n frame = msec // 100\n rect.y = self.height - (frame % self.height)\n return self.double.subsurface(rect)\n\n \nclass Fade(object):\n \"\"\" A shaded semi-transparent surface. \"\"\"\n def __init__(self, width, height):\n self.surf = pygame.Surface((width, height), flags=pygame.SRCALPHA)\n self.surf.convert_alpha()\n self.surf.fill(pygame.Color(0, 0, 0, 128))\n",
"step-ids": [
16,
18,
19,
21,
26
]
}
|
[
16,
18,
19,
21,
26
] |
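A short usage sketch for the sprite classes above. The sheet file name and geometry are placeholders, and config is assumed to provide SPRITE_DIRECTORY and MS_PER_FRAME, as the module already requires.

import pygame
import config

pygame.init()
screen = pygame.display.set_mode((320, 240))
clock = pygame.time.Clock()

# 8 columns x 4 rows of 32x32 images; 'water.png' is a placeholder file name.
sheet = Sheet(32, 32, 4, 8, 'water.png')
wave = WaveSprite(sheet, frame=0)
walker = AnimatedSprite(sheet, num_frames=4, start_frame=8)

for frame in range(120):  # short, bounded demo loop
    screen.fill((0, 0, 0))
    screen.blit(wave.get_image(frame), (0, 0))
    screen.blit(walker.get_image(frame), (64, 0))
    pygame.display.flip()
    clock.tick(1000 // config.MS_PER_FRAME)

pygame.quit()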
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
with open(path.join(this_directory, 'README.md'), encoding='utf-8') as f:
long_description = f.read()
setup(name='SumoSound', packages=['SumoSound'], version='1.0.2', license=
'MIT', description=
'A python library to add 3D sound to a Sumo traffic simulation.',
long_description=long_description, long_description_content_type=
'text/markdown', author='Patrick Malcolm', author_email=
'[email protected]', url=
'https://github.com/patmalcolm91/SumoSound', download_url=
'https://github.com/patmalcolm91/SumoSound/archive/v_1.0.2.tar.gz',
keywords=['sumo', 'TraCI', 'sound', 'sound effects', '3D sound',
'OpenAL', 'traffic'], install_requires=['pyopenal'], classifiers=[
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Science/Research',
'Topic :: Scientific/Engineering',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8'], package_data={'SumoSound': [
'stock_sounds/*.wav']})
<|reserved_special_token_1|>
<|reserved_special_token_0|>
this_directory = path.abspath(path.dirname(__file__))
with open(path.join(this_directory, 'README.md'), encoding='utf-8') as f:
long_description = f.read()
setup(name='SumoSound', packages=['SumoSound'], version='1.0.2', license=
'MIT', description=
'A python library to add 3D sound to a Sumo traffic simulation.',
long_description=long_description, long_description_content_type=
'text/markdown', author='Patrick Malcolm', author_email=
'[email protected]', url=
'https://github.com/patmalcolm91/SumoSound', download_url=
'https://github.com/patmalcolm91/SumoSound/archive/v_1.0.2.tar.gz',
keywords=['sumo', 'TraCI', 'sound', 'sound effects', '3D sound',
'OpenAL', 'traffic'], install_requires=['pyopenal'], classifiers=[
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Science/Research',
'Topic :: Scientific/Engineering',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8'], package_data={'SumoSound': [
'stock_sounds/*.wav']})
<|reserved_special_token_1|>
from setuptools import setup
from os import path
this_directory = path.abspath(path.dirname(__file__))
with open(path.join(this_directory, 'README.md'), encoding='utf-8') as f:
long_description = f.read()
setup(name='SumoSound', packages=['SumoSound'], version='1.0.2', license=
'MIT', description=
'A python library to add 3D sound to a Sumo traffic simulation.',
long_description=long_description, long_description_content_type=
'text/markdown', author='Patrick Malcolm', author_email=
'[email protected]', url=
'https://github.com/patmalcolm91/SumoSound', download_url=
'https://github.com/patmalcolm91/SumoSound/archive/v_1.0.2.tar.gz',
keywords=['sumo', 'TraCI', 'sound', 'sound effects', '3D sound',
'OpenAL', 'traffic'], install_requires=['pyopenal'], classifiers=[
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Science/Research',
'Topic :: Scientific/Engineering',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8'], package_data={'SumoSound': [
'stock_sounds/*.wav']})
<|reserved_special_token_1|>
from setuptools import setup
from os import path
this_directory = path.abspath(path.dirname(__file__))
with open(path.join(this_directory, 'README.md'), encoding='utf-8') as f:
long_description = f.read()
setup(
name='SumoSound',
packages=['SumoSound'],
version='1.0.2',
license='MIT',
description='A python library to add 3D sound to a Sumo traffic simulation.',
long_description=long_description,
long_description_content_type='text/markdown',
author='Patrick Malcolm',
author_email='[email protected]',
url='https://github.com/patmalcolm91/SumoSound',
download_url='https://github.com/patmalcolm91/SumoSound/archive/v_1.0.2.tar.gz',
keywords=['sumo', 'TraCI', 'sound', 'sound effects', '3D sound', 'OpenAL', 'traffic'],
install_requires=[
'pyopenal',
],
classifiers=[
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Science/Research',
'Topic :: Scientific/Engineering',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8'
],
package_data={'SumoSound': ['stock_sounds/*.wav']}
)
|
flexible
|
{
"blob_id": "81c9cabaa611f8e884708d535f0b99ff83ec1c0d",
"index": 8319,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nwith open(path.join(this_directory, 'README.md'), encoding='utf-8') as f:\n long_description = f.read()\nsetup(name='SumoSound', packages=['SumoSound'], version='1.0.2', license=\n 'MIT', description=\n 'A python library to add 3D sound to a Sumo traffic simulation.',\n long_description=long_description, long_description_content_type=\n 'text/markdown', author='Patrick Malcolm', author_email=\n '[email protected]', url=\n 'https://github.com/patmalcolm91/SumoSound', download_url=\n 'https://github.com/patmalcolm91/SumoSound/archive/v_1.0.2.tar.gz',\n keywords=['sumo', 'TraCI', 'sound', 'sound effects', '3D sound',\n 'OpenAL', 'traffic'], install_requires=['pyopenal'], classifiers=[\n 'Development Status :: 5 - Production/Stable',\n 'Intended Audience :: Science/Research',\n 'Topic :: Scientific/Engineering',\n 'License :: OSI Approved :: MIT License',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.4',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n 'Programming Language :: Python :: 3.8'], package_data={'SumoSound': [\n 'stock_sounds/*.wav']})\n",
"step-3": "<mask token>\nthis_directory = path.abspath(path.dirname(__file__))\nwith open(path.join(this_directory, 'README.md'), encoding='utf-8') as f:\n long_description = f.read()\nsetup(name='SumoSound', packages=['SumoSound'], version='1.0.2', license=\n 'MIT', description=\n 'A python library to add 3D sound to a Sumo traffic simulation.',\n long_description=long_description, long_description_content_type=\n 'text/markdown', author='Patrick Malcolm', author_email=\n '[email protected]', url=\n 'https://github.com/patmalcolm91/SumoSound', download_url=\n 'https://github.com/patmalcolm91/SumoSound/archive/v_1.0.2.tar.gz',\n keywords=['sumo', 'TraCI', 'sound', 'sound effects', '3D sound',\n 'OpenAL', 'traffic'], install_requires=['pyopenal'], classifiers=[\n 'Development Status :: 5 - Production/Stable',\n 'Intended Audience :: Science/Research',\n 'Topic :: Scientific/Engineering',\n 'License :: OSI Approved :: MIT License',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.4',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n 'Programming Language :: Python :: 3.8'], package_data={'SumoSound': [\n 'stock_sounds/*.wav']})\n",
"step-4": "from setuptools import setup\nfrom os import path\nthis_directory = path.abspath(path.dirname(__file__))\nwith open(path.join(this_directory, 'README.md'), encoding='utf-8') as f:\n long_description = f.read()\nsetup(name='SumoSound', packages=['SumoSound'], version='1.0.2', license=\n 'MIT', description=\n 'A python library to add 3D sound to a Sumo traffic simulation.',\n long_description=long_description, long_description_content_type=\n 'text/markdown', author='Patrick Malcolm', author_email=\n '[email protected]', url=\n 'https://github.com/patmalcolm91/SumoSound', download_url=\n 'https://github.com/patmalcolm91/SumoSound/archive/v_1.0.2.tar.gz',\n keywords=['sumo', 'TraCI', 'sound', 'sound effects', '3D sound',\n 'OpenAL', 'traffic'], install_requires=['pyopenal'], classifiers=[\n 'Development Status :: 5 - Production/Stable',\n 'Intended Audience :: Science/Research',\n 'Topic :: Scientific/Engineering',\n 'License :: OSI Approved :: MIT License',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.4',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n 'Programming Language :: Python :: 3.8'], package_data={'SumoSound': [\n 'stock_sounds/*.wav']})\n",
"step-5": "from setuptools import setup\nfrom os import path\n\nthis_directory = path.abspath(path.dirname(__file__))\nwith open(path.join(this_directory, 'README.md'), encoding='utf-8') as f:\n long_description = f.read()\n\nsetup(\n name='SumoSound',\n packages=['SumoSound'],\n version='1.0.2',\n license='MIT',\n description='A python library to add 3D sound to a Sumo traffic simulation.',\n long_description=long_description,\n long_description_content_type='text/markdown',\n author='Patrick Malcolm',\n author_email='[email protected]',\n url='https://github.com/patmalcolm91/SumoSound',\n download_url='https://github.com/patmalcolm91/SumoSound/archive/v_1.0.2.tar.gz',\n keywords=['sumo', 'TraCI', 'sound', 'sound effects', '3D sound', 'OpenAL', 'traffic'],\n install_requires=[\n 'pyopenal',\n ],\n classifiers=[\n 'Development Status :: 5 - Production/Stable',\n 'Intended Audience :: Science/Research',\n 'Topic :: Scientific/Engineering',\n 'License :: OSI Approved :: MIT License',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.4',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n 'Programming Language :: Python :: 3.8'\n ],\n package_data={'SumoSound': ['stock_sounds/*.wav']}\n)\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Migration(migrations.Migration):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Migration(migrations.Migration):
initial = True
dependencies = []
operations = [migrations.CreateModel(name='News', fields=[('id', models
.AutoField(auto_created=True, primary_key=True, serialize=False,
verbose_name='ID')), ('title', models.TextField(verbose_name=
'Title')), ('body', models.TextField(verbose_name='Body')), (
'view_count', models.IntegerField(verbose_name='View Count')), (
'root_category', models.CharField(max_length=64, verbose_name=
'Root Category')), ('category', models.CharField(max_length=64,
verbose_name='Category')), ('image', models.TextField(verbose_name=
'Image')), ('publish_time', models.TimeField(verbose_name=
'Publish Time')), ('publish_date', models.DateField(verbose_name=
'Date')), ('lead', models.TextField(verbose_name='Lead Text'))])]
<|reserved_special_token_1|>
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = []
operations = [migrations.CreateModel(name='News', fields=[('id', models
.AutoField(auto_created=True, primary_key=True, serialize=False,
verbose_name='ID')), ('title', models.TextField(verbose_name=
'Title')), ('body', models.TextField(verbose_name='Body')), (
'view_count', models.IntegerField(verbose_name='View Count')), (
'root_category', models.CharField(max_length=64, verbose_name=
'Root Category')), ('category', models.CharField(max_length=64,
verbose_name='Category')), ('image', models.TextField(verbose_name=
'Image')), ('publish_time', models.TimeField(verbose_name=
'Publish Time')), ('publish_date', models.DateField(verbose_name=
'Date')), ('lead', models.TextField(verbose_name='Lead Text'))])]
<|reserved_special_token_1|>
# Generated by Django 2.2.5 on 2020-01-05 04:05
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='News',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.TextField(verbose_name='Title')),
('body', models.TextField(verbose_name='Body')),
('view_count', models.IntegerField(verbose_name='View Count')),
('root_category', models.CharField(max_length=64, verbose_name='Root Category')),
('category', models.CharField(max_length=64, verbose_name='Category')),
('image', models.TextField(verbose_name='Image')),
('publish_time', models.TimeField(verbose_name='Publish Time')),
('publish_date', models.DateField(verbose_name='Date')),
('lead', models.TextField(verbose_name='Lead Text')),
],
),
]
|
flexible
|
{
"blob_id": "d40e1cfa2ef43f698e846c25ac9f5471d69e71a0",
"index": 5253,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass Migration(migrations.Migration):\n <mask token>\n <mask token>\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass Migration(migrations.Migration):\n initial = True\n dependencies = []\n operations = [migrations.CreateModel(name='News', fields=[('id', models\n .AutoField(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')), ('title', models.TextField(verbose_name=\n 'Title')), ('body', models.TextField(verbose_name='Body')), (\n 'view_count', models.IntegerField(verbose_name='View Count')), (\n 'root_category', models.CharField(max_length=64, verbose_name=\n 'Root Category')), ('category', models.CharField(max_length=64,\n verbose_name='Category')), ('image', models.TextField(verbose_name=\n 'Image')), ('publish_time', models.TimeField(verbose_name=\n 'Publish Time')), ('publish_date', models.DateField(verbose_name=\n 'Date')), ('lead', models.TextField(verbose_name='Lead Text'))])]\n",
"step-4": "from django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n initial = True\n dependencies = []\n operations = [migrations.CreateModel(name='News', fields=[('id', models\n .AutoField(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')), ('title', models.TextField(verbose_name=\n 'Title')), ('body', models.TextField(verbose_name='Body')), (\n 'view_count', models.IntegerField(verbose_name='View Count')), (\n 'root_category', models.CharField(max_length=64, verbose_name=\n 'Root Category')), ('category', models.CharField(max_length=64,\n verbose_name='Category')), ('image', models.TextField(verbose_name=\n 'Image')), ('publish_time', models.TimeField(verbose_name=\n 'Publish Time')), ('publish_date', models.DateField(verbose_name=\n 'Date')), ('lead', models.TextField(verbose_name='Lead Text'))])]\n",
"step-5": "# Generated by Django 2.2.5 on 2020-01-05 04:05\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n initial = True\n\n dependencies = [\n ]\n\n operations = [\n migrations.CreateModel(\n name='News',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('title', models.TextField(verbose_name='Title')),\n ('body', models.TextField(verbose_name='Body')),\n ('view_count', models.IntegerField(verbose_name='View Count')),\n ('root_category', models.CharField(max_length=64, verbose_name='Root Category')),\n ('category', models.CharField(max_length=64, verbose_name='Category')),\n ('image', models.TextField(verbose_name='Image')),\n ('publish_time', models.TimeField(verbose_name='Publish Time')),\n ('publish_date', models.DateField(verbose_name='Date')),\n ('lead', models.TextField(verbose_name='Lead Text')),\n ],\n ),\n ]\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
from context import vicemergencyapi
from vicemergencyapi.vicemergency import VicEmergency
from geographiclib.geodesic import Geodesic
from shapely.geometry import Point
def geoDistance(p1, p2):
return Geodesic.WGS84.Inverse(p1.y, p1.x, p2.y, p2.x)['s12']
melbourne = Point(144.962272, -37.812274)
def compare(f):
return geoDistance(f.getLocation(), melbourne)
for i in sorted(VicEmergency.getItems(), key=compare):
print(i.properties["sourceTitle"])
print(i.properties["category1"])
print(i.properties["location"])
print("{:.0f}km".format(geoDistance(i.getLocation(), melbourne) / 1000))
print("============================")
|
normal
|
{
"blob_id": "920f00632599945397364dd0f52f21234e17f9ef",
"index": 9445,
"step-1": "<mask token>\n\n\ndef geoDistance(p1, p2):\n return Geodesic.WGS84.Inverse(p1.y, p1.x, p2.y, p2.x)['s12']\n\n\n<mask token>\n\n\ndef compare(f):\n return geoDistance(f.getLocation(), melbourne)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef geoDistance(p1, p2):\n return Geodesic.WGS84.Inverse(p1.y, p1.x, p2.y, p2.x)['s12']\n\n\n<mask token>\n\n\ndef compare(f):\n return geoDistance(f.getLocation(), melbourne)\n\n\nfor i in sorted(VicEmergency.getItems(), key=compare):\n print(i.properties['sourceTitle'])\n print(i.properties['category1'])\n print(i.properties['location'])\n print('{:.0f}km'.format(geoDistance(i.getLocation(), melbourne) / 1000))\n print('============================')\n",
"step-3": "<mask token>\n\n\ndef geoDistance(p1, p2):\n return Geodesic.WGS84.Inverse(p1.y, p1.x, p2.y, p2.x)['s12']\n\n\nmelbourne = Point(144.962272, -37.812274)\n\n\ndef compare(f):\n return geoDistance(f.getLocation(), melbourne)\n\n\nfor i in sorted(VicEmergency.getItems(), key=compare):\n print(i.properties['sourceTitle'])\n print(i.properties['category1'])\n print(i.properties['location'])\n print('{:.0f}km'.format(geoDistance(i.getLocation(), melbourne) / 1000))\n print('============================')\n",
"step-4": "from context import vicemergencyapi\nfrom vicemergencyapi.vicemergency import VicEmergency\nfrom geographiclib.geodesic import Geodesic\nfrom shapely.geometry import Point\n\n\ndef geoDistance(p1, p2):\n return Geodesic.WGS84.Inverse(p1.y, p1.x, p2.y, p2.x)['s12']\n\n\nmelbourne = Point(144.962272, -37.812274)\n\n\ndef compare(f):\n return geoDistance(f.getLocation(), melbourne)\n\n\nfor i in sorted(VicEmergency.getItems(), key=compare):\n print(i.properties['sourceTitle'])\n print(i.properties['category1'])\n print(i.properties['location'])\n print('{:.0f}km'.format(geoDistance(i.getLocation(), melbourne) / 1000))\n print('============================')\n",
"step-5": "from context import vicemergencyapi\nfrom vicemergencyapi.vicemergency import VicEmergency\n\nfrom geographiclib.geodesic import Geodesic\nfrom shapely.geometry import Point\n\n\ndef geoDistance(p1, p2):\n return Geodesic.WGS84.Inverse(p1.y, p1.x, p2.y, p2.x)['s12']\n\n\nmelbourne = Point(144.962272, -37.812274)\n\ndef compare(f):\n return geoDistance(f.getLocation(), melbourne)\n\nfor i in sorted(VicEmergency.getItems(), key=compare):\n\n print(i.properties[\"sourceTitle\"])\n print(i.properties[\"category1\"])\n print(i.properties[\"location\"])\n print(\"{:.0f}km\".format(geoDistance(i.getLocation(), melbourne) / 1000))\n\n print(\"============================\")\n",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
from . import match
from . import mimetype
from .mimetype import MIMEType
def sniff_unknown(resource: bytes, sniff_scriptable: bool = False): #might need more arguments
raise NotImplementedError
def sniff_mislabeled_binary(resource: bytes) -> MIMEType:
raise NotImplementedError
def sniff_mislabeled_feed(resource: bytes) -> MIMEType:
raise NotImplementedError
def sniff(resource: bytes, mime_type_string: str = "unknown/unknown", no_sniff: bool = False, check_for_apache_bug: bool = False) -> str:
mime_type = mimetype.parse_mime_type(mime_type_string)
if mime_type.is_unknown():
return sniff_unknown(resource, sniff_scriptable=not no_sniff)
if no_sniff:
return mime_type
if check_for_apache_bug:
return sniff_mislabeled_binary(resource)
if mime_type.is_xml():
return mime_type
if mime_type.essence() == "text/html":
        return sniff_mislabeled_feed(resource)  # return the feed/HTML sniffing result rather than discarding it
    if mime_type.is_image(): #TODO: implement checking supported image types by user agent
match_type = match.match_image_type_pattern(resource)
if not match_type is None:
return match_type
    if mime_type.is_video_audio(): #TODO: implement checking supported audio/video types by user agent
match_type = match.match_image_type_pattern(resource)
if not match_type is None:
return match_type
return mime_type
|
normal
|
{
"blob_id": "a2344f405aa681daff12166b7aad1230652373de",
"index": 3499,
"step-1": "<mask token>\n\n\ndef sniff_mislabeled_feed(resource: bytes) ->MIMEType:\n raise NotImplementedError\n\n\ndef sniff(resource: bytes, mime_type_string: str='unknown/unknown',\n no_sniff: bool=False, check_for_apache_bug: bool=False) ->str:\n mime_type = mimetype.parse_mime_type(mime_type_string)\n if mime_type.is_unknown():\n return sniff_unknown(resource, sniff_scriptable=not no_sniff)\n if no_sniff:\n return mime_type\n if check_for_apache_bug:\n return sniff_mislabeled_binary(resource)\n if mime_type.is_xml():\n return mime_type\n if mime_type.essence() == 'text/html':\n sniff_mislabeled_feed(resource)\n if mime_type.is_image():\n match_type = match.match_image_type_pattern(resource)\n if not match_type is None:\n return match_type\n if mime_type.is_video_audio():\n match_type = match.match_image_type_pattern(resource)\n if not match_type is None:\n return match_type\n return mime_type\n",
"step-2": "<mask token>\n\n\ndef sniff_mislabeled_binary(resource: bytes) ->MIMEType:\n raise NotImplementedError\n\n\ndef sniff_mislabeled_feed(resource: bytes) ->MIMEType:\n raise NotImplementedError\n\n\ndef sniff(resource: bytes, mime_type_string: str='unknown/unknown',\n no_sniff: bool=False, check_for_apache_bug: bool=False) ->str:\n mime_type = mimetype.parse_mime_type(mime_type_string)\n if mime_type.is_unknown():\n return sniff_unknown(resource, sniff_scriptable=not no_sniff)\n if no_sniff:\n return mime_type\n if check_for_apache_bug:\n return sniff_mislabeled_binary(resource)\n if mime_type.is_xml():\n return mime_type\n if mime_type.essence() == 'text/html':\n sniff_mislabeled_feed(resource)\n if mime_type.is_image():\n match_type = match.match_image_type_pattern(resource)\n if not match_type is None:\n return match_type\n if mime_type.is_video_audio():\n match_type = match.match_image_type_pattern(resource)\n if not match_type is None:\n return match_type\n return mime_type\n",
"step-3": "<mask token>\n\n\ndef sniff_unknown(resource: bytes, sniff_scriptable: bool=False):\n raise NotImplementedError\n\n\ndef sniff_mislabeled_binary(resource: bytes) ->MIMEType:\n raise NotImplementedError\n\n\ndef sniff_mislabeled_feed(resource: bytes) ->MIMEType:\n raise NotImplementedError\n\n\ndef sniff(resource: bytes, mime_type_string: str='unknown/unknown',\n no_sniff: bool=False, check_for_apache_bug: bool=False) ->str:\n mime_type = mimetype.parse_mime_type(mime_type_string)\n if mime_type.is_unknown():\n return sniff_unknown(resource, sniff_scriptable=not no_sniff)\n if no_sniff:\n return mime_type\n if check_for_apache_bug:\n return sniff_mislabeled_binary(resource)\n if mime_type.is_xml():\n return mime_type\n if mime_type.essence() == 'text/html':\n sniff_mislabeled_feed(resource)\n if mime_type.is_image():\n match_type = match.match_image_type_pattern(resource)\n if not match_type is None:\n return match_type\n if mime_type.is_video_audio():\n match_type = match.match_image_type_pattern(resource)\n if not match_type is None:\n return match_type\n return mime_type\n",
"step-4": "from . import match\nfrom . import mimetype\nfrom .mimetype import MIMEType\n\n\ndef sniff_unknown(resource: bytes, sniff_scriptable: bool=False):\n raise NotImplementedError\n\n\ndef sniff_mislabeled_binary(resource: bytes) ->MIMEType:\n raise NotImplementedError\n\n\ndef sniff_mislabeled_feed(resource: bytes) ->MIMEType:\n raise NotImplementedError\n\n\ndef sniff(resource: bytes, mime_type_string: str='unknown/unknown',\n no_sniff: bool=False, check_for_apache_bug: bool=False) ->str:\n mime_type = mimetype.parse_mime_type(mime_type_string)\n if mime_type.is_unknown():\n return sniff_unknown(resource, sniff_scriptable=not no_sniff)\n if no_sniff:\n return mime_type\n if check_for_apache_bug:\n return sniff_mislabeled_binary(resource)\n if mime_type.is_xml():\n return mime_type\n if mime_type.essence() == 'text/html':\n sniff_mislabeled_feed(resource)\n if mime_type.is_image():\n match_type = match.match_image_type_pattern(resource)\n if not match_type is None:\n return match_type\n if mime_type.is_video_audio():\n match_type = match.match_image_type_pattern(resource)\n if not match_type is None:\n return match_type\n return mime_type\n",
"step-5": "from . import match\nfrom . import mimetype\nfrom .mimetype import MIMEType\n\ndef sniff_unknown(resource: bytes, sniff_scriptable: bool = False): #might need more arguments\n raise NotImplementedError\n\ndef sniff_mislabeled_binary(resource: bytes) -> MIMEType:\n raise NotImplementedError\n\ndef sniff_mislabeled_feed(resource: bytes) -> MIMEType:\n raise NotImplementedError\n\ndef sniff(resource: bytes, mime_type_string: str = \"unknown/unknown\", no_sniff: bool = False, check_for_apache_bug: bool = False) -> str:\n mime_type = mimetype.parse_mime_type(mime_type_string)\n if mime_type.is_unknown():\n return sniff_unknown(resource, sniff_scriptable=not no_sniff)\n if no_sniff:\n return mime_type\n if check_for_apache_bug:\n return sniff_mislabeled_binary(resource)\n if mime_type.is_xml():\n return mime_type\n if mime_type.essence() == \"text/html\":\n sniff_mislabeled_feed(resource)\n if mime_type.is_image(): #TODO: implement checking suppported image by user agent\n match_type = match.match_image_type_pattern(resource)\n if not match_type is None:\n return match_type\n if mime_type.is_video_audio(): #TODO: implement checking suppported image by user agent\n match_type = match.match_image_type_pattern(resource)\n if not match_type is None:\n return match_type\n return mime_type\n",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
from .exec_generator import *
|
flexible
|
{
"blob_id": "b6ee3c980357ab22a7969c21207b34546c87092d",
"index": 7305,
"step-1": "<mask token>\n",
"step-2": "from .exec_generator import *\n",
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0,
1
]
}
|
[
0,
1
] |
import time
import random
from BlockchainNetwork.MVB import *
from threading import Thread
coloredlogs.install()
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')
log = logging.getLogger(__name__)
class MVBTest:
def __init__(self, initialNodeCnt):
self.mvb = MVB()
self.signingKeysList = []
self.pubKeysList = []
self.pubKeysByteList = []
self.__initialSigningKeys()
self.__initialPubKeys()
self.mvb.generateGenesisBlockFromJson()
self.mvb.initialNodes(initialNodeCnt)
for i, node in enumerate(self.mvb.networkNodes):
nodeThread = Thread(target=self.threadMining, args=(node, 1))
nodeThread.start()
def multipleValidTxTest(self):
"""
This method tests multiple valid transactions
"""
log.info("--------------------Multiple valid Tx tests now started-------------------")
self.mvb.txWaitingPool += self.readTxFromFile('./TxFiles/MultipleValidTestTx.json')
self.mvb.broadcastTxPools()
def doubleSpendTest(self):
"""
txOutputs is the genesis output.
txOutputs[0] was used twice in this test.
Both Tx1 and Tx2 make txOutputs[0] as input.
When Tx2 is mined, the verification will be failed.
"""
log.info("--------------------Double spend test now started-------------------")
log.info("A pair of valid and invalid transactions is added into GlobalTx Pool")
self.mvb.txWaitingPool += self.readTxFromFile('./TxFiles/DoubleSpendTestTx.json')
self.mvb.broadcastTxPools()
def inputOutputSumTest(self):
log.info("--------------------Input output sum test now started-------------------")
log.info("A pair of valid and invalid Transactions is added into GlobalTx Pool")
self.mvb.txWaitingPool += self.readTxFromFile('./TxFiles/InputOutputSumTestTx.json')
self.mvb.broadcastTxPools()
def sigVerifyTest(self):
log.info("--------------------Signature verify test now started-------------------")
log.info("A pair of valid and invalid Transactions is added into GlobalTx Pool")
self.mvb.txWaitingPool += self.readTxFromFile('./TxFiles/SigVerifyTestTx.json')
self.mvb.broadcastTxPools()
def numberHashTest(self):
log.info("--------------------Number hash test now started-------------------")
log.info("A pair of valid and invalid Transactions is added into GlobalTx Pool")
self.mvb.txWaitingPool += self.readTxFromFile('./TxFiles/NumberHashTestTx.json')
self.mvb.broadcastTxPools()
def txInputsExistTest(self):
log.info("--------------------Transaction inputs exist test now started-------------------")
log.info("A pair of valid and invalid Transactions is added into GlobalTx Pool")
self.mvb.txWaitingPool += self.readTxFromFile('./TxFiles/TxInputsExistTestTx.json')
self.mvb.broadcastTxPools()
def prevHashMatchTest(self):
log.info("--------------------Prev Hash test now started-------------------")
log.info("Node 2 broadcast a Block with invalid prev-hash to the other nodes")
txList = self.readTxFromFile('./TxFiles/PrevHashMatchTestTx.json')
self.mvb.networkNodes[1].mineInvalidBlock(txList[0], isInvalidPrevHash=True)
def blockPOWTest(self):
log.info("--------------------Block POW test now started-------------------")
log.info("Node 1 broadcast a Block with invalid POW to the other nodes")
txList = self.readTxFromFile('./TxFiles/BlockPOWTestTx.json')
self.mvb.networkNodes[0].mineInvalidBlock(txList[0], isInvalidPOW=True)
def threadMining(self, node: Node, i):
nowTime = time.time()
while True:
sleep(random.uniform(0.05, 0.1))
node.receiveBroadcastBlock()
            for tx in list(node.globalTxPool):  # iterate over a copy so removing from the pool below is safe
node.mineBlock(tx)
if node.globalTxPool:
node.globalTxPool.remove(tx)
if time.time() - nowTime > 15:
break
node.saveToFile()
def createTxJsonFile(self, FILENAME: str, txList: List[Transaction]):
txListJsonObj = {'txList': []}
for tx in txList:
txListJsonObj['txList'].append(tx.getJsonObj())
with open(FILENAME, 'w', encoding='utf-8') as f:
f.write(json.dumps(txListJsonObj, indent=4))
def readTxFromFile(self, FILENAME: str) -> List[Transaction]:
txList = []
with open(FILENAME, 'r', encoding='utf-8') as f:
txListJsonObj = json.load(f)
for txObj in txListJsonObj['txList']:
newTx = Transaction(jsonObj=txObj)
txList.append(newTx)
return txList
def __initialSigningKeys(self) -> None:
"""
Generate and update signingKeys List for the network
"""
seedStr = '0' * 31
seedNum = ['1', '2', '3', '4', '5', '6', '7', '8', '9', 'a', 'b', 'c', 'd', 'e', 'f']
seedList = []
for i in range(15):
seed = seedStr + seedNum[i]
seedList.append(seed.encode('utf-8'))
for seed in seedList:
self.signingKeysList.append(SigningKey(seed))
log.info("15 signing keys have been generated successfully")
def __initialPubKeys(self):
for signingKey in self.signingKeysList:
verifyKey = signingKey.verify_key
verifyKeyByte = verifyKey.encode(encoder=HexEncoder)
self.pubKeysList.append(verifyKey)
self.pubKeysByteList.append(verifyKeyByte)
log.info(str(len(self.pubKeysList)) + " public keys have been generated successfully")
|
normal
|
{
"blob_id": "8ad9efbbb2d9e2a5f73ebbb999da3ed93e4c1974",
"index": 9655,
"step-1": "<mask token>\n\n\nclass MVBTest:\n <mask token>\n <mask token>\n\n def doubleSpendTest(self):\n \"\"\"\n txOutputs is the genesis output.\n txOutputs[0] was used twice in this test.\n Both Tx1 and Tx2 make txOutputs[0] as input.\n When Tx2 is mined, the verification will be failed.\n \"\"\"\n log.info(\n '--------------------Double spend test now started-------------------'\n )\n log.info(\n 'A pair of valid and invalid transactions is added into GlobalTx Pool'\n )\n self.mvb.txWaitingPool += self.readTxFromFile(\n './TxFiles/DoubleSpendTestTx.json')\n self.mvb.broadcastTxPools()\n\n def inputOutputSumTest(self):\n log.info(\n '--------------------Input output sum test now started-------------------'\n )\n log.info(\n 'A pair of valid and invalid Transactions is added into GlobalTx Pool'\n )\n self.mvb.txWaitingPool += self.readTxFromFile(\n './TxFiles/InputOutputSumTestTx.json')\n self.mvb.broadcastTxPools()\n\n def sigVerifyTest(self):\n log.info(\n '--------------------Signature verify test now started-------------------'\n )\n log.info(\n 'A pair of valid and invalid Transactions is added into GlobalTx Pool'\n )\n self.mvb.txWaitingPool += self.readTxFromFile(\n './TxFiles/SigVerifyTestTx.json')\n self.mvb.broadcastTxPools()\n\n def numberHashTest(self):\n log.info(\n '--------------------Number hash test now started-------------------'\n )\n log.info(\n 'A pair of valid and invalid Transactions is added into GlobalTx Pool'\n )\n self.mvb.txWaitingPool += self.readTxFromFile(\n './TxFiles/NumberHashTestTx.json')\n self.mvb.broadcastTxPools()\n\n def txInputsExistTest(self):\n log.info(\n '--------------------Transaction inputs exist test now started-------------------'\n )\n log.info(\n 'A pair of valid and invalid Transactions is added into GlobalTx Pool'\n )\n self.mvb.txWaitingPool += self.readTxFromFile(\n './TxFiles/TxInputsExistTestTx.json')\n self.mvb.broadcastTxPools()\n <mask token>\n <mask token>\n\n def threadMining(self, node: Node, i):\n nowTime = time.time()\n while True:\n sleep(random.uniform(0.05, 0.1))\n node.receiveBroadcastBlock()\n for tx in node.globalTxPool:\n node.mineBlock(tx)\n if node.globalTxPool:\n node.globalTxPool.remove(tx)\n if time.time() - nowTime > 15:\n break\n node.saveToFile()\n\n def createTxJsonFile(self, FILENAME: str, txList: List[Transaction]):\n txListJsonObj = {'txList': []}\n for tx in txList:\n txListJsonObj['txList'].append(tx.getJsonObj())\n with open(FILENAME, 'w', encoding='utf-8') as f:\n f.write(json.dumps(txListJsonObj, indent=4))\n\n def readTxFromFile(self, FILENAME: str) ->List[Transaction]:\n txList = []\n with open(FILENAME, 'r', encoding='utf-8') as f:\n txListJsonObj = json.load(f)\n for txObj in txListJsonObj['txList']:\n newTx = Transaction(jsonObj=txObj)\n txList.append(newTx)\n return txList\n\n def __initialSigningKeys(self) ->None:\n \"\"\"\n Generate and update signingKeys List for the network\n \"\"\"\n seedStr = '0' * 31\n seedNum = ['1', '2', '3', '4', '5', '6', '7', '8', '9', 'a', 'b',\n 'c', 'd', 'e', 'f']\n seedList = []\n for i in range(15):\n seed = seedStr + seedNum[i]\n seedList.append(seed.encode('utf-8'))\n for seed in seedList:\n self.signingKeysList.append(SigningKey(seed))\n log.info('15 signing keys have been generated successfully')\n\n def __initialPubKeys(self):\n for signingKey in self.signingKeysList:\n verifyKey = signingKey.verify_key\n verifyKeyByte = verifyKey.encode(encoder=HexEncoder)\n self.pubKeysList.append(verifyKey)\n self.pubKeysByteList.append(verifyKeyByte)\n 
log.info(str(len(self.pubKeysList)) +\n ' public keys have been generated successfully')\n",
"step-2": "<mask token>\n\n\nclass MVBTest:\n\n def __init__(self, initialNodeCnt):\n self.mvb = MVB()\n self.signingKeysList = []\n self.pubKeysList = []\n self.pubKeysByteList = []\n self.__initialSigningKeys()\n self.__initialPubKeys()\n self.mvb.generateGenesisBlockFromJson()\n self.mvb.initialNodes(initialNodeCnt)\n for i, node in enumerate(self.mvb.networkNodes):\n nodeThread = Thread(target=self.threadMining, args=(node, 1))\n nodeThread.start()\n\n def multipleValidTxTest(self):\n \"\"\"\n This method tests multiple valid transactions\n \"\"\"\n log.info(\n '--------------------Multiple valid Tx tests now started-------------------'\n )\n self.mvb.txWaitingPool += self.readTxFromFile(\n './TxFiles/MultipleValidTestTx.json')\n self.mvb.broadcastTxPools()\n\n def doubleSpendTest(self):\n \"\"\"\n txOutputs is the genesis output.\n txOutputs[0] was used twice in this test.\n Both Tx1 and Tx2 make txOutputs[0] as input.\n When Tx2 is mined, the verification will be failed.\n \"\"\"\n log.info(\n '--------------------Double spend test now started-------------------'\n )\n log.info(\n 'A pair of valid and invalid transactions is added into GlobalTx Pool'\n )\n self.mvb.txWaitingPool += self.readTxFromFile(\n './TxFiles/DoubleSpendTestTx.json')\n self.mvb.broadcastTxPools()\n\n def inputOutputSumTest(self):\n log.info(\n '--------------------Input output sum test now started-------------------'\n )\n log.info(\n 'A pair of valid and invalid Transactions is added into GlobalTx Pool'\n )\n self.mvb.txWaitingPool += self.readTxFromFile(\n './TxFiles/InputOutputSumTestTx.json')\n self.mvb.broadcastTxPools()\n\n def sigVerifyTest(self):\n log.info(\n '--------------------Signature verify test now started-------------------'\n )\n log.info(\n 'A pair of valid and invalid Transactions is added into GlobalTx Pool'\n )\n self.mvb.txWaitingPool += self.readTxFromFile(\n './TxFiles/SigVerifyTestTx.json')\n self.mvb.broadcastTxPools()\n\n def numberHashTest(self):\n log.info(\n '--------------------Number hash test now started-------------------'\n )\n log.info(\n 'A pair of valid and invalid Transactions is added into GlobalTx Pool'\n )\n self.mvb.txWaitingPool += self.readTxFromFile(\n './TxFiles/NumberHashTestTx.json')\n self.mvb.broadcastTxPools()\n\n def txInputsExistTest(self):\n log.info(\n '--------------------Transaction inputs exist test now started-------------------'\n )\n log.info(\n 'A pair of valid and invalid Transactions is added into GlobalTx Pool'\n )\n self.mvb.txWaitingPool += self.readTxFromFile(\n './TxFiles/TxInputsExistTestTx.json')\n self.mvb.broadcastTxPools()\n\n def prevHashMatchTest(self):\n log.info(\n '--------------------Prev Hash test now started-------------------'\n )\n log.info(\n 'Node 2 broadcast a Block with invalid prev-hash to the other nodes'\n )\n txList = self.readTxFromFile('./TxFiles/PrevHashMatchTestTx.json')\n self.mvb.networkNodes[1].mineInvalidBlock(txList[0],\n isInvalidPrevHash=True)\n\n def blockPOWTest(self):\n log.info(\n '--------------------Block POW test now started-------------------'\n )\n log.info('Node 1 broadcast a Block with invalid POW to the other nodes'\n )\n txList = self.readTxFromFile('./TxFiles/BlockPOWTestTx.json')\n self.mvb.networkNodes[0].mineInvalidBlock(txList[0], isInvalidPOW=True)\n\n def threadMining(self, node: Node, i):\n nowTime = time.time()\n while True:\n sleep(random.uniform(0.05, 0.1))\n node.receiveBroadcastBlock()\n for tx in node.globalTxPool:\n node.mineBlock(tx)\n if node.globalTxPool:\n 
node.globalTxPool.remove(tx)\n if time.time() - nowTime > 15:\n break\n node.saveToFile()\n\n def createTxJsonFile(self, FILENAME: str, txList: List[Transaction]):\n txListJsonObj = {'txList': []}\n for tx in txList:\n txListJsonObj['txList'].append(tx.getJsonObj())\n with open(FILENAME, 'w', encoding='utf-8') as f:\n f.write(json.dumps(txListJsonObj, indent=4))\n\n def readTxFromFile(self, FILENAME: str) ->List[Transaction]:\n txList = []\n with open(FILENAME, 'r', encoding='utf-8') as f:\n txListJsonObj = json.load(f)\n for txObj in txListJsonObj['txList']:\n newTx = Transaction(jsonObj=txObj)\n txList.append(newTx)\n return txList\n\n def __initialSigningKeys(self) ->None:\n \"\"\"\n Generate and update signingKeys List for the network\n \"\"\"\n seedStr = '0' * 31\n seedNum = ['1', '2', '3', '4', '5', '6', '7', '8', '9', 'a', 'b',\n 'c', 'd', 'e', 'f']\n seedList = []\n for i in range(15):\n seed = seedStr + seedNum[i]\n seedList.append(seed.encode('utf-8'))\n for seed in seedList:\n self.signingKeysList.append(SigningKey(seed))\n log.info('15 signing keys have been generated successfully')\n\n def __initialPubKeys(self):\n for signingKey in self.signingKeysList:\n verifyKey = signingKey.verify_key\n verifyKeyByte = verifyKey.encode(encoder=HexEncoder)\n self.pubKeysList.append(verifyKey)\n self.pubKeysByteList.append(verifyKeyByte)\n log.info(str(len(self.pubKeysList)) +\n ' public keys have been generated successfully')\n",
"step-3": "<mask token>\ncoloredlogs.install()\nlogging.basicConfig(level=logging.INFO, format=\n '%(asctime)s - %(name)s - %(levelname)s - %(message)s')\nlog = logging.getLogger(__name__)\n\n\nclass MVBTest:\n\n def __init__(self, initialNodeCnt):\n self.mvb = MVB()\n self.signingKeysList = []\n self.pubKeysList = []\n self.pubKeysByteList = []\n self.__initialSigningKeys()\n self.__initialPubKeys()\n self.mvb.generateGenesisBlockFromJson()\n self.mvb.initialNodes(initialNodeCnt)\n for i, node in enumerate(self.mvb.networkNodes):\n nodeThread = Thread(target=self.threadMining, args=(node, 1))\n nodeThread.start()\n\n def multipleValidTxTest(self):\n \"\"\"\n This method tests multiple valid transactions\n \"\"\"\n log.info(\n '--------------------Multiple valid Tx tests now started-------------------'\n )\n self.mvb.txWaitingPool += self.readTxFromFile(\n './TxFiles/MultipleValidTestTx.json')\n self.mvb.broadcastTxPools()\n\n def doubleSpendTest(self):\n \"\"\"\n txOutputs is the genesis output.\n txOutputs[0] was used twice in this test.\n Both Tx1 and Tx2 make txOutputs[0] as input.\n When Tx2 is mined, the verification will be failed.\n \"\"\"\n log.info(\n '--------------------Double spend test now started-------------------'\n )\n log.info(\n 'A pair of valid and invalid transactions is added into GlobalTx Pool'\n )\n self.mvb.txWaitingPool += self.readTxFromFile(\n './TxFiles/DoubleSpendTestTx.json')\n self.mvb.broadcastTxPools()\n\n def inputOutputSumTest(self):\n log.info(\n '--------------------Input output sum test now started-------------------'\n )\n log.info(\n 'A pair of valid and invalid Transactions is added into GlobalTx Pool'\n )\n self.mvb.txWaitingPool += self.readTxFromFile(\n './TxFiles/InputOutputSumTestTx.json')\n self.mvb.broadcastTxPools()\n\n def sigVerifyTest(self):\n log.info(\n '--------------------Signature verify test now started-------------------'\n )\n log.info(\n 'A pair of valid and invalid Transactions is added into GlobalTx Pool'\n )\n self.mvb.txWaitingPool += self.readTxFromFile(\n './TxFiles/SigVerifyTestTx.json')\n self.mvb.broadcastTxPools()\n\n def numberHashTest(self):\n log.info(\n '--------------------Number hash test now started-------------------'\n )\n log.info(\n 'A pair of valid and invalid Transactions is added into GlobalTx Pool'\n )\n self.mvb.txWaitingPool += self.readTxFromFile(\n './TxFiles/NumberHashTestTx.json')\n self.mvb.broadcastTxPools()\n\n def txInputsExistTest(self):\n log.info(\n '--------------------Transaction inputs exist test now started-------------------'\n )\n log.info(\n 'A pair of valid and invalid Transactions is added into GlobalTx Pool'\n )\n self.mvb.txWaitingPool += self.readTxFromFile(\n './TxFiles/TxInputsExistTestTx.json')\n self.mvb.broadcastTxPools()\n\n def prevHashMatchTest(self):\n log.info(\n '--------------------Prev Hash test now started-------------------'\n )\n log.info(\n 'Node 2 broadcast a Block with invalid prev-hash to the other nodes'\n )\n txList = self.readTxFromFile('./TxFiles/PrevHashMatchTestTx.json')\n self.mvb.networkNodes[1].mineInvalidBlock(txList[0],\n isInvalidPrevHash=True)\n\n def blockPOWTest(self):\n log.info(\n '--------------------Block POW test now started-------------------'\n )\n log.info('Node 1 broadcast a Block with invalid POW to the other nodes'\n )\n txList = self.readTxFromFile('./TxFiles/BlockPOWTestTx.json')\n self.mvb.networkNodes[0].mineInvalidBlock(txList[0], isInvalidPOW=True)\n\n def threadMining(self, node: Node, i):\n nowTime = time.time()\n while 
True:\n sleep(random.uniform(0.05, 0.1))\n node.receiveBroadcastBlock()\n for tx in node.globalTxPool:\n node.mineBlock(tx)\n if node.globalTxPool:\n node.globalTxPool.remove(tx)\n if time.time() - nowTime > 15:\n break\n node.saveToFile()\n\n def createTxJsonFile(self, FILENAME: str, txList: List[Transaction]):\n txListJsonObj = {'txList': []}\n for tx in txList:\n txListJsonObj['txList'].append(tx.getJsonObj())\n with open(FILENAME, 'w', encoding='utf-8') as f:\n f.write(json.dumps(txListJsonObj, indent=4))\n\n def readTxFromFile(self, FILENAME: str) ->List[Transaction]:\n txList = []\n with open(FILENAME, 'r', encoding='utf-8') as f:\n txListJsonObj = json.load(f)\n for txObj in txListJsonObj['txList']:\n newTx = Transaction(jsonObj=txObj)\n txList.append(newTx)\n return txList\n\n def __initialSigningKeys(self) ->None:\n \"\"\"\n Generate and update signingKeys List for the network\n \"\"\"\n seedStr = '0' * 31\n seedNum = ['1', '2', '3', '4', '5', '6', '7', '8', '9', 'a', 'b',\n 'c', 'd', 'e', 'f']\n seedList = []\n for i in range(15):\n seed = seedStr + seedNum[i]\n seedList.append(seed.encode('utf-8'))\n for seed in seedList:\n self.signingKeysList.append(SigningKey(seed))\n log.info('15 signing keys have been generated successfully')\n\n def __initialPubKeys(self):\n for signingKey in self.signingKeysList:\n verifyKey = signingKey.verify_key\n verifyKeyByte = verifyKey.encode(encoder=HexEncoder)\n self.pubKeysList.append(verifyKey)\n self.pubKeysByteList.append(verifyKeyByte)\n log.info(str(len(self.pubKeysList)) +\n ' public keys have been generated successfully')\n",
"step-4": "import time\nimport random\nfrom BlockchainNetwork.MVB import *\nfrom threading import Thread\ncoloredlogs.install()\nlogging.basicConfig(level=logging.INFO, format=\n '%(asctime)s - %(name)s - %(levelname)s - %(message)s')\nlog = logging.getLogger(__name__)\n\n\nclass MVBTest:\n\n def __init__(self, initialNodeCnt):\n self.mvb = MVB()\n self.signingKeysList = []\n self.pubKeysList = []\n self.pubKeysByteList = []\n self.__initialSigningKeys()\n self.__initialPubKeys()\n self.mvb.generateGenesisBlockFromJson()\n self.mvb.initialNodes(initialNodeCnt)\n for i, node in enumerate(self.mvb.networkNodes):\n nodeThread = Thread(target=self.threadMining, args=(node, 1))\n nodeThread.start()\n\n def multipleValidTxTest(self):\n \"\"\"\n This method tests multiple valid transactions\n \"\"\"\n log.info(\n '--------------------Multiple valid Tx tests now started-------------------'\n )\n self.mvb.txWaitingPool += self.readTxFromFile(\n './TxFiles/MultipleValidTestTx.json')\n self.mvb.broadcastTxPools()\n\n def doubleSpendTest(self):\n \"\"\"\n txOutputs is the genesis output.\n txOutputs[0] was used twice in this test.\n Both Tx1 and Tx2 make txOutputs[0] as input.\n When Tx2 is mined, the verification will be failed.\n \"\"\"\n log.info(\n '--------------------Double spend test now started-------------------'\n )\n log.info(\n 'A pair of valid and invalid transactions is added into GlobalTx Pool'\n )\n self.mvb.txWaitingPool += self.readTxFromFile(\n './TxFiles/DoubleSpendTestTx.json')\n self.mvb.broadcastTxPools()\n\n def inputOutputSumTest(self):\n log.info(\n '--------------------Input output sum test now started-------------------'\n )\n log.info(\n 'A pair of valid and invalid Transactions is added into GlobalTx Pool'\n )\n self.mvb.txWaitingPool += self.readTxFromFile(\n './TxFiles/InputOutputSumTestTx.json')\n self.mvb.broadcastTxPools()\n\n def sigVerifyTest(self):\n log.info(\n '--------------------Signature verify test now started-------------------'\n )\n log.info(\n 'A pair of valid and invalid Transactions is added into GlobalTx Pool'\n )\n self.mvb.txWaitingPool += self.readTxFromFile(\n './TxFiles/SigVerifyTestTx.json')\n self.mvb.broadcastTxPools()\n\n def numberHashTest(self):\n log.info(\n '--------------------Number hash test now started-------------------'\n )\n log.info(\n 'A pair of valid and invalid Transactions is added into GlobalTx Pool'\n )\n self.mvb.txWaitingPool += self.readTxFromFile(\n './TxFiles/NumberHashTestTx.json')\n self.mvb.broadcastTxPools()\n\n def txInputsExistTest(self):\n log.info(\n '--------------------Transaction inputs exist test now started-------------------'\n )\n log.info(\n 'A pair of valid and invalid Transactions is added into GlobalTx Pool'\n )\n self.mvb.txWaitingPool += self.readTxFromFile(\n './TxFiles/TxInputsExistTestTx.json')\n self.mvb.broadcastTxPools()\n\n def prevHashMatchTest(self):\n log.info(\n '--------------------Prev Hash test now started-------------------'\n )\n log.info(\n 'Node 2 broadcast a Block with invalid prev-hash to the other nodes'\n )\n txList = self.readTxFromFile('./TxFiles/PrevHashMatchTestTx.json')\n self.mvb.networkNodes[1].mineInvalidBlock(txList[0],\n isInvalidPrevHash=True)\n\n def blockPOWTest(self):\n log.info(\n '--------------------Block POW test now started-------------------'\n )\n log.info('Node 1 broadcast a Block with invalid POW to the other nodes'\n )\n txList = self.readTxFromFile('./TxFiles/BlockPOWTestTx.json')\n self.mvb.networkNodes[0].mineInvalidBlock(txList[0], 
isInvalidPOW=True)\n\n def threadMining(self, node: Node, i):\n nowTime = time.time()\n while True:\n sleep(random.uniform(0.05, 0.1))\n node.receiveBroadcastBlock()\n for tx in node.globalTxPool:\n node.mineBlock(tx)\n if node.globalTxPool:\n node.globalTxPool.remove(tx)\n if time.time() - nowTime > 15:\n break\n node.saveToFile()\n\n def createTxJsonFile(self, FILENAME: str, txList: List[Transaction]):\n txListJsonObj = {'txList': []}\n for tx in txList:\n txListJsonObj['txList'].append(tx.getJsonObj())\n with open(FILENAME, 'w', encoding='utf-8') as f:\n f.write(json.dumps(txListJsonObj, indent=4))\n\n def readTxFromFile(self, FILENAME: str) ->List[Transaction]:\n txList = []\n with open(FILENAME, 'r', encoding='utf-8') as f:\n txListJsonObj = json.load(f)\n for txObj in txListJsonObj['txList']:\n newTx = Transaction(jsonObj=txObj)\n txList.append(newTx)\n return txList\n\n def __initialSigningKeys(self) ->None:\n \"\"\"\n Generate and update signingKeys List for the network\n \"\"\"\n seedStr = '0' * 31\n seedNum = ['1', '2', '3', '4', '5', '6', '7', '8', '9', 'a', 'b',\n 'c', 'd', 'e', 'f']\n seedList = []\n for i in range(15):\n seed = seedStr + seedNum[i]\n seedList.append(seed.encode('utf-8'))\n for seed in seedList:\n self.signingKeysList.append(SigningKey(seed))\n log.info('15 signing keys have been generated successfully')\n\n def __initialPubKeys(self):\n for signingKey in self.signingKeysList:\n verifyKey = signingKey.verify_key\n verifyKeyByte = verifyKey.encode(encoder=HexEncoder)\n self.pubKeysList.append(verifyKey)\n self.pubKeysByteList.append(verifyKeyByte)\n log.info(str(len(self.pubKeysList)) +\n ' public keys have been generated successfully')\n",
"step-5": "import time\nimport random\n\nfrom BlockchainNetwork.MVB import *\nfrom threading import Thread\n\ncoloredlogs.install()\nlogging.basicConfig(level=logging.INFO, format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')\nlog = logging.getLogger(__name__)\n\n\nclass MVBTest:\n def __init__(self, initialNodeCnt):\n self.mvb = MVB()\n self.signingKeysList = []\n self.pubKeysList = []\n self.pubKeysByteList = []\n self.__initialSigningKeys()\n self.__initialPubKeys()\n\n self.mvb.generateGenesisBlockFromJson()\n self.mvb.initialNodes(initialNodeCnt)\n\n for i, node in enumerate(self.mvb.networkNodes):\n nodeThread = Thread(target=self.threadMining, args=(node, 1))\n nodeThread.start()\n\n def multipleValidTxTest(self):\n \"\"\"\n This method tests multiple valid transactions\n \"\"\"\n log.info(\"--------------------Multiple valid Tx tests now started-------------------\")\n\n self.mvb.txWaitingPool += self.readTxFromFile('./TxFiles/MultipleValidTestTx.json')\n self.mvb.broadcastTxPools()\n\n def doubleSpendTest(self):\n \"\"\"\n txOutputs is the genesis output.\n txOutputs[0] was used twice in this test.\n Both Tx1 and Tx2 make txOutputs[0] as input.\n When Tx2 is mined, the verification will be failed.\n \"\"\"\n log.info(\"--------------------Double spend test now started-------------------\")\n log.info(\"A pair of valid and invalid transactions is added into GlobalTx Pool\")\n\n self.mvb.txWaitingPool += self.readTxFromFile('./TxFiles/DoubleSpendTestTx.json')\n self.mvb.broadcastTxPools()\n\n def inputOutputSumTest(self):\n log.info(\"--------------------Input output sum test now started-------------------\")\n log.info(\"A pair of valid and invalid Transactions is added into GlobalTx Pool\")\n\n self.mvb.txWaitingPool += self.readTxFromFile('./TxFiles/InputOutputSumTestTx.json')\n self.mvb.broadcastTxPools()\n\n def sigVerifyTest(self):\n log.info(\"--------------------Signature verify test now started-------------------\")\n log.info(\"A pair of valid and invalid Transactions is added into GlobalTx Pool\")\n\n self.mvb.txWaitingPool += self.readTxFromFile('./TxFiles/SigVerifyTestTx.json')\n self.mvb.broadcastTxPools()\n\n def numberHashTest(self):\n log.info(\"--------------------Number hash test now started-------------------\")\n log.info(\"A pair of valid and invalid Transactions is added into GlobalTx Pool\")\n\n self.mvb.txWaitingPool += self.readTxFromFile('./TxFiles/NumberHashTestTx.json')\n self.mvb.broadcastTxPools()\n\n def txInputsExistTest(self):\n log.info(\"--------------------Transaction inputs exist test now started-------------------\")\n log.info(\"A pair of valid and invalid Transactions is added into GlobalTx Pool\")\n\n self.mvb.txWaitingPool += self.readTxFromFile('./TxFiles/TxInputsExistTestTx.json')\n self.mvb.broadcastTxPools()\n\n def prevHashMatchTest(self):\n log.info(\"--------------------Prev Hash test now started-------------------\")\n log.info(\"Node 2 broadcast a Block with invalid prev-hash to the other nodes\")\n\n txList = self.readTxFromFile('./TxFiles/PrevHashMatchTestTx.json')\n self.mvb.networkNodes[1].mineInvalidBlock(txList[0], isInvalidPrevHash=True)\n\n def blockPOWTest(self):\n log.info(\"--------------------Block POW test now started-------------------\")\n log.info(\"Node 1 broadcast a Block with invalid POW to the other nodes\")\n\n txList = self.readTxFromFile('./TxFiles/BlockPOWTestTx.json')\n self.mvb.networkNodes[0].mineInvalidBlock(txList[0], isInvalidPOW=True)\n\n def threadMining(self, node: Node, i):\n nowTime = 
time.time()\n while True:\n sleep(random.uniform(0.05, 0.1))\n node.receiveBroadcastBlock()\n for tx in node.globalTxPool:\n node.mineBlock(tx)\n if node.globalTxPool:\n node.globalTxPool.remove(tx)\n if time.time() - nowTime > 15:\n break\n\n node.saveToFile()\n\n def createTxJsonFile(self, FILENAME: str, txList: List[Transaction]):\n txListJsonObj = {'txList': []}\n for tx in txList:\n txListJsonObj['txList'].append(tx.getJsonObj())\n with open(FILENAME, 'w', encoding='utf-8') as f:\n f.write(json.dumps(txListJsonObj, indent=4))\n\n def readTxFromFile(self, FILENAME: str) -> List[Transaction]:\n txList = []\n with open(FILENAME, 'r', encoding='utf-8') as f:\n txListJsonObj = json.load(f)\n for txObj in txListJsonObj['txList']:\n newTx = Transaction(jsonObj=txObj)\n txList.append(newTx)\n return txList\n\n def __initialSigningKeys(self) -> None:\n \"\"\"\n Generate and update signingKeys List for the network\n \"\"\"\n seedStr = '0' * 31\n seedNum = ['1', '2', '3', '4', '5', '6', '7', '8', '9', 'a', 'b', 'c', 'd', 'e', 'f']\n seedList = []\n for i in range(15):\n seed = seedStr + seedNum[i]\n seedList.append(seed.encode('utf-8'))\n\n for seed in seedList:\n self.signingKeysList.append(SigningKey(seed))\n log.info(\"15 signing keys have been generated successfully\")\n\n def __initialPubKeys(self):\n for signingKey in self.signingKeysList:\n verifyKey = signingKey.verify_key\n verifyKeyByte = verifyKey.encode(encoder=HexEncoder)\n self.pubKeysList.append(verifyKey)\n self.pubKeysByteList.append(verifyKeyByte)\n log.info(str(len(self.pubKeysList)) + \" public keys have been generated successfully\")\n",
"step-ids": [
11,
15,
17,
18,
19
]
}
|
[
11,
15,
17,
18,
19
] |
import json
from typing import TYPE_CHECKING
import pytest
from eth_utils import is_checksum_address
from rotkehlchen.globaldb.handler import GlobalDBHandler
from rotkehlchen.types import ChainID
if TYPE_CHECKING:
from rotkehlchen.chain.ethereum.node_inquirer import EthereumInquirer
def test_evm_contracts_data(globaldb):
"""Test that all evm contract entries in the packaged global DB have legal data"""
serialized_chain_ids = [x.serialize_for_db() for x in ChainID]
with globaldb.conn.read_ctx() as cursor:
cursor.execute('SELECT address, chain_id, abi, deployed_block FROM contract_data')
for entry in cursor:
assert is_checksum_address(entry[0])
assert isinstance(entry[1], int) and entry[1] in serialized_chain_ids
assert isinstance(entry[2], int)
assert isinstance(entry[3], int) and entry[3] > 0
def test_evm_abi_data(globaldb):
"""Test that the evm abi entries in the packaged globalDB have legal data"""
abis_set = {0}
with globaldb.conn.read_ctx() as cursor:
cursor.execute('SELECT id, value FROM contract_abi')
for entry in cursor:
assert isinstance(entry[0], int)
# read the abi, and make sure it's the most compressed version it can be
# and that it's unique
assert isinstance(entry[1], str)
json_abi = json.loads(entry[1])
serialized_abi = json.dumps(json_abi, separators=(',', ':'))
assert serialized_abi == entry[1]
assert entry[1] not in abis_set
abis_set.add(entry[1])
@pytest.mark.parametrize('sql_vm_instructions_cb', [2])
def test_fallback_to_packaged_db(ethereum_inquirer: 'EthereumInquirer'):
"""
Test that if a contract / abi is missing in the globaldb, it is searched in the packaged db.
"""
with GlobalDBHandler().conn.read_ctx() as cursor:
# Delete one contract and its abi
cursor.execute(
'SELECT contract_data.address, contract_abi.value FROM contract_data INNER JOIN '
'contract_abi ON contract_data.abi=contract_abi.id WHERE chain_id=1 LIMIT 1',
)
(address, abi) = cursor.fetchone() # There has to be at least one entry
cursor.execute('DELETE FROM contract_data WHERE address=? AND chain_id=1', (address,))
cursor.execute('DELETE FROM contract_abi WHERE value=?', (abi,))
# Now query the contract, let it get to packaged global DB and also see that
# database packaged_db is locked is also not raised
ethereum_inquirer.contracts.contract(address)
with GlobalDBHandler().conn.read_ctx() as cursor:
# Check that the contract and the abi were copied to the global db
cursor.execute(
'SELECT COUNT(*) FROM contract_data INNER JOIN '
'contract_abi ON contract_data.abi=contract_abi.id WHERE chain_id=1 AND '
'contract_data.address=? AND contract_abi.value=?',
(address, abi),
)
assert cursor.fetchone()[0] == 1
|
normal
|
{
"blob_id": "52dc8a4f9165a88dddc1da16e0adb045c4d851ed",
"index": 5017,
"step-1": "<mask token>\n\n\ndef test_evm_contracts_data(globaldb):\n \"\"\"Test that all evm contract entries in the packaged global DB have legal data\"\"\"\n serialized_chain_ids = [x.serialize_for_db() for x in ChainID]\n with globaldb.conn.read_ctx() as cursor:\n cursor.execute(\n 'SELECT address, chain_id, abi, deployed_block FROM contract_data')\n for entry in cursor:\n assert is_checksum_address(entry[0])\n assert isinstance(entry[1], int) and entry[1\n ] in serialized_chain_ids\n assert isinstance(entry[2], int)\n assert isinstance(entry[3], int) and entry[3] > 0\n\n\n<mask token>\n\n\[email protected]('sql_vm_instructions_cb', [2])\ndef test_fallback_to_packaged_db(ethereum_inquirer: 'EthereumInquirer'):\n \"\"\"\n Test that if a contract / abi is missing in the globaldb, it is searched in the packaged db.\n \"\"\"\n with GlobalDBHandler().conn.read_ctx() as cursor:\n cursor.execute(\n 'SELECT contract_data.address, contract_abi.value FROM contract_data INNER JOIN contract_abi ON contract_data.abi=contract_abi.id WHERE chain_id=1 LIMIT 1'\n )\n address, abi = cursor.fetchone()\n cursor.execute(\n 'DELETE FROM contract_data WHERE address=? AND chain_id=1', (\n address,))\n cursor.execute('DELETE FROM contract_abi WHERE value=?', (abi,))\n ethereum_inquirer.contracts.contract(address)\n with GlobalDBHandler().conn.read_ctx() as cursor:\n cursor.execute(\n 'SELECT COUNT(*) FROM contract_data INNER JOIN contract_abi ON contract_data.abi=contract_abi.id WHERE chain_id=1 AND contract_data.address=? AND contract_abi.value=?'\n , (address, abi))\n assert cursor.fetchone()[0] == 1\n",
"step-2": "<mask token>\n\n\ndef test_evm_contracts_data(globaldb):\n \"\"\"Test that all evm contract entries in the packaged global DB have legal data\"\"\"\n serialized_chain_ids = [x.serialize_for_db() for x in ChainID]\n with globaldb.conn.read_ctx() as cursor:\n cursor.execute(\n 'SELECT address, chain_id, abi, deployed_block FROM contract_data')\n for entry in cursor:\n assert is_checksum_address(entry[0])\n assert isinstance(entry[1], int) and entry[1\n ] in serialized_chain_ids\n assert isinstance(entry[2], int)\n assert isinstance(entry[3], int) and entry[3] > 0\n\n\ndef test_evm_abi_data(globaldb):\n \"\"\"Test that the evm abi entries in the packaged globalDB have legal data\"\"\"\n abis_set = {0}\n with globaldb.conn.read_ctx() as cursor:\n cursor.execute('SELECT id, value FROM contract_abi')\n for entry in cursor:\n assert isinstance(entry[0], int)\n assert isinstance(entry[1], str)\n json_abi = json.loads(entry[1])\n serialized_abi = json.dumps(json_abi, separators=(',', ':'))\n assert serialized_abi == entry[1]\n assert entry[1] not in abis_set\n abis_set.add(entry[1])\n\n\[email protected]('sql_vm_instructions_cb', [2])\ndef test_fallback_to_packaged_db(ethereum_inquirer: 'EthereumInquirer'):\n \"\"\"\n Test that if a contract / abi is missing in the globaldb, it is searched in the packaged db.\n \"\"\"\n with GlobalDBHandler().conn.read_ctx() as cursor:\n cursor.execute(\n 'SELECT contract_data.address, contract_abi.value FROM contract_data INNER JOIN contract_abi ON contract_data.abi=contract_abi.id WHERE chain_id=1 LIMIT 1'\n )\n address, abi = cursor.fetchone()\n cursor.execute(\n 'DELETE FROM contract_data WHERE address=? AND chain_id=1', (\n address,))\n cursor.execute('DELETE FROM contract_abi WHERE value=?', (abi,))\n ethereum_inquirer.contracts.contract(address)\n with GlobalDBHandler().conn.read_ctx() as cursor:\n cursor.execute(\n 'SELECT COUNT(*) FROM contract_data INNER JOIN contract_abi ON contract_data.abi=contract_abi.id WHERE chain_id=1 AND contract_data.address=? AND contract_abi.value=?'\n , (address, abi))\n assert cursor.fetchone()[0] == 1\n",
"step-3": "<mask token>\nif TYPE_CHECKING:\n from rotkehlchen.chain.ethereum.node_inquirer import EthereumInquirer\n\n\ndef test_evm_contracts_data(globaldb):\n \"\"\"Test that all evm contract entries in the packaged global DB have legal data\"\"\"\n serialized_chain_ids = [x.serialize_for_db() for x in ChainID]\n with globaldb.conn.read_ctx() as cursor:\n cursor.execute(\n 'SELECT address, chain_id, abi, deployed_block FROM contract_data')\n for entry in cursor:\n assert is_checksum_address(entry[0])\n assert isinstance(entry[1], int) and entry[1\n ] in serialized_chain_ids\n assert isinstance(entry[2], int)\n assert isinstance(entry[3], int) and entry[3] > 0\n\n\ndef test_evm_abi_data(globaldb):\n \"\"\"Test that the evm abi entries in the packaged globalDB have legal data\"\"\"\n abis_set = {0}\n with globaldb.conn.read_ctx() as cursor:\n cursor.execute('SELECT id, value FROM contract_abi')\n for entry in cursor:\n assert isinstance(entry[0], int)\n assert isinstance(entry[1], str)\n json_abi = json.loads(entry[1])\n serialized_abi = json.dumps(json_abi, separators=(',', ':'))\n assert serialized_abi == entry[1]\n assert entry[1] not in abis_set\n abis_set.add(entry[1])\n\n\[email protected]('sql_vm_instructions_cb', [2])\ndef test_fallback_to_packaged_db(ethereum_inquirer: 'EthereumInquirer'):\n \"\"\"\n Test that if a contract / abi is missing in the globaldb, it is searched in the packaged db.\n \"\"\"\n with GlobalDBHandler().conn.read_ctx() as cursor:\n cursor.execute(\n 'SELECT contract_data.address, contract_abi.value FROM contract_data INNER JOIN contract_abi ON contract_data.abi=contract_abi.id WHERE chain_id=1 LIMIT 1'\n )\n address, abi = cursor.fetchone()\n cursor.execute(\n 'DELETE FROM contract_data WHERE address=? AND chain_id=1', (\n address,))\n cursor.execute('DELETE FROM contract_abi WHERE value=?', (abi,))\n ethereum_inquirer.contracts.contract(address)\n with GlobalDBHandler().conn.read_ctx() as cursor:\n cursor.execute(\n 'SELECT COUNT(*) FROM contract_data INNER JOIN contract_abi ON contract_data.abi=contract_abi.id WHERE chain_id=1 AND contract_data.address=? AND contract_abi.value=?'\n , (address, abi))\n assert cursor.fetchone()[0] == 1\n",
"step-4": "import json\nfrom typing import TYPE_CHECKING\nimport pytest\nfrom eth_utils import is_checksum_address\nfrom rotkehlchen.globaldb.handler import GlobalDBHandler\nfrom rotkehlchen.types import ChainID\nif TYPE_CHECKING:\n from rotkehlchen.chain.ethereum.node_inquirer import EthereumInquirer\n\n\ndef test_evm_contracts_data(globaldb):\n \"\"\"Test that all evm contract entries in the packaged global DB have legal data\"\"\"\n serialized_chain_ids = [x.serialize_for_db() for x in ChainID]\n with globaldb.conn.read_ctx() as cursor:\n cursor.execute(\n 'SELECT address, chain_id, abi, deployed_block FROM contract_data')\n for entry in cursor:\n assert is_checksum_address(entry[0])\n assert isinstance(entry[1], int) and entry[1\n ] in serialized_chain_ids\n assert isinstance(entry[2], int)\n assert isinstance(entry[3], int) and entry[3] > 0\n\n\ndef test_evm_abi_data(globaldb):\n \"\"\"Test that the evm abi entries in the packaged globalDB have legal data\"\"\"\n abis_set = {0}\n with globaldb.conn.read_ctx() as cursor:\n cursor.execute('SELECT id, value FROM contract_abi')\n for entry in cursor:\n assert isinstance(entry[0], int)\n assert isinstance(entry[1], str)\n json_abi = json.loads(entry[1])\n serialized_abi = json.dumps(json_abi, separators=(',', ':'))\n assert serialized_abi == entry[1]\n assert entry[1] not in abis_set\n abis_set.add(entry[1])\n\n\[email protected]('sql_vm_instructions_cb', [2])\ndef test_fallback_to_packaged_db(ethereum_inquirer: 'EthereumInquirer'):\n \"\"\"\n Test that if a contract / abi is missing in the globaldb, it is searched in the packaged db.\n \"\"\"\n with GlobalDBHandler().conn.read_ctx() as cursor:\n cursor.execute(\n 'SELECT contract_data.address, contract_abi.value FROM contract_data INNER JOIN contract_abi ON contract_data.abi=contract_abi.id WHERE chain_id=1 LIMIT 1'\n )\n address, abi = cursor.fetchone()\n cursor.execute(\n 'DELETE FROM contract_data WHERE address=? AND chain_id=1', (\n address,))\n cursor.execute('DELETE FROM contract_abi WHERE value=?', (abi,))\n ethereum_inquirer.contracts.contract(address)\n with GlobalDBHandler().conn.read_ctx() as cursor:\n cursor.execute(\n 'SELECT COUNT(*) FROM contract_data INNER JOIN contract_abi ON contract_data.abi=contract_abi.id WHERE chain_id=1 AND contract_data.address=? AND contract_abi.value=?'\n , (address, abi))\n assert cursor.fetchone()[0] == 1\n",
"step-5": "import json\nfrom typing import TYPE_CHECKING\n\nimport pytest\nfrom eth_utils import is_checksum_address\n\nfrom rotkehlchen.globaldb.handler import GlobalDBHandler\nfrom rotkehlchen.types import ChainID\n\nif TYPE_CHECKING:\n from rotkehlchen.chain.ethereum.node_inquirer import EthereumInquirer\n\n\ndef test_evm_contracts_data(globaldb):\n \"\"\"Test that all evm contract entries in the packaged global DB have legal data\"\"\"\n serialized_chain_ids = [x.serialize_for_db() for x in ChainID]\n with globaldb.conn.read_ctx() as cursor:\n cursor.execute('SELECT address, chain_id, abi, deployed_block FROM contract_data')\n for entry in cursor:\n assert is_checksum_address(entry[0])\n assert isinstance(entry[1], int) and entry[1] in serialized_chain_ids\n assert isinstance(entry[2], int)\n assert isinstance(entry[3], int) and entry[3] > 0\n\n\ndef test_evm_abi_data(globaldb):\n \"\"\"Test that the evm abi entries in the packaged globalDB have legal data\"\"\"\n abis_set = {0}\n with globaldb.conn.read_ctx() as cursor:\n cursor.execute('SELECT id, value FROM contract_abi')\n for entry in cursor:\n assert isinstance(entry[0], int)\n # read the abi, and make sure it's the most compressed version it can be\n # and that it's unique\n assert isinstance(entry[1], str)\n json_abi = json.loads(entry[1])\n serialized_abi = json.dumps(json_abi, separators=(',', ':'))\n assert serialized_abi == entry[1]\n assert entry[1] not in abis_set\n abis_set.add(entry[1])\n\n\[email protected]('sql_vm_instructions_cb', [2])\ndef test_fallback_to_packaged_db(ethereum_inquirer: 'EthereumInquirer'):\n \"\"\"\n Test that if a contract / abi is missing in the globaldb, it is searched in the packaged db.\n \"\"\"\n with GlobalDBHandler().conn.read_ctx() as cursor:\n # Delete one contract and its abi\n cursor.execute(\n 'SELECT contract_data.address, contract_abi.value FROM contract_data INNER JOIN '\n 'contract_abi ON contract_data.abi=contract_abi.id WHERE chain_id=1 LIMIT 1',\n )\n (address, abi) = cursor.fetchone() # There has to be at least one entry\n cursor.execute('DELETE FROM contract_data WHERE address=? AND chain_id=1', (address,))\n cursor.execute('DELETE FROM contract_abi WHERE value=?', (abi,))\n\n # Now query the contract, let it get to packaged global DB and also see that\n # database packaged_db is locked is also not raised\n ethereum_inquirer.contracts.contract(address)\n\n with GlobalDBHandler().conn.read_ctx() as cursor:\n # Check that the contract and the abi were copied to the global db\n cursor.execute(\n 'SELECT COUNT(*) FROM contract_data INNER JOIN '\n 'contract_abi ON contract_data.abi=contract_abi.id WHERE chain_id=1 AND '\n 'contract_data.address=? AND contract_abi.value=?',\n (address, abi),\n )\n assert cursor.fetchone()[0] == 1\n",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
filename = 'learning_python.txt'
# with open(filename) as file_object:
# contents = file_object.read()
# print(contents)
# with open(filename) as file_object:
# for line in file_object:
# print(line.rstrip())
with open(filename) as file_object:
lines = file_object.readlines()
c_string = ''
for line in lines:
c_string += line.rstrip()
print(f"{c_string.replace('Python', 'Scala')}")
|
normal
|
{
"blob_id": "2f0dc8697e979f307c86a08832b0eae86357d416",
"index": 2497,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nwith open(filename) as file_object:\n lines = file_object.readlines()\n<mask token>\nfor line in lines:\n c_string += line.rstrip()\nprint(f\"{c_string.replace('Python', 'Scala')}\")\n",
"step-3": "filename = 'learning_python.txt'\nwith open(filename) as file_object:\n lines = file_object.readlines()\nc_string = ''\nfor line in lines:\n c_string += line.rstrip()\nprint(f\"{c_string.replace('Python', 'Scala')}\")\n",
"step-4": "filename = 'learning_python.txt'\n\n# with open(filename) as file_object:\n# \tcontents = file_object.read()\n# print(contents)\n\n# with open(filename) as file_object:\n# \tfor line in file_object:\n# \t\tprint(line.rstrip())\n\nwith open(filename) as file_object:\n\tlines = file_object.readlines()\n\nc_string = ''\nfor line in lines:\n\tc_string += line.rstrip()\n\t\nprint(f\"{c_string.replace('Python', 'Scala')}\")\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
"""
Given an array nums and a value val, remove all instances of
that value in-place and return the new length.
Do not allocate extra space for another array, you must do
this by modifying the input array in-place with O(1) extra memory.
The order of elements can be changed. It doesn't matter
what you leave beyond the new length.
"""
class Solution:
def remove_element(self, nums: list[int], val: int) -> int:
last_position = 0
for num in nums:
if num != val:
nums[last_position] = num
last_position += 1
return last_position
"""
Complexity: Time : O(n) | Space: O(1)
"""
|
normal
|
{
"blob_id": "8be4bf5c1a5a7b841edc915793571686ee0bffe6",
"index": 113,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass Solution:\n <mask token>\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass Solution:\n\n def remove_element(self, nums: list[int], val: int) ->int:\n last_position = 0\n for num in nums:\n if num != val:\n nums[last_position] = num\n last_position += 1\n return last_position\n\n\n<mask token>\n",
"step-4": "\"\"\"\n Given an array nums and a value val, remove all instances of\n that value in-place and return the new length.\n\n Do not allocate extra space for another array, you must do\n this by modifying the input array in-place with O(1) extra memory.\n\n The order of elements can be changed. It doesn't matter\n what you leave beyond the new length.\n\"\"\"\nclass Solution:\n def remove_element(self, nums: list[int], val: int) -> int:\n last_position = 0\n\n for num in nums:\n if num != val:\n nums[last_position] = num\n last_position += 1\n\n return last_position\n\n\"\"\"\n Complexity: Time : O(n) | Space: O(1)\n\"\"\"",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
#!/usr/bin/env python
from ks_auth import sess
from ks_auth import trust_auth
from ks_auth import ks
from ks_auth import utils
from novaclient import client
import novaclient.exceptions
from time import sleep
from uuid import uuid4
import sys
# RDTIBCC-1042
VERSION = '2'
nova = client.Client(VERSION, session=sess)
username = 'standard_user'
search_opts = {
'all_tenants': True
}
class PollingLimitException(Exception):
pass
def poll_server(server, interval=2, limit=4, *args, **kwargs):
for i in range(0, limit):
yield nova.servers.get(server.id)
sleep(interval)
raise PollingLimitException()
def is_active(server):
return (server.status == 'ACTIVE')
def is_shutoff(server):
return (server.status == 'SHUTOFF')
if __name__ == '__main__':
try:
instance_id = sys.argv[1]
except IndexError:
sys.exit('Specify an instance_id')
# Print some info
print('Initial Auth Info:')
for authtype, params in utils.initial_auth_info(ks.auth.client.session):
print(' %s' % authtype)
print(' %s' % params)
print('Access Info:')
for k, v in utils.access_info_vars(sess).iteritems():
print('* {}: {}'.format(k, v))
retry_count = 3
try:
server = nova.servers.get(instance_id)
print('* Deleting %s' % server.name)
for i in range(1, retry_count+1):
print('** Attempt %d' % i)
server.delete()
try:
for state in poll_server(server):
if state == 'DELETING':
print('** still deleting')
except novaclient.exceptions.NotFound:
print('*** done deleting')
break
except Exception, e:
print(e)
|
normal
|
{
"blob_id": "9f40162348d33d70639692dac87777a2799999e9",
"index": 6688,
"step-1": "#!/usr/bin/env python\nfrom ks_auth import sess\nfrom ks_auth import trust_auth\nfrom ks_auth import ks\nfrom ks_auth import utils\nfrom novaclient import client\nimport novaclient.exceptions\nfrom time import sleep\nfrom uuid import uuid4\nimport sys\n\n# RDTIBCC-1042\n\nVERSION = '2'\nnova = client.Client(VERSION, session=sess)\n\nusername = 'standard_user'\nsearch_opts = {\n 'all_tenants': True\n}\n\nclass PollingLimitException(Exception):\n pass\n\ndef poll_server(server, interval=2, limit=4, *args, **kwargs):\n for i in range(0, limit):\n yield nova.servers.get(server.id)\n sleep(interval)\n raise PollingLimitException()\n\ndef is_active(server):\n return (server.status == 'ACTIVE')\n\ndef is_shutoff(server):\n return (server.status == 'SHUTOFF')\n\n\nif __name__ == '__main__':\n try:\n instance_id = sys.argv[1]\n except IndexError:\n sys.exit('Specify an instance_id')\n\n # Print some info\n print('Initial Auth Info:')\n for authtype, params in utils.initial_auth_info(ks.auth.client.session):\n print(' %s' % authtype)\n print(' %s' % params)\n\n print('Access Info:')\n for k, v in utils.access_info_vars(sess).iteritems():\n print('* {}: {}'.format(k, v))\n\n retry_count = 3\n try:\n server = nova.servers.get(instance_id)\n print('* Deleting %s' % server.name)\n for i in range(1, retry_count+1):\n print('** Attempt %d' % i)\n server.delete()\n try:\n for state in poll_server(server):\n if state == 'DELETING':\n print('** still deleting')\n except novaclient.exceptions.NotFound:\n print('*** done deleting')\n break\n except Exception, e:\n print(e)\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
from ._monitor import TMonitor as TMonitor, TqdmSynchronisationWarning as TqdmSynchronisationWarning
from ._tqdm_pandas import tqdm_pandas as tqdm_pandas
from .cli import main as main
from .gui import tqdm as tqdm_gui, trange as tgrange
from .std import TqdmDeprecationWarning as TqdmDeprecationWarning, TqdmExperimentalWarning as TqdmExperimentalWarning, TqdmKeyError as TqdmKeyError, TqdmMonitorWarning as TqdmMonitorWarning, TqdmTypeError as TqdmTypeError, TqdmWarning as TqdmWarning, tqdm as tqdm, trange as trange
def tqdm_notebook(*args, **kwargs):
...
def tnrange(*args, **kwargs):
...
|
normal
|
{
"blob_id": "25b7af2a8036f35a0bca665867d1729b7c9c113c",
"index": 5846,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef tnrange(*args, **kwargs):\n ...\n",
"step-3": "<mask token>\n\n\ndef tqdm_notebook(*args, **kwargs):\n ...\n\n\ndef tnrange(*args, **kwargs):\n ...\n",
"step-4": "from ._monitor import TMonitor as TMonitor, TqdmSynchronisationWarning as TqdmSynchronisationWarning\nfrom ._tqdm_pandas import tqdm_pandas as tqdm_pandas\nfrom .cli import main as main\nfrom .gui import tqdm as tqdm_gui, trange as tgrange\nfrom .std import TqdmDeprecationWarning as TqdmDeprecationWarning, TqdmExperimentalWarning as TqdmExperimentalWarning, TqdmKeyError as TqdmKeyError, TqdmMonitorWarning as TqdmMonitorWarning, TqdmTypeError as TqdmTypeError, TqdmWarning as TqdmWarning, tqdm as tqdm, trange as trange\n\n\ndef tqdm_notebook(*args, **kwargs):\n ...\n\n\ndef tnrange(*args, **kwargs):\n ...\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
urlpatterns = [path('', HomeView.as_view(), name='HomeView'), path(
'LoginView/', LoginView.as_view(), name='LoginView'), path(
'SignUpView/', SignUpView.as_view(), name='SignUpView'), path(
'SettingsView/', SettingsView.as_view(), name='SettingsView'), path(
'LogoutView/', LogoutView.as_view(), name='LogoutView'), path(
'social_auth/', include('social_django.urls', namespace='social')),
path('users_list/', views.users_list, name='users_list'), path(
'CreatePostView/', CreatePostView.as_view(), name='CreatePostView'),
path('like/<int:id>/', views.like, name='like'), path(
'CommentPostView/<int:id>/', CommentPostView.as_view(), name=
'CommentPostView'), path('follow/<int:id>/', views.follow, name=
'follow'), path('followback/<int:id>/', views.followback, name=
'followback'), path('delete_request/<int:id>/', views.delete_request,
name='delete_request'), path('unfriend/<int:id>/', views.unfriend, name
='unfriend'), path('friendslist/<int:id>/', views.friendslist, name=
'friendslist'), path('PasswordChangeView/', PasswordChangeView.as_view(
), name='PasswordChangeView'), path('DetailsChangeView/',
DetailsChangeView.as_view(), name='DetailsChangeView'), path(
'user_profile_view/<int:id>/', views.user_profile_view, name=
'user_profile_view'), path('start_chat/<int:id>/', views.start_chat,
name='start_chat'), path('search_function/', views.search_function,
name='search_function')]
<|reserved_special_token_1|>
from django.urls import path, include
from . import views
from user.views import DetailsChangeView, HomeView, PasswordChangeView, SignUpView, LoginView, SettingsView, LogoutView, CreatePostView, CommentPostView, PasswordChangeView
urlpatterns = [path('', HomeView.as_view(), name='HomeView'), path(
'LoginView/', LoginView.as_view(), name='LoginView'), path(
'SignUpView/', SignUpView.as_view(), name='SignUpView'), path(
'SettingsView/', SettingsView.as_view(), name='SettingsView'), path(
'LogoutView/', LogoutView.as_view(), name='LogoutView'), path(
'social_auth/', include('social_django.urls', namespace='social')),
path('users_list/', views.users_list, name='users_list'), path(
'CreatePostView/', CreatePostView.as_view(), name='CreatePostView'),
path('like/<int:id>/', views.like, name='like'), path(
'CommentPostView/<int:id>/', CommentPostView.as_view(), name=
'CommentPostView'), path('follow/<int:id>/', views.follow, name=
'follow'), path('followback/<int:id>/', views.followback, name=
'followback'), path('delete_request/<int:id>/', views.delete_request,
name='delete_request'), path('unfriend/<int:id>/', views.unfriend, name
='unfriend'), path('friendslist/<int:id>/', views.friendslist, name=
'friendslist'), path('PasswordChangeView/', PasswordChangeView.as_view(
), name='PasswordChangeView'), path('DetailsChangeView/',
DetailsChangeView.as_view(), name='DetailsChangeView'), path(
'user_profile_view/<int:id>/', views.user_profile_view, name=
'user_profile_view'), path('start_chat/<int:id>/', views.start_chat,
name='start_chat'), path('search_function/', views.search_function,
name='search_function')]
<|reserved_special_token_1|>
from django.urls import path,include
from.import views
from user.views import DetailsChangeView, HomeView, PasswordChangeView,SignUpView,LoginView,SettingsView,LogoutView,CreatePostView,CommentPostView,PasswordChangeView
urlpatterns = [
path('', HomeView.as_view(), name = 'HomeView'),
path('LoginView/', LoginView.as_view(), name = 'LoginView'),
path('SignUpView/',SignUpView.as_view(), name = 'SignUpView' ),
path('SettingsView/', SettingsView.as_view(), name = 'SettingsView'),
path('LogoutView/', LogoutView.as_view(), name = 'LogoutView'),
path('social_auth/', include('social_django.urls', namespace = 'social')),
path('users_list/', views.users_list, name = 'users_list'),
path('CreatePostView/', CreatePostView.as_view(), name = 'CreatePostView'),
path('like/<int:id>/', views.like , name = 'like'),
path('CommentPostView/<int:id>/', CommentPostView.as_view(), name = 'CommentPostView'),
path('follow/<int:id>/', views.follow , name = 'follow'),
path('followback/<int:id>/', views.followback, name = 'followback'),
path('delete_request/<int:id>/',views.delete_request, name = 'delete_request'),
path('unfriend/<int:id>/', views.unfriend, name = 'unfriend'),
path('friendslist/<int:id>/',views.friendslist, name = 'friendslist'),
# path('FollowListView/<int:id>/',FollowListView.as_view(), name = 'FollowListView')
path('PasswordChangeView/', PasswordChangeView.as_view(), name = 'PasswordChangeView'),
path('DetailsChangeView/', DetailsChangeView.as_view(), name= 'DetailsChangeView'),
path('user_profile_view/<int:id>/',views.user_profile_view, name = 'user_profile_view'),
path('start_chat/<int:id>/', views.start_chat, name= 'start_chat'),
path('search_function/', views.search_function, name='search_function')
]
|
flexible
|
{
"blob_id": "5bd8cee2595215fda6ab523a646cf918e3d84a50",
"index": 937,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nurlpatterns = [path('', HomeView.as_view(), name='HomeView'), path(\n 'LoginView/', LoginView.as_view(), name='LoginView'), path(\n 'SignUpView/', SignUpView.as_view(), name='SignUpView'), path(\n 'SettingsView/', SettingsView.as_view(), name='SettingsView'), path(\n 'LogoutView/', LogoutView.as_view(), name='LogoutView'), path(\n 'social_auth/', include('social_django.urls', namespace='social')),\n path('users_list/', views.users_list, name='users_list'), path(\n 'CreatePostView/', CreatePostView.as_view(), name='CreatePostView'),\n path('like/<int:id>/', views.like, name='like'), path(\n 'CommentPostView/<int:id>/', CommentPostView.as_view(), name=\n 'CommentPostView'), path('follow/<int:id>/', views.follow, name=\n 'follow'), path('followback/<int:id>/', views.followback, name=\n 'followback'), path('delete_request/<int:id>/', views.delete_request,\n name='delete_request'), path('unfriend/<int:id>/', views.unfriend, name\n ='unfriend'), path('friendslist/<int:id>/', views.friendslist, name=\n 'friendslist'), path('PasswordChangeView/', PasswordChangeView.as_view(\n ), name='PasswordChangeView'), path('DetailsChangeView/',\n DetailsChangeView.as_view(), name='DetailsChangeView'), path(\n 'user_profile_view/<int:id>/', views.user_profile_view, name=\n 'user_profile_view'), path('start_chat/<int:id>/', views.start_chat,\n name='start_chat'), path('search_function/', views.search_function,\n name='search_function')]\n",
"step-3": "from django.urls import path, include\nfrom . import views\nfrom user.views import DetailsChangeView, HomeView, PasswordChangeView, SignUpView, LoginView, SettingsView, LogoutView, CreatePostView, CommentPostView, PasswordChangeView\nurlpatterns = [path('', HomeView.as_view(), name='HomeView'), path(\n 'LoginView/', LoginView.as_view(), name='LoginView'), path(\n 'SignUpView/', SignUpView.as_view(), name='SignUpView'), path(\n 'SettingsView/', SettingsView.as_view(), name='SettingsView'), path(\n 'LogoutView/', LogoutView.as_view(), name='LogoutView'), path(\n 'social_auth/', include('social_django.urls', namespace='social')),\n path('users_list/', views.users_list, name='users_list'), path(\n 'CreatePostView/', CreatePostView.as_view(), name='CreatePostView'),\n path('like/<int:id>/', views.like, name='like'), path(\n 'CommentPostView/<int:id>/', CommentPostView.as_view(), name=\n 'CommentPostView'), path('follow/<int:id>/', views.follow, name=\n 'follow'), path('followback/<int:id>/', views.followback, name=\n 'followback'), path('delete_request/<int:id>/', views.delete_request,\n name='delete_request'), path('unfriend/<int:id>/', views.unfriend, name\n ='unfriend'), path('friendslist/<int:id>/', views.friendslist, name=\n 'friendslist'), path('PasswordChangeView/', PasswordChangeView.as_view(\n ), name='PasswordChangeView'), path('DetailsChangeView/',\n DetailsChangeView.as_view(), name='DetailsChangeView'), path(\n 'user_profile_view/<int:id>/', views.user_profile_view, name=\n 'user_profile_view'), path('start_chat/<int:id>/', views.start_chat,\n name='start_chat'), path('search_function/', views.search_function,\n name='search_function')]\n",
"step-4": "from django.urls import path,include\nfrom.import views\nfrom user.views import DetailsChangeView, HomeView, PasswordChangeView,SignUpView,LoginView,SettingsView,LogoutView,CreatePostView,CommentPostView,PasswordChangeView\n\nurlpatterns = [\n path('', HomeView.as_view(), name = 'HomeView'),\n path('LoginView/', LoginView.as_view(), name = 'LoginView'),\n path('SignUpView/',SignUpView.as_view(), name = 'SignUpView' ),\n path('SettingsView/', SettingsView.as_view(), name = 'SettingsView'),\n path('LogoutView/', LogoutView.as_view(), name = 'LogoutView'),\n path('social_auth/', include('social_django.urls', namespace = 'social')),\n path('users_list/', views.users_list, name = 'users_list'),\n path('CreatePostView/', CreatePostView.as_view(), name = 'CreatePostView'),\n path('like/<int:id>/', views.like , name = 'like'),\n path('CommentPostView/<int:id>/', CommentPostView.as_view(), name = 'CommentPostView'),\n path('follow/<int:id>/', views.follow , name = 'follow'),\n path('followback/<int:id>/', views.followback, name = 'followback'),\n path('delete_request/<int:id>/',views.delete_request, name = 'delete_request'),\n path('unfriend/<int:id>/', views.unfriend, name = 'unfriend'),\n path('friendslist/<int:id>/',views.friendslist, name = 'friendslist'),\n # path('FollowListView/<int:id>/',FollowListView.as_view(), name = 'FollowListView')\n path('PasswordChangeView/', PasswordChangeView.as_view(), name = 'PasswordChangeView'),\n path('DetailsChangeView/', DetailsChangeView.as_view(), name= 'DetailsChangeView'),\n path('user_profile_view/<int:id>/',views.user_profile_view, name = 'user_profile_view'),\n path('start_chat/<int:id>/', views.start_chat, name= 'start_chat'),\n path('search_function/', views.search_function, name='search_function')\n \n \n]",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
import json
from asgiref.sync import async_to_sync
from daphne_API.diversifier import activate_diversifier
from daphne_API.models import Design
def send_archs_back(channel_layer, channel_name, archs):
async_to_sync(channel_layer.send)(channel_name,
{
'type': 'ga.new_archs',
'archs': archs
})
def send_archs_from_queue_to_main_dataset(context):
background_queue_qs = Design.objects.filter(activecontext_id__exact=context.eosscontext.activecontext.id)
arch_list = []
for design in background_queue_qs.all():
design.activecontext = None
design.eosscontext = context.eosscontext
design.save()
context.eosscontext.added_archs_count += 1
context.eosscontext.save()
arch_list.append({
'id': design.id,
'inputs': json.loads(design.inputs),
'outputs': json.loads(design.outputs),
})
if context.eosscontext.added_archs_count >= 5:
context.eosscontext.added_archs_count = 0
context.eosscontext.save()
activate_diversifier(context.eosscontext)
return arch_list
|
normal
|
{
"blob_id": "564c613491b0d1797b216a0bd425690e9fae12bc",
"index": 7725,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef send_archs_from_queue_to_main_dataset(context):\n background_queue_qs = Design.objects.filter(activecontext_id__exact=\n context.eosscontext.activecontext.id)\n arch_list = []\n for design in background_queue_qs.all():\n design.activecontext = None\n design.eosscontext = context.eosscontext\n design.save()\n context.eosscontext.added_archs_count += 1\n context.eosscontext.save()\n arch_list.append({'id': design.id, 'inputs': json.loads(design.\n inputs), 'outputs': json.loads(design.outputs)})\n if context.eosscontext.added_archs_count >= 5:\n context.eosscontext.added_archs_count = 0\n context.eosscontext.save()\n activate_diversifier(context.eosscontext)\n return arch_list\n",
"step-3": "<mask token>\n\n\ndef send_archs_back(channel_layer, channel_name, archs):\n async_to_sync(channel_layer.send)(channel_name, {'type': 'ga.new_archs',\n 'archs': archs})\n\n\ndef send_archs_from_queue_to_main_dataset(context):\n background_queue_qs = Design.objects.filter(activecontext_id__exact=\n context.eosscontext.activecontext.id)\n arch_list = []\n for design in background_queue_qs.all():\n design.activecontext = None\n design.eosscontext = context.eosscontext\n design.save()\n context.eosscontext.added_archs_count += 1\n context.eosscontext.save()\n arch_list.append({'id': design.id, 'inputs': json.loads(design.\n inputs), 'outputs': json.loads(design.outputs)})\n if context.eosscontext.added_archs_count >= 5:\n context.eosscontext.added_archs_count = 0\n context.eosscontext.save()\n activate_diversifier(context.eosscontext)\n return arch_list\n",
"step-4": "import json\nfrom asgiref.sync import async_to_sync\nfrom daphne_API.diversifier import activate_diversifier\nfrom daphne_API.models import Design\n\n\ndef send_archs_back(channel_layer, channel_name, archs):\n async_to_sync(channel_layer.send)(channel_name, {'type': 'ga.new_archs',\n 'archs': archs})\n\n\ndef send_archs_from_queue_to_main_dataset(context):\n background_queue_qs = Design.objects.filter(activecontext_id__exact=\n context.eosscontext.activecontext.id)\n arch_list = []\n for design in background_queue_qs.all():\n design.activecontext = None\n design.eosscontext = context.eosscontext\n design.save()\n context.eosscontext.added_archs_count += 1\n context.eosscontext.save()\n arch_list.append({'id': design.id, 'inputs': json.loads(design.\n inputs), 'outputs': json.loads(design.outputs)})\n if context.eosscontext.added_archs_count >= 5:\n context.eosscontext.added_archs_count = 0\n context.eosscontext.save()\n activate_diversifier(context.eosscontext)\n return arch_list\n",
"step-5": "import json\n\nfrom asgiref.sync import async_to_sync\n\nfrom daphne_API.diversifier import activate_diversifier\nfrom daphne_API.models import Design\n\n\ndef send_archs_back(channel_layer, channel_name, archs):\n async_to_sync(channel_layer.send)(channel_name,\n {\n 'type': 'ga.new_archs',\n 'archs': archs\n })\n\n\ndef send_archs_from_queue_to_main_dataset(context):\n background_queue_qs = Design.objects.filter(activecontext_id__exact=context.eosscontext.activecontext.id)\n arch_list = []\n for design in background_queue_qs.all():\n design.activecontext = None\n design.eosscontext = context.eosscontext\n design.save()\n context.eosscontext.added_archs_count += 1\n context.eosscontext.save()\n arch_list.append({\n 'id': design.id,\n 'inputs': json.loads(design.inputs),\n 'outputs': json.loads(design.outputs),\n })\n\n if context.eosscontext.added_archs_count >= 5:\n context.eosscontext.added_archs_count = 0\n context.eosscontext.save()\n activate_diversifier(context.eosscontext)\n\n return arch_list",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
import sys
import pytest
from presidio_evaluator.evaluation import Evaluator
from tests.conftest import assert_model_results_gt
from presidio_evaluator.models.flair_model import FlairModel
@pytest.mark.slow
@pytest.mark.skipif("flair" not in sys.modules, reason="requires the Flair library")
def test_flair_simple(small_dataset):
flair_model = FlairModel(model_path="ner", entities_to_keep=["PERSON"])
evaluator = Evaluator(model=flair_model)
evaluation_results = evaluator.evaluate_all(small_dataset)
scores = evaluator.calculate_score(evaluation_results)
assert_model_results_gt(scores, "PERSON", 0)
|
normal
|
{
"blob_id": "813d27e8f9c1a416dab2f891dd71e4791bb92dbb",
"index": 1040,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\[email protected]\[email protected]('flair' not in sys.modules, reason=\n 'requires the Flair library')\ndef test_flair_simple(small_dataset):\n flair_model = FlairModel(model_path='ner', entities_to_keep=['PERSON'])\n evaluator = Evaluator(model=flair_model)\n evaluation_results = evaluator.evaluate_all(small_dataset)\n scores = evaluator.calculate_score(evaluation_results)\n assert_model_results_gt(scores, 'PERSON', 0)\n",
"step-3": "import sys\nimport pytest\nfrom presidio_evaluator.evaluation import Evaluator\nfrom tests.conftest import assert_model_results_gt\nfrom presidio_evaluator.models.flair_model import FlairModel\n\n\[email protected]\[email protected]('flair' not in sys.modules, reason=\n 'requires the Flair library')\ndef test_flair_simple(small_dataset):\n flair_model = FlairModel(model_path='ner', entities_to_keep=['PERSON'])\n evaluator = Evaluator(model=flair_model)\n evaluation_results = evaluator.evaluate_all(small_dataset)\n scores = evaluator.calculate_score(evaluation_results)\n assert_model_results_gt(scores, 'PERSON', 0)\n",
"step-4": "import sys\n\nimport pytest\n\nfrom presidio_evaluator.evaluation import Evaluator\nfrom tests.conftest import assert_model_results_gt\nfrom presidio_evaluator.models.flair_model import FlairModel\n\n\[email protected]\[email protected](\"flair\" not in sys.modules, reason=\"requires the Flair library\")\ndef test_flair_simple(small_dataset):\n\n flair_model = FlairModel(model_path=\"ner\", entities_to_keep=[\"PERSON\"])\n evaluator = Evaluator(model=flair_model)\n evaluation_results = evaluator.evaluate_all(small_dataset)\n scores = evaluator.calculate_score(evaluation_results)\n\n assert_model_results_gt(scores, \"PERSON\", 0)\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
import cv2 as cv
def nothing(x):
pass
cv.namedWindow('Binary')
cv.createTrackbar('threshold', 'Binary', 0, 255, nothing)
cv.setTrackbarPos('threshold', 'Binary', 127)
img_color = cv.imread('../sample/ball.png', cv.IMREAD_COLOR)
img_gray = cv.cvtColor(img_color, cv.COLOR_BGR2GRAY)
while(True):
thre = cv.getTrackbarPos('threshold', 'Binary')
    # THRESH_BINARY_INV : inverts the binarization result
ret, img_binary = cv.threshold(img_gray, thre, 255, cv.THRESH_BINARY_INV)
img_result = cv.bitwise_and(img_color, img_color, mask=img_binary)
cv.imshow('Result', img_result)
cv.imshow('Binary', img_binary)
if cv.waitKey(1) == 27:
break
cv.destroyAllWindows()
|
normal
|
{
"blob_id": "034d4027ea98bca656178b66c5c6e6e8b13e4b9e",
"index": 4219,
"step-1": "<mask token>\n\n\ndef nothing(x):\n pass\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef nothing(x):\n pass\n\n\ncv.namedWindow('Binary')\ncv.createTrackbar('threshold', 'Binary', 0, 255, nothing)\ncv.setTrackbarPos('threshold', 'Binary', 127)\n<mask token>\nwhile True:\n thre = cv.getTrackbarPos('threshold', 'Binary')\n ret, img_binary = cv.threshold(img_gray, thre, 255, cv.THRESH_BINARY_INV)\n img_result = cv.bitwise_and(img_color, img_color, mask=img_binary)\n cv.imshow('Result', img_result)\n cv.imshow('Binary', img_binary)\n if cv.waitKey(1) == 27:\n break\ncv.destroyAllWindows()\n",
"step-3": "<mask token>\n\n\ndef nothing(x):\n pass\n\n\ncv.namedWindow('Binary')\ncv.createTrackbar('threshold', 'Binary', 0, 255, nothing)\ncv.setTrackbarPos('threshold', 'Binary', 127)\nimg_color = cv.imread('../sample/ball.png', cv.IMREAD_COLOR)\nimg_gray = cv.cvtColor(img_color, cv.COLOR_BGR2GRAY)\nwhile True:\n thre = cv.getTrackbarPos('threshold', 'Binary')\n ret, img_binary = cv.threshold(img_gray, thre, 255, cv.THRESH_BINARY_INV)\n img_result = cv.bitwise_and(img_color, img_color, mask=img_binary)\n cv.imshow('Result', img_result)\n cv.imshow('Binary', img_binary)\n if cv.waitKey(1) == 27:\n break\ncv.destroyAllWindows()\n",
"step-4": "import cv2 as cv\n\n\ndef nothing(x):\n pass\n\n\ncv.namedWindow('Binary')\ncv.createTrackbar('threshold', 'Binary', 0, 255, nothing)\ncv.setTrackbarPos('threshold', 'Binary', 127)\nimg_color = cv.imread('../sample/ball.png', cv.IMREAD_COLOR)\nimg_gray = cv.cvtColor(img_color, cv.COLOR_BGR2GRAY)\nwhile True:\n thre = cv.getTrackbarPos('threshold', 'Binary')\n ret, img_binary = cv.threshold(img_gray, thre, 255, cv.THRESH_BINARY_INV)\n img_result = cv.bitwise_and(img_color, img_color, mask=img_binary)\n cv.imshow('Result', img_result)\n cv.imshow('Binary', img_binary)\n if cv.waitKey(1) == 27:\n break\ncv.destroyAllWindows()\n",
"step-5": "import cv2 as cv\n\ndef nothing(x):\n pass\n\n\ncv.namedWindow('Binary')\ncv.createTrackbar('threshold', 'Binary', 0, 255, nothing)\ncv.setTrackbarPos('threshold', 'Binary', 127)\n\nimg_color = cv.imread('../sample/ball.png', cv.IMREAD_COLOR)\nimg_gray = cv.cvtColor(img_color, cv.COLOR_BGR2GRAY)\n\nwhile(True):\n thre = cv.getTrackbarPos('threshold', 'Binary')\n # THRESH_BINARY_INV : 이진화 결과를 반전 시킴\n ret, img_binary = cv.threshold(img_gray, thre, 255, cv.THRESH_BINARY_INV)\n\n img_result = cv.bitwise_and(img_color, img_color, mask=img_binary)\n\n cv.imshow('Result', img_result)\n cv.imshow('Binary', img_binary)\n\n if cv.waitKey(1) == 27:\n break\n\n\ncv.destroyAllWindows()",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
#!/usr/bin/env python
'''
Script for analysis of wavefunctions on GaSb/InAs/GaSb simmetric quantum wells.
This piece code is part of the project "phd_gasb_inas", which comprises the work
related to the Phd. Dissertation named: "Quantum transport of charge and spin in
topological insulators 2D".
Author: Marcos Medeiros
email: [email protected]
'''
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
# Kwant related stuff
import kwant
import tinyarray
# local application imports
from hamiltonians import gasb_hamiltonian as gasb
from system_geometry import shapes
from transport_tools import bands_and_currents as tools
def map_density(ax, syst, psi_sqrd, colormap = "Reds"):
# Plot the results:
# print(max(psi_sqrd))
kwant.plotter.map(syst, psi_sqrd, ax = ax, fig_size = (7,3), cmap = colormap, vmax = 0.99*max(psi_sqrd))
tools.edit_axis(ax,'dens')
return 0
def density_in_line(syst, states, Op = np.eye(6)):
y_stack = []
def line(site):
(x, y) = site.pos
half = 0
delta = shapes.A_STD
ans = abs(x - half) < delta
if ans == True : y_stack.append(y)
return ans
rho_line = kwant.operator.Density(syst, Op, where = line, sum = False)
dos_line = np.array([rho_line(p) for p in states])
return dos_line, np.array(y_stack)
def plot_dos_in_line(dos_line):
fig, ax = plt.subplots(1, 2, figsize = (10,5))
ax[0].plot(dos_line[0], color = 'red')
ax[1].plot(dos_line[1], color = 'blue')
plt.tight_layout()
plt.show()
def normalize(dos_in_line):
# return sum(dos_in_line)
return sum(dos_in_line)/max(sum(dos_in_line))
def print_info_dos_line(y_values, dos_in_line):
print(80*"=")
print("Size of dos_both: ", dos_in_line.shape)
print("Size of y_both: ", y_values.shape)
print("y_both:\n", y_values)
def main():
# Define the system:
hamiltonian = gasb.hamiltonian_97_k_plus()
# hamiltonian = gasb.hamiltonian_97_down()
lead_ham = gasb.free_ham(6)
centralShape = shapes.Rect()
syst = gasb.system_builder(hamiltonian, lead_ham, centralShape)
# Calculate the wave_function:
energia = 448
parametros = gasb.params_97
parametros['Eta3'] = 0
parametros['Eta2'] = 0
parametros['eF'] = 60
parametros = dict(GammaLead = parametros["GammaC"], V = 100, **parametros )
wf = kwant.wave_function(syst, energy = energia, params = parametros)
modes_left = wf(0)
modes_right = wf(1)
# modes_total = np.vstack((wf(0), wf(1)))
# Calculate the density:
sigma_z = tinyarray.array([[1,0],[0,-1]])
spin_proj= np.kron(sigma_z, np.eye(3))
identity = np.eye(6)
rho = kwant.operator.Density(syst, spin_proj)
psi_left = sum(rho(p) for p in modes_left)
psi_right = sum(rho(p) for p in modes_right)
    # Calculate dos in a line (density_in_line returns a (dos, y_values) pair)
    dos_in_line_from_left, y_values_left = density_in_line(syst, modes_left)
    dos_in_line_from_right, y_values_right = density_in_line(syst, modes_right)
    dos_in_line_from_both, y_values_both = density_in_line(syst, np.vstack((wf(0), wf(1))))
plt.plot(sum(dos_in_line_from_both))
plt.show()
# print(dos_in_line.shape)
# print(dos_in_line)
plot_dos_in_line(dos_in_line_from_left)
# plot_dos_in_line(dos_in_line_from_both)
print(sum(dos_in_line_from_both).shape)
# Plot the results:
colorRight = "seismic"
colorLeft = "seismic"
fig, ax = plt.subplots(2,2,figsize=(14,6))
y_values_left = y_values_left * (shapes.A0 / 10) # conversion to nm^{-1}
y_values_right = y_values_right * (shapes.A0 / 10) # conversion to nm^{-1}
min_line, max_line = -0.7 * shapes.L_STD, 0.7 * shapes.L_STD
map_density(ax[0][0], syst, psi_left, colormap = colorRight)
ax[0][0].vlines(0, min_line, max_line, linestyle = "--")
ax[0][0].set_title("left lead")
map_density(ax[1][0], syst, psi_right, colormap = colorLeft)
ax[1][0].vlines(0, min_line, max_line, linestyle = "--")
ax[1][0].set_title("right lead")
ax[0][1].plot(y_values_left, normalize(dos_in_line_from_left),
marker = ".", markersize = 2.5, linestyle = "-" )
ax[1][1].plot(y_values_right, normalize(dos_in_line_from_right),
marker = ".", markersize = 2.5, linestyle = "-" )
plt.tight_layout()
plt.show()
if __name__ == '__main__':
main()
|
normal
|
{
"blob_id": "a012055d11202c68d9eddf5cf2a17043f9bbaf0a",
"index": 6851,
"step-1": "<mask token>\n\n\ndef map_density(ax, syst, psi_sqrd, colormap='Reds'):\n kwant.plotter.map(syst, psi_sqrd, ax=ax, fig_size=(7, 3), cmap=colormap,\n vmax=0.99 * max(psi_sqrd))\n tools.edit_axis(ax, 'dens')\n return 0\n\n\ndef density_in_line(syst, states, Op=np.eye(6)):\n y_stack = []\n\n def line(site):\n x, y = site.pos\n half = 0\n delta = shapes.A_STD\n ans = abs(x - half) < delta\n if ans == True:\n y_stack.append(y)\n return ans\n rho_line = kwant.operator.Density(syst, Op, where=line, sum=False)\n dos_line = np.array([rho_line(p) for p in states])\n return dos_line, np.array(y_stack)\n\n\ndef plot_dos_in_line(dos_line):\n fig, ax = plt.subplots(1, 2, figsize=(10, 5))\n ax[0].plot(dos_line[0], color='red')\n ax[1].plot(dos_line[1], color='blue')\n plt.tight_layout()\n plt.show()\n\n\n<mask token>\n\n\ndef print_info_dos_line(y_values, dos_in_line):\n print(80 * '=')\n print('Size of dos_both: ', dos_in_line.shape)\n print('Size of y_both: ', y_values.shape)\n print('y_both:\\n', y_values)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef map_density(ax, syst, psi_sqrd, colormap='Reds'):\n kwant.plotter.map(syst, psi_sqrd, ax=ax, fig_size=(7, 3), cmap=colormap,\n vmax=0.99 * max(psi_sqrd))\n tools.edit_axis(ax, 'dens')\n return 0\n\n\ndef density_in_line(syst, states, Op=np.eye(6)):\n y_stack = []\n\n def line(site):\n x, y = site.pos\n half = 0\n delta = shapes.A_STD\n ans = abs(x - half) < delta\n if ans == True:\n y_stack.append(y)\n return ans\n rho_line = kwant.operator.Density(syst, Op, where=line, sum=False)\n dos_line = np.array([rho_line(p) for p in states])\n return dos_line, np.array(y_stack)\n\n\ndef plot_dos_in_line(dos_line):\n fig, ax = plt.subplots(1, 2, figsize=(10, 5))\n ax[0].plot(dos_line[0], color='red')\n ax[1].plot(dos_line[1], color='blue')\n plt.tight_layout()\n plt.show()\n\n\n<mask token>\n\n\ndef print_info_dos_line(y_values, dos_in_line):\n print(80 * '=')\n print('Size of dos_both: ', dos_in_line.shape)\n print('Size of y_both: ', y_values.shape)\n print('y_both:\\n', y_values)\n\n\ndef main():\n hamiltonian = gasb.hamiltonian_97_k_plus()\n lead_ham = gasb.free_ham(6)\n centralShape = shapes.Rect()\n syst = gasb.system_builder(hamiltonian, lead_ham, centralShape)\n energia = 448\n parametros = gasb.params_97\n parametros['Eta3'] = 0\n parametros['Eta2'] = 0\n parametros['eF'] = 60\n parametros = dict(GammaLead=parametros['GammaC'], V=100, **parametros)\n wf = kwant.wave_function(syst, energy=energia, params=parametros)\n modes_left = wf(0)\n modes_right = wf(1)\n sigma_z = tinyarray.array([[1, 0], [0, -1]])\n spin_proj = np.kron(sigma_z, np.eye(3))\n identity = np.eye(6)\n rho = kwant.operator.Density(syst, spin_proj)\n psi_left = sum(rho(p) for p in modes_left)\n psi_right = sum(rho(p) for p in modes_right)\n dos_in_line_from_left = density_in_line(syst, psi)\n dos_in_line_from_both = density_in_line(syst, np.vstack((wf(0), wf(1))))\n plt.plot(sum(dos_in_line_from_both))\n plt.show()\n plot_dos_in_line(dos_in_line_from_left)\n print(sum(dos_in_line_from_both).shape)\n colorRight = 'seismic'\n colorLeft = 'seismic'\n fig, ax = plt.subplots(2, 2, figsize=(14, 6))\n y_values_left = y_values_left * (shapes.A0 / 10)\n y_values_right = y_values_right * (shapes.A0 / 10)\n min_line, max_line = -0.7 * shapes.L_STD, 0.7 * shapes.L_STD\n map_density(ax[0][0], syst, psi_left, colormap=colorRight)\n ax[0][0].vlines(0, min_line, max_line, linestyle='--')\n ax[0][0].set_title('left lead')\n map_density(ax[1][0], syst, psi_right, colormap=colorLeft)\n ax[1][0].vlines(0, min_line, max_line, linestyle='--')\n ax[1][0].set_title('right lead')\n ax[0][1].plot(y_values_left, normalize(dos_in_line_from_left), marker=\n '.', markersize=2.5, linestyle='-')\n ax[1][1].plot(y_values_right, normalize(dos_in_line_from_right), marker\n ='.', markersize=2.5, linestyle='-')\n plt.tight_layout()\n plt.show()\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef map_density(ax, syst, psi_sqrd, colormap='Reds'):\n kwant.plotter.map(syst, psi_sqrd, ax=ax, fig_size=(7, 3), cmap=colormap,\n vmax=0.99 * max(psi_sqrd))\n tools.edit_axis(ax, 'dens')\n return 0\n\n\ndef density_in_line(syst, states, Op=np.eye(6)):\n y_stack = []\n\n def line(site):\n x, y = site.pos\n half = 0\n delta = shapes.A_STD\n ans = abs(x - half) < delta\n if ans == True:\n y_stack.append(y)\n return ans\n rho_line = kwant.operator.Density(syst, Op, where=line, sum=False)\n dos_line = np.array([rho_line(p) for p in states])\n return dos_line, np.array(y_stack)\n\n\ndef plot_dos_in_line(dos_line):\n fig, ax = plt.subplots(1, 2, figsize=(10, 5))\n ax[0].plot(dos_line[0], color='red')\n ax[1].plot(dos_line[1], color='blue')\n plt.tight_layout()\n plt.show()\n\n\ndef normalize(dos_in_line):\n return sum(dos_in_line) / max(sum(dos_in_line))\n\n\ndef print_info_dos_line(y_values, dos_in_line):\n print(80 * '=')\n print('Size of dos_both: ', dos_in_line.shape)\n print('Size of y_both: ', y_values.shape)\n print('y_both:\\n', y_values)\n\n\ndef main():\n hamiltonian = gasb.hamiltonian_97_k_plus()\n lead_ham = gasb.free_ham(6)\n centralShape = shapes.Rect()\n syst = gasb.system_builder(hamiltonian, lead_ham, centralShape)\n energia = 448\n parametros = gasb.params_97\n parametros['Eta3'] = 0\n parametros['Eta2'] = 0\n parametros['eF'] = 60\n parametros = dict(GammaLead=parametros['GammaC'], V=100, **parametros)\n wf = kwant.wave_function(syst, energy=energia, params=parametros)\n modes_left = wf(0)\n modes_right = wf(1)\n sigma_z = tinyarray.array([[1, 0], [0, -1]])\n spin_proj = np.kron(sigma_z, np.eye(3))\n identity = np.eye(6)\n rho = kwant.operator.Density(syst, spin_proj)\n psi_left = sum(rho(p) for p in modes_left)\n psi_right = sum(rho(p) for p in modes_right)\n dos_in_line_from_left = density_in_line(syst, psi)\n dos_in_line_from_both = density_in_line(syst, np.vstack((wf(0), wf(1))))\n plt.plot(sum(dos_in_line_from_both))\n plt.show()\n plot_dos_in_line(dos_in_line_from_left)\n print(sum(dos_in_line_from_both).shape)\n colorRight = 'seismic'\n colorLeft = 'seismic'\n fig, ax = plt.subplots(2, 2, figsize=(14, 6))\n y_values_left = y_values_left * (shapes.A0 / 10)\n y_values_right = y_values_right * (shapes.A0 / 10)\n min_line, max_line = -0.7 * shapes.L_STD, 0.7 * shapes.L_STD\n map_density(ax[0][0], syst, psi_left, colormap=colorRight)\n ax[0][0].vlines(0, min_line, max_line, linestyle='--')\n ax[0][0].set_title('left lead')\n map_density(ax[1][0], syst, psi_right, colormap=colorLeft)\n ax[1][0].vlines(0, min_line, max_line, linestyle='--')\n ax[1][0].set_title('right lead')\n ax[0][1].plot(y_values_left, normalize(dos_in_line_from_left), marker=\n '.', markersize=2.5, linestyle='-')\n ax[1][1].plot(y_values_right, normalize(dos_in_line_from_right), marker\n ='.', markersize=2.5, linestyle='-')\n plt.tight_layout()\n plt.show()\n\n\nif __name__ == '__main__':\n main()\n",
"step-4": "<mask token>\nimport numpy as np\nimport matplotlib\nimport matplotlib.pyplot as plt\nimport kwant\nimport tinyarray\nfrom hamiltonians import gasb_hamiltonian as gasb\nfrom system_geometry import shapes\nfrom transport_tools import bands_and_currents as tools\n\n\ndef map_density(ax, syst, psi_sqrd, colormap='Reds'):\n kwant.plotter.map(syst, psi_sqrd, ax=ax, fig_size=(7, 3), cmap=colormap,\n vmax=0.99 * max(psi_sqrd))\n tools.edit_axis(ax, 'dens')\n return 0\n\n\ndef density_in_line(syst, states, Op=np.eye(6)):\n y_stack = []\n\n def line(site):\n x, y = site.pos\n half = 0\n delta = shapes.A_STD\n ans = abs(x - half) < delta\n if ans == True:\n y_stack.append(y)\n return ans\n rho_line = kwant.operator.Density(syst, Op, where=line, sum=False)\n dos_line = np.array([rho_line(p) for p in states])\n return dos_line, np.array(y_stack)\n\n\ndef plot_dos_in_line(dos_line):\n fig, ax = plt.subplots(1, 2, figsize=(10, 5))\n ax[0].plot(dos_line[0], color='red')\n ax[1].plot(dos_line[1], color='blue')\n plt.tight_layout()\n plt.show()\n\n\ndef normalize(dos_in_line):\n return sum(dos_in_line) / max(sum(dos_in_line))\n\n\ndef print_info_dos_line(y_values, dos_in_line):\n print(80 * '=')\n print('Size of dos_both: ', dos_in_line.shape)\n print('Size of y_both: ', y_values.shape)\n print('y_both:\\n', y_values)\n\n\ndef main():\n hamiltonian = gasb.hamiltonian_97_k_plus()\n lead_ham = gasb.free_ham(6)\n centralShape = shapes.Rect()\n syst = gasb.system_builder(hamiltonian, lead_ham, centralShape)\n energia = 448\n parametros = gasb.params_97\n parametros['Eta3'] = 0\n parametros['Eta2'] = 0\n parametros['eF'] = 60\n parametros = dict(GammaLead=parametros['GammaC'], V=100, **parametros)\n wf = kwant.wave_function(syst, energy=energia, params=parametros)\n modes_left = wf(0)\n modes_right = wf(1)\n sigma_z = tinyarray.array([[1, 0], [0, -1]])\n spin_proj = np.kron(sigma_z, np.eye(3))\n identity = np.eye(6)\n rho = kwant.operator.Density(syst, spin_proj)\n psi_left = sum(rho(p) for p in modes_left)\n psi_right = sum(rho(p) for p in modes_right)\n dos_in_line_from_left = density_in_line(syst, psi)\n dos_in_line_from_both = density_in_line(syst, np.vstack((wf(0), wf(1))))\n plt.plot(sum(dos_in_line_from_both))\n plt.show()\n plot_dos_in_line(dos_in_line_from_left)\n print(sum(dos_in_line_from_both).shape)\n colorRight = 'seismic'\n colorLeft = 'seismic'\n fig, ax = plt.subplots(2, 2, figsize=(14, 6))\n y_values_left = y_values_left * (shapes.A0 / 10)\n y_values_right = y_values_right * (shapes.A0 / 10)\n min_line, max_line = -0.7 * shapes.L_STD, 0.7 * shapes.L_STD\n map_density(ax[0][0], syst, psi_left, colormap=colorRight)\n ax[0][0].vlines(0, min_line, max_line, linestyle='--')\n ax[0][0].set_title('left lead')\n map_density(ax[1][0], syst, psi_right, colormap=colorLeft)\n ax[1][0].vlines(0, min_line, max_line, linestyle='--')\n ax[1][0].set_title('right lead')\n ax[0][1].plot(y_values_left, normalize(dos_in_line_from_left), marker=\n '.', markersize=2.5, linestyle='-')\n ax[1][1].plot(y_values_right, normalize(dos_in_line_from_right), marker\n ='.', markersize=2.5, linestyle='-')\n plt.tight_layout()\n plt.show()\n\n\nif __name__ == '__main__':\n main()\n",
"step-5": "#!/usr/bin/env python\n'''\nScript for analysis of wavefunctions on GaSb/InAs/GaSb simmetric quantum wells.\n\nThis piece code is part of the project \"phd_gasb_inas\", which comprises the work\nrelated to the Phd. Dissertation named: \"Quantum transport of charge and spin in\ntopological insulators 2D\".\n\nAuthor: Marcos Medeiros\nemail: [email protected]\n'''\n\nimport numpy as np\nimport matplotlib\nimport matplotlib.pyplot as plt\n\n# Kwant related stuff\nimport kwant\nimport tinyarray\n\n# local application imports\nfrom hamiltonians import gasb_hamiltonian as gasb\nfrom system_geometry import shapes\nfrom transport_tools import bands_and_currents as tools\n\n\ndef map_density(ax, syst, psi_sqrd, colormap = \"Reds\"):\n # Plot the results:\n # print(max(psi_sqrd))\n kwant.plotter.map(syst, psi_sqrd, ax = ax, fig_size = (7,3), cmap = colormap, vmax = 0.99*max(psi_sqrd))\n tools.edit_axis(ax,'dens')\n return 0\n\ndef density_in_line(syst, states, Op = np.eye(6)):\n y_stack = []\n def line(site):\n (x, y) = site.pos\n half = 0\n delta = shapes.A_STD\n ans = abs(x - half) < delta\n if ans == True : y_stack.append(y)\n return ans\n\n rho_line = kwant.operator.Density(syst, Op, where = line, sum = False)\n dos_line = np.array([rho_line(p) for p in states])\n return dos_line, np.array(y_stack)\n\ndef plot_dos_in_line(dos_line):\n fig, ax = plt.subplots(1, 2, figsize = (10,5))\n ax[0].plot(dos_line[0], color = 'red')\n ax[1].plot(dos_line[1], color = 'blue')\n plt.tight_layout()\n plt.show()\n\ndef normalize(dos_in_line):\n # return sum(dos_in_line)\n return sum(dos_in_line)/max(sum(dos_in_line))\n\ndef print_info_dos_line(y_values, dos_in_line):\n print(80*\"=\")\n print(\"Size of dos_both: \", dos_in_line.shape)\n print(\"Size of y_both: \", y_values.shape)\n print(\"y_both:\\n\", y_values)\n\n\n\ndef main():\n # Define the system:\n hamiltonian = gasb.hamiltonian_97_k_plus()\n # hamiltonian = gasb.hamiltonian_97_down()\n lead_ham = gasb.free_ham(6)\n centralShape = shapes.Rect()\n syst = gasb.system_builder(hamiltonian, lead_ham, centralShape)\n\n\n # Calculate the wave_function:\n energia = 448\n parametros = gasb.params_97\n parametros['Eta3'] = 0\n parametros['Eta2'] = 0\n parametros['eF'] = 60\n parametros = dict(GammaLead = parametros[\"GammaC\"], V = 100, **parametros )\n wf = kwant.wave_function(syst, energy = energia, params = parametros)\n modes_left = wf(0)\n modes_right = wf(1)\n # modes_total = np.vstack((wf(0), wf(1)))\n\n\n # Calculate the density:\n sigma_z = tinyarray.array([[1,0],[0,-1]])\n spin_proj= np.kron(sigma_z, np.eye(3))\n identity = np.eye(6)\n rho = kwant.operator.Density(syst, spin_proj)\n psi_left = sum(rho(p) for p in modes_left)\n psi_right = sum(rho(p) for p in modes_right)\n\n\n # Calculate dos in a line\n dos_in_line_from_left = density_in_line(syst, psi)\n dos_in_line_from_both = density_in_line(syst, np.vstack((wf(0),wf(1))))\n plt.plot(sum(dos_in_line_from_both))\n plt.show()\n # print(dos_in_line.shape)\n # print(dos_in_line)\n plot_dos_in_line(dos_in_line_from_left)\n # plot_dos_in_line(dos_in_line_from_both)\n print(sum(dos_in_line_from_both).shape)\n\n\n # Plot the results:\n colorRight = \"seismic\"\n colorLeft = \"seismic\"\n fig, ax = plt.subplots(2,2,figsize=(14,6))\n y_values_left = y_values_left * (shapes.A0 / 10) # conversion to nm^{-1}\n y_values_right = y_values_right * (shapes.A0 / 10) # conversion to nm^{-1}\n min_line, max_line = -0.7 * shapes.L_STD, 0.7 * shapes.L_STD\n\n map_density(ax[0][0], syst, psi_left, colormap = 
colorRight)\n ax[0][0].vlines(0, min_line, max_line, linestyle = \"--\")\n ax[0][0].set_title(\"left lead\")\n map_density(ax[1][0], syst, psi_right, colormap = colorLeft)\n ax[1][0].vlines(0, min_line, max_line, linestyle = \"--\")\n ax[1][0].set_title(\"right lead\")\n\n ax[0][1].plot(y_values_left, normalize(dos_in_line_from_left),\n marker = \".\", markersize = 2.5, linestyle = \"-\" )\n ax[1][1].plot(y_values_right, normalize(dos_in_line_from_right),\n marker = \".\", markersize = 2.5, linestyle = \"-\" )\n plt.tight_layout()\n plt.show()\n\nif __name__ == '__main__':\n main()\n",
"step-ids": [
4,
5,
7,
8,
9
]
}
|
[
4,
5,
7,
8,
9
] |