code (string, lengths 13 to 1.2M) | order_type (string, 1 class) | original_example (dict) | step_ids (list, lengths 1 to 5) |
---|---|---|---|
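Each row in this dump pairs a full source file (the `code` column) with an `original_example` dict whose `step-1` through `step-5` fields hold progressively less-masked versions of that file, and whose `step-ids` list names the masking steps present; the same list is mirrored in the top-level `step_ids` column, and `order_type` is always `normal`. The sketch below shows one way a row might be walked in Python. It relies only on the field names visible in the rows (`original_example`, `step-ids`, `step-1`, ...); the file name `rows.jsonl` and the JSON-lines serialization are illustrative assumptions, not something stated by the dump.

```python
import json


def masked_steps(row: dict) -> list[tuple[int, str]]:
    """Return (step_id, source) pairs for every non-null masking step of one row."""
    example = row["original_example"]
    steps = []
    # step-ids align positionally with the non-null step-N fields (null steps come last)
    for position, step_id in enumerate(example["step-ids"], start=1):
        source = example.get(f"step-{position}")
        if source is not None:  # e.g. "step-5" is null in several rows above
            steps.append((step_id, source))
    return steps


with open("rows.jsonl") as fh:  # assumed file name and serialization format
    row = json.loads(fh.readline())
    for step_id, source in masked_steps(row):
        print(step_id, len(source), "chars")
```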
from collections import deque
from etaprogress.eta import ETA
def test_linear_slope_1():
eta = ETA(100)
eta._timing_data = deque([(10, 10), (20, 20), (30, 30), (40, 40)])
getattr(eta, '_calculate')()
assert 100 == eta.eta_epoch
assert 1.0 == eta.rate
assert 1.0 == eta.rate_unstable
def test_linear_slope_2():
eta = ETA(100)
eta._timing_data = deque([(10, 20), (20, 40), (30, 60), (40, 80)])
getattr(eta, '_calculate')()
assert 50 == eta.eta_epoch
assert 2.0 == eta.rate
assert 2.0 == eta.rate_unstable
def test_linear_transform():
"""Wolfram Alpha:
x is the timestamp. y is the numerator. 120 is the denominator.
linear fit {1.2, 22},{2.4, 58},{3.1, 102},{4.4, 118}
The closer we get to 100%, the more vertical shift/transform is applied to the line.
As we near the end we want the line to get closer to the last point on the graph.
This avoids having 99% with an ETA in the past.
"""
eta = ETA(120)
eta._timing_data = deque([(1.2, 22), (2.4, 58), (3.1, 102), (4.4, 118)])
getattr(eta, '_calculate')()
assert 4.4 < eta.eta_epoch < 4.6
assert 30 < eta.rate < 35
assert 12 < eta.rate_unstable < 13
def test_linear_transform_undefined():
eta = ETA()
eta._timing_data = deque([(1.2, 22), (2.4, 58), (3.1, 102), (4.4, 118)])
getattr(eta, '_calculate')()
assert eta.eta_epoch is None
assert 30 < eta.rate < 35
assert 12 < eta.rate_unstable < 13
|
normal
|
{
"blob_id": "810017cd5814fc20ebcdbdf26a32ea1bcfc88625",
"index": 2164,
"step-1": "<mask token>\n\n\ndef test_linear_slope_2():\n eta = ETA(100)\n eta._timing_data = deque([(10, 20), (20, 40), (30, 60), (40, 80)])\n getattr(eta, '_calculate')()\n assert 50 == eta.eta_epoch\n assert 2.0 == eta.rate\n assert 2.0 == eta.rate_unstable\n\n\ndef test_linear_transform():\n \"\"\"Wolfram Alpha:\n x is the timestamp. y is the numerator. 120 is the denominator.\n linear fit {1.2, 22},{2.4, 58},{3.1, 102},{4.4, 118}\n\n The closer we get to 100%, the more vertical shift/transform is applied to the line.\n As we near the end we want the line to get closer to the last point on the graph.\n This avoids having 99% with an ETA in the past.\n \"\"\"\n eta = ETA(120)\n eta._timing_data = deque([(1.2, 22), (2.4, 58), (3.1, 102), (4.4, 118)])\n getattr(eta, '_calculate')()\n assert 4.4 < eta.eta_epoch < 4.6\n assert 30 < eta.rate < 35\n assert 12 < eta.rate_unstable < 13\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef test_linear_slope_1():\n eta = ETA(100)\n eta._timing_data = deque([(10, 10), (20, 20), (30, 30), (40, 40)])\n getattr(eta, '_calculate')()\n assert 100 == eta.eta_epoch\n assert 1.0 == eta.rate\n assert 1.0 == eta.rate_unstable\n\n\ndef test_linear_slope_2():\n eta = ETA(100)\n eta._timing_data = deque([(10, 20), (20, 40), (30, 60), (40, 80)])\n getattr(eta, '_calculate')()\n assert 50 == eta.eta_epoch\n assert 2.0 == eta.rate\n assert 2.0 == eta.rate_unstable\n\n\ndef test_linear_transform():\n \"\"\"Wolfram Alpha:\n x is the timestamp. y is the numerator. 120 is the denominator.\n linear fit {1.2, 22},{2.4, 58},{3.1, 102},{4.4, 118}\n\n The closer we get to 100%, the more vertical shift/transform is applied to the line.\n As we near the end we want the line to get closer to the last point on the graph.\n This avoids having 99% with an ETA in the past.\n \"\"\"\n eta = ETA(120)\n eta._timing_data = deque([(1.2, 22), (2.4, 58), (3.1, 102), (4.4, 118)])\n getattr(eta, '_calculate')()\n assert 4.4 < eta.eta_epoch < 4.6\n assert 30 < eta.rate < 35\n assert 12 < eta.rate_unstable < 13\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef test_linear_slope_1():\n eta = ETA(100)\n eta._timing_data = deque([(10, 10), (20, 20), (30, 30), (40, 40)])\n getattr(eta, '_calculate')()\n assert 100 == eta.eta_epoch\n assert 1.0 == eta.rate\n assert 1.0 == eta.rate_unstable\n\n\ndef test_linear_slope_2():\n eta = ETA(100)\n eta._timing_data = deque([(10, 20), (20, 40), (30, 60), (40, 80)])\n getattr(eta, '_calculate')()\n assert 50 == eta.eta_epoch\n assert 2.0 == eta.rate\n assert 2.0 == eta.rate_unstable\n\n\ndef test_linear_transform():\n \"\"\"Wolfram Alpha:\n x is the timestamp. y is the numerator. 120 is the denominator.\n linear fit {1.2, 22},{2.4, 58},{3.1, 102},{4.4, 118}\n\n The closer we get to 100%, the more vertical shift/transform is applied to the line.\n As we near the end we want the line to get closer to the last point on the graph.\n This avoids having 99% with an ETA in the past.\n \"\"\"\n eta = ETA(120)\n eta._timing_data = deque([(1.2, 22), (2.4, 58), (3.1, 102), (4.4, 118)])\n getattr(eta, '_calculate')()\n assert 4.4 < eta.eta_epoch < 4.6\n assert 30 < eta.rate < 35\n assert 12 < eta.rate_unstable < 13\n\n\ndef test_linear_transform_undefined():\n eta = ETA()\n eta._timing_data = deque([(1.2, 22), (2.4, 58), (3.1, 102), (4.4, 118)])\n getattr(eta, '_calculate')()\n assert eta.eta_epoch is None\n assert 30 < eta.rate < 35\n assert 12 < eta.rate_unstable < 13\n",
"step-4": "from collections import deque\nfrom etaprogress.eta import ETA\n\n\ndef test_linear_slope_1():\n eta = ETA(100)\n eta._timing_data = deque([(10, 10), (20, 20), (30, 30), (40, 40)])\n getattr(eta, '_calculate')()\n assert 100 == eta.eta_epoch\n assert 1.0 == eta.rate\n assert 1.0 == eta.rate_unstable\n\n\ndef test_linear_slope_2():\n eta = ETA(100)\n eta._timing_data = deque([(10, 20), (20, 40), (30, 60), (40, 80)])\n getattr(eta, '_calculate')()\n assert 50 == eta.eta_epoch\n assert 2.0 == eta.rate\n assert 2.0 == eta.rate_unstable\n\n\ndef test_linear_transform():\n \"\"\"Wolfram Alpha:\n x is the timestamp. y is the numerator. 120 is the denominator.\n linear fit {1.2, 22},{2.4, 58},{3.1, 102},{4.4, 118}\n\n The closer we get to 100%, the more vertical shift/transform is applied to the line.\n As we near the end we want the line to get closer to the last point on the graph.\n This avoids having 99% with an ETA in the past.\n \"\"\"\n eta = ETA(120)\n eta._timing_data = deque([(1.2, 22), (2.4, 58), (3.1, 102), (4.4, 118)])\n getattr(eta, '_calculate')()\n assert 4.4 < eta.eta_epoch < 4.6\n assert 30 < eta.rate < 35\n assert 12 < eta.rate_unstable < 13\n\n\ndef test_linear_transform_undefined():\n eta = ETA()\n eta._timing_data = deque([(1.2, 22), (2.4, 58), (3.1, 102), (4.4, 118)])\n getattr(eta, '_calculate')()\n assert eta.eta_epoch is None\n assert 30 < eta.rate < 35\n assert 12 < eta.rate_unstable < 13\n",
"step-5": null,
"step-ids": [
2,
3,
4,
5
]
}
|
[
2,
3,
4,
5
] |
# -*- coding: utf-8 -*-
# Generated by Django 1.11.8 on 2018-07-21 12:51
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('user', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='history',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('uname', models.CharField(max_length=50, verbose_name='用户名')),
('uword', models.CharField(max_length=50, verbose_name='单词')),
('time', models.DateTimeField(auto_now=True, verbose_name='查询时间')),
('isban', models.BooleanField(default=False, verbose_name='禁用')),
('isdelete', models.BooleanField(default=False, verbose_name='删除')),
],
),
]
|
normal
|
{
"blob_id": "722739086d2777085fdbfdbddef205aaf025580d",
"index": 4291,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass Migration(migrations.Migration):\n <mask token>\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass Migration(migrations.Migration):\n dependencies = [('user', '0001_initial')]\n operations = [migrations.CreateModel(name='history', fields=[('id',\n models.AutoField(auto_created=True, primary_key=True, serialize=\n False, verbose_name='ID')), ('uname', models.CharField(max_length=\n 50, verbose_name='用户名')), ('uword', models.CharField(max_length=50,\n verbose_name='单词')), ('time', models.DateTimeField(auto_now=True,\n verbose_name='查询时间')), ('isban', models.BooleanField(default=False,\n verbose_name='禁用')), ('isdelete', models.BooleanField(default=False,\n verbose_name='删除'))])]\n",
"step-4": "from __future__ import unicode_literals\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n dependencies = [('user', '0001_initial')]\n operations = [migrations.CreateModel(name='history', fields=[('id',\n models.AutoField(auto_created=True, primary_key=True, serialize=\n False, verbose_name='ID')), ('uname', models.CharField(max_length=\n 50, verbose_name='用户名')), ('uword', models.CharField(max_length=50,\n verbose_name='单词')), ('time', models.DateTimeField(auto_now=True,\n verbose_name='查询时间')), ('isban', models.BooleanField(default=False,\n verbose_name='禁用')), ('isdelete', models.BooleanField(default=False,\n verbose_name='删除'))])]\n",
"step-5": "# -*- coding: utf-8 -*-\n# Generated by Django 1.11.8 on 2018-07-21 12:51\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('user', '0001_initial'),\n ]\n\n operations = [\n migrations.CreateModel(\n name='history',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('uname', models.CharField(max_length=50, verbose_name='用户名')),\n ('uword', models.CharField(max_length=50, verbose_name='单词')),\n ('time', models.DateTimeField(auto_now=True, verbose_name='查询时间')),\n ('isban', models.BooleanField(default=False, verbose_name='禁用')),\n ('isdelete', models.BooleanField(default=False, verbose_name='删除')),\n ],\n ),\n ]\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
# content of conftest.py
import pytest
import sys
sys.path.insert(1, '../Generic')
import PQ9Client
def pytest_configure(config):
print("pytest_configure")
def pytest_collection_modifyitems(session, config, items):
print("sono qui", items)
def pytest_ignore_collect(path, config):
print(path)
print("mamma ", config.getoption("--destination"))
return False
def pytest_addoption(parser):
print("Option ")
parser.addoption(
"--destination", action="store", help="subsystem address", dest="destination",
)
@pytest.fixture
def destination(request):
print(request.config.getoption("--html"))
#print(request.config.getoption("kkk"))
return request.config.getoption("--destination")
@pytest.fixture(scope="session") #only 'make' this object once per session.
def pq9_connection():
pq9client = PQ9Client.PQ9Client("localhost","10000")
pq9client.connect()
yield pq9client
pq9client.close()
|
normal
|
{
"blob_id": "ad88685e3f1cd5e0ddb42a5982a05ff8ee7b8111",
"index": 1586,
"step-1": "<mask token>\n\n\ndef pytest_addoption(parser):\n print('Option ')\n parser.addoption('--destination', action='store', help=\n 'subsystem address', dest='destination')\n\n\[email protected]\ndef destination(request):\n print(request.config.getoption('--html'))\n return request.config.getoption('--destination')\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef pytest_collection_modifyitems(session, config, items):\n print('sono qui', items)\n\n\ndef pytest_ignore_collect(path, config):\n print(path)\n print('mamma ', config.getoption('--destination'))\n return False\n\n\ndef pytest_addoption(parser):\n print('Option ')\n parser.addoption('--destination', action='store', help=\n 'subsystem address', dest='destination')\n\n\[email protected]\ndef destination(request):\n print(request.config.getoption('--html'))\n return request.config.getoption('--destination')\n\n\[email protected](scope='session')\ndef pq9_connection():\n pq9client = PQ9Client.PQ9Client('localhost', '10000')\n pq9client.connect()\n yield pq9client\n pq9client.close()\n",
"step-3": "<mask token>\n\n\ndef pytest_configure(config):\n print('pytest_configure')\n\n\ndef pytest_collection_modifyitems(session, config, items):\n print('sono qui', items)\n\n\ndef pytest_ignore_collect(path, config):\n print(path)\n print('mamma ', config.getoption('--destination'))\n return False\n\n\ndef pytest_addoption(parser):\n print('Option ')\n parser.addoption('--destination', action='store', help=\n 'subsystem address', dest='destination')\n\n\[email protected]\ndef destination(request):\n print(request.config.getoption('--html'))\n return request.config.getoption('--destination')\n\n\[email protected](scope='session')\ndef pq9_connection():\n pq9client = PQ9Client.PQ9Client('localhost', '10000')\n pq9client.connect()\n yield pq9client\n pq9client.close()\n",
"step-4": "<mask token>\nsys.path.insert(1, '../Generic')\n<mask token>\n\n\ndef pytest_configure(config):\n print('pytest_configure')\n\n\ndef pytest_collection_modifyitems(session, config, items):\n print('sono qui', items)\n\n\ndef pytest_ignore_collect(path, config):\n print(path)\n print('mamma ', config.getoption('--destination'))\n return False\n\n\ndef pytest_addoption(parser):\n print('Option ')\n parser.addoption('--destination', action='store', help=\n 'subsystem address', dest='destination')\n\n\[email protected]\ndef destination(request):\n print(request.config.getoption('--html'))\n return request.config.getoption('--destination')\n\n\[email protected](scope='session')\ndef pq9_connection():\n pq9client = PQ9Client.PQ9Client('localhost', '10000')\n pq9client.connect()\n yield pq9client\n pq9client.close()\n",
"step-5": "# content of conftest.py\nimport pytest\nimport sys\nsys.path.insert(1, '../Generic')\nimport PQ9Client\n \ndef pytest_configure(config):\n print(\"pytest_configure\")\n \ndef pytest_collection_modifyitems(session, config, items):\n print(\"sono qui\", items)\n \ndef pytest_ignore_collect(path, config):\n print(path)\n print(\"mamma \", config.getoption(\"--destination\"))\n return False \n\ndef pytest_addoption(parser):\n print(\"Option \")\n parser.addoption(\n \"--destination\", action=\"store\", help=\"subsystem address\", dest=\"destination\",\n )\[email protected]\ndef destination(request):\n print(request.config.getoption(\"--html\"))\n #print(request.config.getoption(\"kkk\"))\n return request.config.getoption(\"--destination\")\n\[email protected](scope=\"session\") #only 'make' this object once per session.\ndef pq9_connection():\n pq9client = PQ9Client.PQ9Client(\"localhost\",\"10000\")\n pq9client.connect()\n\n yield pq9client\n pq9client.close()\n",
"step-ids": [
2,
5,
6,
7,
9
]
}
|
[
2,
5,
6,
7,
9
] |
from django.db import models
# Create your models here.
class Glo_EstadoPlan(models.Model):
descripcion_estado = models.CharField(max_length=100)
def __str__(self):
return '{}'.format(self.descripcion_estado)
|
normal
|
{
"blob_id": "b0a51877b59e14eefdd662bac468e8ce12343e6b",
"index": 3885,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass Glo_EstadoPlan(models.Model):\n <mask token>\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass Glo_EstadoPlan(models.Model):\n descripcion_estado = models.CharField(max_length=100)\n\n def __str__(self):\n return '{}'.format(self.descripcion_estado)\n",
"step-4": "from django.db import models\n\n\nclass Glo_EstadoPlan(models.Model):\n descripcion_estado = models.CharField(max_length=100)\n\n def __str__(self):\n return '{}'.format(self.descripcion_estado)\n",
"step-5": "from django.db import models\r\n\r\n# Create your models here.\r\nclass Glo_EstadoPlan(models.Model):\r\n descripcion_estado = models.CharField(max_length=100)\r\n\r\n def __str__(self):\r\n return '{}'.format(self.descripcion_estado)",
"step-ids": [
0,
1,
3,
4,
5
]
}
|
[
0,
1,
3,
4,
5
] |
import knn
datingDataMat,datingLabels = knn.file2matrix('datingTestSet2.txt')
normMat,ranges,minVals = knn.autoNorm(datingDataMat)
print normMat
print ranges
print minVals
|
normal
|
{
"blob_id": "f28222625e28939b34b1b5c21d28dbf9c49c6374",
"index": 8635,
"step-1": "import knn\n\ndatingDataMat,datingLabels = knn.file2matrix('datingTestSet2.txt')\nnormMat,ranges,minVals = knn.autoNorm(datingDataMat)\n\nprint normMat\nprint ranges\nprint minVals",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
from ROOT import *
gSystem.Load("libAnalysis")
import sys
import argparse
parser = argparse.ArgumentParser(description="Python script to process and merge showers.")
group = parser.add_mutually_exclusive_group()
group.add_argument("-v", "--verbose", help="Turn on verbose output",
action="store_true")
group.add_argument("-q", "--quiet", help="Turn off most output",
action="store_true")
parser.add_argument("-s","--source",help="Name of input file")
parser.add_argument("-o","--data-output",help="Output data file, if event is changed")
parser.add_argument("-a","--ana-output",help="Analysis output file")
parser.add_argument("-n", "--num-events",help="Number of events to process")
parser.add_argument("-d","--display",help="Turn on the display to see each view before and after." )
args = parser.parse_args()
if len(sys.argv) == 1:
parser.print_help()
if args.verbose:
print "Verbose mode turned on."
if args.source != None:
print "\tSource file is " + args.source
if args.data_output != None:
print "\tData output file is " + args.data_output
if args.ana_output != None:
print "\tAna output file is " + args.ana_output
if args.source == None:
print "Error: please specificy an input file with -s or --source."
quit()
if args.data_output == None:
args.data_output = "default_event_output.root"
print "No event output file selected. If necessary, output will go to:"
print "\t"+args.data_output
if args.ana_output == None:
args.ana_output = "default_ana_output.root"
print "No ana output file selected. If necessary, output will go to:"
print "\t"+args.ana_output
ana_proc = larlight.ana_processor()
if args.verbose:
ana_proc.set_verbosity(larlight.MSG.DEBUG)
# Not sure what this does
ana_proc.set_io_mode(larlight.storage_manager.BOTH)
# Add the input file. Not sure if the above takes multiple input files yet
ana_proc.add_input_file(args.source)
# ?
larlight.storage_manager.get().set_in_rootdir("scanner")
# set output file
ana_proc.set_output_file(args.data_output)
# Set destination for ana stuff
ana_proc.set_ana_output_file(args.ana_output)
my_merge_alg = larlight.ClusterMergeAlg()
my_merger = larlight.ClusterMerge()
my_merger.set_mergealg(my_merge_alg)
ana_proc.add_process(my_merge_alg)
ana_proc.add_process(my_merger)
c=TCanvas("c","Wire v. Time Cluster Viewer",900,600)
while ana_proc.process_event() and ana_proc.get_process_status() == ana_proc.PROCESSING:
currentview = 0;
print my_merge_alg.GetMergeTree()
for iview in xrange(0,3):
for iclus in xrange(ana_proc.GetClusterGraph_Reco(int(iview),bool(true)).size()):
gstart=ana_proc.GetClusterGraph_Reco(int(iview),bool(true)).at(iclus)
gend =ana_proc.GetClusterGraph_Reco(int(iview),bool(false)).at(iclus)
xmin=ana_proc.GetHisto_Hits(int(iview)).GetXaxis().GetXmin()
xmax=ana_proc.GetHisto_Hits(int(iview)).GetXaxis().GetXmax()
ymin=ana_proc.GetHisto_Hits(int(iview)).GetYaxis().GetXmin()
ymax=ana_proc.GetHisto_Hits(int(iview)).GetYaxis().GetXmax()
gstart.GetXaxis().SetLimits(xmin,xmax)
gend.GetXaxis().SetLimits(xmin,xmax)
gstart.GetYaxis().SetRangeUser(ymin,ymax)
gend.GetYaxis().SetRangeUser(ymin,ymax)
gstart.SetTitle("View: %d, Cluster: %d"%(iview+1,iclus))
gstart.SetMarkerSize(3)
gstart.SetMarkerStyle(30)
gend.SetMarkerSize(3)
gend.SetMarkerStyle(29)
gstart.Draw("ALP")
gend.Draw("LP")
ana_proc.GetHisto_Reco(int(iview)).at(iclus).Draw("same")
leg = TLegend(0.6,0.65,0.88,0.85)
leg.AddEntry(gstart,"Start Point","p")
leg.AddEntry(gend,"End Point","p")
leg.Draw()
c_graph.Update()
print "Drawing cluster %d out of %d for view %d. To look at the next cluster hit enter." % (iclus,ana_proc.GetClusterGraph_Reco(int(iview),bool(true)).size()-1,iview+1)
sys.stdin.readline()
print "Hit Enter to continue to next evt..."
sys.stdin.readline()
#ana_proc.run()
|
normal
|
{
"blob_id": "d57b91bf41f031e3362dabdef8c67a0da04fe577",
"index": 7540,
"step-1": "from ROOT import *\ngSystem.Load(\"libAnalysis\")\nimport sys\n\nimport argparse\n\nparser = argparse.ArgumentParser(description=\"Python script to process and merge showers.\")\ngroup = parser.add_mutually_exclusive_group()\ngroup.add_argument(\"-v\", \"--verbose\", help=\"Turn on verbose output\",\n action=\"store_true\")\ngroup.add_argument(\"-q\", \"--quiet\", help=\"Turn off most output\",\n action=\"store_true\")\nparser.add_argument(\"-s\",\"--source\",help=\"Name of input file\")\nparser.add_argument(\"-o\",\"--data-output\",help=\"Output data file, if event is changed\")\nparser.add_argument(\"-a\",\"--ana-output\",help=\"Analysis output file\")\nparser.add_argument(\"-n\", \"--num-events\",help=\"Number of events to process\")\nparser.add_argument(\"-d\",\"--display\",help=\"Turn on the display to see each view before and after.\" )\nargs = parser.parse_args()\n\nif len(sys.argv) == 1:\n parser.print_help()\n\nif args.verbose:\n print \"Verbose mode turned on.\"\n if args.source != None:\n print \"\\tSource file is \" + args.source\n if args.data_output != None:\n print \"\\tData output file is \" + args.data_output\n if args.ana_output != None:\n print \"\\tAna output file is \" + args.ana_output\n\nif args.source == None:\n print \"Error: please specificy an input file with -s or --source.\"\n quit()\n\nif args.data_output == None:\n args.data_output = \"default_event_output.root\"\n print \"No event output file selected. If necessary, output will go to:\"\n print \"\\t\"+args.data_output\n\nif args.ana_output == None:\n args.ana_output = \"default_ana_output.root\"\n print \"No ana output file selected. If necessary, output will go to:\"\n print \"\\t\"+args.ana_output\n\n\nana_proc = larlight.ana_processor()\n\nif args.verbose:\n ana_proc.set_verbosity(larlight.MSG.DEBUG)\n\n# Not sure what this does\nana_proc.set_io_mode(larlight.storage_manager.BOTH)\n\n# Add the input file. Not sure if the above takes multiple input files yet\nana_proc.add_input_file(args.source)\n\n# ?\nlarlight.storage_manager.get().set_in_rootdir(\"scanner\")\n\n# set output file\nana_proc.set_output_file(args.data_output)\n\n# Set destination for ana stuff\nana_proc.set_ana_output_file(args.ana_output)\n\nmy_merge_alg = larlight.ClusterMergeAlg()\nmy_merger = larlight.ClusterMerge()\n\nmy_merger.set_mergealg(my_merge_alg)\n\nana_proc.add_process(my_merge_alg)\n\nana_proc.add_process(my_merger)\n\nc=TCanvas(\"c\",\"Wire v. 
Time Cluster Viewer\",900,600)\n\n\n\nwhile ana_proc.process_event() and ana_proc.get_process_status() == ana_proc.PROCESSING:\n currentview = 0;\n print my_merge_alg.GetMergeTree()\n for iview in xrange(0,3):\n for iclus in xrange(ana_proc.GetClusterGraph_Reco(int(iview),bool(true)).size()):\n gstart=ana_proc.GetClusterGraph_Reco(int(iview),bool(true)).at(iclus)\n gend =ana_proc.GetClusterGraph_Reco(int(iview),bool(false)).at(iclus)\n xmin=ana_proc.GetHisto_Hits(int(iview)).GetXaxis().GetXmin()\n xmax=ana_proc.GetHisto_Hits(int(iview)).GetXaxis().GetXmax()\n ymin=ana_proc.GetHisto_Hits(int(iview)).GetYaxis().GetXmin()\n ymax=ana_proc.GetHisto_Hits(int(iview)).GetYaxis().GetXmax()\n gstart.GetXaxis().SetLimits(xmin,xmax)\n gend.GetXaxis().SetLimits(xmin,xmax) \n gstart.GetYaxis().SetRangeUser(ymin,ymax)\n gend.GetYaxis().SetRangeUser(ymin,ymax)\n gstart.SetTitle(\"View: %d, Cluster: %d\"%(iview+1,iclus))\n gstart.SetMarkerSize(3)\n gstart.SetMarkerStyle(30)\n gend.SetMarkerSize(3)\n gend.SetMarkerStyle(29)\n gstart.Draw(\"ALP\")\n gend.Draw(\"LP\")\n ana_proc.GetHisto_Reco(int(iview)).at(iclus).Draw(\"same\")\n leg = TLegend(0.6,0.65,0.88,0.85)\n leg.AddEntry(gstart,\"Start Point\",\"p\")\n leg.AddEntry(gend,\"End Point\",\"p\")\n leg.Draw()\n c_graph.Update()\n print \"Drawing cluster %d out of %d for view %d. To look at the next cluster hit enter.\" % (iclus,ana_proc.GetClusterGraph_Reco(int(iview),bool(true)).size()-1,iview+1)\n sys.stdin.readline()\n\n print \"Hit Enter to continue to next evt...\"\n sys.stdin.readline()\n\n#ana_proc.run()\n\n\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
#LIBRERIAS
import cv2
import numpy as np
#FUNCION: recibe una imagen y te devuelve las coordenadas de las caras
def face_detector(img, face_cascade, eye_cascade, face_f):
#variables face_f
xf = face_f[0]
yf = face_f[1]
wf = face_f[2]
hf = face_f[3]
#variables img
xi = 0
yi = 0
wi = img.shape[1]
hi = img.shape[0]
#apertura de face_f con relacion a la img
c = float(0.1) #esto es un 10 %
print("face_f: ", xf, xf + wf, yf, yf + hf)
#roi_i = img[yf: yf + hf, xf: xf + wf]
#cv2.imshow("roi_i", roi_i)
if xf != xi or yf != yi or wf != wi or hf != hi: #(tendre que ver si AND o OR)
#face_f no es igual a img, hace falta la apertura
y1 = yf - round(c * hf)
y2 = yf + hf + round(c * hf)
x1 = xf - round(c * wf)
x2 = xf + wf + round(c * wf)
roi_f = img[y1: y2, x1: x2]
print("Face apertura: ", x1, x2, y1, y2)
cv2.imshow('Face apertura',roi_f)
else:
#face_f es igual a img, no hace falta la apertura
roi_f = img[face_f[1] : face_f[1] + face_f[3], face_f[0] : face_f[0] + face_f[2]]
#cv2.imshow('roi_f',roi_f)
#paso el roi_f a gris para un mejor tratamiento
gray_img = cv2.cvtColor(roi_f,cv2.COLOR_BGR2GRAY)
cv2.imshow("gray_img",gray_img)
#aplicar el clasificador de caras sobre la imagen y guardo el resultado en faces: seran la x, y, height y width
faces = face_cascade.detectMultiScale(gray_img, scaleFactor=1.04, minNeighbors=5)
print("Faces: ", faces)
if type(faces) == np.ndarray:
flag = -1
for x,y,w,h in faces:
flag = flag + 1
#print("Face: ", x,y,w,h)
if w >= 100 and w <= 125 and h >= 100 and h <= 125:
print("Entro en el if de tamaño")
#Region Of Interest
print("Face: ", x,y,w,h)
roi_gray = gray_img[y:y+h, x:x+w]
cv2.imshow("roi_gray", roi_gray)
#aplico el clasificador de ojos sobre la imagen de interes que se supone que es una cara y guardo el resultado en eyes
eyes = eye_cascade.detectMultiScale(roi_gray)
c_eyes = 0
for ex,ey,ew,eh in eyes:
c_eyes = c_eyes + 1
if c_eyes >= 2: #si hay mínimo dos ojos (a veces la boca abierta la detecta como un tercer ojo), es una cara
print("faces[flag]", faces[flag])
return faces[flag]
|
normal
|
{
"blob_id": "1df3a5dc8ed767e20d34c2836eed79872a21a016",
"index": 9948,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef face_detector(img, face_cascade, eye_cascade, face_f):\n xf = face_f[0]\n yf = face_f[1]\n wf = face_f[2]\n hf = face_f[3]\n xi = 0\n yi = 0\n wi = img.shape[1]\n hi = img.shape[0]\n c = float(0.1)\n print('face_f: ', xf, xf + wf, yf, yf + hf)\n if xf != xi or yf != yi or wf != wi or hf != hi:\n y1 = yf - round(c * hf)\n y2 = yf + hf + round(c * hf)\n x1 = xf - round(c * wf)\n x2 = xf + wf + round(c * wf)\n roi_f = img[y1:y2, x1:x2]\n print('Face apertura: ', x1, x2, y1, y2)\n cv2.imshow('Face apertura', roi_f)\n else:\n roi_f = img[face_f[1]:face_f[1] + face_f[3], face_f[0]:face_f[0] +\n face_f[2]]\n gray_img = cv2.cvtColor(roi_f, cv2.COLOR_BGR2GRAY)\n cv2.imshow('gray_img', gray_img)\n faces = face_cascade.detectMultiScale(gray_img, scaleFactor=1.04,\n minNeighbors=5)\n print('Faces: ', faces)\n if type(faces) == np.ndarray:\n flag = -1\n for x, y, w, h in faces:\n flag = flag + 1\n if w >= 100 and w <= 125 and h >= 100 and h <= 125:\n print('Entro en el if de tamaño')\n print('Face: ', x, y, w, h)\n roi_gray = gray_img[y:y + h, x:x + w]\n cv2.imshow('roi_gray', roi_gray)\n eyes = eye_cascade.detectMultiScale(roi_gray)\n c_eyes = 0\n for ex, ey, ew, eh in eyes:\n c_eyes = c_eyes + 1\n if c_eyes >= 2:\n print('faces[flag]', faces[flag])\n return faces[flag]\n",
"step-3": "import cv2\nimport numpy as np\n\n\ndef face_detector(img, face_cascade, eye_cascade, face_f):\n xf = face_f[0]\n yf = face_f[1]\n wf = face_f[2]\n hf = face_f[3]\n xi = 0\n yi = 0\n wi = img.shape[1]\n hi = img.shape[0]\n c = float(0.1)\n print('face_f: ', xf, xf + wf, yf, yf + hf)\n if xf != xi or yf != yi or wf != wi or hf != hi:\n y1 = yf - round(c * hf)\n y2 = yf + hf + round(c * hf)\n x1 = xf - round(c * wf)\n x2 = xf + wf + round(c * wf)\n roi_f = img[y1:y2, x1:x2]\n print('Face apertura: ', x1, x2, y1, y2)\n cv2.imshow('Face apertura', roi_f)\n else:\n roi_f = img[face_f[1]:face_f[1] + face_f[3], face_f[0]:face_f[0] +\n face_f[2]]\n gray_img = cv2.cvtColor(roi_f, cv2.COLOR_BGR2GRAY)\n cv2.imshow('gray_img', gray_img)\n faces = face_cascade.detectMultiScale(gray_img, scaleFactor=1.04,\n minNeighbors=5)\n print('Faces: ', faces)\n if type(faces) == np.ndarray:\n flag = -1\n for x, y, w, h in faces:\n flag = flag + 1\n if w >= 100 and w <= 125 and h >= 100 and h <= 125:\n print('Entro en el if de tamaño')\n print('Face: ', x, y, w, h)\n roi_gray = gray_img[y:y + h, x:x + w]\n cv2.imshow('roi_gray', roi_gray)\n eyes = eye_cascade.detectMultiScale(roi_gray)\n c_eyes = 0\n for ex, ey, ew, eh in eyes:\n c_eyes = c_eyes + 1\n if c_eyes >= 2:\n print('faces[flag]', faces[flag])\n return faces[flag]\n",
"step-4": "#LIBRERIAS\nimport cv2\nimport numpy as np\n\n#FUNCION: recibe una imagen y te devuelve las coordenadas de las caras\ndef face_detector(img, face_cascade, eye_cascade, face_f): \n\n #variables face_f\n xf = face_f[0]\n yf = face_f[1]\n wf = face_f[2]\n hf = face_f[3]\n \n #variables img\n xi = 0\n yi = 0\n wi = img.shape[1]\n hi = img.shape[0]\n\n #apertura de face_f con relacion a la img\n c = float(0.1) #esto es un 10 %\n \n print(\"face_f: \", xf, xf + wf, yf, yf + hf)\n #roi_i = img[yf: yf + hf, xf: xf + wf]\n #cv2.imshow(\"roi_i\", roi_i)\n\n if xf != xi or yf != yi or wf != wi or hf != hi: #(tendre que ver si AND o OR)\n #face_f no es igual a img, hace falta la apertura\n \n y1 = yf - round(c * hf)\n y2 = yf + hf + round(c * hf)\n x1 = xf - round(c * wf)\n x2 = xf + wf + round(c * wf)\n\n roi_f = img[y1: y2, x1: x2]\n \n print(\"Face apertura: \", x1, x2, y1, y2)\n cv2.imshow('Face apertura',roi_f)\n\n else:\n\n #face_f es igual a img, no hace falta la apertura\n \n roi_f = img[face_f[1] : face_f[1] + face_f[3], face_f[0] : face_f[0] + face_f[2]]\n\n #cv2.imshow('roi_f',roi_f)\n\n\n\n #paso el roi_f a gris para un mejor tratamiento\n gray_img = cv2.cvtColor(roi_f,cv2.COLOR_BGR2GRAY)\n cv2.imshow(\"gray_img\",gray_img)\n \n #aplicar el clasificador de caras sobre la imagen y guardo el resultado en faces: seran la x, y, height y width\n faces = face_cascade.detectMultiScale(gray_img, scaleFactor=1.04, minNeighbors=5)\n print(\"Faces: \", faces)\n\n if type(faces) == np.ndarray:\n\n flag = -1\n\n for x,y,w,h in faces:\n\n flag = flag + 1\n\n #print(\"Face: \", x,y,w,h)\n \n if w >= 100 and w <= 125 and h >= 100 and h <= 125:\n print(\"Entro en el if de tamaño\")\n #Region Of Interest\n print(\"Face: \", x,y,w,h)\n roi_gray = gray_img[y:y+h, x:x+w]\n \n cv2.imshow(\"roi_gray\", roi_gray)\n\n #aplico el clasificador de ojos sobre la imagen de interes que se supone que es una cara y guardo el resultado en eyes\n eyes = eye_cascade.detectMultiScale(roi_gray)\n \n c_eyes = 0\n\n for ex,ey,ew,eh in eyes:\n \n c_eyes = c_eyes + 1\n\n if c_eyes >= 2: #si hay mínimo dos ojos (a veces la boca abierta la detecta como un tercer ojo), es una cara\n print(\"faces[flag]\", faces[flag])\n return faces[flag]\n \n \n \n \n ",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
'''
Функція replace() може використовуватися для заміни будь-якого слова у рядку іншим словом.
Прочитайте кожен рядок зі створеного у попередньому завданні файлу learning_python.txt і замініть слово Python назвою іншої мови,
наприклад C при виведенні на екран. Це завдання написати в окремій функції.
'''
def reader():
with open('possibilities.txt', 'r') as file1:
file_lines = [x.strip() for x in file1.readlines()]
for e in file_lines:
n = e.replace('Python', 'C++')
print(n)
if __name__ == '__main__':
reader()
|
normal
|
{
"blob_id": "6d80a89a47b68fd8d81739787897355671ca94e9",
"index": 5815,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef reader():\n with open('possibilities.txt', 'r') as file1:\n file_lines = [x.strip() for x in file1.readlines()]\n for e in file_lines:\n n = e.replace('Python', 'C++')\n print(n)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef reader():\n with open('possibilities.txt', 'r') as file1:\n file_lines = [x.strip() for x in file1.readlines()]\n for e in file_lines:\n n = e.replace('Python', 'C++')\n print(n)\n\n\nif __name__ == '__main__':\n reader()\n",
"step-4": "'''\nФункція replace() може використовуватися для заміни будь-якого слова у рядку іншим словом.\nПрочитайте кожен рядок зі створеного у попередньому завданні файлу learning_python.txt і замініть слово Python назвою іншої мови,\nнаприклад C при виведенні на екран. Це завдання написати в окремій функції.\n'''\n\n\ndef reader():\n with open('possibilities.txt', 'r') as file1:\n file_lines = [x.strip() for x in file1.readlines()]\n for e in file_lines:\n n = e.replace('Python', 'C++')\n print(n)\n\n\nif __name__ == '__main__':\n reader()\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
from django.core.validators import RegexValidator
from django.db import models
from .image import Image
class AffiliatedStoreManager(models.Manager):
def get_queryset(self):
return super().get_queryset() \
.select_related('icon') \
.select_related('icon__image_type')
def find_all(self):
return self.all()
def find_by_id(self, id):
return self.get(id=id)
class AffiliatedStore(models.Model):
class Meta:
db_table = 'affiliated_store'
objects = AffiliatedStoreManager()
title = models.CharField(max_length=255)
server_url = models.CharField(max_length=2083,
validators=[RegexValidator(regex='^(https|http)://.*$',
code='invalid url',
message='server_url must be a url')])
icon = models.ForeignKey(Image, related_name='+', null=True, blank=True)
is_enabled = models.BooleanField(default=True)
def __repr__(self):
return 'AffiliatedStore(id={0!s}, title="{1!s}")'.format(self.id, self.title)
def __str__(self):
return repr(self)
|
normal
|
{
"blob_id": "e2b439974b66e45a899605bc7234850783c3dfb0",
"index": 2231,
"step-1": "<mask token>\n\n\nclass AffiliatedStoreManager(models.Manager):\n <mask token>\n <mask token>\n <mask token>\n\n\nclass AffiliatedStore(models.Model):\n\n\n class Meta:\n db_table = 'affiliated_store'\n objects = AffiliatedStoreManager()\n title = models.CharField(max_length=255)\n server_url = models.CharField(max_length=2083, validators=[\n RegexValidator(regex='^(https|http)://.*$', code='invalid url',\n message='server_url must be a url')])\n icon = models.ForeignKey(Image, related_name='+', null=True, blank=True)\n is_enabled = models.BooleanField(default=True)\n\n def __repr__(self):\n return 'AffiliatedStore(id={0!s}, title=\"{1!s}\")'.format(self.id,\n self.title)\n\n def __str__(self):\n return repr(self)\n",
"step-2": "<mask token>\n\n\nclass AffiliatedStoreManager(models.Manager):\n\n def get_queryset(self):\n return super().get_queryset().select_related('icon').select_related(\n 'icon__image_type')\n <mask token>\n <mask token>\n\n\nclass AffiliatedStore(models.Model):\n\n\n class Meta:\n db_table = 'affiliated_store'\n objects = AffiliatedStoreManager()\n title = models.CharField(max_length=255)\n server_url = models.CharField(max_length=2083, validators=[\n RegexValidator(regex='^(https|http)://.*$', code='invalid url',\n message='server_url must be a url')])\n icon = models.ForeignKey(Image, related_name='+', null=True, blank=True)\n is_enabled = models.BooleanField(default=True)\n\n def __repr__(self):\n return 'AffiliatedStore(id={0!s}, title=\"{1!s}\")'.format(self.id,\n self.title)\n\n def __str__(self):\n return repr(self)\n",
"step-3": "<mask token>\n\n\nclass AffiliatedStoreManager(models.Manager):\n\n def get_queryset(self):\n return super().get_queryset().select_related('icon').select_related(\n 'icon__image_type')\n <mask token>\n\n def find_by_id(self, id):\n return self.get(id=id)\n\n\nclass AffiliatedStore(models.Model):\n\n\n class Meta:\n db_table = 'affiliated_store'\n objects = AffiliatedStoreManager()\n title = models.CharField(max_length=255)\n server_url = models.CharField(max_length=2083, validators=[\n RegexValidator(regex='^(https|http)://.*$', code='invalid url',\n message='server_url must be a url')])\n icon = models.ForeignKey(Image, related_name='+', null=True, blank=True)\n is_enabled = models.BooleanField(default=True)\n\n def __repr__(self):\n return 'AffiliatedStore(id={0!s}, title=\"{1!s}\")'.format(self.id,\n self.title)\n\n def __str__(self):\n return repr(self)\n",
"step-4": "from django.core.validators import RegexValidator\nfrom django.db import models\nfrom .image import Image\n\n\nclass AffiliatedStoreManager(models.Manager):\n\n def get_queryset(self):\n return super().get_queryset().select_related('icon').select_related(\n 'icon__image_type')\n\n def find_all(self):\n return self.all()\n\n def find_by_id(self, id):\n return self.get(id=id)\n\n\nclass AffiliatedStore(models.Model):\n\n\n class Meta:\n db_table = 'affiliated_store'\n objects = AffiliatedStoreManager()\n title = models.CharField(max_length=255)\n server_url = models.CharField(max_length=2083, validators=[\n RegexValidator(regex='^(https|http)://.*$', code='invalid url',\n message='server_url must be a url')])\n icon = models.ForeignKey(Image, related_name='+', null=True, blank=True)\n is_enabled = models.BooleanField(default=True)\n\n def __repr__(self):\n return 'AffiliatedStore(id={0!s}, title=\"{1!s}\")'.format(self.id,\n self.title)\n\n def __str__(self):\n return repr(self)\n",
"step-5": "from django.core.validators import RegexValidator\nfrom django.db import models\n\nfrom .image import Image\n\n\nclass AffiliatedStoreManager(models.Manager):\n\n def get_queryset(self):\n return super().get_queryset() \\\n .select_related('icon') \\\n .select_related('icon__image_type')\n\n def find_all(self):\n return self.all()\n\n def find_by_id(self, id):\n return self.get(id=id)\n\n\nclass AffiliatedStore(models.Model):\n class Meta:\n db_table = 'affiliated_store'\n\n objects = AffiliatedStoreManager()\n\n title = models.CharField(max_length=255)\n\n server_url = models.CharField(max_length=2083,\n validators=[RegexValidator(regex='^(https|http)://.*$',\n code='invalid url',\n message='server_url must be a url')])\n\n icon = models.ForeignKey(Image, related_name='+', null=True, blank=True)\n\n is_enabled = models.BooleanField(default=True)\n\n def __repr__(self):\n return 'AffiliatedStore(id={0!s}, title=\"{1!s}\")'.format(self.id, self.title)\n\n def __str__(self):\n return repr(self)\n",
"step-ids": [
5,
6,
7,
9,
10
]
}
|
[
5,
6,
7,
9,
10
] |
import flask
import numpy as np
import pandas as pd
import requests
from bs4 import BeautifulSoup
import pickle
from recent_earnings_tickers import ok_tickers
import re
#---------- Model ----------------#
#with open('/Users/samfunk/ds/metis/project_mcnulty/code/REPLACE_WITH_MODEL_PICKLE', 'rb') as f:
#PREDICTOR = pickle.load(f)
'''Have final model in the pickle file
Should be prefit to main data
Simply ask for a company/list of companies
Input the ticker into model (which will scrape web for current features)
Pray some of them are right'''
#---------- URLS AND WEB PAGES -------------#
app = flask.Flask(__name__)
@app.route('/')
def home_page():
with open("/Users/samfunk/ds/metis/project_mcnulty/stock_page.html",'r') as viz_file:
return viz_file.read()
@app.route("/stock", methods=["POST"])
def stock(ok_tickers=ok_tickers()):
data = flask.request.json
ticker = str(data["ticker"]).upper()
if ticker in ok_tickers:
earnings_soup = BeautifulSoup(requests.get("https://finance.yahoo.com/quote/%s/analysts?p=%s" % (ticker, ticker)).text, 'html.parser')
surprise_string = earnings_soup.find_all('table')[2].tbody.find_all('tr')[3].find_all('td')[4].text
surprise = float(re.search(r'(.*)%', surprise_string)[1])
#score = PREDICTOR.predict_proba(x)
if abs(surprise) < 5.0:
score = 0
else:
score = 1
else:
surprise_string = 'null'
score = 'null'
#score = PREDICTOR.predict_proba(x)
results = {"surprise": surprise_string, "score": score}
print(ticker, results)
return flask.jsonify(results)
if __name__ == '__main__':
app.run()
|
normal
|
{
"blob_id": "3be1947ead65f8e8a9bf73cc8cae2c7d69d8b756",
"index": 1641,
"step-1": "<mask token>\n\n\[email protected]('/')\ndef home_page():\n with open('/Users/samfunk/ds/metis/project_mcnulty/stock_page.html', 'r'\n ) as viz_file:\n return viz_file.read()\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\[email protected]('/')\ndef home_page():\n with open('/Users/samfunk/ds/metis/project_mcnulty/stock_page.html', 'r'\n ) as viz_file:\n return viz_file.read()\n\n\[email protected]('/stock', methods=['POST'])\ndef stock(ok_tickers=ok_tickers()):\n data = flask.request.json\n ticker = str(data['ticker']).upper()\n if ticker in ok_tickers:\n earnings_soup = BeautifulSoup(requests.get(\n 'https://finance.yahoo.com/quote/%s/analysts?p=%s' % (ticker,\n ticker)).text, 'html.parser')\n surprise_string = earnings_soup.find_all('table')[2].tbody.find_all(\n 'tr')[3].find_all('td')[4].text\n surprise = float(re.search('(.*)%', surprise_string)[1])\n if abs(surprise) < 5.0:\n score = 0\n else:\n score = 1\n else:\n surprise_string = 'null'\n score = 'null'\n results = {'surprise': surprise_string, 'score': score}\n print(ticker, results)\n return flask.jsonify(results)\n\n\nif __name__ == '__main__':\n app.run()\n",
"step-3": "<mask token>\napp = flask.Flask(__name__)\n\n\[email protected]('/')\ndef home_page():\n with open('/Users/samfunk/ds/metis/project_mcnulty/stock_page.html', 'r'\n ) as viz_file:\n return viz_file.read()\n\n\[email protected]('/stock', methods=['POST'])\ndef stock(ok_tickers=ok_tickers()):\n data = flask.request.json\n ticker = str(data['ticker']).upper()\n if ticker in ok_tickers:\n earnings_soup = BeautifulSoup(requests.get(\n 'https://finance.yahoo.com/quote/%s/analysts?p=%s' % (ticker,\n ticker)).text, 'html.parser')\n surprise_string = earnings_soup.find_all('table')[2].tbody.find_all(\n 'tr')[3].find_all('td')[4].text\n surprise = float(re.search('(.*)%', surprise_string)[1])\n if abs(surprise) < 5.0:\n score = 0\n else:\n score = 1\n else:\n surprise_string = 'null'\n score = 'null'\n results = {'surprise': surprise_string, 'score': score}\n print(ticker, results)\n return flask.jsonify(results)\n\n\nif __name__ == '__main__':\n app.run()\n",
"step-4": "import flask\nimport numpy as np\nimport pandas as pd\nimport requests\nfrom bs4 import BeautifulSoup\nimport pickle\nfrom recent_earnings_tickers import ok_tickers\nimport re\n<mask token>\napp = flask.Flask(__name__)\n\n\[email protected]('/')\ndef home_page():\n with open('/Users/samfunk/ds/metis/project_mcnulty/stock_page.html', 'r'\n ) as viz_file:\n return viz_file.read()\n\n\[email protected]('/stock', methods=['POST'])\ndef stock(ok_tickers=ok_tickers()):\n data = flask.request.json\n ticker = str(data['ticker']).upper()\n if ticker in ok_tickers:\n earnings_soup = BeautifulSoup(requests.get(\n 'https://finance.yahoo.com/quote/%s/analysts?p=%s' % (ticker,\n ticker)).text, 'html.parser')\n surprise_string = earnings_soup.find_all('table')[2].tbody.find_all(\n 'tr')[3].find_all('td')[4].text\n surprise = float(re.search('(.*)%', surprise_string)[1])\n if abs(surprise) < 5.0:\n score = 0\n else:\n score = 1\n else:\n surprise_string = 'null'\n score = 'null'\n results = {'surprise': surprise_string, 'score': score}\n print(ticker, results)\n return flask.jsonify(results)\n\n\nif __name__ == '__main__':\n app.run()\n",
"step-5": "import flask\nimport numpy as np\nimport pandas as pd\nimport requests\nfrom bs4 import BeautifulSoup\nimport pickle\nfrom recent_earnings_tickers import ok_tickers\nimport re\n\n#---------- Model ----------------#\n\n#with open('/Users/samfunk/ds/metis/project_mcnulty/code/REPLACE_WITH_MODEL_PICKLE', 'rb') as f:\n #PREDICTOR = pickle.load(f)\n\n\n'''Have final model in the pickle file\nShould be prefit to main data\nSimply ask for a company/list of companies\nInput the ticker into model (which will scrape web for current features)\nPray some of them are right'''\n\n\n\n#---------- URLS AND WEB PAGES -------------#\napp = flask.Flask(__name__)\n\[email protected]('/')\ndef home_page():\n with open(\"/Users/samfunk/ds/metis/project_mcnulty/stock_page.html\",'r') as viz_file:\n return viz_file.read()\n\n\[email protected](\"/stock\", methods=[\"POST\"])\ndef stock(ok_tickers=ok_tickers()):\n\n data = flask.request.json\n ticker = str(data[\"ticker\"]).upper()\n if ticker in ok_tickers:\n earnings_soup = BeautifulSoup(requests.get(\"https://finance.yahoo.com/quote/%s/analysts?p=%s\" % (ticker, ticker)).text, 'html.parser')\n surprise_string = earnings_soup.find_all('table')[2].tbody.find_all('tr')[3].find_all('td')[4].text\n surprise = float(re.search(r'(.*)%', surprise_string)[1])\n\n\n #score = PREDICTOR.predict_proba(x)\n\n if abs(surprise) < 5.0:\n score = 0\n else:\n score = 1\n else:\n surprise_string = 'null'\n score = 'null'\n #score = PREDICTOR.predict_proba(x)\n results = {\"surprise\": surprise_string, \"score\": score}\n\n print(ticker, results)\n return flask.jsonify(results)\n\nif __name__ == '__main__':\n app.run()\n",
"step-ids": [
1,
3,
4,
5,
6
]
}
|
[
1,
3,
4,
5,
6
] |
from os import wait
import cv2
import numpy as np
import math
import sys
import types
import operator
## orb 및 bf matcher 선언
orb = cv2.cv2.ORB_create(
nfeatures=5000,
scaleFactor=1.2,
nlevels=8,
edgeThreshold=31,
firstLevel=0,
WTA_K=2,
scoreType=cv2.ORB_FAST_SCORE,
patchSize=31,
fastThreshold=25,
)
bf = cv2.BFMatcher(cv2.NORM_HAMMING)
def getScale(NumFrame, t_gt, seq_num):
txt_file = open('/media/cordin/새 볼륨/rosbag/dataset/poses/{0:02d}.txt'.format(seq_num))
x_prev = float(t_gt[0])
y_prev = float(t_gt[1])
z_prev = float(t_gt[2])
line = txt_file.readlines()
line_sp = line[NumFrame].split(' ')
x = float(line_sp[3])
y = float(line_sp[7])
z = float(line_sp[11])
t_gt[0] = x
t_gt[1] = y
t_gt[2] = z
txt_file.close()
scale = math.sqrt((x-x_prev)**2 + (y-y_prev)**2 + (z-z_prev)**2)
return scale, t_gt
if __name__ == "__main__":
MAX_FRAME = 1000
SEQ_NUM = 2
#Camera intrinsic parameter
focal = 718.8560
pp = (607.1928, 185.2157)
textOrg1 = (10,30)
textOrg2 = (10,80)
textOrg3 = (10,130)
img_1_c = cv2.imread("/media/cordin/새 볼륨/rosbag/dataset/sequences/{0:02d}/image_0/000000.png".format(SEQ_NUM))
img_2_c = cv2.imread("/media/cordin/새 볼륨/rosbag/dataset/sequences/{0:02d}/image_0/000001.png".format(SEQ_NUM))
img_1 = cv2.cvtColor(img_1_c,cv2.COLOR_BGR2GRAY)
img_2 = cv2.cvtColor(img_2_c,cv2.COLOR_BGR2GRAY)
kp1, des1 = orb.detectAndCompute(img_1,None)
kp2, des2 = orb.detectAndCompute(img_2,None)
matches = bf.match(des1,des2)
matches = sorted(matches, key = lambda x:x.distance)
idx = matches[0:1500]
pts1 = []
pts2 = []
for i in idx:
pts1.append(kp1[i.queryIdx].pt)
pts2.append(kp2[i.trainIdx].pt)
pts1 = np.array(pts1)
pts2 = np.array(pts2)
E, mask = cv2.findEssentialMat(pts1,pts2,focal = focal, pp = pp, method=cv2.RANSAC, prob = 0.999, threshold=1.0)
_, R_f, t_f, _ = cv2.recoverPose(E, pts1, pts2, focal = focal, pp = pp)
R_f_seg = R_f
t_f_seg = t_f
t_gt = np.zeros((3,1),dtype=np.float64)
prevImage = img_2
kp_prev = kp2
des_prev = des2
traj = np.zeros((1000,2000),dtype=np.uint8)
traj = cv2.cvtColor(traj,cv2.COLOR_GRAY2BGR)
rmse_total = 0
for numFrame in range(2, MAX_FRAME):
filename = '/media/cordin/새 볼륨/rosbag/dataset/sequences/{0:02d}/image_0/{1:06d}.png'.format(SEQ_NUM,numFrame)
currImage_c = cv2.imread(filename)
currImage = cv2.cvtColor(currImage_c,cv2.COLOR_BGR2GRAY)
# feature extraction
kp_curr, des_curr = orb.detectAndCompute(currImage,None)
# feature matching
matches = bf.match(des_prev,des_curr)
matches = sorted(matches, key = lambda x:x.distance)
idx = matches[0:1500]
pts1 = []
pts2 = []
for i in idx:
pts1.append(kp_prev[i.queryIdx].pt)
pts2.append(kp_curr[i.trainIdx].pt)
pts1 = np.array(pts1)
pts2 = np.array(pts2)
# caculate R, t
E_mat, mask_n = cv2.findEssentialMat(pts2, pts1, focal = focal, pp = pp, method=cv2.RANSAC, prob = 0.999, threshold=1.0)
_, R, t, _ = cv2.recoverPose(E_mat, pts2, pts1, focal = focal, pp = pp)
# get scale
abs_scale, t_gt = getScale(numFrame, t_gt, SEQ_NUM)
# update trajectory
t_f = t_f + abs_scale*R_f.dot(t)
R_f = R.dot(R_f)
# caculate Error
error = map(operator.sub,t_gt,t_f)
error_sum_square = sum(map(lambda x:x*x,error))
rmse = math.sqrt(error_sum_square/3)
rmse_total = rmse_total + rmse
print("rmse = ",rmse_total/numFrame)
prevImage = currImage
kp_prev = kp_curr
des_prev = des_curr
# visualization
x_gt = int(t_gt[0]) + 1000
y_gt = int(t_gt[2]) + 100
x = int(t_f[0]) + 1000
y = int(t_f[2]) + 100
cv2.circle(traj, (x,y), 1 , (0,0,255), 2)
cv2.circle(traj, (x_gt,y_gt), 1 , (0,255,0), 2)
cv2.rectangle(traj, (10,10), (700,150), (0,0,0), -1)
text1 = 'orb Coordinates: x = {0:02f}m y = {1:02f}m z = {2:02f}m'.format(float(t_f[0]),float(t_f[1]),float(t_f[2]))
cv2.putText(traj, text1, textOrg1, cv2.FONT_HERSHEY_PLAIN,1,(255,255,255),1,8)
text3 = 'gt Coordinates: x = {0:02f}m y = {1:02f}m z = {2:02f}m'.format(float(t_gt[0]),float(t_gt[1]),float(t_gt[2]))
cv2.putText(traj, text3, textOrg3, cv2.FONT_HERSHEY_PLAIN,1,(255,255,255),1,8)
feature_img = cv2.drawKeypoints(currImage_c, kp_curr, None)
cv2.imshow("trajectory", traj)
cv2.imshow("feat_img", feature_img)
cv2.waitKey(1)
cv2.imwrite("result_{0:02d}.png".format(SEQ_NUM),traj)
|
normal
|
{
"blob_id": "73e7e43e9cfb3c0884480809bc03ade687d641d6",
"index": 733,
"step-1": "<mask token>\n\n\ndef getScale(NumFrame, t_gt, seq_num):\n txt_file = open('/media/cordin/새 볼륨/rosbag/dataset/poses/{0:02d}.txt'.\n format(seq_num))\n x_prev = float(t_gt[0])\n y_prev = float(t_gt[1])\n z_prev = float(t_gt[2])\n line = txt_file.readlines()\n line_sp = line[NumFrame].split(' ')\n x = float(line_sp[3])\n y = float(line_sp[7])\n z = float(line_sp[11])\n t_gt[0] = x\n t_gt[1] = y\n t_gt[2] = z\n txt_file.close()\n scale = math.sqrt((x - x_prev) ** 2 + (y - y_prev) ** 2 + (z - z_prev) ** 2\n )\n return scale, t_gt\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef getScale(NumFrame, t_gt, seq_num):\n txt_file = open('/media/cordin/새 볼륨/rosbag/dataset/poses/{0:02d}.txt'.\n format(seq_num))\n x_prev = float(t_gt[0])\n y_prev = float(t_gt[1])\n z_prev = float(t_gt[2])\n line = txt_file.readlines()\n line_sp = line[NumFrame].split(' ')\n x = float(line_sp[3])\n y = float(line_sp[7])\n z = float(line_sp[11])\n t_gt[0] = x\n t_gt[1] = y\n t_gt[2] = z\n txt_file.close()\n scale = math.sqrt((x - x_prev) ** 2 + (y - y_prev) ** 2 + (z - z_prev) ** 2\n )\n return scale, t_gt\n\n\nif __name__ == '__main__':\n MAX_FRAME = 1000\n SEQ_NUM = 2\n focal = 718.856\n pp = 607.1928, 185.2157\n textOrg1 = 10, 30\n textOrg2 = 10, 80\n textOrg3 = 10, 130\n img_1_c = cv2.imread(\n '/media/cordin/새 볼륨/rosbag/dataset/sequences/{0:02d}/image_0/000000.png'\n .format(SEQ_NUM))\n img_2_c = cv2.imread(\n '/media/cordin/새 볼륨/rosbag/dataset/sequences/{0:02d}/image_0/000001.png'\n .format(SEQ_NUM))\n img_1 = cv2.cvtColor(img_1_c, cv2.COLOR_BGR2GRAY)\n img_2 = cv2.cvtColor(img_2_c, cv2.COLOR_BGR2GRAY)\n kp1, des1 = orb.detectAndCompute(img_1, None)\n kp2, des2 = orb.detectAndCompute(img_2, None)\n matches = bf.match(des1, des2)\n matches = sorted(matches, key=lambda x: x.distance)\n idx = matches[0:1500]\n pts1 = []\n pts2 = []\n for i in idx:\n pts1.append(kp1[i.queryIdx].pt)\n pts2.append(kp2[i.trainIdx].pt)\n pts1 = np.array(pts1)\n pts2 = np.array(pts2)\n E, mask = cv2.findEssentialMat(pts1, pts2, focal=focal, pp=pp, method=\n cv2.RANSAC, prob=0.999, threshold=1.0)\n _, R_f, t_f, _ = cv2.recoverPose(E, pts1, pts2, focal=focal, pp=pp)\n R_f_seg = R_f\n t_f_seg = t_f\n t_gt = np.zeros((3, 1), dtype=np.float64)\n prevImage = img_2\n kp_prev = kp2\n des_prev = des2\n traj = np.zeros((1000, 2000), dtype=np.uint8)\n traj = cv2.cvtColor(traj, cv2.COLOR_GRAY2BGR)\n rmse_total = 0\n for numFrame in range(2, MAX_FRAME):\n filename = (\n '/media/cordin/새 볼륨/rosbag/dataset/sequences/{0:02d}/image_0/{1:06d}.png'\n .format(SEQ_NUM, numFrame))\n currImage_c = cv2.imread(filename)\n currImage = cv2.cvtColor(currImage_c, cv2.COLOR_BGR2GRAY)\n kp_curr, des_curr = orb.detectAndCompute(currImage, None)\n matches = bf.match(des_prev, des_curr)\n matches = sorted(matches, key=lambda x: x.distance)\n idx = matches[0:1500]\n pts1 = []\n pts2 = []\n for i in idx:\n pts1.append(kp_prev[i.queryIdx].pt)\n pts2.append(kp_curr[i.trainIdx].pt)\n pts1 = np.array(pts1)\n pts2 = np.array(pts2)\n E_mat, mask_n = cv2.findEssentialMat(pts2, pts1, focal=focal, pp=pp,\n method=cv2.RANSAC, prob=0.999, threshold=1.0)\n _, R, t, _ = cv2.recoverPose(E_mat, pts2, pts1, focal=focal, pp=pp)\n abs_scale, t_gt = getScale(numFrame, t_gt, SEQ_NUM)\n t_f = t_f + abs_scale * R_f.dot(t)\n R_f = R.dot(R_f)\n error = map(operator.sub, t_gt, t_f)\n error_sum_square = sum(map(lambda x: x * x, error))\n rmse = math.sqrt(error_sum_square / 3)\n rmse_total = rmse_total + rmse\n print('rmse = ', rmse_total / numFrame)\n prevImage = currImage\n kp_prev = kp_curr\n des_prev = des_curr\n x_gt = int(t_gt[0]) + 1000\n y_gt = int(t_gt[2]) + 100\n x = int(t_f[0]) + 1000\n y = int(t_f[2]) + 100\n cv2.circle(traj, (x, y), 1, (0, 0, 255), 2)\n cv2.circle(traj, (x_gt, y_gt), 1, (0, 255, 0), 2)\n cv2.rectangle(traj, (10, 10), (700, 150), (0, 0, 0), -1)\n text1 = ('orb Coordinates: x = {0:02f}m y = {1:02f}m z = {2:02f}m'.\n format(float(t_f[0]), float(t_f[1]), float(t_f[2])))\n cv2.putText(traj, text1, textOrg1, cv2.FONT_HERSHEY_PLAIN, 1, (255,\n 255, 255), 1, 8)\n text3 = ('gt Coordinates: x = {0:02f}m y = {1:02f}m z = 
{2:02f}m'.\n format(float(t_gt[0]), float(t_gt[1]), float(t_gt[2])))\n cv2.putText(traj, text3, textOrg3, cv2.FONT_HERSHEY_PLAIN, 1, (255,\n 255, 255), 1, 8)\n feature_img = cv2.drawKeypoints(currImage_c, kp_curr, None)\n cv2.imshow('trajectory', traj)\n cv2.imshow('feat_img', feature_img)\n cv2.waitKey(1)\n cv2.imwrite('result_{0:02d}.png'.format(SEQ_NUM), traj)\n",
"step-3": "<mask token>\norb = cv2.cv2.ORB_create(nfeatures=5000, scaleFactor=1.2, nlevels=8,\n edgeThreshold=31, firstLevel=0, WTA_K=2, scoreType=cv2.ORB_FAST_SCORE,\n patchSize=31, fastThreshold=25)\nbf = cv2.BFMatcher(cv2.NORM_HAMMING)\n\n\ndef getScale(NumFrame, t_gt, seq_num):\n txt_file = open('/media/cordin/새 볼륨/rosbag/dataset/poses/{0:02d}.txt'.\n format(seq_num))\n x_prev = float(t_gt[0])\n y_prev = float(t_gt[1])\n z_prev = float(t_gt[2])\n line = txt_file.readlines()\n line_sp = line[NumFrame].split(' ')\n x = float(line_sp[3])\n y = float(line_sp[7])\n z = float(line_sp[11])\n t_gt[0] = x\n t_gt[1] = y\n t_gt[2] = z\n txt_file.close()\n scale = math.sqrt((x - x_prev) ** 2 + (y - y_prev) ** 2 + (z - z_prev) ** 2\n )\n return scale, t_gt\n\n\nif __name__ == '__main__':\n MAX_FRAME = 1000\n SEQ_NUM = 2\n focal = 718.856\n pp = 607.1928, 185.2157\n textOrg1 = 10, 30\n textOrg2 = 10, 80\n textOrg3 = 10, 130\n img_1_c = cv2.imread(\n '/media/cordin/새 볼륨/rosbag/dataset/sequences/{0:02d}/image_0/000000.png'\n .format(SEQ_NUM))\n img_2_c = cv2.imread(\n '/media/cordin/새 볼륨/rosbag/dataset/sequences/{0:02d}/image_0/000001.png'\n .format(SEQ_NUM))\n img_1 = cv2.cvtColor(img_1_c, cv2.COLOR_BGR2GRAY)\n img_2 = cv2.cvtColor(img_2_c, cv2.COLOR_BGR2GRAY)\n kp1, des1 = orb.detectAndCompute(img_1, None)\n kp2, des2 = orb.detectAndCompute(img_2, None)\n matches = bf.match(des1, des2)\n matches = sorted(matches, key=lambda x: x.distance)\n idx = matches[0:1500]\n pts1 = []\n pts2 = []\n for i in idx:\n pts1.append(kp1[i.queryIdx].pt)\n pts2.append(kp2[i.trainIdx].pt)\n pts1 = np.array(pts1)\n pts2 = np.array(pts2)\n E, mask = cv2.findEssentialMat(pts1, pts2, focal=focal, pp=pp, method=\n cv2.RANSAC, prob=0.999, threshold=1.0)\n _, R_f, t_f, _ = cv2.recoverPose(E, pts1, pts2, focal=focal, pp=pp)\n R_f_seg = R_f\n t_f_seg = t_f\n t_gt = np.zeros((3, 1), dtype=np.float64)\n prevImage = img_2\n kp_prev = kp2\n des_prev = des2\n traj = np.zeros((1000, 2000), dtype=np.uint8)\n traj = cv2.cvtColor(traj, cv2.COLOR_GRAY2BGR)\n rmse_total = 0\n for numFrame in range(2, MAX_FRAME):\n filename = (\n '/media/cordin/새 볼륨/rosbag/dataset/sequences/{0:02d}/image_0/{1:06d}.png'\n .format(SEQ_NUM, numFrame))\n currImage_c = cv2.imread(filename)\n currImage = cv2.cvtColor(currImage_c, cv2.COLOR_BGR2GRAY)\n kp_curr, des_curr = orb.detectAndCompute(currImage, None)\n matches = bf.match(des_prev, des_curr)\n matches = sorted(matches, key=lambda x: x.distance)\n idx = matches[0:1500]\n pts1 = []\n pts2 = []\n for i in idx:\n pts1.append(kp_prev[i.queryIdx].pt)\n pts2.append(kp_curr[i.trainIdx].pt)\n pts1 = np.array(pts1)\n pts2 = np.array(pts2)\n E_mat, mask_n = cv2.findEssentialMat(pts2, pts1, focal=focal, pp=pp,\n method=cv2.RANSAC, prob=0.999, threshold=1.0)\n _, R, t, _ = cv2.recoverPose(E_mat, pts2, pts1, focal=focal, pp=pp)\n abs_scale, t_gt = getScale(numFrame, t_gt, SEQ_NUM)\n t_f = t_f + abs_scale * R_f.dot(t)\n R_f = R.dot(R_f)\n error = map(operator.sub, t_gt, t_f)\n error_sum_square = sum(map(lambda x: x * x, error))\n rmse = math.sqrt(error_sum_square / 3)\n rmse_total = rmse_total + rmse\n print('rmse = ', rmse_total / numFrame)\n prevImage = currImage\n kp_prev = kp_curr\n des_prev = des_curr\n x_gt = int(t_gt[0]) + 1000\n y_gt = int(t_gt[2]) + 100\n x = int(t_f[0]) + 1000\n y = int(t_f[2]) + 100\n cv2.circle(traj, (x, y), 1, (0, 0, 255), 2)\n cv2.circle(traj, (x_gt, y_gt), 1, (0, 255, 0), 2)\n cv2.rectangle(traj, (10, 10), (700, 150), (0, 0, 0), -1)\n text1 = ('orb Coordinates: x = {0:02f}m y = {1:02f}m z 
= {2:02f}m'.\n format(float(t_f[0]), float(t_f[1]), float(t_f[2])))\n cv2.putText(traj, text1, textOrg1, cv2.FONT_HERSHEY_PLAIN, 1, (255,\n 255, 255), 1, 8)\n text3 = ('gt Coordinates: x = {0:02f}m y = {1:02f}m z = {2:02f}m'.\n format(float(t_gt[0]), float(t_gt[1]), float(t_gt[2])))\n cv2.putText(traj, text3, textOrg3, cv2.FONT_HERSHEY_PLAIN, 1, (255,\n 255, 255), 1, 8)\n feature_img = cv2.drawKeypoints(currImage_c, kp_curr, None)\n cv2.imshow('trajectory', traj)\n cv2.imshow('feat_img', feature_img)\n cv2.waitKey(1)\n cv2.imwrite('result_{0:02d}.png'.format(SEQ_NUM), traj)\n",
"step-4": "from os import wait\nimport cv2\nimport numpy as np\nimport math\nimport sys\nimport types\nimport operator\norb = cv2.cv2.ORB_create(nfeatures=5000, scaleFactor=1.2, nlevels=8,\n edgeThreshold=31, firstLevel=0, WTA_K=2, scoreType=cv2.ORB_FAST_SCORE,\n patchSize=31, fastThreshold=25)\nbf = cv2.BFMatcher(cv2.NORM_HAMMING)\n\n\ndef getScale(NumFrame, t_gt, seq_num):\n txt_file = open('/media/cordin/새 볼륨/rosbag/dataset/poses/{0:02d}.txt'.\n format(seq_num))\n x_prev = float(t_gt[0])\n y_prev = float(t_gt[1])\n z_prev = float(t_gt[2])\n line = txt_file.readlines()\n line_sp = line[NumFrame].split(' ')\n x = float(line_sp[3])\n y = float(line_sp[7])\n z = float(line_sp[11])\n t_gt[0] = x\n t_gt[1] = y\n t_gt[2] = z\n txt_file.close()\n scale = math.sqrt((x - x_prev) ** 2 + (y - y_prev) ** 2 + (z - z_prev) ** 2\n )\n return scale, t_gt\n\n\nif __name__ == '__main__':\n MAX_FRAME = 1000\n SEQ_NUM = 2\n focal = 718.856\n pp = 607.1928, 185.2157\n textOrg1 = 10, 30\n textOrg2 = 10, 80\n textOrg3 = 10, 130\n img_1_c = cv2.imread(\n '/media/cordin/새 볼륨/rosbag/dataset/sequences/{0:02d}/image_0/000000.png'\n .format(SEQ_NUM))\n img_2_c = cv2.imread(\n '/media/cordin/새 볼륨/rosbag/dataset/sequences/{0:02d}/image_0/000001.png'\n .format(SEQ_NUM))\n img_1 = cv2.cvtColor(img_1_c, cv2.COLOR_BGR2GRAY)\n img_2 = cv2.cvtColor(img_2_c, cv2.COLOR_BGR2GRAY)\n kp1, des1 = orb.detectAndCompute(img_1, None)\n kp2, des2 = orb.detectAndCompute(img_2, None)\n matches = bf.match(des1, des2)\n matches = sorted(matches, key=lambda x: x.distance)\n idx = matches[0:1500]\n pts1 = []\n pts2 = []\n for i in idx:\n pts1.append(kp1[i.queryIdx].pt)\n pts2.append(kp2[i.trainIdx].pt)\n pts1 = np.array(pts1)\n pts2 = np.array(pts2)\n E, mask = cv2.findEssentialMat(pts1, pts2, focal=focal, pp=pp, method=\n cv2.RANSAC, prob=0.999, threshold=1.0)\n _, R_f, t_f, _ = cv2.recoverPose(E, pts1, pts2, focal=focal, pp=pp)\n R_f_seg = R_f\n t_f_seg = t_f\n t_gt = np.zeros((3, 1), dtype=np.float64)\n prevImage = img_2\n kp_prev = kp2\n des_prev = des2\n traj = np.zeros((1000, 2000), dtype=np.uint8)\n traj = cv2.cvtColor(traj, cv2.COLOR_GRAY2BGR)\n rmse_total = 0\n for numFrame in range(2, MAX_FRAME):\n filename = (\n '/media/cordin/새 볼륨/rosbag/dataset/sequences/{0:02d}/image_0/{1:06d}.png'\n .format(SEQ_NUM, numFrame))\n currImage_c = cv2.imread(filename)\n currImage = cv2.cvtColor(currImage_c, cv2.COLOR_BGR2GRAY)\n kp_curr, des_curr = orb.detectAndCompute(currImage, None)\n matches = bf.match(des_prev, des_curr)\n matches = sorted(matches, key=lambda x: x.distance)\n idx = matches[0:1500]\n pts1 = []\n pts2 = []\n for i in idx:\n pts1.append(kp_prev[i.queryIdx].pt)\n pts2.append(kp_curr[i.trainIdx].pt)\n pts1 = np.array(pts1)\n pts2 = np.array(pts2)\n E_mat, mask_n = cv2.findEssentialMat(pts2, pts1, focal=focal, pp=pp,\n method=cv2.RANSAC, prob=0.999, threshold=1.0)\n _, R, t, _ = cv2.recoverPose(E_mat, pts2, pts1, focal=focal, pp=pp)\n abs_scale, t_gt = getScale(numFrame, t_gt, SEQ_NUM)\n t_f = t_f + abs_scale * R_f.dot(t)\n R_f = R.dot(R_f)\n error = map(operator.sub, t_gt, t_f)\n error_sum_square = sum(map(lambda x: x * x, error))\n rmse = math.sqrt(error_sum_square / 3)\n rmse_total = rmse_total + rmse\n print('rmse = ', rmse_total / numFrame)\n prevImage = currImage\n kp_prev = kp_curr\n des_prev = des_curr\n x_gt = int(t_gt[0]) + 1000\n y_gt = int(t_gt[2]) + 100\n x = int(t_f[0]) + 1000\n y = int(t_f[2]) + 100\n cv2.circle(traj, (x, y), 1, (0, 0, 255), 2)\n cv2.circle(traj, (x_gt, y_gt), 1, (0, 255, 0), 2)\n cv2.rectangle(traj, 
(10, 10), (700, 150), (0, 0, 0), -1)\n text1 = ('orb Coordinates: x = {0:02f}m y = {1:02f}m z = {2:02f}m'.\n format(float(t_f[0]), float(t_f[1]), float(t_f[2])))\n cv2.putText(traj, text1, textOrg1, cv2.FONT_HERSHEY_PLAIN, 1, (255,\n 255, 255), 1, 8)\n text3 = ('gt Coordinates: x = {0:02f}m y = {1:02f}m z = {2:02f}m'.\n format(float(t_gt[0]), float(t_gt[1]), float(t_gt[2])))\n cv2.putText(traj, text3, textOrg3, cv2.FONT_HERSHEY_PLAIN, 1, (255,\n 255, 255), 1, 8)\n feature_img = cv2.drawKeypoints(currImage_c, kp_curr, None)\n cv2.imshow('trajectory', traj)\n cv2.imshow('feat_img', feature_img)\n cv2.waitKey(1)\n cv2.imwrite('result_{0:02d}.png'.format(SEQ_NUM), traj)\n",
"step-5": "from os import wait\nimport cv2\nimport numpy as np\nimport math\nimport sys\nimport types\nimport operator\n\n## orb 및 bf matcher 선언\norb = cv2.cv2.ORB_create(\n nfeatures=5000,\n scaleFactor=1.2,\n nlevels=8,\n edgeThreshold=31,\n firstLevel=0,\n WTA_K=2,\n scoreType=cv2.ORB_FAST_SCORE,\n patchSize=31,\n fastThreshold=25,\n )\n\nbf = cv2.BFMatcher(cv2.NORM_HAMMING)\n\n\ndef getScale(NumFrame, t_gt, seq_num):\n\n txt_file = open('/media/cordin/새 볼륨/rosbag/dataset/poses/{0:02d}.txt'.format(seq_num))\n \n x_prev = float(t_gt[0])\n y_prev = float(t_gt[1])\n z_prev = float(t_gt[2])\n\n line = txt_file.readlines()\n line_sp = line[NumFrame].split(' ')\n\n x = float(line_sp[3])\n y = float(line_sp[7])\n z = float(line_sp[11])\n\n t_gt[0] = x\n t_gt[1] = y\n t_gt[2] = z\n\n txt_file.close()\n\n scale = math.sqrt((x-x_prev)**2 + (y-y_prev)**2 + (z-z_prev)**2)\n return scale, t_gt\n\n\nif __name__ == \"__main__\":\n MAX_FRAME = 1000\n SEQ_NUM = 2\n\n #Camera intrinsic parameter\n focal = 718.8560\n pp = (607.1928, 185.2157)\n\n textOrg1 = (10,30)\n textOrg2 = (10,80)\n textOrg3 = (10,130)\n\n img_1_c = cv2.imread(\"/media/cordin/새 볼륨/rosbag/dataset/sequences/{0:02d}/image_0/000000.png\".format(SEQ_NUM))\n img_2_c = cv2.imread(\"/media/cordin/새 볼륨/rosbag/dataset/sequences/{0:02d}/image_0/000001.png\".format(SEQ_NUM))\n img_1 = cv2.cvtColor(img_1_c,cv2.COLOR_BGR2GRAY)\n img_2 = cv2.cvtColor(img_2_c,cv2.COLOR_BGR2GRAY)\n\n kp1, des1 = orb.detectAndCompute(img_1,None)\n kp2, des2 = orb.detectAndCompute(img_2,None)\n\n matches = bf.match(des1,des2)\n matches = sorted(matches, key = lambda x:x.distance)\n\n idx = matches[0:1500]\n\n pts1 = []\n pts2 = []\n\n for i in idx:\n pts1.append(kp1[i.queryIdx].pt)\n pts2.append(kp2[i.trainIdx].pt)\n\n\n pts1 = np.array(pts1)\n pts2 = np.array(pts2)\n\n E, mask = cv2.findEssentialMat(pts1,pts2,focal = focal, pp = pp, method=cv2.RANSAC, prob = 0.999, threshold=1.0)\n _, R_f, t_f, _ = cv2.recoverPose(E, pts1, pts2, focal = focal, pp = pp)\n\n R_f_seg = R_f\n t_f_seg = t_f\n\n t_gt = np.zeros((3,1),dtype=np.float64)\n\n prevImage = img_2\n kp_prev = kp2\n des_prev = des2\n\n traj = np.zeros((1000,2000),dtype=np.uint8)\n traj = cv2.cvtColor(traj,cv2.COLOR_GRAY2BGR)\n\n rmse_total = 0\n \n for numFrame in range(2, MAX_FRAME):\n filename = '/media/cordin/새 볼륨/rosbag/dataset/sequences/{0:02d}/image_0/{1:06d}.png'.format(SEQ_NUM,numFrame)\n \n currImage_c = cv2.imread(filename)\n currImage = cv2.cvtColor(currImage_c,cv2.COLOR_BGR2GRAY)\n\n # feature extraction\n kp_curr, des_curr = orb.detectAndCompute(currImage,None)\n\n # feature matching\n matches = bf.match(des_prev,des_curr)\n matches = sorted(matches, key = lambda x:x.distance)\n idx = matches[0:1500]\n\n pts1 = []\n pts2 = []\n\n for i in idx:\n pts1.append(kp_prev[i.queryIdx].pt)\n pts2.append(kp_curr[i.trainIdx].pt)\n\n pts1 = np.array(pts1)\n pts2 = np.array(pts2)\n\n # caculate R, t\n E_mat, mask_n = cv2.findEssentialMat(pts2, pts1, focal = focal, pp = pp, method=cv2.RANSAC, prob = 0.999, threshold=1.0)\n _, R, t, _ = cv2.recoverPose(E_mat, pts2, pts1, focal = focal, pp = pp)\n\n # get scale\n abs_scale, t_gt = getScale(numFrame, t_gt, SEQ_NUM)\n \n # update trajectory\n t_f = t_f + abs_scale*R_f.dot(t)\n R_f = R.dot(R_f)\n\n # caculate Error\n error = map(operator.sub,t_gt,t_f)\n error_sum_square = sum(map(lambda x:x*x,error))\n rmse = math.sqrt(error_sum_square/3)\n rmse_total = rmse_total + rmse\n\n print(\"rmse = \",rmse_total/numFrame)\n\n prevImage = currImage\n kp_prev = kp_curr\n des_prev = 
des_curr\n\n # visualization\n x_gt = int(t_gt[0]) + 1000\n y_gt = int(t_gt[2]) + 100\n\n x = int(t_f[0]) + 1000\n y = int(t_f[2]) + 100\n\n cv2.circle(traj, (x,y), 1 , (0,0,255), 2)\n cv2.circle(traj, (x_gt,y_gt), 1 , (0,255,0), 2)\n \n\n cv2.rectangle(traj, (10,10), (700,150), (0,0,0), -1)\n text1 = 'orb Coordinates: x = {0:02f}m y = {1:02f}m z = {2:02f}m'.format(float(t_f[0]),float(t_f[1]),float(t_f[2]))\n cv2.putText(traj, text1, textOrg1, cv2.FONT_HERSHEY_PLAIN,1,(255,255,255),1,8)\n\n text3 = 'gt Coordinates: x = {0:02f}m y = {1:02f}m z = {2:02f}m'.format(float(t_gt[0]),float(t_gt[1]),float(t_gt[2]))\n cv2.putText(traj, text3, textOrg3, cv2.FONT_HERSHEY_PLAIN,1,(255,255,255),1,8)\n\n feature_img = cv2.drawKeypoints(currImage_c, kp_curr, None)\n\n cv2.imshow(\"trajectory\", traj)\n cv2.imshow(\"feat_img\", feature_img)\n\n cv2.waitKey(1)\n \n cv2.imwrite(\"result_{0:02d}.png\".format(SEQ_NUM),traj)",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
import os
import random
import argparse
from vapory import *
from data import colors, object_types
class Torus(POVRayElement):
""""""
def render_scene(filename, object_type, color, location, rotation):
assert (object_type in object_types)
assert (color in colors)
color = colors[color]
size = 2
radius = size/2
attributes = Texture(Pigment('color', color)), Finish('ambient', 0.7), 'rotate', (0, rotation, 0)
if object_type == 'box':
location.insert(1, size/2)
obj = Box([x - size/2 for x in location], [x + size/2 for x in location], *attributes)
if object_type == 'sphere':
location.insert(1, radius)
obj = Sphere(location, radius, *attributes)
if object_type == 'torus':
location.insert(1, radius/2)
obj = Torus(radius, radius/2, 'translate', location, *attributes)
if object_type == 'ellipsoid':
location.insert(1, radius)
obj = Sphere(location, radius, 'scale', (0.75, 0.45, 1.5), *attributes)
if object_type == 'cylinder':
location.insert(1, 0)
location2 = list(location)
location2[1] = size*2
obj = Cylinder(location, location2, radius, *attributes)
camera = Camera('location', [0, 8, 7], 'look_at', [0, 0, 0])
light = LightSource([0, 10, 0], 'color', [1, 1, 1])
chessboard = Plane([0, 1, 0], 0, 'hollow',
Texture(Pigment('checker',
'color', [.47, .6, .74],
'color', [.34, 0.48, 0.6]),
'scale', 4), Finish('ambient', 0.5))
scene = Scene(camera, objects=[light, obj, chessboard])
scene.render(filename, width=128, height=128, antialiasing=1.0)
parser = argparse.ArgumentParser()
parser.add_argument('--n_samples', type=int, default=100)
parser.add_argument('--seed', type=int, default=2018)
args = parser.parse_args()
random.seed(args.seed)
os.makedirs('assets', exist_ok=True)
print("Rendering scenes...")
for color in colors:
for object_type in object_types:
for i in range(args.n_samples):
filename = 'assets/%s-%s-%d' % (color, object_type, i)
if os.path.exists(filename):
print("%s exists, skipping" % filename)
continue
location = [random.uniform(-3, 3), random.uniform(-3, 3)]
rotation = random.uniform(0, 360)
render_scene(filename, object_type, color, location, rotation)
print("Finished")
|
normal
|
{
"blob_id": "f8972067fa88e7e74e05cdcc7bdec184116dec4a",
"index": 7771,
"step-1": "<mask token>\n\n\nclass Torus(POVRayElement):\n \"\"\"\"\"\"\n\n\ndef render_scene(filename, object_type, color, location, rotation):\n assert object_type in object_types\n assert color in colors\n color = colors[color]\n size = 2\n radius = size / 2\n attributes = Texture(Pigment('color', color)), Finish('ambient', 0.7\n ), 'rotate', (0, rotation, 0)\n if object_type == 'box':\n location.insert(1, size / 2)\n obj = Box([(x - size / 2) for x in location], [(x + size / 2) for x in\n location], *attributes)\n if object_type == 'sphere':\n location.insert(1, radius)\n obj = Sphere(location, radius, *attributes)\n if object_type == 'torus':\n location.insert(1, radius / 2)\n obj = Torus(radius, radius / 2, 'translate', location, *attributes)\n if object_type == 'ellipsoid':\n location.insert(1, radius)\n obj = Sphere(location, radius, 'scale', (0.75, 0.45, 1.5), *attributes)\n if object_type == 'cylinder':\n location.insert(1, 0)\n location2 = list(location)\n location2[1] = size * 2\n obj = Cylinder(location, location2, radius, *attributes)\n camera = Camera('location', [0, 8, 7], 'look_at', [0, 0, 0])\n light = LightSource([0, 10, 0], 'color', [1, 1, 1])\n chessboard = Plane([0, 1, 0], 0, 'hollow', Texture(Pigment('checker',\n 'color', [0.47, 0.6, 0.74], 'color', [0.34, 0.48, 0.6]), 'scale', 4\n ), Finish('ambient', 0.5))\n scene = Scene(camera, objects=[light, obj, chessboard])\n scene.render(filename, width=128, height=128, antialiasing=1.0)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass Torus(POVRayElement):\n \"\"\"\"\"\"\n\n\ndef render_scene(filename, object_type, color, location, rotation):\n assert object_type in object_types\n assert color in colors\n color = colors[color]\n size = 2\n radius = size / 2\n attributes = Texture(Pigment('color', color)), Finish('ambient', 0.7\n ), 'rotate', (0, rotation, 0)\n if object_type == 'box':\n location.insert(1, size / 2)\n obj = Box([(x - size / 2) for x in location], [(x + size / 2) for x in\n location], *attributes)\n if object_type == 'sphere':\n location.insert(1, radius)\n obj = Sphere(location, radius, *attributes)\n if object_type == 'torus':\n location.insert(1, radius / 2)\n obj = Torus(radius, radius / 2, 'translate', location, *attributes)\n if object_type == 'ellipsoid':\n location.insert(1, radius)\n obj = Sphere(location, radius, 'scale', (0.75, 0.45, 1.5), *attributes)\n if object_type == 'cylinder':\n location.insert(1, 0)\n location2 = list(location)\n location2[1] = size * 2\n obj = Cylinder(location, location2, radius, *attributes)\n camera = Camera('location', [0, 8, 7], 'look_at', [0, 0, 0])\n light = LightSource([0, 10, 0], 'color', [1, 1, 1])\n chessboard = Plane([0, 1, 0], 0, 'hollow', Texture(Pigment('checker',\n 'color', [0.47, 0.6, 0.74], 'color', [0.34, 0.48, 0.6]), 'scale', 4\n ), Finish('ambient', 0.5))\n scene = Scene(camera, objects=[light, obj, chessboard])\n scene.render(filename, width=128, height=128, antialiasing=1.0)\n\n\n<mask token>\nparser.add_argument('--n_samples', type=int, default=100)\nparser.add_argument('--seed', type=int, default=2018)\n<mask token>\nrandom.seed(args.seed)\nos.makedirs('assets', exist_ok=True)\nprint('Rendering scenes...')\nfor color in colors:\n for object_type in object_types:\n for i in range(args.n_samples):\n filename = 'assets/%s-%s-%d' % (color, object_type, i)\n if os.path.exists(filename):\n print('%s exists, skipping' % filename)\n continue\n location = [random.uniform(-3, 3), random.uniform(-3, 3)]\n rotation = random.uniform(0, 360)\n render_scene(filename, object_type, color, location, rotation)\nprint('Finished')\n",
"step-3": "<mask token>\n\n\nclass Torus(POVRayElement):\n \"\"\"\"\"\"\n\n\ndef render_scene(filename, object_type, color, location, rotation):\n assert object_type in object_types\n assert color in colors\n color = colors[color]\n size = 2\n radius = size / 2\n attributes = Texture(Pigment('color', color)), Finish('ambient', 0.7\n ), 'rotate', (0, rotation, 0)\n if object_type == 'box':\n location.insert(1, size / 2)\n obj = Box([(x - size / 2) for x in location], [(x + size / 2) for x in\n location], *attributes)\n if object_type == 'sphere':\n location.insert(1, radius)\n obj = Sphere(location, radius, *attributes)\n if object_type == 'torus':\n location.insert(1, radius / 2)\n obj = Torus(radius, radius / 2, 'translate', location, *attributes)\n if object_type == 'ellipsoid':\n location.insert(1, radius)\n obj = Sphere(location, radius, 'scale', (0.75, 0.45, 1.5), *attributes)\n if object_type == 'cylinder':\n location.insert(1, 0)\n location2 = list(location)\n location2[1] = size * 2\n obj = Cylinder(location, location2, radius, *attributes)\n camera = Camera('location', [0, 8, 7], 'look_at', [0, 0, 0])\n light = LightSource([0, 10, 0], 'color', [1, 1, 1])\n chessboard = Plane([0, 1, 0], 0, 'hollow', Texture(Pigment('checker',\n 'color', [0.47, 0.6, 0.74], 'color', [0.34, 0.48, 0.6]), 'scale', 4\n ), Finish('ambient', 0.5))\n scene = Scene(camera, objects=[light, obj, chessboard])\n scene.render(filename, width=128, height=128, antialiasing=1.0)\n\n\nparser = argparse.ArgumentParser()\nparser.add_argument('--n_samples', type=int, default=100)\nparser.add_argument('--seed', type=int, default=2018)\nargs = parser.parse_args()\nrandom.seed(args.seed)\nos.makedirs('assets', exist_ok=True)\nprint('Rendering scenes...')\nfor color in colors:\n for object_type in object_types:\n for i in range(args.n_samples):\n filename = 'assets/%s-%s-%d' % (color, object_type, i)\n if os.path.exists(filename):\n print('%s exists, skipping' % filename)\n continue\n location = [random.uniform(-3, 3), random.uniform(-3, 3)]\n rotation = random.uniform(0, 360)\n render_scene(filename, object_type, color, location, rotation)\nprint('Finished')\n",
"step-4": "import os\nimport random\nimport argparse\nfrom vapory import *\nfrom data import colors, object_types\n\n\nclass Torus(POVRayElement):\n \"\"\"\"\"\"\n\n\ndef render_scene(filename, object_type, color, location, rotation):\n assert object_type in object_types\n assert color in colors\n color = colors[color]\n size = 2\n radius = size / 2\n attributes = Texture(Pigment('color', color)), Finish('ambient', 0.7\n ), 'rotate', (0, rotation, 0)\n if object_type == 'box':\n location.insert(1, size / 2)\n obj = Box([(x - size / 2) for x in location], [(x + size / 2) for x in\n location], *attributes)\n if object_type == 'sphere':\n location.insert(1, radius)\n obj = Sphere(location, radius, *attributes)\n if object_type == 'torus':\n location.insert(1, radius / 2)\n obj = Torus(radius, radius / 2, 'translate', location, *attributes)\n if object_type == 'ellipsoid':\n location.insert(1, radius)\n obj = Sphere(location, radius, 'scale', (0.75, 0.45, 1.5), *attributes)\n if object_type == 'cylinder':\n location.insert(1, 0)\n location2 = list(location)\n location2[1] = size * 2\n obj = Cylinder(location, location2, radius, *attributes)\n camera = Camera('location', [0, 8, 7], 'look_at', [0, 0, 0])\n light = LightSource([0, 10, 0], 'color', [1, 1, 1])\n chessboard = Plane([0, 1, 0], 0, 'hollow', Texture(Pigment('checker',\n 'color', [0.47, 0.6, 0.74], 'color', [0.34, 0.48, 0.6]), 'scale', 4\n ), Finish('ambient', 0.5))\n scene = Scene(camera, objects=[light, obj, chessboard])\n scene.render(filename, width=128, height=128, antialiasing=1.0)\n\n\nparser = argparse.ArgumentParser()\nparser.add_argument('--n_samples', type=int, default=100)\nparser.add_argument('--seed', type=int, default=2018)\nargs = parser.parse_args()\nrandom.seed(args.seed)\nos.makedirs('assets', exist_ok=True)\nprint('Rendering scenes...')\nfor color in colors:\n for object_type in object_types:\n for i in range(args.n_samples):\n filename = 'assets/%s-%s-%d' % (color, object_type, i)\n if os.path.exists(filename):\n print('%s exists, skipping' % filename)\n continue\n location = [random.uniform(-3, 3), random.uniform(-3, 3)]\n rotation = random.uniform(0, 360)\n render_scene(filename, object_type, color, location, rotation)\nprint('Finished')\n",
"step-5": "import os\nimport random\nimport argparse\n\nfrom vapory import *\n\nfrom data import colors, object_types\n\n\nclass Torus(POVRayElement):\n \"\"\"\"\"\"\n\n\ndef render_scene(filename, object_type, color, location, rotation):\n assert (object_type in object_types)\n assert (color in colors)\n\n color = colors[color]\n size = 2\n radius = size/2\n attributes = Texture(Pigment('color', color)), Finish('ambient', 0.7), 'rotate', (0, rotation, 0)\n if object_type == 'box':\n location.insert(1, size/2)\n obj = Box([x - size/2 for x in location], [x + size/2 for x in location], *attributes)\n if object_type == 'sphere':\n location.insert(1, radius)\n obj = Sphere(location, radius, *attributes)\n if object_type == 'torus':\n location.insert(1, radius/2)\n obj = Torus(radius, radius/2, 'translate', location, *attributes)\n if object_type == 'ellipsoid':\n location.insert(1, radius)\n obj = Sphere(location, radius, 'scale', (0.75, 0.45, 1.5), *attributes)\n if object_type == 'cylinder':\n location.insert(1, 0)\n location2 = list(location)\n location2[1] = size*2\n obj = Cylinder(location, location2, radius, *attributes)\n\n camera = Camera('location', [0, 8, 7], 'look_at', [0, 0, 0])\n light = LightSource([0, 10, 0], 'color', [1, 1, 1])\n\n chessboard = Plane([0, 1, 0], 0, 'hollow',\n Texture(Pigment('checker',\n 'color', [.47, .6, .74],\n 'color', [.34, 0.48, 0.6]),\n 'scale', 4), Finish('ambient', 0.5))\n\n scene = Scene(camera, objects=[light, obj, chessboard])\n scene.render(filename, width=128, height=128, antialiasing=1.0)\n\n\nparser = argparse.ArgumentParser()\nparser.add_argument('--n_samples', type=int, default=100)\nparser.add_argument('--seed', type=int, default=2018)\nargs = parser.parse_args()\n\nrandom.seed(args.seed)\n\nos.makedirs('assets', exist_ok=True)\n\nprint(\"Rendering scenes...\")\nfor color in colors:\n for object_type in object_types:\n for i in range(args.n_samples):\n filename = 'assets/%s-%s-%d' % (color, object_type, i)\n if os.path.exists(filename):\n print(\"%s exists, skipping\" % filename)\n continue\n location = [random.uniform(-3, 3), random.uniform(-3, 3)]\n rotation = random.uniform(0, 360)\n render_scene(filename, object_type, color, location, rotation)\n\nprint(\"Finished\")\n",
"step-ids": [
3,
4,
5,
6,
7
]
}
|
[
3,
4,
5,
6,
7
] |
# Author: Charse
# Python list usage
import copy
name = ["111", "222", "333", "444", "555"]
# Getting elements from the list
print(name[0], name[2]) # 111 333
print(name[1:3]) # slice: ['222', '333']
print(name[:3]) # ['111', '222', '333'] same as starting the slice at index 0
print(name[0:3]) # ['111', '222', '333']
print(name[-2:]) # ['444', '555'] the last two elements of name
# Adding elements to the list
name.append("666") # append directly at the end
name.insert(1, "999") # insert at a given position: put "999" at index 1, the elements after it shift back by one
print(name)
# Modifying elements in the list
name[0] = "000"
print(name)
# Removing elements
name.pop() # by default removes the element at the last index
print(name)
name.pop(2)
print(name)
# Getting the index of a given element
print(name.index("999"))
# Reverse: modifies the elements of the list in place
name.reverse()
print(name)
# Sort order: special characters, digits, uppercase letters, lowercase letters; modifies the list in place
name.sort()
print(name)
# name.clear() removes all items
# Copying lists
name2 = name.copy() # shallow copy: if the list contains a nested list and that nested list is modified, the copy sees the change too
print(name2)
name[1] = "xxx" # name2 is not affected by this
names = ["1", [1, 2], "2"]
names[1][0] = 9
print(names)
names1 = copy.copy(names) # shallow copy, same as list.copy(): nested objects are only copied by reference
names3 = name[:]
print("name3:", names3)
# Deep copy
names2 = copy.deepcopy(names)
# Modifying the nested list changes names and names1 alike
# The nested list inside names2 is not modified
names[1][1] = 3
print(names)
print(names1)
print(names2)
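# Expected result: names and names1 both show the nested list as [9, 3] (the
# shallow copies share it), while names2 still shows [9, 2] because deepcopy
# duplicated the nested list as well.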
# Iterating over the list
for i in names2:
print(i)
# Step printing: from index 0 to the end with a step of 2
print(name[0:-1:2])
# The 0 and -1 can be omitted
print(name[::2])
'''
Deep vs. shallow copy
'''
|
normal
|
{
"blob_id": "d517c1e2eb4d37a2584f1603c704efce6834df92",
"index": 7443,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nprint(name[0], name[2])\nprint(name[1:3])\nprint(name[:3])\nprint(name[0:3])\nprint(name[-2:])\nname.append('666')\nname.insert(1, '999')\nprint(name)\n<mask token>\nprint(name)\nname.pop()\nprint(name)\nname.pop(2)\nprint(name)\nprint(name.index('999'))\nname.reverse()\nprint(name)\nname.sort()\nprint(name)\n<mask token>\nprint(name2)\n<mask token>\nprint(names)\n<mask token>\nprint('name3:', names3)\n<mask token>\nprint(names)\nprint(names1)\nprint(names2)\nfor i in names2:\n print(i)\nprint(name[0:-1:2])\nprint(name[::2])\n<mask token>\n",
"step-3": "<mask token>\nname = ['111', '222', '333', '444', '555']\nprint(name[0], name[2])\nprint(name[1:3])\nprint(name[:3])\nprint(name[0:3])\nprint(name[-2:])\nname.append('666')\nname.insert(1, '999')\nprint(name)\nname[0] = '000'\nprint(name)\nname.pop()\nprint(name)\nname.pop(2)\nprint(name)\nprint(name.index('999'))\nname.reverse()\nprint(name)\nname.sort()\nprint(name)\nname2 = name.copy()\nprint(name2)\nname[1] = 'xxx'\nnames = ['1', [1, 2], '2']\nnames[1][0] = 9\nprint(names)\nnames1 = copy.copy(names)\nnames3 = name[:]\nprint('name3:', names3)\nnames2 = copy.deepcopy(names)\nnames[1][1] = 3\nprint(names)\nprint(names1)\nprint(names2)\nfor i in names2:\n print(i)\nprint(name[0:-1:2])\nprint(name[::2])\n<mask token>\n",
"step-4": "import copy\nname = ['111', '222', '333', '444', '555']\nprint(name[0], name[2])\nprint(name[1:3])\nprint(name[:3])\nprint(name[0:3])\nprint(name[-2:])\nname.append('666')\nname.insert(1, '999')\nprint(name)\nname[0] = '000'\nprint(name)\nname.pop()\nprint(name)\nname.pop(2)\nprint(name)\nprint(name.index('999'))\nname.reverse()\nprint(name)\nname.sort()\nprint(name)\nname2 = name.copy()\nprint(name2)\nname[1] = 'xxx'\nnames = ['1', [1, 2], '2']\nnames[1][0] = 9\nprint(names)\nnames1 = copy.copy(names)\nnames3 = name[:]\nprint('name3:', names3)\nnames2 = copy.deepcopy(names)\nnames[1][1] = 3\nprint(names)\nprint(names1)\nprint(names2)\nfor i in names2:\n print(i)\nprint(name[0:-1:2])\nprint(name[::2])\n<mask token>\n",
"step-5": "# Author: Charse\n# py 列表的使用\n\nimport copy\n\n\nname = [\"111\", \"222\", \"333\", \"444\", \"555\"]\n\n# 从列表中取得元素\nprint(name[0], name[2]) # 111 333\nprint(name[1:3]) # 切片 ['222', '333']\nprint(name[:3]) # ['111', '222', '333'] 与下标从0开始是一样的\nprint(name[0:3]) # ['111', '222', '333']\nprint(name[-2:]) # ['444', '555'] 与name\n\n# 往列表中添加元素\nname.append(\"666\") # 直接在末尾添加\nname.insert(1, \"999\") # 在指定位置插入 : 将999插入到下标为1的位置, 原来位置中元素就直接往后顺延\nprint(name)\n\n# 修改列表中元素\nname[0] = \"000\"\nprint(name)\n\n# 删除元素\nname.pop() # 默认是删除最后一个下标\nprint(name)\nname.pop(2)\nprint(name)\n\n# 取出指定元素的下标\nprint(name.index(\"999\"))\n\n# 反转 改变的是分组里面的元素\nname.reverse()\nprint(name)\n\n# 特殊字符, 数字, 大写字母, 小写字母排序. 改变的是数组中的元素\nname.sort()\nprint(name)\n\n# name.clear() remove all items 删除所有的元素\n\n# 复制列表\nname2 = name.copy() # 这个是浅copy,如果列表中还有列表,列表的中元素修改了,新的中也同样是修改了\nprint(name2)\nname[1] = \"xxx\" # name2中是不会进行修改的\n\nnames = [\"1\", [1, 2], \"2\"]\n\nnames[1][0] = 9\nprint(names)\n\nnames1 = copy.copy(names) # 这个是浅copy,与列表的copy是一样的.只是一个引用的copy\n\nnames3 = name[:]\n\nprint(\"name3:\", names3)\n\n\n# 进行深copy\nnames2 = copy.deepcopy(names)\n\n# 对列表的元素进行修改,两者是同样的被修改\n# names2 元素内的列表是不会被修改的\nnames[1][1] = 3\n\nprint(names)\nprint(names1)\nprint(names2)\n\n# 遍历列表\nfor i in names2:\n print(i)\n\n\n\n# 跳跃打印: 从0 开始打印, 到末尾, 步长为2\nprint(name[0:-1:2])\n# 0, -1可以进行省略\nprint(name[::2])\n\n\n'''\n深浅copy\n\n\n'''\n\n\n\n\n\n\n\n\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
import datetime
import logging
import os
from functools import lru_cache
from authlib.jose import JsonWebKey, jwt
from flask import g, request, jsonify
from lorem_ipsum.model import User, AppContext
import lorem_ipsum
from lorem_ipsum.model import Permission, BlacklistToken
LOGGER = logging.getLogger('lorem-ipsum')
def app_context():
if 'app_context' not in g:
g.app_context = lorem_ipsum.create_app_context()
return g.app_context
@lru_cache()
def get_jwk():
LOGGER.debug('Loading jwk from public key...')
key_data = None
with open(app_context().config['jwk_public_key_path'], 'rb') as _key_file:
key_data = _key_file.read()
LOGGER.debug(key_data)
key = JsonWebKey.import_key(key_data, {'kty': 'RSA'})
_jwks = {'keys': [{**key.as_dict(), 'kid': 'demo_key'}]}
LOGGER.debug(_jwks)
return _jwks
class AuthenticationError(ValueError):
pass
class AuthorizationError(ValueError):
pass
class BearerTokenValidator:
def __init__(self, access_token, app_context: AppContext):
self.access_token = access_token
user_service = app_context.user_service
self.blacklist_token_repo = app_context.blacklist_token_repo
self.payload = user_service.decode_auth_token(access_token, get_jwk())
def check_is_blacklisted(self):
is_blacklisted_token = BlacklistToken.check_blacklist(self.access_token, self.blacklist_token_repo)
if is_blacklisted_token:
LOGGER.debug('Token blacklisted.')
raise AuthenticationError('Invalid token.')
return self
def check_username_claim(self):
if not self.payload.get('sub'):
LOGGER.debug('Token missing sub.')
raise AuthorizationError('Forbidden.')
return self
def check_user_exists(self, user):
if not user:
LOGGER.debug('Token user not found.')
raise AuthorizationError('Forbidden.')
return self
def check_has_permissions(self, user: User, permissions: list):
has_permissions = True
for permission in permissions:
if not user.role.has_permission(Permission.from_enum(permission)):
LOGGER.debug(f'Missing permission {permission}.')
has_permissions = False
LOGGER.debug(f'Required permissions: {permissions}')
if not has_permissions:
raise AuthorizationError('Forbidden.')
return self
@staticmethod
def from_authorization_header(authorization_header: str, app_context: AppContext):
if not authorization_header:
LOGGER.debug('Authorization header not found.')
raise AuthenticationError('Invalid token.')
if 'Bearer ' not in authorization_header:
LOGGER.debug('Bearer token not found.')
raise AuthenticationError('Invalid token.')
access_token = authorization_header.split('Bearer')[1].strip()
LOGGER.debug(f'Bearer token is:\n"{access_token}"')
return BearerTokenValidator(access_token, app_context)
def should_skip_auth(flask_request):
"""
Return true if should skip auth, e.g. when method is OPTIONS like when performing a React request.
:param flask_request: Flask request.
:return:
"""
return flask_request.method in ['HEAD', 'OPTIONS']
def requires_permission(permissions: list):
def requires_permission_decorator(function):
def wrapper(*args, **kwargs):
LOGGER.info(f'Authorization...\n{request.headers}')
if should_skip_auth(request):
return jsonify('ok')
authorization_header = request.headers.get('Authorization')
context = app_context()
with context.transaction_manager.transaction:
bearer_token_validator = BearerTokenValidator.from_authorization_header(authorization_header, context) \
.check_is_blacklisted() \
.check_username_claim()
user = context.user_repo.get(username=bearer_token_validator.payload['sub'])
bearer_token_validator.check_user_exists(user) \
.check_has_permissions(user, permissions)
g.access_token = bearer_token_validator.access_token
g.user = user
_result = function(*args, **kwargs)
return _result
wrapper.__name__ = function.__name__
return wrapper
return requires_permission_decorator
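# A minimal usage sketch of the decorator (the Flask route and the permission
# value below are illustrative placeholders, not part of this module; whatever
# enum member the application defines is passed through Permission.from_enum):
#
#   @app.route('/books', methods=['GET'])
#   @requires_permission([SOME_PERMISSION_MEMBER])
#   def list_books():
#       # by this point the decorator has populated g.user and g.access_token
#       return jsonify({'user': g.user.username})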
class ExceptionHandlers:
def __init__(self, app):
@app.errorhandler(AuthorizationError)
def handle_authorization_exception(e):
"""Return403 forbidden."""
return jsonify(str(e)), 403
@app.errorhandler(AuthenticationError)
def handle_authentication_exception(e):
"""Return401 authentication error."""
return jsonify(str(e)), 401
@lru_cache()
def jwk_key():
jwk_path = os.environ.get('jwk_private_key_path') or app_context().config['jwk_private_key_path']
with open(jwk_path, 'rb') as f:
key = JsonWebKey.import_key(f.read())
return key
def new_token(payload: dict):
key = jwk_key()
header = {'alg': 'RS256', 'kid': 'demo_key'}
token = jwt.encode(header, payload, key)
LOGGER.debug(token)
return token.decode('utf-8')
def issue_token_for_user(user: User):
access_token = new_token({
"iss": "lorem.ipsum.dev",
"aud": "lorem.ipsum.auth",
"sub": user.username,
"email": user.email,
"roles": [
user.role.name
],
"exp": datetime.datetime.now(tz=datetime.timezone.utc) + datetime.timedelta(hours=4),
"iat": datetime.datetime.now(tz=datetime.timezone.utc)
})
return access_token
|
normal
|
{
"blob_id": "97d4387c7bfd141b5a7019b221adb550105d4351",
"index": 604,
"step-1": "<mask token>\n\n\nclass AuthorizationError(ValueError):\n pass\n\n\nclass BearerTokenValidator:\n\n def __init__(self, access_token, app_context: AppContext):\n self.access_token = access_token\n user_service = app_context.user_service\n self.blacklist_token_repo = app_context.blacklist_token_repo\n self.payload = user_service.decode_auth_token(access_token, get_jwk())\n\n def check_is_blacklisted(self):\n is_blacklisted_token = BlacklistToken.check_blacklist(self.\n access_token, self.blacklist_token_repo)\n if is_blacklisted_token:\n LOGGER.debug('Token blacklisted.')\n raise AuthenticationError('Invalid token.')\n return self\n\n def check_username_claim(self):\n if not self.payload.get('sub'):\n LOGGER.debug('Token missing sub.')\n raise AuthorizationError('Forbidden.')\n return self\n\n def check_user_exists(self, user):\n if not user:\n LOGGER.debug('Token user not found.')\n raise AuthorizationError('Forbidden.')\n return self\n\n def check_has_permissions(self, user: User, permissions: list):\n has_permissions = True\n for permission in permissions:\n if not user.role.has_permission(Permission.from_enum(permission)):\n LOGGER.debug(f'Missing permission {permission}.')\n has_permissions = False\n LOGGER.debug(f'Required permissions: {permissions}')\n if not has_permissions:\n raise AuthorizationError('Forbidden.')\n return self\n\n @staticmethod\n def from_authorization_header(authorization_header: str, app_context:\n AppContext):\n if not authorization_header:\n LOGGER.debug('Authorization header not found.')\n raise AuthenticationError('Invalid token.')\n if 'Bearer ' not in authorization_header:\n LOGGER.debug('Bearer token not found.')\n raise AuthenticationError('Invalid token.')\n access_token = authorization_header.split('Bearer')[1].strip()\n LOGGER.debug(f'Bearer token is:\\n\"{access_token}\"')\n return BearerTokenValidator(access_token, app_context)\n\n\n<mask token>\n\n\nclass ExceptionHandlers:\n\n def __init__(self, app):\n\n @app.errorhandler(AuthorizationError)\n def handle_authorization_exception(e):\n \"\"\"Return403 forbidden.\"\"\"\n return jsonify(str(e)), 403\n\n @app.errorhandler(AuthenticationError)\n def handle_authentication_exception(e):\n \"\"\"Return401 authentication error.\"\"\"\n return jsonify(str(e)), 401\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass AuthenticationError(ValueError):\n pass\n\n\nclass AuthorizationError(ValueError):\n pass\n\n\nclass BearerTokenValidator:\n\n def __init__(self, access_token, app_context: AppContext):\n self.access_token = access_token\n user_service = app_context.user_service\n self.blacklist_token_repo = app_context.blacklist_token_repo\n self.payload = user_service.decode_auth_token(access_token, get_jwk())\n\n def check_is_blacklisted(self):\n is_blacklisted_token = BlacklistToken.check_blacklist(self.\n access_token, self.blacklist_token_repo)\n if is_blacklisted_token:\n LOGGER.debug('Token blacklisted.')\n raise AuthenticationError('Invalid token.')\n return self\n\n def check_username_claim(self):\n if not self.payload.get('sub'):\n LOGGER.debug('Token missing sub.')\n raise AuthorizationError('Forbidden.')\n return self\n\n def check_user_exists(self, user):\n if not user:\n LOGGER.debug('Token user not found.')\n raise AuthorizationError('Forbidden.')\n return self\n\n def check_has_permissions(self, user: User, permissions: list):\n has_permissions = True\n for permission in permissions:\n if not user.role.has_permission(Permission.from_enum(permission)):\n LOGGER.debug(f'Missing permission {permission}.')\n has_permissions = False\n LOGGER.debug(f'Required permissions: {permissions}')\n if not has_permissions:\n raise AuthorizationError('Forbidden.')\n return self\n\n @staticmethod\n def from_authorization_header(authorization_header: str, app_context:\n AppContext):\n if not authorization_header:\n LOGGER.debug('Authorization header not found.')\n raise AuthenticationError('Invalid token.')\n if 'Bearer ' not in authorization_header:\n LOGGER.debug('Bearer token not found.')\n raise AuthenticationError('Invalid token.')\n access_token = authorization_header.split('Bearer')[1].strip()\n LOGGER.debug(f'Bearer token is:\\n\"{access_token}\"')\n return BearerTokenValidator(access_token, app_context)\n\n\n<mask token>\n\n\nclass ExceptionHandlers:\n\n def __init__(self, app):\n\n @app.errorhandler(AuthorizationError)\n def handle_authorization_exception(e):\n \"\"\"Return403 forbidden.\"\"\"\n return jsonify(str(e)), 403\n\n @app.errorhandler(AuthenticationError)\n def handle_authentication_exception(e):\n \"\"\"Return401 authentication error.\"\"\"\n return jsonify(str(e)), 401\n\n\n<mask token>\n\n\ndef issue_token_for_user(user: User):\n access_token = new_token({'iss': 'lorem.ipsum.dev', 'aud':\n 'lorem.ipsum.auth', 'sub': user.username, 'email': user.email,\n 'roles': [user.role.name], 'exp': datetime.datetime.now(tz=datetime\n .timezone.utc) + datetime.timedelta(hours=4), 'iat': datetime.\n datetime.now(tz=datetime.timezone.utc)})\n return access_token\n",
"step-3": "<mask token>\n\n\ndef app_context():\n if 'app_context' not in g:\n g.app_context = lorem_ipsum.create_app_context()\n return g.app_context\n\n\n@lru_cache()\ndef get_jwk():\n LOGGER.debug('Loading jwk from public key...')\n key_data = None\n with open(app_context().config['jwk_public_key_path'], 'rb') as _key_file:\n key_data = _key_file.read()\n LOGGER.debug(key_data)\n key = JsonWebKey.import_key(key_data, {'kty': 'RSA'})\n _jwks = {'keys': [{**key.as_dict(), 'kid': 'demo_key'}]}\n LOGGER.debug(_jwks)\n return _jwks\n\n\nclass AuthenticationError(ValueError):\n pass\n\n\nclass AuthorizationError(ValueError):\n pass\n\n\nclass BearerTokenValidator:\n\n def __init__(self, access_token, app_context: AppContext):\n self.access_token = access_token\n user_service = app_context.user_service\n self.blacklist_token_repo = app_context.blacklist_token_repo\n self.payload = user_service.decode_auth_token(access_token, get_jwk())\n\n def check_is_blacklisted(self):\n is_blacklisted_token = BlacklistToken.check_blacklist(self.\n access_token, self.blacklist_token_repo)\n if is_blacklisted_token:\n LOGGER.debug('Token blacklisted.')\n raise AuthenticationError('Invalid token.')\n return self\n\n def check_username_claim(self):\n if not self.payload.get('sub'):\n LOGGER.debug('Token missing sub.')\n raise AuthorizationError('Forbidden.')\n return self\n\n def check_user_exists(self, user):\n if not user:\n LOGGER.debug('Token user not found.')\n raise AuthorizationError('Forbidden.')\n return self\n\n def check_has_permissions(self, user: User, permissions: list):\n has_permissions = True\n for permission in permissions:\n if not user.role.has_permission(Permission.from_enum(permission)):\n LOGGER.debug(f'Missing permission {permission}.')\n has_permissions = False\n LOGGER.debug(f'Required permissions: {permissions}')\n if not has_permissions:\n raise AuthorizationError('Forbidden.')\n return self\n\n @staticmethod\n def from_authorization_header(authorization_header: str, app_context:\n AppContext):\n if not authorization_header:\n LOGGER.debug('Authorization header not found.')\n raise AuthenticationError('Invalid token.')\n if 'Bearer ' not in authorization_header:\n LOGGER.debug('Bearer token not found.')\n raise AuthenticationError('Invalid token.')\n access_token = authorization_header.split('Bearer')[1].strip()\n LOGGER.debug(f'Bearer token is:\\n\"{access_token}\"')\n return BearerTokenValidator(access_token, app_context)\n\n\ndef should_skip_auth(flask_request):\n \"\"\"\n Return true if should skip auth, e.g. 
when method is OPTIONS like when performing a React request.\n :param flask_request: Flask request.\n :return:\n \"\"\"\n return flask_request.method in ['HEAD', 'OPTIONS']\n\n\n<mask token>\n\n\nclass ExceptionHandlers:\n\n def __init__(self, app):\n\n @app.errorhandler(AuthorizationError)\n def handle_authorization_exception(e):\n \"\"\"Return403 forbidden.\"\"\"\n return jsonify(str(e)), 403\n\n @app.errorhandler(AuthenticationError)\n def handle_authentication_exception(e):\n \"\"\"Return401 authentication error.\"\"\"\n return jsonify(str(e)), 401\n\n\n@lru_cache()\ndef jwk_key():\n jwk_path = os.environ.get('jwk_private_key_path') or app_context().config[\n 'jwk_private_key_path']\n with open(jwk_path, 'rb') as f:\n key = JsonWebKey.import_key(f.read())\n return key\n\n\ndef new_token(payload: dict):\n key = jwk_key()\n header = {'alg': 'RS256', 'kid': 'demo_key'}\n token = jwt.encode(header, payload, key)\n LOGGER.debug(token)\n return token.decode('utf-8')\n\n\ndef issue_token_for_user(user: User):\n access_token = new_token({'iss': 'lorem.ipsum.dev', 'aud':\n 'lorem.ipsum.auth', 'sub': user.username, 'email': user.email,\n 'roles': [user.role.name], 'exp': datetime.datetime.now(tz=datetime\n .timezone.utc) + datetime.timedelta(hours=4), 'iat': datetime.\n datetime.now(tz=datetime.timezone.utc)})\n return access_token\n",
"step-4": "<mask token>\nLOGGER = logging.getLogger('lorem-ipsum')\n\n\ndef app_context():\n if 'app_context' not in g:\n g.app_context = lorem_ipsum.create_app_context()\n return g.app_context\n\n\n@lru_cache()\ndef get_jwk():\n LOGGER.debug('Loading jwk from public key...')\n key_data = None\n with open(app_context().config['jwk_public_key_path'], 'rb') as _key_file:\n key_data = _key_file.read()\n LOGGER.debug(key_data)\n key = JsonWebKey.import_key(key_data, {'kty': 'RSA'})\n _jwks = {'keys': [{**key.as_dict(), 'kid': 'demo_key'}]}\n LOGGER.debug(_jwks)\n return _jwks\n\n\nclass AuthenticationError(ValueError):\n pass\n\n\nclass AuthorizationError(ValueError):\n pass\n\n\nclass BearerTokenValidator:\n\n def __init__(self, access_token, app_context: AppContext):\n self.access_token = access_token\n user_service = app_context.user_service\n self.blacklist_token_repo = app_context.blacklist_token_repo\n self.payload = user_service.decode_auth_token(access_token, get_jwk())\n\n def check_is_blacklisted(self):\n is_blacklisted_token = BlacklistToken.check_blacklist(self.\n access_token, self.blacklist_token_repo)\n if is_blacklisted_token:\n LOGGER.debug('Token blacklisted.')\n raise AuthenticationError('Invalid token.')\n return self\n\n def check_username_claim(self):\n if not self.payload.get('sub'):\n LOGGER.debug('Token missing sub.')\n raise AuthorizationError('Forbidden.')\n return self\n\n def check_user_exists(self, user):\n if not user:\n LOGGER.debug('Token user not found.')\n raise AuthorizationError('Forbidden.')\n return self\n\n def check_has_permissions(self, user: User, permissions: list):\n has_permissions = True\n for permission in permissions:\n if not user.role.has_permission(Permission.from_enum(permission)):\n LOGGER.debug(f'Missing permission {permission}.')\n has_permissions = False\n LOGGER.debug(f'Required permissions: {permissions}')\n if not has_permissions:\n raise AuthorizationError('Forbidden.')\n return self\n\n @staticmethod\n def from_authorization_header(authorization_header: str, app_context:\n AppContext):\n if not authorization_header:\n LOGGER.debug('Authorization header not found.')\n raise AuthenticationError('Invalid token.')\n if 'Bearer ' not in authorization_header:\n LOGGER.debug('Bearer token not found.')\n raise AuthenticationError('Invalid token.')\n access_token = authorization_header.split('Bearer')[1].strip()\n LOGGER.debug(f'Bearer token is:\\n\"{access_token}\"')\n return BearerTokenValidator(access_token, app_context)\n\n\ndef should_skip_auth(flask_request):\n \"\"\"\n Return true if should skip auth, e.g. 
when method is OPTIONS like when performing a React request.\n :param flask_request: Flask request.\n :return:\n \"\"\"\n return flask_request.method in ['HEAD', 'OPTIONS']\n\n\ndef requires_permission(permissions: list):\n\n def requires_permission_decorator(function):\n\n def wrapper(*args, **kwargs):\n LOGGER.info(f'Authorization...\\n{request.headers}')\n if should_skip_auth(request):\n return jsonify('ok')\n authorization_header = request.headers.get('Authorization')\n context = app_context()\n with context.transaction_manager.transaction:\n bearer_token_validator = (BearerTokenValidator.\n from_authorization_header(authorization_header, context\n ).check_is_blacklisted().check_username_claim())\n user = context.user_repo.get(username=\n bearer_token_validator.payload['sub'])\n bearer_token_validator.check_user_exists(user\n ).check_has_permissions(user, permissions)\n g.access_token = bearer_token_validator.access_token\n g.user = user\n _result = function(*args, **kwargs)\n return _result\n wrapper.__name__ = function.__name__\n return wrapper\n return requires_permission_decorator\n\n\nclass ExceptionHandlers:\n\n def __init__(self, app):\n\n @app.errorhandler(AuthorizationError)\n def handle_authorization_exception(e):\n \"\"\"Return403 forbidden.\"\"\"\n return jsonify(str(e)), 403\n\n @app.errorhandler(AuthenticationError)\n def handle_authentication_exception(e):\n \"\"\"Return401 authentication error.\"\"\"\n return jsonify(str(e)), 401\n\n\n@lru_cache()\ndef jwk_key():\n jwk_path = os.environ.get('jwk_private_key_path') or app_context().config[\n 'jwk_private_key_path']\n with open(jwk_path, 'rb') as f:\n key = JsonWebKey.import_key(f.read())\n return key\n\n\ndef new_token(payload: dict):\n key = jwk_key()\n header = {'alg': 'RS256', 'kid': 'demo_key'}\n token = jwt.encode(header, payload, key)\n LOGGER.debug(token)\n return token.decode('utf-8')\n\n\ndef issue_token_for_user(user: User):\n access_token = new_token({'iss': 'lorem.ipsum.dev', 'aud':\n 'lorem.ipsum.auth', 'sub': user.username, 'email': user.email,\n 'roles': [user.role.name], 'exp': datetime.datetime.now(tz=datetime\n .timezone.utc) + datetime.timedelta(hours=4), 'iat': datetime.\n datetime.now(tz=datetime.timezone.utc)})\n return access_token\n",
"step-5": "import datetime\nimport logging\nimport os\nfrom functools import lru_cache\nfrom authlib.jose import JsonWebKey, jwt\n\nfrom flask import g, request, jsonify\nfrom lorem_ipsum.model import User, AppContext\nimport lorem_ipsum\nfrom lorem_ipsum.model import Permission, BlacklistToken\n\nLOGGER = logging.getLogger('lorem-ipsum')\n\n\ndef app_context():\n if 'app_context' not in g:\n g.app_context = lorem_ipsum.create_app_context()\n return g.app_context\n\n\n@lru_cache()\ndef get_jwk():\n LOGGER.debug('Loading jwk from public key...')\n key_data = None\n with open(app_context().config['jwk_public_key_path'], 'rb') as _key_file:\n key_data = _key_file.read()\n LOGGER.debug(key_data)\n key = JsonWebKey.import_key(key_data, {'kty': 'RSA'})\n _jwks = {'keys': [{**key.as_dict(), 'kid': 'demo_key'}]}\n LOGGER.debug(_jwks)\n return _jwks\n\n\nclass AuthenticationError(ValueError):\n pass\n\n\nclass AuthorizationError(ValueError):\n pass\n\n\nclass BearerTokenValidator:\n def __init__(self, access_token, app_context: AppContext):\n self.access_token = access_token\n user_service = app_context.user_service\n self.blacklist_token_repo = app_context.blacklist_token_repo\n self.payload = user_service.decode_auth_token(access_token, get_jwk())\n\n def check_is_blacklisted(self):\n is_blacklisted_token = BlacklistToken.check_blacklist(self.access_token, self.blacklist_token_repo)\n if is_blacklisted_token:\n LOGGER.debug('Token blacklisted.')\n raise AuthenticationError('Invalid token.')\n return self\n\n def check_username_claim(self):\n if not self.payload.get('sub'):\n LOGGER.debug('Token missing sub.')\n raise AuthorizationError('Forbidden.')\n return self\n\n def check_user_exists(self, user):\n if not user:\n LOGGER.debug('Token user not found.')\n raise AuthorizationError('Forbidden.')\n return self\n\n def check_has_permissions(self, user: User, permissions: list):\n has_permissions = True\n for permission in permissions:\n if not user.role.has_permission(Permission.from_enum(permission)):\n LOGGER.debug(f'Missing permission {permission}.')\n has_permissions = False\n LOGGER.debug(f'Required permissions: {permissions}')\n if not has_permissions:\n raise AuthorizationError('Forbidden.')\n return self\n\n @staticmethod\n def from_authorization_header(authorization_header: str, app_context: AppContext):\n if not authorization_header:\n LOGGER.debug('Authorization header not found.')\n raise AuthenticationError('Invalid token.')\n if 'Bearer ' not in authorization_header:\n LOGGER.debug('Bearer token not found.')\n raise AuthenticationError('Invalid token.')\n access_token = authorization_header.split('Bearer')[1].strip()\n LOGGER.debug(f'Bearer token is:\\n\"{access_token}\"')\n return BearerTokenValidator(access_token, app_context)\n\n\ndef should_skip_auth(flask_request):\n \"\"\"\n Return true if should skip auth, e.g. 
when method is OPTIONS like when performing a React request.\n :param flask_request: Flask request.\n :return:\n \"\"\"\n return flask_request.method in ['HEAD', 'OPTIONS']\n\n\ndef requires_permission(permissions: list):\n def requires_permission_decorator(function):\n def wrapper(*args, **kwargs):\n LOGGER.info(f'Authorization...\\n{request.headers}')\n if should_skip_auth(request):\n return jsonify('ok')\n authorization_header = request.headers.get('Authorization')\n context = app_context()\n with context.transaction_manager.transaction:\n bearer_token_validator = BearerTokenValidator.from_authorization_header(authorization_header, context) \\\n .check_is_blacklisted() \\\n .check_username_claim()\n user = context.user_repo.get(username=bearer_token_validator.payload['sub'])\n\n bearer_token_validator.check_user_exists(user) \\\n .check_has_permissions(user, permissions)\n g.access_token = bearer_token_validator.access_token\n g.user = user\n\n _result = function(*args, **kwargs)\n return _result\n\n wrapper.__name__ = function.__name__\n return wrapper\n\n return requires_permission_decorator\n\n\nclass ExceptionHandlers:\n def __init__(self, app):\n @app.errorhandler(AuthorizationError)\n def handle_authorization_exception(e):\n \"\"\"Return403 forbidden.\"\"\"\n return jsonify(str(e)), 403\n\n @app.errorhandler(AuthenticationError)\n def handle_authentication_exception(e):\n \"\"\"Return401 authentication error.\"\"\"\n return jsonify(str(e)), 401\n\n\n@lru_cache()\ndef jwk_key():\n jwk_path = os.environ.get('jwk_private_key_path') or app_context().config['jwk_private_key_path']\n with open(jwk_path, 'rb') as f:\n key = JsonWebKey.import_key(f.read())\n return key\n\n\ndef new_token(payload: dict):\n key = jwk_key()\n header = {'alg': 'RS256', 'kid': 'demo_key'}\n token = jwt.encode(header, payload, key)\n LOGGER.debug(token)\n return token.decode('utf-8')\n\n\ndef issue_token_for_user(user: User):\n access_token = new_token({\n \"iss\": \"lorem.ipsum.dev\",\n \"aud\": \"lorem.ipsum.auth\",\n \"sub\": user.username,\n \"email\": user.email,\n \"roles\": [\n user.role.name\n ],\n \"exp\": datetime.datetime.now(tz=datetime.timezone.utc) + datetime.timedelta(hours=4),\n \"iat\": datetime.datetime.now(tz=datetime.timezone.utc)\n })\n return access_token\n",
"step-ids": [
10,
12,
17,
19,
21
]
}
|
[
10,
12,
17,
19,
21
] |
# Copyright (c) 2008 Johns Hopkins University.
# All rights reserved.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose, without fee, and without written
# agreement is hereby granted, provided that the above copyright
# notice, the (updated) modification history and the author appear in
# all copies of this source code.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS `AS IS'
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS
# BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, LOSS OF USE, DATA,
# OR PROFITS) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
# THE POSSIBILITY OF SUCH DAMAGE.
# @author Razvan Musaloiu-E. <[email protected]>
"""A library that implements the T2 serial communication.
This library has two parts: one that deals with sending and receiving
packets using the serial format from T2 (TEP113) and a second one that
tries to simplify the work with arbitrary packets.
"""
import sys, struct, time, serial, socket, operator, thread
import Queue
from threading import Lock, Condition
__version__ = "$Id: tos.py,v 1.1 2008/05/17 01:17:03 razvanm Exp $"
__all__ = ['Serial', 'AM',
'Packet', 'RawPacket',
'AckFrame', 'DataFrame', 'NoAckDataFrame',
'ActiveMessage']
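# A minimal usage sketch (port name, baud rate, payload bytes and AM id below
# are illustrative assumptions, not values mandated by this module):
#
#   s = Serial('/dev/ttyUSB0', 115200)
#   am = AM(s)
#   am.write([1, 2, 3, 4], 6)       # send a 4-byte payload as AM type 6
#   p = am.read(timeout=10)         # wait up to ~10 seconds for a frame
#   if p:
#       print p.type, list2hex(p.data)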
def list2hex(v):
return " ".join(["%02x" % p for p in v])
class Serial:
"""
    A Serial object offers a way to send and receive data using an HDLC-like
    framing.
"""
HDLC_FLAG_BYTE = 0x7e
HDLC_CTLESC_BYTE = 0x7d
TOS_SERIAL_ACTIVE_MESSAGE_ID = 0
TOS_SERIAL_CC1000_ID = 1
TOS_SERIAL_802_15_4_ID = 2
TOS_SERIAL_UNKNOWN_ID = 255
SERIAL_PROTO_ACK = 67
SERIAL_PROTO_PACKET_ACK = 68
SERIAL_PROTO_PACKET_NOACK = 69
SERIAL_PROTO_PACKET_UNKNOWN = 255
def __init__(self, port, baudrate, flush=False, debug=False, qsize=10):
self._debug = debug
self._in_queue = Queue.Queue(qsize)
self._out_lock = Lock()
self._out_ack = Condition()
self._seqno = 0
self._ack = None
self._write_counter = 0
self._write_counter_failures = 0
self._read_counter = 0
self._ts = None
self._s = serial.Serial(port, baudrate, rtscts=0, timeout=0.5)
self._s.flushInput()
start = time.time();
if flush:
print >>sys.stdout, "Flushing the serial port",
while time.time() - start < 1:
p = self._read()
sys.stdout.write(".")
if not self._debug:
sys.stdout.write("\n")
self._s.close()
self._s = serial.Serial(port, baudrate, rtscts=0, timeout=None)
thread.start_new_thread(self.run, ())
def run(self):
while True:
p = self._read()
self._read_counter += 1
if self._debug:
print "Serial:run: got a packet(%d): %s" % (self._read_counter, p)
ack = AckFrame(p.data)
if ack.protocol == self.SERIAL_PROTO_ACK:
if not self._ack:
self._ack = ack
if self._debug:
print "Serial:run: got an ack:", ack
self._ack = ack
# Wake up the writer
self._out_ack.acquire()
self._out_ack.notify()
self._out_ack.release()
else:
ampkt = ActiveMessage(NoAckDataFrame(p.data).data)
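                # Frames with AM type 100 are treated as printf output: the
                # payload is raw text, so reassemble it and print line by line.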
if ampkt.type == 100:
for t in "".join([chr(i) for i in ampkt.data]).strip('\n\0').split('\n'):
print "PRINTF:", t.strip('\n')
else:
if self._in_queue.full():
print "Warning: Buffer overflow"
self._in_queue.get()
self._in_queue.put(p, block=False)
# Returns the next incoming serial packet
def _read(self):
"""Wait for a packet and return it as a RawPacket."""
try:
d = self._get_byte()
ts = time.time()
while d != self.HDLC_FLAG_BYTE:
d = self._get_byte()
ts = time.time()
packet = [d]
d = self._get_byte()
if d == self.HDLC_FLAG_BYTE:
d = self._get_byte()
ts = time.time()
else:
packet.append(d)
while d != self.HDLC_FLAG_BYTE:
d = self._get_byte()
packet.append(d)
if self._debug == True:
print "Serial:_read: unescaped", packet
packet = self._unescape(packet)
crc = self._crc16(0, packet[1:-3])
packet_crc = self._decode(packet[-3:-1])
if crc != packet_crc:
print "Warning: wrong CRC! %x != %x %s" % (crc, packet_crc, ["%2x" % i for i in packet])
if self._debug:
if self._ts == None:
self._ts = ts
else:
print "Serial:_read: %.4f (%.4f) Recv:" % (ts, ts - self._ts), self._format_packet(packet[1:-3])
self._ts = ts
return RawPacket(ts, packet[1:-3], crc == packet_crc)
except socket.timeout:
return None
def read(self, timeout=None):
start = time.time();
done = False
while not done:
p = None
while p == None:
if timeout == 0 or time.time() - start < timeout:
try:
p = self._in_queue.get(True, timeout)
except Queue.Empty:
return None
else:
return None
if p.crc:
done = True
else:
p = None
# In the current TinyOS the packets from the mote are always NoAckDataFrame
return NoAckDataFrame(p.data)
def write(self, payload):
"""
Write a packet. If the payload argument is a list, it is
assumed to be exactly the payload. Otherwise the payload is
assume to be a Packet and the real payload is obtain by
calling the .payload().
"""
if type(payload) != type([]):
# Assume this will be derived from Packet
payload = payload.payload()
self._out_lock.acquire()
self._seqno = (self._seqno + 1) % 100
packet = DataFrame();
packet.protocol = self.SERIAL_PROTO_PACKET_ACK
packet.seqno = self._seqno
packet.dispatch = 0
packet.data = payload
packet = packet.payload()
crc = self._crc16(0, packet)
packet.append(crc & 0xff)
packet.append((crc >> 8) & 0xff)
packet = [self.HDLC_FLAG_BYTE] + self._escape(packet) + [self.HDLC_FLAG_BYTE]
while True:
self._put_bytes(packet)
self._write_counter += 1
if self._debug == True:
print "Send(%d/%d): %s" % (self._write_counter, self._write_counter_failures, packet)
print "Wait for ack %d ..." % (self._seqno)
self._out_ack.acquire()
self._out_ack.wait(0.2)
if self._debug:
print "Wait for ack %d done. Latest ack:" % (self._seqno), self._ack
self._out_ack.release()
if self._ack and self._ack.seqno == self._seqno:
if self._debug:
print "The packet was acked."
self._out_lock.release()
if self._debug:
print "Returning from Serial.write..."
return True
else:
self._write_counter_failures += 1
if self._debug:
print "The packet was not acked. Try again."
# break # make only one sending attempt
self._out_lock.release()
return False
def _format_packet(self, payload):
f = NoAckDataFrame(payload)
if f.protocol == self.SERIAL_PROTO_ACK:
rpacket = AckFrame(payload)
return "Ack seqno: %d" % (rpacket.seqno)
else:
rpacket = ActiveMessage(f.data)
return "D: %04x S: %04x L: %02x G: %02x T: %02x | %s" % \
(rpacket.destination, rpacket.source,
rpacket.length, rpacket.group, rpacket.type,
list2hex(rpacket.data))
def _crc16(self, base_crc, frame_data):
crc = base_crc
for b in frame_data:
crc = crc ^ (b << 8)
for i in range(0, 8):
if crc & 0x8000 == 0x8000:
crc = (crc << 1) ^ 0x1021
else:
crc = crc << 1
crc = crc & 0xffff
return crc
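    # This is a plain CRC-16 with polynomial 0x1021, zero initial value, computed
    # MSB-first (the CCITT/XModem form). A small worked check, for illustration only:
    #   _crc16(0, [0x01]) == 0x1021  (the single set bit is reduced once by the polynomial)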
def _encode(self, val, dim):
output = []
for i in range(dim):
output.append(val & 0xFF)
val = val >> 8
return output
def _decode(self, v):
r = long(0)
for i in v[::-1]:
r = (r << 8) + i
return r
def _get_byte(self):
try:
r = struct.unpack("B", self._s.read())[0]
return r
except struct.error:
# Serial port read timeout
raise socket.timeout
def _put_bytes(self, data):
#print "DEBUG: _put_bytes:", data
for b in data:
self._s.write(struct.pack('B', b))
def _unescape(self, packet):
r = []
esc = False
for b in packet:
if esc:
r.append(b ^ 0x20)
esc = False
elif b == self.HDLC_CTLESC_BYTE:
esc = True
else:
r.append(b)
return r
def _escape(self, packet):
r = []
for b in packet:
if b == self.HDLC_FLAG_BYTE or b == self.HDLC_CTLESC_BYTE:
r.append(self.HDLC_CTLESC_BYTE)
r.append(b ^ 0x20)
else:
r.append(b)
return r
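    # Illustration of the HDLC byte stuffing performed by _escape/_unescape
    # (the values follow directly from the XOR with 0x20 above):
    #   0x7e (flag byte)   -> 0x7d 0x5e
    #   0x7d (escape byte) -> 0x7d 0x5d
    # so _unescape(_escape(data)) == data for any byte list.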
def debug(self, debug):
self._debug = debug
class SFClient:
def __init__(self, host, port, qsize=10):
self._in_queue = Queue(qsize)
self._s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self._s.connect((host, port))
data = self._s.recv(2)
if data != 'U ':
print "Wrong handshake"
self._s.send("U ")
print "Connected"
thread.start_new_thread(self.run, ())
def run(self):
while True:
length = ord(self._s.recv(1))
data = self._s.recv(length)
data = [ord(c) for c in data][1:]
#print "Recv %d bytes" % (length), ActiveMessage(data)
if self._in_queue.full():
print "Warning: Buffer overflow"
self._in_queue.get()
p = RawPacket()
p.crc = 1
p.data = data
self._in_queue.put(p, block=False)
def read(self, timeout=0):
return self._in_queue.get()
def write(self, payload):
print "SFClient: write:", payload
if type(payload) != type([]):
# Assume this will be derived from Packet
payload = payload.payload()
payload = [0] + payload
self._s.send(chr(len(payload)))
self._s.send(''.join([chr(c) for c in payload]))
return True
class AM:
def __init__(self, s):
self._s = s
def read(self, timeout=None):
frame = self._s.read(timeout)
if frame:
return ActiveMessage(frame.data)
return frame
def write(self, packet, amid):
return self._s.write(ActiveMessage(packet, amid=amid))
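# Usage sketch (illustrative only; the port name, baud rate and timeout below are
# placeholders, not values defined by this module):
#   s = Serial('/dev/ttyUSB0', 115200, flush=True)
#   am = AM(s)
#   pkt = am.read(timeout=5)      # ActiveMessage instance, or None on timeout
#   if pkt:
#       print "AM type:", pkt.type, "payload:", pkt.data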
class Packet:
"""
    The Packet class offers a handy way to build, pack and unpack
    binary data based on a given pattern (schema).
"""
def _decode(self, v):
r = long(0)
for i in v:
r = (r << 8) + i
return r
def _encode(self, val, dim):
output = []
for i in range(dim):
output.append(int(val & 0xFF))
val = val >> 8
output.reverse()
return output
def __init__(self, desc, packet = None):
offset = 0
boffset = 0
sum = 0
for i in range(len(desc)-1, -1, -1):
(n, t, s) = desc[i]
if s == None:
if sum > 0:
desc[i] = (n, t, -sum)
break
sum += s
self.__dict__['_schema'] = [(t, s) for (n, t, s) in desc]
self.__dict__['_names'] = [n for (n, t, s) in desc]
self.__dict__['_values'] = []
if type(packet) == type([]):
for (t, s) in self._schema:
if t == 'int':
self._values.append(self._decode(packet[offset:offset + s]))
offset += s
elif t == 'bint':
doffset = 8 - (boffset + s)
self._values.append((packet[offset] >> doffset) & ((1<<s) - 1))
boffset += s
if boffset == 8:
offset += 1
boffset = 0
elif t == 'string':
self._values.append(''.join([chr(i) for i in packet[offset:offset + s]]))
offset += s
elif t == 'blob':
if s:
if s > 0:
self._values.append(packet[offset:offset + s])
offset += s
else:
self._values.append(packet[offset:s])
offset = len(packet) + s
else:
self._values.append(packet[offset:])
elif type(packet) == type(()):
for i in packet:
self._values.append(i)
else:
for v in self._schema:
self._values.append(None)
def __repr__(self):
return self._values.__repr__()
def __str__(self):
r = ""
for i in range(len(self._names)):
r += "%s: %s " % (self._names[i], self._values[i])
for i in range(len(self._names), len(self._values)):
r += "%s" % self._values[i]
return r
# return self._values.__str__()
# Implement the map behavior
def __getitem__(self, key):
return self.__getattr__(key)
def __setitem__(self, key, value):
self.__setattr__(key, value)
def __len__(self):
return len(self._values)
def keys(self):
return self._names
    def values(self):
        # dict-like behavior: return the field values (keys() returns the names)
        return self._values
# Implement the struct behavior
def __getattr__(self, name):
#print "DEBUG: __getattr__", name
if type(name) == type(0):
return self._names[name]
else:
return self._values[self._names.index(name)]
def __setattr__(self, name, value):
if type(name) == type(0):
self._values[name] = value
else:
self._values[self._names.index(name)] = value
def __ne__(self, other):
if other.__class__ == self.__class__:
return self._values != other._values
else:
return True
def __eq__(self, other):
if other.__class__ == self.__class__:
return self._values == other._values
else:
return False
def __nonzero__(self):
return True;
# Custom
def names(self):
return self._names
def sizes(self):
return self._schema
def payload(self):
r = []
boffset = 0
for i in range(len(self._schema)):
(t, s) = self._schema[i]
if t == 'int':
r += self._encode(self._values[i], s)
boffset = 0
elif t == 'bint':
doffset = 8 - (boffset + s)
if boffset == 0:
r += [self._values[i] << doffset]
else:
r[-1] |= self._values[i] << doffset
boffset += s
if boffset == 8:
boffset = 0
elif self._values[i] != []:
r += self._values[i]
for i in self._values[len(self._schema):]:
r += i
return r
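# Example of the schema-driven packing/unpacking above (field names are illustrative):
#   p = Packet([('cmd', 'int', 1), ('value', 'int', 2), ('rest', 'blob', None)],
#              [0x01, 0x00, 0x2a, 0xde, 0xad])
#   p.cmd == 0x01, p.value == 0x002a, p.rest == [0xde, 0xad]
#   p.payload() reproduces the original byte list.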
class RawPacket(Packet):
def __init__(self, ts = None, data = None, crc = None):
Packet.__init__(self,
[('ts' , 'int', 4),
('crc', 'int', 1),
('data', 'blob', None)],
None)
self.ts = ts;
self.data = data
self.crc = crc
class AckFrame(Packet):
def __init__(self, payload = None):
Packet.__init__(self,
[('protocol', 'int', 1),
('seqno', 'int', 1)],
payload)
class DataFrame(Packet):
def __init__(self, payload = None):
if payload != None and type(payload) != type([]):
            # Assume it is a Packet
payload = payload.payload()
Packet.__init__(self,
[('protocol', 'int', 1),
('seqno', 'int', 1),
('dispatch', 'int', 1),
('data', 'blob', None)],
payload)
class NoAckDataFrame(Packet):
def __init__(self, payload = None):
if payload != None and type(payload) != type([]):
            # Assume it is a Packet
payload = payload.payload()
Packet.__init__(self,
[('protocol', 'int', 1),
('dispatch', 'int', 1),
('data', 'blob', None)],
payload)
class ActiveMessage(Packet):
def __init__(self, gpacket = None, amid = 0x00, dest = 0xFFFF):
if type(gpacket) == type([]):
payload = gpacket
else:
# Assume this will be derived from Packet
payload = None
Packet.__init__(self,
[('destination', 'int', 2),
('source', 'int', 2),
('length', 'int', 1),
('group', 'int', 1),
('type', 'int', 1),
('data', 'blob', None)],
payload)
if payload == None:
self.destination = dest
self.source = 0x0000
self.group = 0x00
self.type = amid
self.data = []
if gpacket:
self.data = gpacket.payload()
self.length = len(self.data)
|
normal
|
{
"blob_id": "f614287a2a118484b67f2b16e429a3335416d186",
"index": 3738,
"step-1": "# Copyright (c) 2008 Johns Hopkins University.\n# All rights reserved.\n#\n# Permission to use, copy, modify, and distribute this software and its\n# documentation for any purpose, without fee, and without written\n# agreement is hereby granted, provided that the above copyright\n# notice, the (updated) modification history and the author appear in\n# all copies of this source code.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS `AS IS'\n# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE\n# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS\n# BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR\n# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, LOSS OF USE, DATA,\n# OR PROFITS) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN\n# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)\n# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF\n# THE POSSIBILITY OF SUCH DAMAGE.\n\n# @author Razvan Musaloiu-E. <[email protected]>\n\n\"\"\"A library that implements the T2 serial communication.\n\nThis library has two parts: one that deals with sending and receiving\npackets using the serial format from T2 (TEP113) and a second one that\ntries to simplifies the work with arbitrary packets.\n\n\"\"\"\n\nimport sys, struct, time, serial, socket, operator, thread\nimport Queue\nfrom threading import Lock, Condition\n\n__version__ = \"$Id: tos.py,v 1.1 2008/05/17 01:17:03 razvanm Exp $\"\n\n__all__ = ['Serial', 'AM',\n 'Packet', 'RawPacket',\n 'AckFrame', 'DataFrame', 'NoAckDataFrame',\n 'ActiveMessage']\n\ndef list2hex(v):\n return \" \".join([\"%02x\" % p for p in v])\n\nclass Serial:\n \"\"\"\n A Serial object offers a way to send and data using a HDLC-like\n formating.\n \"\"\"\n \n HDLC_FLAG_BYTE = 0x7e\n HDLC_CTLESC_BYTE = 0x7d\n \n TOS_SERIAL_ACTIVE_MESSAGE_ID = 0\n TOS_SERIAL_CC1000_ID = 1\n TOS_SERIAL_802_15_4_ID = 2\n TOS_SERIAL_UNKNOWN_ID = 255\n \n SERIAL_PROTO_ACK = 67\n SERIAL_PROTO_PACKET_ACK = 68\n SERIAL_PROTO_PACKET_NOACK = 69\n SERIAL_PROTO_PACKET_UNKNOWN = 255\n \n def __init__(self, port, baudrate, flush=False, debug=False, qsize=10):\n self._debug = debug\n self._in_queue = Queue.Queue(qsize)\n self._out_lock = Lock()\n self._out_ack = Condition()\n self._seqno = 0\n self._ack = None\n self._write_counter = 0\n self._write_counter_failures = 0\n self._read_counter = 0\n self._ts = None\n\n self._s = serial.Serial(port, baudrate, rtscts=0, timeout=0.5)\n self._s.flushInput()\n start = time.time();\n if flush:\n print >>sys.stdout, \"Flushing the serial port\",\n while time.time() - start < 1:\n p = self._read()\n sys.stdout.write(\".\")\n if not self._debug:\n sys.stdout.write(\"\\n\")\n self._s.close()\n self._s = serial.Serial(port, baudrate, rtscts=0, timeout=None)\n\n thread.start_new_thread(self.run, ())\n\n def run(self):\n \n while True:\n p = self._read()\n self._read_counter += 1\n if self._debug:\n print \"Serial:run: got a packet(%d): %s\" % (self._read_counter, p)\n ack = AckFrame(p.data)\n if ack.protocol == self.SERIAL_PROTO_ACK:\n if not self._ack:\n self._ack = ack\n if self._debug:\n print \"Serial:run: got an ack:\", ack\n self._ack = ack\n # Wake up the writer\n self._out_ack.acquire()\n self._out_ack.notify()\n self._out_ack.release()\n else:\n ampkt = ActiveMessage(NoAckDataFrame(p.data).data)\n if ampkt.type == 100:\n for t in 
\"\".join([chr(i) for i in ampkt.data]).strip('\\n\\0').split('\\n'):\n print \"PRINTF:\", t.strip('\\n')\n else:\n if self._in_queue.full():\n print \"Warning: Buffer overflow\"\n self._in_queue.get()\n self._in_queue.put(p, block=False)\n\n\n # Returns the next incoming serial packet\n def _read(self):\n \"\"\"Wait for a packet and return it as a RawPacket.\"\"\"\n \n try:\n d = self._get_byte()\n ts = time.time()\n while d != self.HDLC_FLAG_BYTE:\n d = self._get_byte()\n ts = time.time()\n packet = [d]\n d = self._get_byte()\n if d == self.HDLC_FLAG_BYTE:\n d = self._get_byte()\n ts = time.time()\n else:\n packet.append(d)\n while d != self.HDLC_FLAG_BYTE:\n d = self._get_byte()\n packet.append(d)\n if self._debug == True:\n print \"Serial:_read: unescaped\", packet\n packet = self._unescape(packet)\n \n crc = self._crc16(0, packet[1:-3])\n packet_crc = self._decode(packet[-3:-1])\n \n if crc != packet_crc:\n print \"Warning: wrong CRC! %x != %x %s\" % (crc, packet_crc, [\"%2x\" % i for i in packet])\n if self._debug:\n if self._ts == None:\n self._ts = ts\n else:\n print \"Serial:_read: %.4f (%.4f) Recv:\" % (ts, ts - self._ts), self._format_packet(packet[1:-3])\n self._ts = ts\n return RawPacket(ts, packet[1:-3], crc == packet_crc)\n except socket.timeout:\n return None\n\n\n def read(self, timeout=None):\n start = time.time();\n done = False\n while not done:\n p = None\n while p == None:\n if timeout == 0 or time.time() - start < timeout:\n try:\n p = self._in_queue.get(True, timeout)\n except Queue.Empty:\n return None\n else:\n return None\n if p.crc:\n done = True\n else:\n p = None\n # In the current TinyOS the packets from the mote are always NoAckDataFrame\n return NoAckDataFrame(p.data)\n\n def write(self, payload):\n \"\"\"\n Write a packet. If the payload argument is a list, it is\n assumed to be exactly the payload. Otherwise the payload is\n assume to be a Packet and the real payload is obtain by\n calling the .payload().\n \"\"\"\n \n if type(payload) != type([]):\n # Assume this will be derived from Packet\n payload = payload.payload()\n self._out_lock.acquire()\n self._seqno = (self._seqno + 1) % 100\n packet = DataFrame();\n packet.protocol = self.SERIAL_PROTO_PACKET_ACK\n packet.seqno = self._seqno\n packet.dispatch = 0\n packet.data = payload\n packet = packet.payload()\n crc = self._crc16(0, packet)\n packet.append(crc & 0xff)\n packet.append((crc >> 8) & 0xff)\n packet = [self.HDLC_FLAG_BYTE] + self._escape(packet) + [self.HDLC_FLAG_BYTE]\n\n while True:\n self._put_bytes(packet)\n self._write_counter += 1\n if self._debug == True:\n print \"Send(%d/%d): %s\" % (self._write_counter, self._write_counter_failures, packet)\n print \"Wait for ack %d ...\" % (self._seqno)\n self._out_ack.acquire()\n self._out_ack.wait(0.2)\n if self._debug:\n print \"Wait for ack %d done. Latest ack:\" % (self._seqno), self._ack\n self._out_ack.release()\n if self._ack and self._ack.seqno == self._seqno:\n if self._debug:\n print \"The packet was acked.\"\n self._out_lock.release()\n if self._debug:\n print \"Returning from Serial.write...\"\n return True\n else:\n self._write_counter_failures += 1\n if self._debug:\n print \"The packet was not acked. 
Try again.\"\n # break # make only one sending attempt\n self._out_lock.release()\n return False\n\n\n def _format_packet(self, payload):\n f = NoAckDataFrame(payload)\n if f.protocol == self.SERIAL_PROTO_ACK:\n rpacket = AckFrame(payload)\n return \"Ack seqno: %d\" % (rpacket.seqno)\n else:\n rpacket = ActiveMessage(f.data)\n return \"D: %04x S: %04x L: %02x G: %02x T: %02x | %s\" % \\\n (rpacket.destination, rpacket.source,\n rpacket.length, rpacket.group, rpacket.type,\n list2hex(rpacket.data))\n\n def _crc16(self, base_crc, frame_data):\n crc = base_crc\n for b in frame_data:\n crc = crc ^ (b << 8)\n for i in range(0, 8):\n if crc & 0x8000 == 0x8000:\n crc = (crc << 1) ^ 0x1021\n else:\n crc = crc << 1\n crc = crc & 0xffff\n return crc\n \n def _encode(self, val, dim):\n output = []\n for i in range(dim):\n output.append(val & 0xFF)\n val = val >> 8\n return output\n \n def _decode(self, v):\n r = long(0)\n for i in v[::-1]:\n r = (r << 8) + i\n return r\n \n def _get_byte(self):\n try:\n r = struct.unpack(\"B\", self._s.read())[0]\n return r\n except struct.error:\n # Serial port read timeout\n raise socket.timeout\n \n def _put_bytes(self, data):\n #print \"DEBUG: _put_bytes:\", data\n for b in data:\n self._s.write(struct.pack('B', b))\n \n def _unescape(self, packet):\n r = []\n esc = False\n for b in packet:\n if esc:\n r.append(b ^ 0x20)\n esc = False\n elif b == self.HDLC_CTLESC_BYTE:\n esc = True\n else:\n r.append(b)\n return r\n \n def _escape(self, packet):\n r = []\n for b in packet:\n if b == self.HDLC_FLAG_BYTE or b == self.HDLC_CTLESC_BYTE:\n r.append(self.HDLC_CTLESC_BYTE)\n r.append(b ^ 0x20)\n else:\n r.append(b)\n return r\n \n def debug(self, debug):\n self._debug = debug\n\n\nclass SFClient:\n def __init__(self, host, port, qsize=10):\n self._in_queue = Queue(qsize)\n self._s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n self._s.connect((host, port))\n data = self._s.recv(2)\n if data != 'U ':\n print \"Wrong handshake\"\n self._s.send(\"U \")\n print \"Connected\"\n thread.start_new_thread(self.run, ())\n\n def run(self):\n while True:\n length = ord(self._s.recv(1))\n data = self._s.recv(length)\n data = [ord(c) for c in data][1:]\n #print \"Recv %d bytes\" % (length), ActiveMessage(data)\n if self._in_queue.full():\n print \"Warning: Buffer overflow\"\n self._in_queue.get()\n p = RawPacket()\n p.crc = 1\n p.data = data\n self._in_queue.put(p, block=False)\n\n def read(self, timeout=0):\n return self._in_queue.get()\n\n def write(self, payload):\n print \"SFClient: write:\", payload\n if type(payload) != type([]):\n # Assume this will be derived from Packet\n payload = payload.payload()\n payload = [0] + payload\n self._s.send(chr(len(payload)))\n self._s.send(''.join([chr(c) for c in payload]))\n return True\n\nclass AM:\n def __init__(self, s):\n self._s = s\n\n def read(self, timeout=None):\n frame = self._s.read(timeout)\n if frame:\n return ActiveMessage(frame.data)\n return frame\n\n def write(self, packet, amid):\n return self._s.write(ActiveMessage(packet, amid=amid))\n\n\nclass Packet:\n \"\"\"\n The Packet class offers a handy way to build pack and unpack\n binary data based on a given pattern.\n \"\"\"\n\n def _decode(self, v):\n r = long(0)\n for i in v:\n r = (r << 8) + i\n return r\n \n def _encode(self, val, dim):\n output = []\n for i in range(dim):\n output.append(int(val & 0xFF))\n val = val >> 8\n output.reverse()\n return output\n \n def __init__(self, desc, packet = None):\n offset = 0\n boffset = 0\n sum = 0\n for i in 
range(len(desc)-1, -1, -1):\n (n, t, s) = desc[i]\n if s == None:\n if sum > 0:\n desc[i] = (n, t, -sum)\n break\n sum += s\n self.__dict__['_schema'] = [(t, s) for (n, t, s) in desc]\n self.__dict__['_names'] = [n for (n, t, s) in desc]\n self.__dict__['_values'] = []\n if type(packet) == type([]):\n for (t, s) in self._schema:\n if t == 'int':\n self._values.append(self._decode(packet[offset:offset + s]))\n offset += s\n elif t == 'bint':\n doffset = 8 - (boffset + s)\n self._values.append((packet[offset] >> doffset) & ((1<<s) - 1))\n boffset += s\n if boffset == 8:\n offset += 1\n boffset = 0\n elif t == 'string':\n self._values.append(''.join([chr(i) for i in packet[offset:offset + s]]))\n offset += s\n elif t == 'blob':\n if s:\n if s > 0:\n self._values.append(packet[offset:offset + s])\n offset += s\n else:\n self._values.append(packet[offset:s])\n offset = len(packet) + s\n else:\n self._values.append(packet[offset:])\n elif type(packet) == type(()):\n for i in packet:\n self._values.append(i)\n else:\n for v in self._schema:\n self._values.append(None)\n\n def __repr__(self):\n return self._values.__repr__()\n\n def __str__(self):\n r = \"\"\n for i in range(len(self._names)):\n r += \"%s: %s \" % (self._names[i], self._values[i])\n for i in range(len(self._names), len(self._values)):\n r += \"%s\" % self._values[i]\n return r\n# return self._values.__str__()\n\n # Implement the map behavior\n def __getitem__(self, key):\n return self.__getattr__(key)\n\n def __setitem__(self, key, value):\n self.__setattr__(key, value)\n\n def __len__(self):\n return len(self._values)\n\n def keys(self):\n return self._names\n\n def values(self):\n return self._names\n\n # Implement the struct behavior\n def __getattr__(self, name):\n #print \"DEBUG: __getattr__\", name\n if type(name) == type(0):\n return self._names[name]\n else:\n return self._values[self._names.index(name)]\n\n def __setattr__(self, name, value):\n if type(name) == type(0):\n self._values[name] = value\n else:\n self._values[self._names.index(name)] = value\n\n def __ne__(self, other):\n if other.__class__ == self.__class__:\n return self._values != other._values\n else:\n return True\n\n def __eq__(self, other):\n if other.__class__ == self.__class__:\n return self._values == other._values\n else:\n return False\n\n def __nonzero__(self):\n return True;\n\n # Custom\n def names(self):\n return self._names\n\n def sizes(self):\n return self._schema\n\n def payload(self):\n r = []\n boffset = 0\n for i in range(len(self._schema)):\n (t, s) = self._schema[i]\n if t == 'int':\n r += self._encode(self._values[i], s)\n boffset = 0\n elif t == 'bint':\n doffset = 8 - (boffset + s)\n if boffset == 0:\n r += [self._values[i] << doffset]\n else:\n r[-1] |= self._values[i] << doffset\n boffset += s\n if boffset == 8:\n boffset = 0\n elif self._values[i] != []:\n r += self._values[i]\n for i in self._values[len(self._schema):]:\n r += i\n return r\n\n\nclass RawPacket(Packet):\n def __init__(self, ts = None, data = None, crc = None):\n Packet.__init__(self,\n [('ts' , 'int', 4),\n ('crc', 'int', 1),\n ('data', 'blob', None)],\n None)\n self.ts = ts;\n self.data = data\n self.crc = crc\n \nclass AckFrame(Packet):\n def __init__(self, payload = None):\n Packet.__init__(self,\n [('protocol', 'int', 1),\n ('seqno', 'int', 1)],\n payload)\n\nclass DataFrame(Packet):\n def __init__(self, payload = None):\n if payload != None and type(payload) != type([]):\n # Assume is a Packet\n payload = payload.payload()\n Packet.__init__(self,\n 
[('protocol', 'int', 1),\n ('seqno', 'int', 1),\n ('dispatch', 'int', 1),\n ('data', 'blob', None)],\n payload)\n\nclass NoAckDataFrame(Packet):\n def __init__(self, payload = None):\n if payload != None and type(payload) != type([]):\n # Assume is a Packet\n payload = payload.payload()\n Packet.__init__(self,\n [('protocol', 'int', 1),\n ('dispatch', 'int', 1),\n ('data', 'blob', None)],\n payload)\n\nclass ActiveMessage(Packet):\n def __init__(self, gpacket = None, amid = 0x00, dest = 0xFFFF):\n if type(gpacket) == type([]):\n payload = gpacket\n else:\n # Assume this will be derived from Packet\n payload = None\n Packet.__init__(self,\n [('destination', 'int', 2),\n ('source', 'int', 2),\n ('length', 'int', 1),\n ('group', 'int', 1),\n ('type', 'int', 1),\n ('data', 'blob', None)],\n payload)\n if payload == None:\n self.destination = dest\n self.source = 0x0000\n self.group = 0x00\n self.type = amid\n self.data = []\n if gpacket:\n self.data = gpacket.payload()\n self.length = len(self.data)\n\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
from web3.auto.infura import w3
import json
import os
with open("contract_abi.json") as f:
info_json = json.load(f)
abi = info_json
mycontract = w3.eth.contract(address='0x091FDeb7990D3E00d13c31b81841d56b33164AD7', abi=abi)
myfilter = mycontract.events.currentResponderState.createFilter(fromBlock=16147303)
#myfilter.fromBlock = "16181508"
#mycontract.eventFilter('currentResponderState', {'fromBlock': 16181508,'toBlock': 'latest'})
print(abi)
print (myfilter)
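# One way to actually pull the matching events once the filter exists; this assumes
# web3.py's standard filter API, which the snippet above does not exercise:
# for event in myfilter.get_all_entries():
#     print(event['args'])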
|
normal
|
{
"blob_id": "8921c0a17e90f7113d1e0be630a15fc9d74d1780",
"index": 8519,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nwith open('contract_abi.json') as f:\n info_json = json.load(f)\n<mask token>\nprint(abi)\nprint(myfilter)\n",
"step-3": "<mask token>\nwith open('contract_abi.json') as f:\n info_json = json.load(f)\nabi = info_json\nmycontract = w3.eth.contract(address=\n '0x091FDeb7990D3E00d13c31b81841d56b33164AD7', abi=abi)\nmyfilter = mycontract.events.currentResponderState.createFilter(fromBlock=\n 16147303)\nprint(abi)\nprint(myfilter)\n",
"step-4": "from web3.auto.infura import w3\nimport json\nimport os\nwith open('contract_abi.json') as f:\n info_json = json.load(f)\nabi = info_json\nmycontract = w3.eth.contract(address=\n '0x091FDeb7990D3E00d13c31b81841d56b33164AD7', abi=abi)\nmyfilter = mycontract.events.currentResponderState.createFilter(fromBlock=\n 16147303)\nprint(abi)\nprint(myfilter)\n",
"step-5": "from web3.auto.infura import w3\nimport json\nimport os\n\nwith open(\"contract_abi.json\") as f:\n info_json = json.load(f)\nabi = info_json\nmycontract = w3.eth.contract(address='0x091FDeb7990D3E00d13c31b81841d56b33164AD7', abi=abi)\nmyfilter = mycontract.events.currentResponderState.createFilter(fromBlock=16147303)\n#myfilter.fromBlock = \"16181508\"\n#mycontract.eventFilter('currentResponderState', {'fromBlock': 16181508,'toBlock': 'latest'})\nprint(abi)\nprint (myfilter)",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
width,height = int(input("Width? ")), int(input("Height? "))
on_row = 0
while on_row <= height:
if on_row == 0 or on_row == height:
print("*"*width)
else:
stars = "*" + " "*(width-2) + "*"
print(stars)
on_row += 1
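# Example run (Width? 5, Height? 3). Because the loop uses `<=`, it prints
# height+1 rows in total: a top border, height-1 middle rows, and a bottom border.
# *****
# *   *
# *   *
# *****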
# height = 0
# width = 0
# while True:
# try:
# height = int(input("Height? \n"))
# width = int(input("width? \n"))
# break
# except ValueError:
# print("choose an integer")
# print("* " * width)
# while height > 0:
# print(f"* " + " " * {width} + " *")
# height -+ 1
# print("* " * width)
|
normal
|
{
"blob_id": "63e96b41906f49f557529a0815da7314d74f6c33",
"index": 6216,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nwhile on_row <= height:\n if on_row == 0 or on_row == height:\n print('*' * width)\n else:\n stars = '*' + ' ' * (width - 2) + '*'\n print(stars)\n on_row += 1\n",
"step-3": "width, height = int(input('Width? ')), int(input('Height? '))\non_row = 0\nwhile on_row <= height:\n if on_row == 0 or on_row == height:\n print('*' * width)\n else:\n stars = '*' + ' ' * (width - 2) + '*'\n print(stars)\n on_row += 1\n",
"step-4": "width,height = int(input(\"Width? \")), int(input(\"Height? \"))\n\non_row = 0\nwhile on_row <= height:\n if on_row == 0 or on_row == height:\n print(\"*\"*width)\n else:\n stars = \"*\" + \" \"*(width-2) + \"*\"\n print(stars)\n on_row += 1\n\n\n# height = 0\n# width = 0\n\n# while True:\n# try:\n# height = int(input(\"Height? \\n\"))\n# width = int(input(\"width? \\n\"))\n# break\n# except ValueError:\n# print(\"choose an integer\")\n\n# print(\"* \" * width)\n\n# while height > 0:\n# print(f\"* \" + \" \" * {width} + \" *\")\n# height -+ 1\n\n# print(\"* \" * width)",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
from bottle import response,request,route,run
from json import dumps
import ConfigParser
import pickle
import pandas as pd
import numpy as np
from sklearn.pipeline import Pipeline
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn.cross_validation import cross_val_score
from sklearn.metrics import accuracy_score
from sklearn.preprocessing import LabelEncoder
from sklearn.ensemble import RandomForestClassifier
import pickle
from nltk.stem import WordNetLemmatizer
wordnet_lemmatizer = WordNetLemmatizer()
def fun(dat):
big=[]
for i in dat['Summary']:
st=''
ls=[]
for j in i.split(','):
#print j
ls.append(wordnet_lemmatizer.lemmatize(j))
#print ls
big.append(' '.join(ls))
return big
#Initialization starts
#configParser=ConfigParser.RawConfigParser()
#configFilePath="Config.cfg"
#configParser.read(configFilePath)
#Host=configParser.get('file','host')
#Port=configParser.get('file','port')
#Config read ends
#This method trains and creates a classifier from training data in csv file
@route('/trainBot',method='POST')
def trainBot():
response.content_type='application/json'
data2=[]
print "training...."
data=pd.read_csv('trainData.csv',header=None)
import preprocess
from preprocess import number_removal,generate_word_frequency
import re
#print data
data.columns=['Intent','Summary']
data['Summary']=data.apply(number_removal,axis=1)
data['Summary'] = data.apply(generate_word_frequency,axis=1)
data['Summary']=fun(data)
from nltk.corpus import stopwords
stop = stopwords.words('english')
stop.extend(('.', ',', '"', "'", '?', '!', ':', ';', '(', ')', '[', ']', '{', '}','/','-'))
for i in ['ask','alexa','allexa','tel','tell']:
stop.append(i)
le=LabelEncoder()
X=data['Summary'].fillna('')
y=data['Intent'].fillna('')
y=le.fit_transform(y)
classifier = Pipeline([
('vec',CountVectorizer(strip_accents='unicode',stop_words=stop)),
('tfidf', TfidfTransformer()),
('clf', RandomForestClassifier(n_estimators=10,random_state=0))])
classifier=classifier.fit(X, y)
f = open('random_forest_model.pickle', 'wb')
pickle.dump(classifier, f)
f.close()
f = open('label.pickle', 'wb')
pickle.dump(le, f)
f.close()
print "training completed"
item={"result":"training completed"}
data2.append(item)
return dumps(data2)
#This method classifies the input text based on the trained classifier
@route('/classify2',method='POST')
def classify2():
# read python dict back from the file
f = open('random_forest_model.pickle', 'rb')
classifier=pickle.load(f)
f.close()
f = open('label.pickle', 'rb')
label=pickle.load(f)
f.close()
response.content_type='application/json'
data=[]
inputText=request.json["input"]
print "input text : ",inputText
confidence=classifier.predict_proba([inputText])
index=np.argmax(confidence)
predicted_class=label.inverse_transform(classifier.predict([inputText]))
print str(round(confidence[0][index],2))+" "+ predicted_class[0]
item={"result":str(round(confidence[0][index],2))+" "+ predicted_class[0]}
data.append(item)
return dumps(data)
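# Example requests against these routes once a run(host, port) call is enabled
# (host and port below are illustrative):
#   curl -X POST http://localhost:8000/trainBot
#   curl -X POST -H 'Content-Type: application/json' \
#        -d '{"input": "remitter bank wants to stop the payment"}' \
#        http://localhost:8000/classify2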
#This method classifies and returns others based on confidence score
def classifyTextWithScore(inputText):
f = open('random_forest_model.pickle', 'rb')
classifier=pickle.load(f)
f.close()
f = open('label.pickle', 'rb')
label=pickle.load(f)
f.close()
confidence=classifier.predict_proba([inputText])
index=np.argmax(confidence)
predicted_class=label.inverse_transform(classifier.predict([inputText]))
print round(confidence[0][index],2),predicted_class
    if (round(confidence[0][index],2)<0.7):
        return "others"
    elif (len(inputText.split(" "))<2):
        return "others"
    else:
        return predicted_class[0]
#run(host='172.31.45.19', port=7500)
#print "hai"
print classifyTextWithScore("payments made last week where remitter bank wants to stop the payment")
#run(host='192.168.1.7',port=8000)
|
normal
|
{
"blob_id": "f0b5ad49fc47adc54fb16a151b4a0ed563f53a42",
"index": 9482,
"step-1": "from bottle import response,request,route,run\nfrom json import dumps\nimport ConfigParser\nimport pickle\nimport pandas as pd\nimport numpy as np\nfrom sklearn.pipeline import Pipeline\nfrom sklearn.feature_extraction.text import CountVectorizer\nfrom sklearn.feature_extraction.text import TfidfTransformer\nfrom sklearn.cross_validation import cross_val_score\nfrom sklearn.metrics import accuracy_score\nfrom sklearn.preprocessing import LabelEncoder\nfrom sklearn.ensemble import RandomForestClassifier\nimport pickle\n\nfrom nltk.stem import WordNetLemmatizer\nwordnet_lemmatizer = WordNetLemmatizer()\ndef fun(dat):\n big=[]\n for i in dat['Summary']:\n st=''\n ls=[]\n for j in i.split(','):\n #print j\n ls.append(wordnet_lemmatizer.lemmatize(j))\n #print ls\n big.append(' '.join(ls))\n return big\n\n\n\n\n#Initialization starts\n#configParser=ConfigParser.RawConfigParser()\n#configFilePath=\"Config.cfg\"\n#configParser.read(configFilePath)\n#Host=configParser.get('file','host')\n#Port=configParser.get('file','port')\n\n#Config read ends\n\n\n#This method trains and creates a classifier from training data in csv file\n@route('/trainBot',method='POST')\ndef trainBot():\n response.content_type='application/json'\n data2=[]\n print \"training....\"\n data=pd.read_csv('trainData.csv',header=None)\n import preprocess\n from preprocess import number_removal,generate_word_frequency\n import re\n #print data\n data.columns=['Intent','Summary']\n \n data['Summary']=data.apply(number_removal,axis=1)\n data['Summary'] = data.apply(generate_word_frequency,axis=1)\n \n data['Summary']=fun(data)\n \n from nltk.corpus import stopwords\n stop = stopwords.words('english')\n stop.extend(('.', ',', '\"', \"'\", '?', '!', ':', ';', '(', ')', '[', ']', '{', '}','/','-'))\n \n for i in ['ask','alexa','allexa','tel','tell']:\n stop.append(i)\n \n le=LabelEncoder()\n X=data['Summary'].fillna('')\n y=data['Intent'].fillna('')\n y=le.fit_transform(y)\n \n classifier = Pipeline([\n ('vec',CountVectorizer(strip_accents='unicode',stop_words=stop)),\n ('tfidf', TfidfTransformer()),\n ('clf', RandomForestClassifier(n_estimators=10,random_state=0))])\n \n classifier=classifier.fit(X, y)\n \n \n f = open('random_forest_model.pickle', 'wb')\n pickle.dump(classifier, f)\n f.close()\n \n f = open('label.pickle', 'wb')\n pickle.dump(le, f)\n f.close()\n \n print \"training completed\"\n item={\"result\":\"training completed\"}\n data2.append(item)\n return dumps(data2)\n\n\t\n#This method classifies the input text based on the trained classifier\n@route('/classify2',method='POST')\ndef classify2():\n # read python dict back from the file\n f = open('random_forest_model.pickle', 'rb')\n classifier=pickle.load(f)\n f.close()\n \n f = open('label.pickle', 'rb')\n label=pickle.load(f)\n f.close()\n response.content_type='application/json'\n data=[]\n inputText=request.json[\"input\"]\n print \"input text : \",inputText\n confidence=classifier.predict_proba([inputText])\n index=np.argmax(confidence)\n \n predicted_class=label.inverse_transform(classifier.predict([inputText]))\n \n print str(round(confidence[0][index],2))+\" \"+ predicted_class[0]\n \n item={\"result\":str(round(confidence[0][index],2))+\" \"+ predicted_class[0]}\n data.append(item)\n return dumps(data)\n\n\n#This method classifies and returns others based on confidence score\ndef classifyTextWithScore(inputText):\n f = open('random_forest_model.pickle', 'rb')\n classifier=pickle.load(f)\n f.close()\n \n f = open('label.pickle', 'rb')\n 
label=pickle.load(f)\n f.close()\n \n confidence=classifier.predict_proba([inputText])\n index=np.argmax(confidence)\n \n predicted_class=label.inverse_transform(classifier.predict([inputText]))\n \n \n print round(confidence[0][index],2),predicted_class\n if (round(confidence[0][index],2)<0.7):\n return \"others\"\n elif(len(inputText.split(\" \"))<2):\n\treturn \"others\"\n else:\n\treturn predicted_class[0]\n\t\n#run(host='172.31.45.19', port=7500)\n#print \"hai\"\nprint classifyTextWithScore(\"payments made last week where remitter bank wants to stop the payment\")\n\n\n#run(host='192.168.1.7',port=8000)\n\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
#!/usr/bin/env python2.7
from __future__ import print_function, division
import numpy as np
import matplotlib
import os
#checks if there is a display to use.
if os.environ.get('DISPLAY') is None:
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import matplotlib.colors as clr
import dtk
import sys
import time
import numpy.random
from matplotlib.colors import LogNorm
from scipy.optimize import minimize
from calc_ngal import *
from generate_parameter_dist import *
from zmr import ZMR
from matplotlib import rc
rc('text', usetex=True)
rc('font', **{'family':'serif', 'serif':['Computer Modern Roman'], })
rc('font', size=18)
def load_clusters(file_name):
    if file_name not in load_clusters._cache:
        cluster_data = ClusterData()
        cluster_data.load_file(file_name)
        # remember the loaded clusters so repeated calls with the same file reuse them
        load_clusters._cache[file_name] = cluster_data
else:
cluster_data = load_clusters._cache[file_name]
return cluster_data
load_clusters._cache = {}
def get_ngal_fit(param_fname, cluster_num, color, plot_fit=True, spider=False, manual_calc=False):
param = dtk.Param(param_fname)
cluster_loc = param.get_string('cluster_loc')
if cluster_num is None:
cluster_num = param.get_int('cluster_load_num')
zmrh5_loc = param.get_string('zmrh5_loc')
zmr_sdss = ZMR(zmrh5_loc)
zmr_fit = ZMR("output/"+param_fname+"/zmr_lkhd_cores.param")
m_bins = zmr_fit.m_bins
r_bins = zmr_fit.r_bins
zmr_core_ngal, zmr_core_ngal_err = zmr_fit.get_ngal() # only one z-bin, so we don't select it out
zmr_core_ngal = zmr_core_ngal[0]
zmr_core_ngal_err = zmr_core_ngal_err[0]
zmr_sdss_ngal, zmr_sdss_ngal_err = zmr_sdss.get_ngal()
zmr_sdss_ngal = zmr_sdss_ngal[0]
zmr_sdss_ngal_err = zmr_sdss_ngal_err[0]
if manual_calc:
model_fit_fname = "figs/"+param_fname+"/calc_likelihood_bounds.py/grid_fit_param.txt"
model_fit = load_fit_limits(model_fit_fname)
m_infall = 10**model_fit['mi']
if 'rd' in model_fit:
# print(model_fit['rd'])
r_disrupt = model_fit['rd']/1000.0 #convert to mpc/h from kpc/h
else:
r_disrupt = np.inf
# print("\ncalculating ngal for ", param_fname)
# print("\tmodel_fit_fname:", model_fit_fname)
# print("\tmodel params: {:.2e} {:.3f}".format(m_infall, r_disrupt))
print(cluster_loc)
cluster_data = load_clusters(cluster_loc)
if cluster_num == -1:
cluster_num = cluster_data.num
cluster_ngal = np.zeros(cluster_num)
cluster_m_i = np.zeros(cluster_num)
for i in range(0, cluster_num):
mass_index = cluster_data.get_cluster_mass_bin(i, m_bins)
cluster_m_i[i] = mass_index
cluster_ngal[i] = cluster_data.get_ngal(i, m_infall, r_disrupt)[1]
ngal_mean = np.zeros(len(m_bins)-1)
ngal_err = np.zeros(len(m_bins)-1)
ngal_std = np.zeros(len(m_bins)-1)
for i in range(0, len(m_bins)-1):
slct = cluster_m_i == i
ngal_mean[i] = np.mean(cluster_ngal[slct])
ngal_std[i] = np.std(cluster_ngal[slct])
ngal_err[i] = ngal_std[i]/np.sqrt(np.sum(slct))
# print("{:.2e}->{:.2e}: {}".format(m_bins[i], m_bins[i+1], np.sum(slct)))
plt.plot(dtk.bins_avg(m_bins), ngal_mean, '-x', color=color, label='Ngal recalc')
if plot_fit:
plt.plot(dtk.bins_avg(m_bins), zmr_core_ngal, '-', color=color)
plt.fill_between(dtk.bins_avg(m_bins), zmr_core_ngal-zmr_core_ngal_err, zmr_core_ngal+zmr_core_ngal_err, color=color, alpha=0.3)
offset_amount = 1.025
if spider:
markerfacecolor='None'
markeredgecolor=color
xaxis_offset=offset_amount
lw = 1
else:
markerfacecolor=color
markeredgecolor='None'
xaxis_offset=1./offset_amount
lw = 2
# remove problematic 2.5 L* low mass cluster in the spider sample
if "mstar-1" in param_fname and "spider" in param_fname:
print("SPIDERSS!: ", zmr_sdss_ngal)
zmr_sdss_ngal[zmr_sdss_ngal < 0.1 ] = np.nan
plt.errorbar(dtk.bins_avg(m_bins)*xaxis_offset, zmr_sdss_ngal,
yerr=zmr_sdss_ngal_err, fmt='o', capsize=0, lw=lw, color=color,
markeredgecolor=markeredgecolor, markerfacecolor=markerfacecolor)
# plt.fill_between(dtk.bins_avg(m_bins), ngal_mean-ngal_err, ngal_mean+ngal_err, color=color, alpha=0.3)
plt.yscale('log')
plt.xscale('log')
# plt.legend(loc='best')
def format_plot():
p4 = plt.plot([],[], 'tab:purple', lw=5, label=r'{:1.2f}~L$_*$'.format(0.4))
p3 = plt.plot([],[], 'tab:red', lw=5, label=r'{:1.2f}~L$_*$'.format(0.63))
p2 = plt.plot([],[], 'tab:green', lw=5, label=r'{:1.2f}~L$_*$'.format(1.0))
p12 = plt.plot([],[], 'tab:orange',lw=5, label=r'{:1.2f}~L$_*$'.format(1.58))
p1 = plt.plot([],[], 'tab:blue',lw=5, label=r'{:1.2f}~L$_*$'.format(2.5))
plt.errorbar([], [], yerr=[], fmt='o', lw=2, color='k', label="redMaPPer", capsize=0)
plt.plot([], [], color='k', label="Core Model")
# plt.errorbar([], [], yerr=[], fmt='o', lw=1, color='k', markerfacecolor='none', label='SPIDERS clusters', capsize=0)
plt.legend(ncol=2, loc='best', framealpha=0.0)
plt.xlabel(r'M$_{200c}$ [h$^{-1}$ M$_\odot$]')
plt.ylabel(r'Projected N$_{\rm{gal}}$')
plt.ylim([1e-1, 3e3])
plt.xlim([1e14, 5e15])
plt.tight_layout()
def plot_ngal_fits():
get_ngal_fit("params/cfn/simet/mstar1/mean/a3_rd.param", None, 'c')
get_ngal_fit("params/cfn/simet/mstar0.5/mean/a3_rd.param", None, 'g')
get_ngal_fit("params/cfn/simet/mstar0/mean/a3_rd.param", None, 'b')
get_ngal_fit("params/cfn/simet/mstar-1/mean/a3_rd.param", None, 'r')
#just spider points
get_ngal_fit("params/cfn/spider/mstar1/mean/spider_rd.param", None, 'c', plot_fit=False, spider=True)
get_ngal_fit("params/cfn/spider/mstar0.5/mean/spider_rd.param", None, 'g', plot_fit=False, spider=True)
get_ngal_fit("params/cfn/spider/mstar0/mean/spider_rd.param", None, 'b', plot_fit=False, spider=True)
get_ngal_fit("params/cfn/spider/mstar-1/mean/spider_rd.param", None, 'r', plot_fit=False, spider=True)
# get_ngal_fit("params/cfn/spider/mstar0/mean/spider_rd.param", None, 'm', plot_fit=False, spider=True)
# get_ngal_fit("params/cfn/spider/mstar0/mean/bcg_rd.param", None, 'c', plot_fit=False, spider=True)
format_plot()
def plot_ngal_fits2(pattern, mstars):
color_cycle = ['tab:blue', 'tab:orange', 'tab:green', 'tab:red', 'tab:purple', 'tab:brown', 'tab:pink', 'tab:gray', 'tab:olive', 'tab:cyan']
for mstar, color in zip(mstars, color_cycle):
get_ngal_fit(pattern.replace("${mstarval}", mstar), None, color)
format_plot()
if __name__ == "__main__":
if len(sys.argv) > 2:
plot_name = sys.argv[1]
else:
plot_name = "OR_McClintock2019"
mstars = ['-1', '-0.5', '0', '0.5', '1']
if plot_name == "OR_Simet2017":
pattern = 'params/rmba/auto/make_all_OR.high_richness.low_rez.min20.sh/crit/mstar${mstarval}/OR_rd_zoom.param'
plot_ngal_fits2(pattern, mstars)
elif plot_name == "OR_McClintock2019":
pattern = 'params/rmba/auto/make_all_OR.McClintock.high_richness.low_rez.min20.sh/crit/mstar${mstarval}/OR_rd_zoom.param'
plot_ngal_fits2(pattern, mstars)
# plot_ngal_fits()
dtk.save_figs("figs/"+__file__+"/"+plot_name+"/", extension='.pdf')
plt.show()
|
normal
|
{
"blob_id": "3acbb37809462ee69ff8792b4ad86b31dba5d630",
"index": 3821,
"step-1": "<mask token>\n\n\ndef load_clusters(file_name):\n if file_name not in load_clusters._cache:\n cluster_data = ClusterData()\n cluster_data.load_file(file_name)\n else:\n cluster_data = load_clusters._cache[file_name]\n return cluster_data\n\n\n<mask token>\n\n\ndef get_ngal_fit(param_fname, cluster_num, color, plot_fit=True, spider=\n False, manual_calc=False):\n param = dtk.Param(param_fname)\n cluster_loc = param.get_string('cluster_loc')\n if cluster_num is None:\n cluster_num = param.get_int('cluster_load_num')\n zmrh5_loc = param.get_string('zmrh5_loc')\n zmr_sdss = ZMR(zmrh5_loc)\n zmr_fit = ZMR('output/' + param_fname + '/zmr_lkhd_cores.param')\n m_bins = zmr_fit.m_bins\n r_bins = zmr_fit.r_bins\n zmr_core_ngal, zmr_core_ngal_err = zmr_fit.get_ngal()\n zmr_core_ngal = zmr_core_ngal[0]\n zmr_core_ngal_err = zmr_core_ngal_err[0]\n zmr_sdss_ngal, zmr_sdss_ngal_err = zmr_sdss.get_ngal()\n zmr_sdss_ngal = zmr_sdss_ngal[0]\n zmr_sdss_ngal_err = zmr_sdss_ngal_err[0]\n if manual_calc:\n model_fit_fname = ('figs/' + param_fname +\n '/calc_likelihood_bounds.py/grid_fit_param.txt')\n model_fit = load_fit_limits(model_fit_fname)\n m_infall = 10 ** model_fit['mi']\n if 'rd' in model_fit:\n r_disrupt = model_fit['rd'] / 1000.0\n else:\n r_disrupt = np.inf\n print(cluster_loc)\n cluster_data = load_clusters(cluster_loc)\n if cluster_num == -1:\n cluster_num = cluster_data.num\n cluster_ngal = np.zeros(cluster_num)\n cluster_m_i = np.zeros(cluster_num)\n for i in range(0, cluster_num):\n mass_index = cluster_data.get_cluster_mass_bin(i, m_bins)\n cluster_m_i[i] = mass_index\n cluster_ngal[i] = cluster_data.get_ngal(i, m_infall, r_disrupt)[1]\n ngal_mean = np.zeros(len(m_bins) - 1)\n ngal_err = np.zeros(len(m_bins) - 1)\n ngal_std = np.zeros(len(m_bins) - 1)\n for i in range(0, len(m_bins) - 1):\n slct = cluster_m_i == i\n ngal_mean[i] = np.mean(cluster_ngal[slct])\n ngal_std[i] = np.std(cluster_ngal[slct])\n ngal_err[i] = ngal_std[i] / np.sqrt(np.sum(slct))\n plt.plot(dtk.bins_avg(m_bins), ngal_mean, '-x', color=color, label=\n 'Ngal recalc')\n if plot_fit:\n plt.plot(dtk.bins_avg(m_bins), zmr_core_ngal, '-', color=color)\n plt.fill_between(dtk.bins_avg(m_bins), zmr_core_ngal -\n zmr_core_ngal_err, zmr_core_ngal + zmr_core_ngal_err, color=\n color, alpha=0.3)\n offset_amount = 1.025\n if spider:\n markerfacecolor = 'None'\n markeredgecolor = color\n xaxis_offset = offset_amount\n lw = 1\n else:\n markerfacecolor = color\n markeredgecolor = 'None'\n xaxis_offset = 1.0 / offset_amount\n lw = 2\n if 'mstar-1' in param_fname and 'spider' in param_fname:\n print('SPIDERSS!: ', zmr_sdss_ngal)\n zmr_sdss_ngal[zmr_sdss_ngal < 0.1] = np.nan\n plt.errorbar(dtk.bins_avg(m_bins) * xaxis_offset, zmr_sdss_ngal, yerr=\n zmr_sdss_ngal_err, fmt='o', capsize=0, lw=lw, color=color,\n markeredgecolor=markeredgecolor, markerfacecolor=markerfacecolor)\n plt.yscale('log')\n plt.xscale('log')\n\n\ndef format_plot():\n p4 = plt.plot([], [], 'tab:purple', lw=5, label='{:1.2f}~L$_*$'.format(0.4)\n )\n p3 = plt.plot([], [], 'tab:red', lw=5, label='{:1.2f}~L$_*$'.format(0.63))\n p2 = plt.plot([], [], 'tab:green', lw=5, label='{:1.2f}~L$_*$'.format(1.0))\n p12 = plt.plot([], [], 'tab:orange', lw=5, label='{:1.2f}~L$_*$'.format\n (1.58))\n p1 = plt.plot([], [], 'tab:blue', lw=5, label='{:1.2f}~L$_*$'.format(2.5))\n plt.errorbar([], [], yerr=[], fmt='o', lw=2, color='k', label=\n 'redMaPPer', capsize=0)\n plt.plot([], [], color='k', label='Core Model')\n plt.legend(ncol=2, loc='best', framealpha=0.0)\n 
plt.xlabel('M$_{200c}$ [h$^{-1}$ M$_\\\\odot$]')\n plt.ylabel('Projected N$_{\\\\rm{gal}}$')\n plt.ylim([0.1, 3000.0])\n plt.xlim([100000000000000.0, 5000000000000000.0])\n plt.tight_layout()\n\n\ndef plot_ngal_fits():\n get_ngal_fit('params/cfn/simet/mstar1/mean/a3_rd.param', None, 'c')\n get_ngal_fit('params/cfn/simet/mstar0.5/mean/a3_rd.param', None, 'g')\n get_ngal_fit('params/cfn/simet/mstar0/mean/a3_rd.param', None, 'b')\n get_ngal_fit('params/cfn/simet/mstar-1/mean/a3_rd.param', None, 'r')\n get_ngal_fit('params/cfn/spider/mstar1/mean/spider_rd.param', None, 'c',\n plot_fit=False, spider=True)\n get_ngal_fit('params/cfn/spider/mstar0.5/mean/spider_rd.param', None,\n 'g', plot_fit=False, spider=True)\n get_ngal_fit('params/cfn/spider/mstar0/mean/spider_rd.param', None, 'b',\n plot_fit=False, spider=True)\n get_ngal_fit('params/cfn/spider/mstar-1/mean/spider_rd.param', None,\n 'r', plot_fit=False, spider=True)\n format_plot()\n\n\ndef plot_ngal_fits2(pattern, mstars):\n color_cycle = ['tab:blue', 'tab:orange', 'tab:green', 'tab:red',\n 'tab:purple', 'tab:brown', 'tab:pink', 'tab:gray', 'tab:olive',\n 'tab:cyan']\n for mstar, color in zip(mstars, color_cycle):\n get_ngal_fit(pattern.replace('${mstarval}', mstar), None, color)\n format_plot()\n\n\n<mask token>\n",
"step-2": "<mask token>\nif os.environ.get('DISPLAY') is None:\n matplotlib.use('Agg')\n<mask token>\nrc('text', usetex=True)\nrc('font', **{'family': 'serif', 'serif': ['Computer Modern Roman']})\nrc('font', size=18)\n\n\ndef load_clusters(file_name):\n if file_name not in load_clusters._cache:\n cluster_data = ClusterData()\n cluster_data.load_file(file_name)\n else:\n cluster_data = load_clusters._cache[file_name]\n return cluster_data\n\n\n<mask token>\n\n\ndef get_ngal_fit(param_fname, cluster_num, color, plot_fit=True, spider=\n False, manual_calc=False):\n param = dtk.Param(param_fname)\n cluster_loc = param.get_string('cluster_loc')\n if cluster_num is None:\n cluster_num = param.get_int('cluster_load_num')\n zmrh5_loc = param.get_string('zmrh5_loc')\n zmr_sdss = ZMR(zmrh5_loc)\n zmr_fit = ZMR('output/' + param_fname + '/zmr_lkhd_cores.param')\n m_bins = zmr_fit.m_bins\n r_bins = zmr_fit.r_bins\n zmr_core_ngal, zmr_core_ngal_err = zmr_fit.get_ngal()\n zmr_core_ngal = zmr_core_ngal[0]\n zmr_core_ngal_err = zmr_core_ngal_err[0]\n zmr_sdss_ngal, zmr_sdss_ngal_err = zmr_sdss.get_ngal()\n zmr_sdss_ngal = zmr_sdss_ngal[0]\n zmr_sdss_ngal_err = zmr_sdss_ngal_err[0]\n if manual_calc:\n model_fit_fname = ('figs/' + param_fname +\n '/calc_likelihood_bounds.py/grid_fit_param.txt')\n model_fit = load_fit_limits(model_fit_fname)\n m_infall = 10 ** model_fit['mi']\n if 'rd' in model_fit:\n r_disrupt = model_fit['rd'] / 1000.0\n else:\n r_disrupt = np.inf\n print(cluster_loc)\n cluster_data = load_clusters(cluster_loc)\n if cluster_num == -1:\n cluster_num = cluster_data.num\n cluster_ngal = np.zeros(cluster_num)\n cluster_m_i = np.zeros(cluster_num)\n for i in range(0, cluster_num):\n mass_index = cluster_data.get_cluster_mass_bin(i, m_bins)\n cluster_m_i[i] = mass_index\n cluster_ngal[i] = cluster_data.get_ngal(i, m_infall, r_disrupt)[1]\n ngal_mean = np.zeros(len(m_bins) - 1)\n ngal_err = np.zeros(len(m_bins) - 1)\n ngal_std = np.zeros(len(m_bins) - 1)\n for i in range(0, len(m_bins) - 1):\n slct = cluster_m_i == i\n ngal_mean[i] = np.mean(cluster_ngal[slct])\n ngal_std[i] = np.std(cluster_ngal[slct])\n ngal_err[i] = ngal_std[i] / np.sqrt(np.sum(slct))\n plt.plot(dtk.bins_avg(m_bins), ngal_mean, '-x', color=color, label=\n 'Ngal recalc')\n if plot_fit:\n plt.plot(dtk.bins_avg(m_bins), zmr_core_ngal, '-', color=color)\n plt.fill_between(dtk.bins_avg(m_bins), zmr_core_ngal -\n zmr_core_ngal_err, zmr_core_ngal + zmr_core_ngal_err, color=\n color, alpha=0.3)\n offset_amount = 1.025\n if spider:\n markerfacecolor = 'None'\n markeredgecolor = color\n xaxis_offset = offset_amount\n lw = 1\n else:\n markerfacecolor = color\n markeredgecolor = 'None'\n xaxis_offset = 1.0 / offset_amount\n lw = 2\n if 'mstar-1' in param_fname and 'spider' in param_fname:\n print('SPIDERSS!: ', zmr_sdss_ngal)\n zmr_sdss_ngal[zmr_sdss_ngal < 0.1] = np.nan\n plt.errorbar(dtk.bins_avg(m_bins) * xaxis_offset, zmr_sdss_ngal, yerr=\n zmr_sdss_ngal_err, fmt='o', capsize=0, lw=lw, color=color,\n markeredgecolor=markeredgecolor, markerfacecolor=markerfacecolor)\n plt.yscale('log')\n plt.xscale('log')\n\n\ndef format_plot():\n p4 = plt.plot([], [], 'tab:purple', lw=5, label='{:1.2f}~L$_*$'.format(0.4)\n )\n p3 = plt.plot([], [], 'tab:red', lw=5, label='{:1.2f}~L$_*$'.format(0.63))\n p2 = plt.plot([], [], 'tab:green', lw=5, label='{:1.2f}~L$_*$'.format(1.0))\n p12 = plt.plot([], [], 'tab:orange', lw=5, label='{:1.2f}~L$_*$'.format\n (1.58))\n p1 = plt.plot([], [], 'tab:blue', lw=5, label='{:1.2f}~L$_*$'.format(2.5))\n 
plt.errorbar([], [], yerr=[], fmt='o', lw=2, color='k', label=\n 'redMaPPer', capsize=0)\n plt.plot([], [], color='k', label='Core Model')\n plt.legend(ncol=2, loc='best', framealpha=0.0)\n plt.xlabel('M$_{200c}$ [h$^{-1}$ M$_\\\\odot$]')\n plt.ylabel('Projected N$_{\\\\rm{gal}}$')\n plt.ylim([0.1, 3000.0])\n plt.xlim([100000000000000.0, 5000000000000000.0])\n plt.tight_layout()\n\n\ndef plot_ngal_fits():\n get_ngal_fit('params/cfn/simet/mstar1/mean/a3_rd.param', None, 'c')\n get_ngal_fit('params/cfn/simet/mstar0.5/mean/a3_rd.param', None, 'g')\n get_ngal_fit('params/cfn/simet/mstar0/mean/a3_rd.param', None, 'b')\n get_ngal_fit('params/cfn/simet/mstar-1/mean/a3_rd.param', None, 'r')\n get_ngal_fit('params/cfn/spider/mstar1/mean/spider_rd.param', None, 'c',\n plot_fit=False, spider=True)\n get_ngal_fit('params/cfn/spider/mstar0.5/mean/spider_rd.param', None,\n 'g', plot_fit=False, spider=True)\n get_ngal_fit('params/cfn/spider/mstar0/mean/spider_rd.param', None, 'b',\n plot_fit=False, spider=True)\n get_ngal_fit('params/cfn/spider/mstar-1/mean/spider_rd.param', None,\n 'r', plot_fit=False, spider=True)\n format_plot()\n\n\ndef plot_ngal_fits2(pattern, mstars):\n color_cycle = ['tab:blue', 'tab:orange', 'tab:green', 'tab:red',\n 'tab:purple', 'tab:brown', 'tab:pink', 'tab:gray', 'tab:olive',\n 'tab:cyan']\n for mstar, color in zip(mstars, color_cycle):\n get_ngal_fit(pattern.replace('${mstarval}', mstar), None, color)\n format_plot()\n\n\nif __name__ == '__main__':\n if len(sys.argv) > 2:\n plot_name = sys.argv[1]\n else:\n plot_name = 'OR_McClintock2019'\n mstars = ['-1', '-0.5', '0', '0.5', '1']\n if plot_name == 'OR_Simet2017':\n pattern = (\n 'params/rmba/auto/make_all_OR.high_richness.low_rez.min20.sh/crit/mstar${mstarval}/OR_rd_zoom.param'\n )\n plot_ngal_fits2(pattern, mstars)\n elif plot_name == 'OR_McClintock2019':\n pattern = (\n 'params/rmba/auto/make_all_OR.McClintock.high_richness.low_rez.min20.sh/crit/mstar${mstarval}/OR_rd_zoom.param'\n )\n plot_ngal_fits2(pattern, mstars)\n dtk.save_figs('figs/' + __file__ + '/' + plot_name + '/', extension='.pdf')\n plt.show()\n",
"step-3": "<mask token>\nif os.environ.get('DISPLAY') is None:\n matplotlib.use('Agg')\n<mask token>\nrc('text', usetex=True)\nrc('font', **{'family': 'serif', 'serif': ['Computer Modern Roman']})\nrc('font', size=18)\n\n\ndef load_clusters(file_name):\n if file_name not in load_clusters._cache:\n cluster_data = ClusterData()\n cluster_data.load_file(file_name)\n else:\n cluster_data = load_clusters._cache[file_name]\n return cluster_data\n\n\nload_clusters._cache = {}\n\n\ndef get_ngal_fit(param_fname, cluster_num, color, plot_fit=True, spider=\n False, manual_calc=False):\n param = dtk.Param(param_fname)\n cluster_loc = param.get_string('cluster_loc')\n if cluster_num is None:\n cluster_num = param.get_int('cluster_load_num')\n zmrh5_loc = param.get_string('zmrh5_loc')\n zmr_sdss = ZMR(zmrh5_loc)\n zmr_fit = ZMR('output/' + param_fname + '/zmr_lkhd_cores.param')\n m_bins = zmr_fit.m_bins\n r_bins = zmr_fit.r_bins\n zmr_core_ngal, zmr_core_ngal_err = zmr_fit.get_ngal()\n zmr_core_ngal = zmr_core_ngal[0]\n zmr_core_ngal_err = zmr_core_ngal_err[0]\n zmr_sdss_ngal, zmr_sdss_ngal_err = zmr_sdss.get_ngal()\n zmr_sdss_ngal = zmr_sdss_ngal[0]\n zmr_sdss_ngal_err = zmr_sdss_ngal_err[0]\n if manual_calc:\n model_fit_fname = ('figs/' + param_fname +\n '/calc_likelihood_bounds.py/grid_fit_param.txt')\n model_fit = load_fit_limits(model_fit_fname)\n m_infall = 10 ** model_fit['mi']\n if 'rd' in model_fit:\n r_disrupt = model_fit['rd'] / 1000.0\n else:\n r_disrupt = np.inf\n print(cluster_loc)\n cluster_data = load_clusters(cluster_loc)\n if cluster_num == -1:\n cluster_num = cluster_data.num\n cluster_ngal = np.zeros(cluster_num)\n cluster_m_i = np.zeros(cluster_num)\n for i in range(0, cluster_num):\n mass_index = cluster_data.get_cluster_mass_bin(i, m_bins)\n cluster_m_i[i] = mass_index\n cluster_ngal[i] = cluster_data.get_ngal(i, m_infall, r_disrupt)[1]\n ngal_mean = np.zeros(len(m_bins) - 1)\n ngal_err = np.zeros(len(m_bins) - 1)\n ngal_std = np.zeros(len(m_bins) - 1)\n for i in range(0, len(m_bins) - 1):\n slct = cluster_m_i == i\n ngal_mean[i] = np.mean(cluster_ngal[slct])\n ngal_std[i] = np.std(cluster_ngal[slct])\n ngal_err[i] = ngal_std[i] / np.sqrt(np.sum(slct))\n plt.plot(dtk.bins_avg(m_bins), ngal_mean, '-x', color=color, label=\n 'Ngal recalc')\n if plot_fit:\n plt.plot(dtk.bins_avg(m_bins), zmr_core_ngal, '-', color=color)\n plt.fill_between(dtk.bins_avg(m_bins), zmr_core_ngal -\n zmr_core_ngal_err, zmr_core_ngal + zmr_core_ngal_err, color=\n color, alpha=0.3)\n offset_amount = 1.025\n if spider:\n markerfacecolor = 'None'\n markeredgecolor = color\n xaxis_offset = offset_amount\n lw = 1\n else:\n markerfacecolor = color\n markeredgecolor = 'None'\n xaxis_offset = 1.0 / offset_amount\n lw = 2\n if 'mstar-1' in param_fname and 'spider' in param_fname:\n print('SPIDERSS!: ', zmr_sdss_ngal)\n zmr_sdss_ngal[zmr_sdss_ngal < 0.1] = np.nan\n plt.errorbar(dtk.bins_avg(m_bins) * xaxis_offset, zmr_sdss_ngal, yerr=\n zmr_sdss_ngal_err, fmt='o', capsize=0, lw=lw, color=color,\n markeredgecolor=markeredgecolor, markerfacecolor=markerfacecolor)\n plt.yscale('log')\n plt.xscale('log')\n\n\ndef format_plot():\n p4 = plt.plot([], [], 'tab:purple', lw=5, label='{:1.2f}~L$_*$'.format(0.4)\n )\n p3 = plt.plot([], [], 'tab:red', lw=5, label='{:1.2f}~L$_*$'.format(0.63))\n p2 = plt.plot([], [], 'tab:green', lw=5, label='{:1.2f}~L$_*$'.format(1.0))\n p12 = plt.plot([], [], 'tab:orange', lw=5, label='{:1.2f}~L$_*$'.format\n (1.58))\n p1 = plt.plot([], [], 'tab:blue', lw=5, label='{:1.2f}~L$_*$'.format(2.5))\n 
plt.errorbar([], [], yerr=[], fmt='o', lw=2, color='k', label=\n 'redMaPPer', capsize=0)\n plt.plot([], [], color='k', label='Core Model')\n plt.legend(ncol=2, loc='best', framealpha=0.0)\n plt.xlabel('M$_{200c}$ [h$^{-1}$ M$_\\\\odot$]')\n plt.ylabel('Projected N$_{\\\\rm{gal}}$')\n plt.ylim([0.1, 3000.0])\n plt.xlim([100000000000000.0, 5000000000000000.0])\n plt.tight_layout()\n\n\ndef plot_ngal_fits():\n get_ngal_fit('params/cfn/simet/mstar1/mean/a3_rd.param', None, 'c')\n get_ngal_fit('params/cfn/simet/mstar0.5/mean/a3_rd.param', None, 'g')\n get_ngal_fit('params/cfn/simet/mstar0/mean/a3_rd.param', None, 'b')\n get_ngal_fit('params/cfn/simet/mstar-1/mean/a3_rd.param', None, 'r')\n get_ngal_fit('params/cfn/spider/mstar1/mean/spider_rd.param', None, 'c',\n plot_fit=False, spider=True)\n get_ngal_fit('params/cfn/spider/mstar0.5/mean/spider_rd.param', None,\n 'g', plot_fit=False, spider=True)\n get_ngal_fit('params/cfn/spider/mstar0/mean/spider_rd.param', None, 'b',\n plot_fit=False, spider=True)\n get_ngal_fit('params/cfn/spider/mstar-1/mean/spider_rd.param', None,\n 'r', plot_fit=False, spider=True)\n format_plot()\n\n\ndef plot_ngal_fits2(pattern, mstars):\n color_cycle = ['tab:blue', 'tab:orange', 'tab:green', 'tab:red',\n 'tab:purple', 'tab:brown', 'tab:pink', 'tab:gray', 'tab:olive',\n 'tab:cyan']\n for mstar, color in zip(mstars, color_cycle):\n get_ngal_fit(pattern.replace('${mstarval}', mstar), None, color)\n format_plot()\n\n\nif __name__ == '__main__':\n if len(sys.argv) > 2:\n plot_name = sys.argv[1]\n else:\n plot_name = 'OR_McClintock2019'\n mstars = ['-1', '-0.5', '0', '0.5', '1']\n if plot_name == 'OR_Simet2017':\n pattern = (\n 'params/rmba/auto/make_all_OR.high_richness.low_rez.min20.sh/crit/mstar${mstarval}/OR_rd_zoom.param'\n )\n plot_ngal_fits2(pattern, mstars)\n elif plot_name == 'OR_McClintock2019':\n pattern = (\n 'params/rmba/auto/make_all_OR.McClintock.high_richness.low_rez.min20.sh/crit/mstar${mstarval}/OR_rd_zoom.param'\n )\n plot_ngal_fits2(pattern, mstars)\n dtk.save_figs('figs/' + __file__ + '/' + plot_name + '/', extension='.pdf')\n plt.show()\n",
"step-4": "from __future__ import print_function, division\nimport numpy as np\nimport matplotlib\nimport os\nif os.environ.get('DISPLAY') is None:\n matplotlib.use('Agg')\nimport matplotlib.pyplot as plt\nimport matplotlib.colors as clr\nimport dtk\nimport sys\nimport time\nimport numpy.random\nfrom matplotlib.colors import LogNorm\nfrom scipy.optimize import minimize\nfrom calc_ngal import *\nfrom generate_parameter_dist import *\nfrom zmr import ZMR\nfrom matplotlib import rc\nrc('text', usetex=True)\nrc('font', **{'family': 'serif', 'serif': ['Computer Modern Roman']})\nrc('font', size=18)\n\n\ndef load_clusters(file_name):\n if file_name not in load_clusters._cache:\n cluster_data = ClusterData()\n cluster_data.load_file(file_name)\n else:\n cluster_data = load_clusters._cache[file_name]\n return cluster_data\n\n\nload_clusters._cache = {}\n\n\ndef get_ngal_fit(param_fname, cluster_num, color, plot_fit=True, spider=\n False, manual_calc=False):\n param = dtk.Param(param_fname)\n cluster_loc = param.get_string('cluster_loc')\n if cluster_num is None:\n cluster_num = param.get_int('cluster_load_num')\n zmrh5_loc = param.get_string('zmrh5_loc')\n zmr_sdss = ZMR(zmrh5_loc)\n zmr_fit = ZMR('output/' + param_fname + '/zmr_lkhd_cores.param')\n m_bins = zmr_fit.m_bins\n r_bins = zmr_fit.r_bins\n zmr_core_ngal, zmr_core_ngal_err = zmr_fit.get_ngal()\n zmr_core_ngal = zmr_core_ngal[0]\n zmr_core_ngal_err = zmr_core_ngal_err[0]\n zmr_sdss_ngal, zmr_sdss_ngal_err = zmr_sdss.get_ngal()\n zmr_sdss_ngal = zmr_sdss_ngal[0]\n zmr_sdss_ngal_err = zmr_sdss_ngal_err[0]\n if manual_calc:\n model_fit_fname = ('figs/' + param_fname +\n '/calc_likelihood_bounds.py/grid_fit_param.txt')\n model_fit = load_fit_limits(model_fit_fname)\n m_infall = 10 ** model_fit['mi']\n if 'rd' in model_fit:\n r_disrupt = model_fit['rd'] / 1000.0\n else:\n r_disrupt = np.inf\n print(cluster_loc)\n cluster_data = load_clusters(cluster_loc)\n if cluster_num == -1:\n cluster_num = cluster_data.num\n cluster_ngal = np.zeros(cluster_num)\n cluster_m_i = np.zeros(cluster_num)\n for i in range(0, cluster_num):\n mass_index = cluster_data.get_cluster_mass_bin(i, m_bins)\n cluster_m_i[i] = mass_index\n cluster_ngal[i] = cluster_data.get_ngal(i, m_infall, r_disrupt)[1]\n ngal_mean = np.zeros(len(m_bins) - 1)\n ngal_err = np.zeros(len(m_bins) - 1)\n ngal_std = np.zeros(len(m_bins) - 1)\n for i in range(0, len(m_bins) - 1):\n slct = cluster_m_i == i\n ngal_mean[i] = np.mean(cluster_ngal[slct])\n ngal_std[i] = np.std(cluster_ngal[slct])\n ngal_err[i] = ngal_std[i] / np.sqrt(np.sum(slct))\n plt.plot(dtk.bins_avg(m_bins), ngal_mean, '-x', color=color, label=\n 'Ngal recalc')\n if plot_fit:\n plt.plot(dtk.bins_avg(m_bins), zmr_core_ngal, '-', color=color)\n plt.fill_between(dtk.bins_avg(m_bins), zmr_core_ngal -\n zmr_core_ngal_err, zmr_core_ngal + zmr_core_ngal_err, color=\n color, alpha=0.3)\n offset_amount = 1.025\n if spider:\n markerfacecolor = 'None'\n markeredgecolor = color\n xaxis_offset = offset_amount\n lw = 1\n else:\n markerfacecolor = color\n markeredgecolor = 'None'\n xaxis_offset = 1.0 / offset_amount\n lw = 2\n if 'mstar-1' in param_fname and 'spider' in param_fname:\n print('SPIDERSS!: ', zmr_sdss_ngal)\n zmr_sdss_ngal[zmr_sdss_ngal < 0.1] = np.nan\n plt.errorbar(dtk.bins_avg(m_bins) * xaxis_offset, zmr_sdss_ngal, yerr=\n zmr_sdss_ngal_err, fmt='o', capsize=0, lw=lw, color=color,\n markeredgecolor=markeredgecolor, markerfacecolor=markerfacecolor)\n plt.yscale('log')\n plt.xscale('log')\n\n\ndef format_plot():\n p4 = 
plt.plot([], [], 'tab:purple', lw=5, label='{:1.2f}~L$_*$'.format(0.4)\n )\n p3 = plt.plot([], [], 'tab:red', lw=5, label='{:1.2f}~L$_*$'.format(0.63))\n p2 = plt.plot([], [], 'tab:green', lw=5, label='{:1.2f}~L$_*$'.format(1.0))\n p12 = plt.plot([], [], 'tab:orange', lw=5, label='{:1.2f}~L$_*$'.format\n (1.58))\n p1 = plt.plot([], [], 'tab:blue', lw=5, label='{:1.2f}~L$_*$'.format(2.5))\n plt.errorbar([], [], yerr=[], fmt='o', lw=2, color='k', label=\n 'redMaPPer', capsize=0)\n plt.plot([], [], color='k', label='Core Model')\n plt.legend(ncol=2, loc='best', framealpha=0.0)\n plt.xlabel('M$_{200c}$ [h$^{-1}$ M$_\\\\odot$]')\n plt.ylabel('Projected N$_{\\\\rm{gal}}$')\n plt.ylim([0.1, 3000.0])\n plt.xlim([100000000000000.0, 5000000000000000.0])\n plt.tight_layout()\n\n\ndef plot_ngal_fits():\n get_ngal_fit('params/cfn/simet/mstar1/mean/a3_rd.param', None, 'c')\n get_ngal_fit('params/cfn/simet/mstar0.5/mean/a3_rd.param', None, 'g')\n get_ngal_fit('params/cfn/simet/mstar0/mean/a3_rd.param', None, 'b')\n get_ngal_fit('params/cfn/simet/mstar-1/mean/a3_rd.param', None, 'r')\n get_ngal_fit('params/cfn/spider/mstar1/mean/spider_rd.param', None, 'c',\n plot_fit=False, spider=True)\n get_ngal_fit('params/cfn/spider/mstar0.5/mean/spider_rd.param', None,\n 'g', plot_fit=False, spider=True)\n get_ngal_fit('params/cfn/spider/mstar0/mean/spider_rd.param', None, 'b',\n plot_fit=False, spider=True)\n get_ngal_fit('params/cfn/spider/mstar-1/mean/spider_rd.param', None,\n 'r', plot_fit=False, spider=True)\n format_plot()\n\n\ndef plot_ngal_fits2(pattern, mstars):\n color_cycle = ['tab:blue', 'tab:orange', 'tab:green', 'tab:red',\n 'tab:purple', 'tab:brown', 'tab:pink', 'tab:gray', 'tab:olive',\n 'tab:cyan']\n for mstar, color in zip(mstars, color_cycle):\n get_ngal_fit(pattern.replace('${mstarval}', mstar), None, color)\n format_plot()\n\n\nif __name__ == '__main__':\n if len(sys.argv) > 2:\n plot_name = sys.argv[1]\n else:\n plot_name = 'OR_McClintock2019'\n mstars = ['-1', '-0.5', '0', '0.5', '1']\n if plot_name == 'OR_Simet2017':\n pattern = (\n 'params/rmba/auto/make_all_OR.high_richness.low_rez.min20.sh/crit/mstar${mstarval}/OR_rd_zoom.param'\n )\n plot_ngal_fits2(pattern, mstars)\n elif plot_name == 'OR_McClintock2019':\n pattern = (\n 'params/rmba/auto/make_all_OR.McClintock.high_richness.low_rez.min20.sh/crit/mstar${mstarval}/OR_rd_zoom.param'\n )\n plot_ngal_fits2(pattern, mstars)\n dtk.save_figs('figs/' + __file__ + '/' + plot_name + '/', extension='.pdf')\n plt.show()\n",
"step-5": "#!/usr/bin/env python2.7\n\nfrom __future__ import print_function, division \nimport numpy as np\nimport matplotlib\nimport os\n#checks if there is a display to use.\nif os.environ.get('DISPLAY') is None:\n matplotlib.use('Agg')\n\nimport matplotlib.pyplot as plt\nimport matplotlib.colors as clr\nimport dtk\nimport sys\nimport time\nimport numpy.random\nfrom matplotlib.colors import LogNorm\nfrom scipy.optimize import minimize\n\nfrom calc_ngal import *\nfrom generate_parameter_dist import *\nfrom zmr import ZMR\nfrom matplotlib import rc\nrc('text', usetex=True)\nrc('font', **{'family':'serif', 'serif':['Computer Modern Roman'], })\nrc('font', size=18)\n\ndef load_clusters(file_name):\n if file_name not in load_clusters._cache:\n cluster_data = ClusterData()\n cluster_data.load_file(file_name)\n else:\n cluster_data = load_clusters._cache[file_name]\n return cluster_data\n\nload_clusters._cache = {}\n\ndef get_ngal_fit(param_fname, cluster_num, color, plot_fit=True, spider=False, manual_calc=False):\n param = dtk.Param(param_fname)\n cluster_loc = param.get_string('cluster_loc')\n if cluster_num is None:\n cluster_num = param.get_int('cluster_load_num')\n zmrh5_loc = param.get_string('zmrh5_loc')\n zmr_sdss = ZMR(zmrh5_loc)\n zmr_fit = ZMR(\"output/\"+param_fname+\"/zmr_lkhd_cores.param\")\n m_bins = zmr_fit.m_bins\n r_bins = zmr_fit.r_bins\n zmr_core_ngal, zmr_core_ngal_err = zmr_fit.get_ngal() # only one z-bin, so we don't select it out\n zmr_core_ngal = zmr_core_ngal[0]\n zmr_core_ngal_err = zmr_core_ngal_err[0]\n zmr_sdss_ngal, zmr_sdss_ngal_err = zmr_sdss.get_ngal()\n zmr_sdss_ngal = zmr_sdss_ngal[0]\n zmr_sdss_ngal_err = zmr_sdss_ngal_err[0]\n\n if manual_calc:\n model_fit_fname = \"figs/\"+param_fname+\"/calc_likelihood_bounds.py/grid_fit_param.txt\"\n model_fit = load_fit_limits(model_fit_fname)\n m_infall = 10**model_fit['mi']\n if 'rd' in model_fit:\n # print(model_fit['rd'])\n r_disrupt = model_fit['rd']/1000.0 #convert to mpc/h from kpc/h\n else:\n r_disrupt = np.inf\n # print(\"\\ncalculating ngal for \", param_fname)\n # print(\"\\tmodel_fit_fname:\", model_fit_fname)\n # print(\"\\tmodel params: {:.2e} {:.3f}\".format(m_infall, r_disrupt))\n print(cluster_loc)\n cluster_data = load_clusters(cluster_loc)\n if cluster_num == -1:\n cluster_num = cluster_data.num\n cluster_ngal = np.zeros(cluster_num)\n cluster_m_i = np.zeros(cluster_num)\n for i in range(0, cluster_num):\n mass_index = cluster_data.get_cluster_mass_bin(i, m_bins)\n cluster_m_i[i] = mass_index\n cluster_ngal[i] = cluster_data.get_ngal(i, m_infall, r_disrupt)[1]\n ngal_mean = np.zeros(len(m_bins)-1)\n ngal_err = np.zeros(len(m_bins)-1)\n ngal_std = np.zeros(len(m_bins)-1)\n for i in range(0, len(m_bins)-1):\n slct = cluster_m_i == i\n ngal_mean[i] = np.mean(cluster_ngal[slct])\n ngal_std[i] = np.std(cluster_ngal[slct])\n ngal_err[i] = ngal_std[i]/np.sqrt(np.sum(slct))\n # print(\"{:.2e}->{:.2e}: {}\".format(m_bins[i], m_bins[i+1], np.sum(slct)))\n plt.plot(dtk.bins_avg(m_bins), ngal_mean, '-x', color=color, label='Ngal recalc')\n if plot_fit:\n plt.plot(dtk.bins_avg(m_bins), zmr_core_ngal, '-', color=color)\n plt.fill_between(dtk.bins_avg(m_bins), zmr_core_ngal-zmr_core_ngal_err, zmr_core_ngal+zmr_core_ngal_err, color=color, alpha=0.3)\n offset_amount = 1.025\n if spider:\n markerfacecolor='None'\n markeredgecolor=color\n xaxis_offset=offset_amount\n lw = 1\n else:\n markerfacecolor=color\n markeredgecolor='None'\n xaxis_offset=1./offset_amount\n lw = 2\n \n # remove problematic 2.5 L* low mass 
cluster in the spider sample\n if \"mstar-1\" in param_fname and \"spider\" in param_fname:\n print(\"SPIDERSS!: \", zmr_sdss_ngal)\n zmr_sdss_ngal[zmr_sdss_ngal < 0.1 ] = np.nan\n plt.errorbar(dtk.bins_avg(m_bins)*xaxis_offset, zmr_sdss_ngal,\n yerr=zmr_sdss_ngal_err, fmt='o', capsize=0, lw=lw, color=color,\n markeredgecolor=markeredgecolor, markerfacecolor=markerfacecolor)\n # plt.fill_between(dtk.bins_avg(m_bins), ngal_mean-ngal_err, ngal_mean+ngal_err, color=color, alpha=0.3)\n plt.yscale('log')\n plt.xscale('log')\n # plt.legend(loc='best')\ndef format_plot():\n\n p4 = plt.plot([],[], 'tab:purple', lw=5, label=r'{:1.2f}~L$_*$'.format(0.4))\n p3 = plt.plot([],[], 'tab:red', lw=5, label=r'{:1.2f}~L$_*$'.format(0.63))\n p2 = plt.plot([],[], 'tab:green', lw=5, label=r'{:1.2f}~L$_*$'.format(1.0))\n p12 = plt.plot([],[], 'tab:orange',lw=5, label=r'{:1.2f}~L$_*$'.format(1.58))\n p1 = plt.plot([],[], 'tab:blue',lw=5, label=r'{:1.2f}~L$_*$'.format(2.5))\n plt.errorbar([], [], yerr=[], fmt='o', lw=2, color='k', label=\"redMaPPer\", capsize=0)\n plt.plot([], [], color='k', label=\"Core Model\")\n # plt.errorbar([], [], yerr=[], fmt='o', lw=1, color='k', markerfacecolor='none', label='SPIDERS clusters', capsize=0)\n plt.legend(ncol=2, loc='best', framealpha=0.0)\n\n plt.xlabel(r'M$_{200c}$ [h$^{-1}$ M$_\\odot$]')\n plt.ylabel(r'Projected N$_{\\rm{gal}}$')\n plt.ylim([1e-1, 3e3])\n plt.xlim([1e14, 5e15])\n plt.tight_layout()\n\ndef plot_ngal_fits():\n get_ngal_fit(\"params/cfn/simet/mstar1/mean/a3_rd.param\", None, 'c')\n get_ngal_fit(\"params/cfn/simet/mstar0.5/mean/a3_rd.param\", None, 'g')\n get_ngal_fit(\"params/cfn/simet/mstar0/mean/a3_rd.param\", None, 'b')\n get_ngal_fit(\"params/cfn/simet/mstar-1/mean/a3_rd.param\", None, 'r')\n\n #just spider points\n get_ngal_fit(\"params/cfn/spider/mstar1/mean/spider_rd.param\", None, 'c', plot_fit=False, spider=True)\n get_ngal_fit(\"params/cfn/spider/mstar0.5/mean/spider_rd.param\", None, 'g', plot_fit=False, spider=True)\n get_ngal_fit(\"params/cfn/spider/mstar0/mean/spider_rd.param\", None, 'b', plot_fit=False, spider=True)\n get_ngal_fit(\"params/cfn/spider/mstar-1/mean/spider_rd.param\", None, 'r', plot_fit=False, spider=True)\n\n # get_ngal_fit(\"params/cfn/spider/mstar0/mean/spider_rd.param\", None, 'm', plot_fit=False, spider=True)\n # get_ngal_fit(\"params/cfn/spider/mstar0/mean/bcg_rd.param\", None, 'c', plot_fit=False, spider=True)\n format_plot()\ndef plot_ngal_fits2(pattern, mstars):\n color_cycle = ['tab:blue', 'tab:orange', 'tab:green', 'tab:red', 'tab:purple', 'tab:brown', 'tab:pink', 'tab:gray', 'tab:olive', 'tab:cyan']\n for mstar, color in zip(mstars, color_cycle):\n get_ngal_fit(pattern.replace(\"${mstarval}\", mstar), None, color)\n format_plot()\n\nif __name__ == \"__main__\":\n if len(sys.argv) > 2:\n plot_name = sys.argv[1]\n else:\n plot_name = \"OR_McClintock2019\"\n mstars = ['-1', '-0.5', '0', '0.5', '1']\n if plot_name == \"OR_Simet2017\":\n pattern = 'params/rmba/auto/make_all_OR.high_richness.low_rez.min20.sh/crit/mstar${mstarval}/OR_rd_zoom.param'\n plot_ngal_fits2(pattern, mstars)\n elif plot_name == \"OR_McClintock2019\":\n pattern = 'params/rmba/auto/make_all_OR.McClintock.high_richness.low_rez.min20.sh/crit/mstar${mstarval}/OR_rd_zoom.param'\n plot_ngal_fits2(pattern, mstars)\n # plot_ngal_fits()\n dtk.save_figs(\"figs/\"+__file__+\"/\"+plot_name+\"/\", extension='.pdf')\n plt.show()\n",
"step-ids": [
5,
6,
7,
8,
9
]
}
|
[
5,
6,
7,
8,
9
] |
from multiprocessing import Pool
from pathlib import Path
import os
import re
import json
import string
import math
import GLOBALS
stopWords = {"a", "about", "above", "after", "again", "against", "all", "am", "an", "and", "any", "are", "aren't",
"as", "at", "be", "because", "been", "before", "being", "below", "between", "both", "but", "by",
"can't",
"cannot", "could", "couldn't", "did", "didn't", "do", "does", "doesn't", "doing", "don't", "down",
"during",
"each", "few", "for", "from", "further", "had", "hadn't", "has", "hasn't", "have", "haven't", "having",
"he", "he'd",
"he'll", "he's", "her", "here", "here's", "hers", "herself", "him", "himself", "his", "how", "how's",
"i", "i'd", "i'll",
"i'm", "i've", "if", "in", "into", "is", "isn't", "it", "it's", "its", "itself", "let's", "me", "more",
"most", "mustn't", "my",
"myself", "no", "nor", "not", "of", "off", "on", "once", "only", "or", "other", "ought", "our", "ours",
"ourselves", "out", "over",
"own", "same", "shan't", "she", "she'd", "she'll", "she's", "should", "shouldn't", "so", "some",
"such", "than", "that", "that's",
"the", "their", "theirs", "them", "themselves", "then", "there", "there's", "these", "they", "they'd",
"they'll", "they're", "they've",
"this", "those", "through", "to", "too", "under", "until", "up", "very", "was", "wasn't", "we", "we'd",
"we'll", "we're", "we've", "were", "weren't",
"what", "what's", "when", "when's", "where", "where's", "which", "while", "who", "who's", "whom",
"why", "why's", "with", "won't", "would", "wouldn't",
"you", "you'd", "you'll", "you're", "you've", "your", "yours", "yourself", "yourselves"}
# Main Functions (aka functions called in __main__)
# Takes in query as str. Returns a dict of docID -> accumulated TF-IDF weight for docs matching the OR query (inclusive)
def search(query, finalIndexPath):
listOfDicts = list()
queryList = set() # We use set() to remove duplicate terms, and we won't have to open a file twice
tempList = query.strip().lower().replace("'", "").split(" ")
for word in tempList:
if word not in stopWords:
queryList.add(word)
print("Cleaned query tokens:")
    print(queryList, "\n") # query tokens lowercased, with stopwords removed and apostrophes stripped
#convert set to list to enumerate
queryList = list(queryList)
for word in queryList:
charPath = word[0] #Get 1st char of current word, use to find subdir
# Get the file path of the final_indexed token.json file
jsonFilePath = str(Path(finalIndexPath) / charPath / word) + ".json"
try:
with open(jsonFilePath, "r") as file:
data = file.read()
jsonObj = json.loads(data)
docsDict = jsonObj["docList"]
listOfDicts.append(docsDict)
except:
pass
return intersectDicts(listOfDicts)
def getDocURLs(intersectedDocs, indexPath, cacheURLs):
    listUrls = list() # holds (url, accumulated TF-IDF weight) tuples for the matched docs
#
# hashTablePath = Path(indexPath) / "hashurls.txt"
# with open(hashTablePath, "r") as file:
# data = file.read()
# hashSet = json.loads(data)
for docID in intersectedDocs:
if(docID in cacheURLs):
fileUrl = cacheURLs[docID]
listUrls.append( (fileUrl, intersectedDocs[docID]) )
return listUrls
# Helper Functions (aka functions called by other functions)
# Merges the per-term doc dictionaries into a single dict, summing the TF-IDF weights of docs that appear under more than one query term
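# e.g. intersectDicts([{"d1": 0.4, "d2": 0.1}, {"d1": 0.3}]) == {"d1": 0.7, "d2": 0.1}
# (hypothetical doc IDs, shown only to illustrate the merge-and-sum behaviour)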
def intersectDicts(listOfDicts):
if len(listOfDicts) == 1:
return listOfDicts[0]
intersection = {}
for dictItem in listOfDicts:
for doc in dictItem:
if doc not in intersection:
intersection[doc] = dictItem[doc] #
else:
intersection[doc] += dictItem[doc] #adding tfidf weights
print("intersection = ", intersection)
return intersection
def flaskBackendQuery(queryUser, cacheURLs):
indexPath = GLOBALS.FINAL_INDEX
if (queryUser.strip() == ""):
print("Query needs to be at least one character")
unsortedDocs = search(queryUser, indexPath) #list of dictionaries
# Change filepaths to website URLs for displaying
unsortedURLs = getDocURLs(unsortedDocs, indexPath, cacheURLs)
# Sort docs by the TF-IDF score
sortedURLs = sorted(unsortedURLs, key=lambda x: x[1], reverse=True) #highest scores shown first
return sortedURLs[0:10] #return 10 results
if __name__ == '__main__':
#####
# Aljon
# finalIndexPath = "C:\\Users\\aljon\\Documents\\CS_121\\Assignment_3\\CS121_InvertedIndex\\final_index"
# indexPath = "C:\\Users\\aljon\\Documents\\CS_121\\Assignment_3\\CS121_InvertedIndex\\index"
# William
# folderPath = "C:\\1_Repos\\developer\\partial_indexes"
# folderPath = "C:\\Anaconda3\\envs\\Projects\\developer\\partial_indexes"
indexPath = "C:\\1_Repos\\developer"
finalIndexPath = "C:\\1_Repos\\developer"
# Jerome
#folderPath = "C:\\Users\\arkse\\Desktop\\CS121_InvertedIndex\\DEV"
# Art
# windows
#folderPath = "C:\\Users\\aghar\\Downloads\\DEV"
# linux
#folderPath = "/home/anon/Downloads/DEV"
#####
# Get query from user
query = input("Enter a search query: ")
if(query.strip() == ""):
print("Query needs to be at least one character")
    # Fetch all results of the query and merge them (inclusive OR), accumulating TF-IDF weights
    unsortedDocs = search(query, finalIndexPath)
    # Load the docID -> URL cache needed by getDocURLs (assumed to be the JSON hash
    # table at <indexPath>/hashurls.txt, as hinted by the commented-out block above)
    with open(str(Path(indexPath) / "hashurls.txt"), "r") as file:
        cacheURLs = json.loads(file.read())
    # Change filepaths to website URLs for displaying
    unsortedURLs = getDocURLs(unsortedDocs, indexPath, cacheURLs)
# Sort docs by the TF-IDF score
sortedURLs = sorted(unsortedURLs, key=lambda x: x[1], reverse=True)
# Print top 5 ranked file-urls for given query
print(f"\n------------ Top 5 Docs for '{query}' ------------\n")
for i, doc in enumerate(sortedURLs):
        if (i >= 5):  # stop after the top 5
break
print(doc[0], " = ", doc[1])
print("\n------------ DONE! ------------\n")
|
normal
|
{
"blob_id": "19f17044d48c8cc0f9d366cde7edc846ff343462",
"index": 2598,
"step-1": "<mask token>\n\n\ndef search(query, finalIndexPath):\n listOfDicts = list()\n queryList = set()\n tempList = query.strip().lower().replace(\"'\", '').split(' ')\n for word in tempList:\n if word not in stopWords:\n queryList.add(word)\n print('Cleaned query tokens:')\n print(queryList, '\\n')\n queryList = list(queryList)\n for word in queryList:\n charPath = word[0]\n jsonFilePath = str(Path(finalIndexPath) / charPath / word) + '.json'\n try:\n with open(jsonFilePath, 'r') as file:\n data = file.read()\n jsonObj = json.loads(data)\n docsDict = jsonObj['docList']\n listOfDicts.append(docsDict)\n except:\n pass\n return intersectDicts(listOfDicts)\n\n\ndef getDocURLs(intersectedDocs, indexPath, cacheURLs):\n listUrls = list()\n for docID in intersectedDocs:\n if docID in cacheURLs:\n fileUrl = cacheURLs[docID]\n listUrls.append((fileUrl, intersectedDocs[docID]))\n return listUrls\n\n\ndef intersectDicts(listOfDicts):\n if len(listOfDicts) == 1:\n return listOfDicts[0]\n intersection = {}\n for dictItem in listOfDicts:\n for doc in dictItem:\n if doc not in intersection:\n intersection[doc] = dictItem[doc]\n else:\n intersection[doc] += dictItem[doc]\n print('intersection = ', intersection)\n return intersection\n\n\ndef flaskBackendQuery(queryUser, cacheURLs):\n indexPath = GLOBALS.FINAL_INDEX\n if queryUser.strip() == '':\n print('Query needs to be at least one character')\n unsortedDocs = search(queryUser, indexPath)\n unsortedURLs = getDocURLs(unsortedDocs, indexPath, cacheURLs)\n sortedURLs = sorted(unsortedURLs, key=lambda x: x[1], reverse=True)\n return sortedURLs[0:10]\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef search(query, finalIndexPath):\n listOfDicts = list()\n queryList = set()\n tempList = query.strip().lower().replace(\"'\", '').split(' ')\n for word in tempList:\n if word not in stopWords:\n queryList.add(word)\n print('Cleaned query tokens:')\n print(queryList, '\\n')\n queryList = list(queryList)\n for word in queryList:\n charPath = word[0]\n jsonFilePath = str(Path(finalIndexPath) / charPath / word) + '.json'\n try:\n with open(jsonFilePath, 'r') as file:\n data = file.read()\n jsonObj = json.loads(data)\n docsDict = jsonObj['docList']\n listOfDicts.append(docsDict)\n except:\n pass\n return intersectDicts(listOfDicts)\n\n\ndef getDocURLs(intersectedDocs, indexPath, cacheURLs):\n listUrls = list()\n for docID in intersectedDocs:\n if docID in cacheURLs:\n fileUrl = cacheURLs[docID]\n listUrls.append((fileUrl, intersectedDocs[docID]))\n return listUrls\n\n\ndef intersectDicts(listOfDicts):\n if len(listOfDicts) == 1:\n return listOfDicts[0]\n intersection = {}\n for dictItem in listOfDicts:\n for doc in dictItem:\n if doc not in intersection:\n intersection[doc] = dictItem[doc]\n else:\n intersection[doc] += dictItem[doc]\n print('intersection = ', intersection)\n return intersection\n\n\ndef flaskBackendQuery(queryUser, cacheURLs):\n indexPath = GLOBALS.FINAL_INDEX\n if queryUser.strip() == '':\n print('Query needs to be at least one character')\n unsortedDocs = search(queryUser, indexPath)\n unsortedURLs = getDocURLs(unsortedDocs, indexPath, cacheURLs)\n sortedURLs = sorted(unsortedURLs, key=lambda x: x[1], reverse=True)\n return sortedURLs[0:10]\n\n\nif __name__ == '__main__':\n indexPath = 'C:\\\\1_Repos\\\\developer'\n finalIndexPath = 'C:\\\\1_Repos\\\\developer'\n query = input('Enter a search query: ')\n if query.strip() == '':\n print('Query needs to be at least one character')\n unsortedDocs = search(query, finalIndexPath)\n unsortedURLs = getDocURLs(unsortedDocs, indexPath)\n sortedURLs = sorted(unsortedURLs, key=lambda x: x[1], reverse=True)\n print(f\"\\n------------ Top 5 Docs for '{query}' ------------\\n\")\n for i, doc in enumerate(sortedURLs):\n if i > 5:\n break\n print(doc[0], ' = ', doc[1])\n print('\\n------------ DONE! ------------\\n')\n",
"step-3": "<mask token>\nstopWords = {'a', 'about', 'above', 'after', 'again', 'against', 'all',\n 'am', 'an', 'and', 'any', 'are', \"aren't\", 'as', 'at', 'be', 'because',\n 'been', 'before', 'being', 'below', 'between', 'both', 'but', 'by',\n \"can't\", 'cannot', 'could', \"couldn't\", 'did', \"didn't\", 'do', 'does',\n \"doesn't\", 'doing', \"don't\", 'down', 'during', 'each', 'few', 'for',\n 'from', 'further', 'had', \"hadn't\", 'has', \"hasn't\", 'have', \"haven't\",\n 'having', 'he', \"he'd\", \"he'll\", \"he's\", 'her', 'here', \"here's\",\n 'hers', 'herself', 'him', 'himself', 'his', 'how', \"how's\", 'i', \"i'd\",\n \"i'll\", \"i'm\", \"i've\", 'if', 'in', 'into', 'is', \"isn't\", 'it', \"it's\",\n 'its', 'itself', \"let's\", 'me', 'more', 'most', \"mustn't\", 'my',\n 'myself', 'no', 'nor', 'not', 'of', 'off', 'on', 'once', 'only', 'or',\n 'other', 'ought', 'our', 'ours', 'ourselves', 'out', 'over', 'own',\n 'same', \"shan't\", 'she', \"she'd\", \"she'll\", \"she's\", 'should',\n \"shouldn't\", 'so', 'some', 'such', 'than', 'that', \"that's\", 'the',\n 'their', 'theirs', 'them', 'themselves', 'then', 'there', \"there's\",\n 'these', 'they', \"they'd\", \"they'll\", \"they're\", \"they've\", 'this',\n 'those', 'through', 'to', 'too', 'under', 'until', 'up', 'very', 'was',\n \"wasn't\", 'we', \"we'd\", \"we'll\", \"we're\", \"we've\", 'were', \"weren't\",\n 'what', \"what's\", 'when', \"when's\", 'where', \"where's\", 'which',\n 'while', 'who', \"who's\", 'whom', 'why', \"why's\", 'with', \"won't\",\n 'would', \"wouldn't\", 'you', \"you'd\", \"you'll\", \"you're\", \"you've\",\n 'your', 'yours', 'yourself', 'yourselves'}\n\n\ndef search(query, finalIndexPath):\n listOfDicts = list()\n queryList = set()\n tempList = query.strip().lower().replace(\"'\", '').split(' ')\n for word in tempList:\n if word not in stopWords:\n queryList.add(word)\n print('Cleaned query tokens:')\n print(queryList, '\\n')\n queryList = list(queryList)\n for word in queryList:\n charPath = word[0]\n jsonFilePath = str(Path(finalIndexPath) / charPath / word) + '.json'\n try:\n with open(jsonFilePath, 'r') as file:\n data = file.read()\n jsonObj = json.loads(data)\n docsDict = jsonObj['docList']\n listOfDicts.append(docsDict)\n except:\n pass\n return intersectDicts(listOfDicts)\n\n\ndef getDocURLs(intersectedDocs, indexPath, cacheURLs):\n listUrls = list()\n for docID in intersectedDocs:\n if docID in cacheURLs:\n fileUrl = cacheURLs[docID]\n listUrls.append((fileUrl, intersectedDocs[docID]))\n return listUrls\n\n\ndef intersectDicts(listOfDicts):\n if len(listOfDicts) == 1:\n return listOfDicts[0]\n intersection = {}\n for dictItem in listOfDicts:\n for doc in dictItem:\n if doc not in intersection:\n intersection[doc] = dictItem[doc]\n else:\n intersection[doc] += dictItem[doc]\n print('intersection = ', intersection)\n return intersection\n\n\ndef flaskBackendQuery(queryUser, cacheURLs):\n indexPath = GLOBALS.FINAL_INDEX\n if queryUser.strip() == '':\n print('Query needs to be at least one character')\n unsortedDocs = search(queryUser, indexPath)\n unsortedURLs = getDocURLs(unsortedDocs, indexPath, cacheURLs)\n sortedURLs = sorted(unsortedURLs, key=lambda x: x[1], reverse=True)\n return sortedURLs[0:10]\n\n\nif __name__ == '__main__':\n indexPath = 'C:\\\\1_Repos\\\\developer'\n finalIndexPath = 'C:\\\\1_Repos\\\\developer'\n query = input('Enter a search query: ')\n if query.strip() == '':\n print('Query needs to be at least one character')\n unsortedDocs = search(query, finalIndexPath)\n unsortedURLs = 
getDocURLs(unsortedDocs, indexPath)\n sortedURLs = sorted(unsortedURLs, key=lambda x: x[1], reverse=True)\n print(f\"\\n------------ Top 5 Docs for '{query}' ------------\\n\")\n for i, doc in enumerate(sortedURLs):\n if i > 5:\n break\n print(doc[0], ' = ', doc[1])\n print('\\n------------ DONE! ------------\\n')\n",
"step-4": "from multiprocessing import Pool\nfrom pathlib import Path\nimport os\nimport re\nimport json\nimport string\nimport math\nimport GLOBALS\nstopWords = {'a', 'about', 'above', 'after', 'again', 'against', 'all',\n 'am', 'an', 'and', 'any', 'are', \"aren't\", 'as', 'at', 'be', 'because',\n 'been', 'before', 'being', 'below', 'between', 'both', 'but', 'by',\n \"can't\", 'cannot', 'could', \"couldn't\", 'did', \"didn't\", 'do', 'does',\n \"doesn't\", 'doing', \"don't\", 'down', 'during', 'each', 'few', 'for',\n 'from', 'further', 'had', \"hadn't\", 'has', \"hasn't\", 'have', \"haven't\",\n 'having', 'he', \"he'd\", \"he'll\", \"he's\", 'her', 'here', \"here's\",\n 'hers', 'herself', 'him', 'himself', 'his', 'how', \"how's\", 'i', \"i'd\",\n \"i'll\", \"i'm\", \"i've\", 'if', 'in', 'into', 'is', \"isn't\", 'it', \"it's\",\n 'its', 'itself', \"let's\", 'me', 'more', 'most', \"mustn't\", 'my',\n 'myself', 'no', 'nor', 'not', 'of', 'off', 'on', 'once', 'only', 'or',\n 'other', 'ought', 'our', 'ours', 'ourselves', 'out', 'over', 'own',\n 'same', \"shan't\", 'she', \"she'd\", \"she'll\", \"she's\", 'should',\n \"shouldn't\", 'so', 'some', 'such', 'than', 'that', \"that's\", 'the',\n 'their', 'theirs', 'them', 'themselves', 'then', 'there', \"there's\",\n 'these', 'they', \"they'd\", \"they'll\", \"they're\", \"they've\", 'this',\n 'those', 'through', 'to', 'too', 'under', 'until', 'up', 'very', 'was',\n \"wasn't\", 'we', \"we'd\", \"we'll\", \"we're\", \"we've\", 'were', \"weren't\",\n 'what', \"what's\", 'when', \"when's\", 'where', \"where's\", 'which',\n 'while', 'who', \"who's\", 'whom', 'why', \"why's\", 'with', \"won't\",\n 'would', \"wouldn't\", 'you', \"you'd\", \"you'll\", \"you're\", \"you've\",\n 'your', 'yours', 'yourself', 'yourselves'}\n\n\ndef search(query, finalIndexPath):\n listOfDicts = list()\n queryList = set()\n tempList = query.strip().lower().replace(\"'\", '').split(' ')\n for word in tempList:\n if word not in stopWords:\n queryList.add(word)\n print('Cleaned query tokens:')\n print(queryList, '\\n')\n queryList = list(queryList)\n for word in queryList:\n charPath = word[0]\n jsonFilePath = str(Path(finalIndexPath) / charPath / word) + '.json'\n try:\n with open(jsonFilePath, 'r') as file:\n data = file.read()\n jsonObj = json.loads(data)\n docsDict = jsonObj['docList']\n listOfDicts.append(docsDict)\n except:\n pass\n return intersectDicts(listOfDicts)\n\n\ndef getDocURLs(intersectedDocs, indexPath, cacheURLs):\n listUrls = list()\n for docID in intersectedDocs:\n if docID in cacheURLs:\n fileUrl = cacheURLs[docID]\n listUrls.append((fileUrl, intersectedDocs[docID]))\n return listUrls\n\n\ndef intersectDicts(listOfDicts):\n if len(listOfDicts) == 1:\n return listOfDicts[0]\n intersection = {}\n for dictItem in listOfDicts:\n for doc in dictItem:\n if doc not in intersection:\n intersection[doc] = dictItem[doc]\n else:\n intersection[doc] += dictItem[doc]\n print('intersection = ', intersection)\n return intersection\n\n\ndef flaskBackendQuery(queryUser, cacheURLs):\n indexPath = GLOBALS.FINAL_INDEX\n if queryUser.strip() == '':\n print('Query needs to be at least one character')\n unsortedDocs = search(queryUser, indexPath)\n unsortedURLs = getDocURLs(unsortedDocs, indexPath, cacheURLs)\n sortedURLs = sorted(unsortedURLs, key=lambda x: x[1], reverse=True)\n return sortedURLs[0:10]\n\n\nif __name__ == '__main__':\n indexPath = 'C:\\\\1_Repos\\\\developer'\n finalIndexPath = 'C:\\\\1_Repos\\\\developer'\n query = input('Enter a search query: ')\n if query.strip() 
== '':\n print('Query needs to be at least one character')\n unsortedDocs = search(query, finalIndexPath)\n unsortedURLs = getDocURLs(unsortedDocs, indexPath)\n sortedURLs = sorted(unsortedURLs, key=lambda x: x[1], reverse=True)\n print(f\"\\n------------ Top 5 Docs for '{query}' ------------\\n\")\n for i, doc in enumerate(sortedURLs):\n if i > 5:\n break\n print(doc[0], ' = ', doc[1])\n print('\\n------------ DONE! ------------\\n')\n",
"step-5": "from multiprocessing import Pool\nfrom pathlib import Path\nimport os\nimport re\nimport json\nimport string\nimport math\nimport GLOBALS\n\nstopWords = {\"a\", \"about\", \"above\", \"after\", \"again\", \"against\", \"all\", \"am\", \"an\", \"and\", \"any\", \"are\", \"aren't\",\n \"as\", \"at\", \"be\", \"because\", \"been\", \"before\", \"being\", \"below\", \"between\", \"both\", \"but\", \"by\",\n \"can't\",\n \"cannot\", \"could\", \"couldn't\", \"did\", \"didn't\", \"do\", \"does\", \"doesn't\", \"doing\", \"don't\", \"down\",\n \"during\",\n \"each\", \"few\", \"for\", \"from\", \"further\", \"had\", \"hadn't\", \"has\", \"hasn't\", \"have\", \"haven't\", \"having\",\n \"he\", \"he'd\",\n \"he'll\", \"he's\", \"her\", \"here\", \"here's\", \"hers\", \"herself\", \"him\", \"himself\", \"his\", \"how\", \"how's\",\n \"i\", \"i'd\", \"i'll\",\n \"i'm\", \"i've\", \"if\", \"in\", \"into\", \"is\", \"isn't\", \"it\", \"it's\", \"its\", \"itself\", \"let's\", \"me\", \"more\",\n \"most\", \"mustn't\", \"my\",\n \"myself\", \"no\", \"nor\", \"not\", \"of\", \"off\", \"on\", \"once\", \"only\", \"or\", \"other\", \"ought\", \"our\", \"ours\",\n \"ourselves\", \"out\", \"over\",\n \"own\", \"same\", \"shan't\", \"she\", \"she'd\", \"she'll\", \"she's\", \"should\", \"shouldn't\", \"so\", \"some\",\n \"such\", \"than\", \"that\", \"that's\",\n \"the\", \"their\", \"theirs\", \"them\", \"themselves\", \"then\", \"there\", \"there's\", \"these\", \"they\", \"they'd\",\n \"they'll\", \"they're\", \"they've\",\n \"this\", \"those\", \"through\", \"to\", \"too\", \"under\", \"until\", \"up\", \"very\", \"was\", \"wasn't\", \"we\", \"we'd\",\n \"we'll\", \"we're\", \"we've\", \"were\", \"weren't\",\n \"what\", \"what's\", \"when\", \"when's\", \"where\", \"where's\", \"which\", \"while\", \"who\", \"who's\", \"whom\",\n \"why\", \"why's\", \"with\", \"won't\", \"would\", \"wouldn't\",\n \"you\", \"you'd\", \"you'll\", \"you're\", \"you've\", \"your\", \"yours\", \"yourself\", \"yourselves\"}\n\n\n\n# Main Functions (aka functions called in __main__)\n\n# Takes in query as str. 
Returns list of docs that match the OR query (inclusive)\ndef search(query, finalIndexPath):\n listOfDicts = list()\n queryList = set() # We use set() to remove duplicate terms, and we won't have to open a file twice\n tempList = query.strip().lower().replace(\"'\", \"\").split(\" \")\n\n for word in tempList:\n if word not in stopWords:\n queryList.add(word)\n\n print(\"Cleaned query tokens:\")\n print(queryList, \"\\n\") # query tokens with stopwords removed and replacing apostrohe and lower()\n\n #convert set to list to enumerate\n queryList = list(queryList)\n\n for word in queryList:\n charPath = word[0] #Get 1st char of current word, use to find subdir\n\n # Get the file path of the final_indexed token.json file\n jsonFilePath = str(Path(finalIndexPath) / charPath / word) + \".json\"\n\n try:\n with open(jsonFilePath, \"r\") as file:\n data = file.read()\n jsonObj = json.loads(data)\n docsDict = jsonObj[\"docList\"]\n listOfDicts.append(docsDict)\n except:\n pass\n\n return intersectDicts(listOfDicts)\n\n\ndef getDocURLs(intersectedDocs, indexPath, cacheURLs):\n listUrls = list() # holds unique file paths of .json files\n #\n # hashTablePath = Path(indexPath) / \"hashurls.txt\"\n # with open(hashTablePath, \"r\") as file:\n # data = file.read()\n # hashSet = json.loads(data)\n\n for docID in intersectedDocs:\n if(docID in cacheURLs):\n fileUrl = cacheURLs[docID]\n listUrls.append( (fileUrl, intersectedDocs[docID]) )\n\n return listUrls\n\n\n\n# Helper Functions (aka functions called by other functions)\n\n# Returns unique dict of file urls from hashurl.txt (or hasthtable.txt)\ndef intersectDicts(listOfDicts):\n if len(listOfDicts) == 1:\n return listOfDicts[0]\n\n intersection = {}\n for dictItem in listOfDicts:\n for doc in dictItem:\n if doc not in intersection:\n intersection[doc] = dictItem[doc] #\n else:\n intersection[doc] += dictItem[doc] #adding tfidf weights\n print(\"intersection = \", intersection)\n return intersection\n\n\ndef flaskBackendQuery(queryUser, cacheURLs):\n indexPath = GLOBALS.FINAL_INDEX\n\n if (queryUser.strip() == \"\"):\n print(\"Query needs to be at least one character\")\n\n unsortedDocs = search(queryUser, indexPath) #list of dictionaries\n\n # Change filepaths to website URLs for displaying\n unsortedURLs = getDocURLs(unsortedDocs, indexPath, cacheURLs)\n\n # Sort docs by the TF-IDF score\n sortedURLs = sorted(unsortedURLs, key=lambda x: x[1], reverse=True) #highest scores shown first\n\n return sortedURLs[0:10] #return 10 results\n\n\nif __name__ == '__main__':\n #####\n # Aljon\n # finalIndexPath = \"C:\\\\Users\\\\aljon\\\\Documents\\\\CS_121\\\\Assignment_3\\\\CS121_InvertedIndex\\\\final_index\"\n # indexPath = \"C:\\\\Users\\\\aljon\\\\Documents\\\\CS_121\\\\Assignment_3\\\\CS121_InvertedIndex\\\\index\"\n\n # William\n # folderPath = \"C:\\\\1_Repos\\\\developer\\\\partial_indexes\"\n # folderPath = \"C:\\\\Anaconda3\\\\envs\\\\Projects\\\\developer\\\\partial_indexes\"\n indexPath = \"C:\\\\1_Repos\\\\developer\"\n finalIndexPath = \"C:\\\\1_Repos\\\\developer\"\n\n # Jerome\n #folderPath = \"C:\\\\Users\\\\arkse\\\\Desktop\\\\CS121_InvertedIndex\\\\DEV\"\n\n # Art\n # windows\n #folderPath = \"C:\\\\Users\\\\aghar\\\\Downloads\\\\DEV\"\n # linux\n #folderPath = \"/home/anon/Downloads/DEV\"\n #####\n\n\n # Get query from user\n query = input(\"Enter a search query: \")\n if(query.strip() == \"\"):\n print(\"Query needs to be at least one character\")\n # Fetch all results of query, intersect them to follow Bool-AND logic\n unsortedDocs = 
search(query, finalIndexPath)\n\n # Change filepaths to website URLs for displaying\n unsortedURLs = getDocURLs(unsortedDocs, indexPath)\n\n # Sort docs by the TF-IDF score\n sortedURLs = sorted(unsortedURLs, key=lambda x: x[1], reverse=True)\n \n # Print top 5 ranked file-urls for given query\n print(f\"\\n------------ Top 5 Docs for '{query}' ------------\\n\")\n for i, doc in enumerate(sortedURLs):\n if (i > 5):\n break\n print(doc[0], \" = \", doc[1])\n\n print(\"\\n------------ DONE! ------------\\n\")\n",
"step-ids": [
4,
5,
6,
7,
8
]
}
|
[
4,
5,
6,
7,
8
] |
import pandas as pd
from sklearn.preprocessing import MinMaxScaler
#loading data from CSV
training_data_df = pd.read_csv("sales_data_training.csv")
test_data_df = pd.read_csv("sales_data_test.csv")
#scaler
scaler = MinMaxScaler(feature_range=(0,1))
#scale both inputs and outputs
scaled_training = scaler.fit_transform(training_data_df)
scaled_testing = scaler.transform(test_data_df)
#to bring it back to the original values
print("Note: total_earnings values were scaled by multiplying by {:.10f} and adding {:.6f}".format(scaler.scale_[8], scaler.min_[8]))
#create a new scaled dataframe object
scaled_training_df = pd.DataFrame(scaled_training, columns=training_data_df.columns.values)
scaled_testing_df = pd.DataFrame(scaled_testing, columns=test_data_df.columns.values)
#save the scaled dataframe to new csv files
scaled_training_df.to_csv("sales_data_training_scaled.csv", index=False)
scaled_testing_df.to_csv("sales_data_test_scaled.csv", index=False)
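# Hedged usage note (not in the original script): a prediction made on the scaled data can be
# mapped back to real units by inverting the transform reported above, e.g.
#   total_earnings = (scaled_prediction - scaler.min_[8]) / scaler.scale_[8]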
|
normal
|
{
"blob_id": "050e2207ac7331444d39305869c4b25bcbc53907",
"index": 244,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nprint(\n 'Note: total_earnings values were scaled by multiplying by {:.10f} and adding {:.6f}'\n .format(scaler.scale_[8], scaler.min_[8]))\n<mask token>\nscaled_training_df.to_csv('sales_data_training_scaled.csv', index=False)\nscaled_training_df.to_csv('sales_data_test_scaled.csv', index=False)\n",
"step-3": "<mask token>\ntraining_data_df = pd.read_csv('sales_data_training.csv')\ntest_data_df = pd.read_csv('sales_data_test.csv')\nscaler = MinMaxScaler(feature_range=(0, 1))\nscaled_training = scaler.fit_transform(training_data_df)\nscaled_testing = scaler.transform(test_data_df)\nprint(\n 'Note: total_earnings values were scaled by multiplying by {:.10f} and adding {:.6f}'\n .format(scaler.scale_[8], scaler.min_[8]))\nscaled_training_df = pd.DataFrame(scaled_training, columns=training_data_df\n .columns.values)\nscaled_testing_df = pd.DataFrame(scaled_testing, columns=test_data_df.\n columns.values)\nscaled_training_df.to_csv('sales_data_training_scaled.csv', index=False)\nscaled_training_df.to_csv('sales_data_test_scaled.csv', index=False)\n",
"step-4": "import pandas as pd\nfrom sklearn.preprocessing import MinMaxScaler\ntraining_data_df = pd.read_csv('sales_data_training.csv')\ntest_data_df = pd.read_csv('sales_data_test.csv')\nscaler = MinMaxScaler(feature_range=(0, 1))\nscaled_training = scaler.fit_transform(training_data_df)\nscaled_testing = scaler.transform(test_data_df)\nprint(\n 'Note: total_earnings values were scaled by multiplying by {:.10f} and adding {:.6f}'\n .format(scaler.scale_[8], scaler.min_[8]))\nscaled_training_df = pd.DataFrame(scaled_training, columns=training_data_df\n .columns.values)\nscaled_testing_df = pd.DataFrame(scaled_testing, columns=test_data_df.\n columns.values)\nscaled_training_df.to_csv('sales_data_training_scaled.csv', index=False)\nscaled_training_df.to_csv('sales_data_test_scaled.csv', index=False)\n",
"step-5": "import pandas as pd\nfrom sklearn.preprocessing import MinMaxScaler\n\n#loading data from CSV\ntraining_data_df = pd.read_csv(\"sales_data_training.csv\")\ntest_data_df = pd.read_csv(\"sales_data_test.csv\")\n\n#scaler\nscaler = MinMaxScaler(feature_range=(0,1))\n\n#scale both inputs and outputs\nscaled_training = scaler.fit_transform(training_data_df)\nscaled_testing = scaler.transform(test_data_df)\n\n#to bring it back to the original values\nprint(\"Note: total_earnings values were scaled by multiplying by {:.10f} and adding {:.6f}\".format(scaler.scale_[8], scaler.min_[8]))\n\n#create a new scaled dataframe object\nscaled_training_df = pd.DataFrame(scaled_training, columns=training_data_df.columns.values)\nscaled_testing_df = pd.DataFrame(scaled_testing, columns=test_data_df.columns.values)\n\n#save the scaled dataframe to new csv files\nscaled_training_df.to_csv(\"sales_data_training_scaled.csv\", index=False)\nscaled_training_df.to_csv(\"sales_data_test_scaled.csv\", index=False)\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
import vobject
import glob
import sys
vobj=vobject.readOne(open("Nelson.vcf"))
print vobj.contents
def main(args):
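    # NOTE (added annotation): datos() and the catedraticos collection are not defined in this
    # file; they are assumed to be provided by a companion module that parses the invoice XMLs.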
suma = 0
    titulos = ['nombre del archivo', 'Total', 'subtotal', 'rfc', 'fecha', 'ivaTrasladado', 'isrTrasladado', 'ivaRetenido', 'isrRetenido']
import csv
out = csv.writer(open("out.csv","w"), delimiter=',',quoting=csv.QUOTE_ALL)
out.writerow(titulos)
for argument in args:
t = datos(argument)
row = []
if not t["rfcEmisor"] in catedraticos:
suma += t["total"]
row.append(argument)
row.append(t['total'])
row.append(t['subTotal'])
row.append(t['rfcEmisor'])
row.append(t['fecha'])
row.append(t['ivat'])
row.append(t['isrt'])
row.append(t['ivar'])
row.append(t['isrr'])
out.writerow(row)
if __name__ == '__main__':
if len(sys.argv[1:]) > 0:
main(sys.argv[1:])
else:
files = glob.glob("*.xml")
if files:
main(files)
else:
raw_input("no hay archivos xml")
|
normal
|
{
"blob_id": "a1115766c5f17abc1ba90a3314cb5b9c4aab73d6",
"index": 8169,
"step-1": "import vobject\nimport glob\nimport sys\n\nvobj=vobject.readOne(open(\"Nelson.vcf\"))\nprint vobj.contents\n\n\ndef main(args):\n suma = 0\n titulos = ['nombre del archivo', 'Total', 'subtotoal', 'rfc', 'fecha', 'ivaTrasladado', 'isrTrasladado', 'ivaRetenido', 'isrRetenido']\n import csv\n out = csv.writer(open(\"out.csv\",\"w\"), delimiter=',',quoting=csv.QUOTE_ALL)\n out.writerow(titulos)\n for argument in args:\n t = datos(argument)\n row = []\n if not t[\"rfcEmisor\"] in catedraticos:\n suma += t[\"total\"]\n row.append(argument)\n row.append(t['total'])\n row.append(t['subTotal'])\n row.append(t['rfcEmisor'])\n row.append(t['fecha'])\n row.append(t['ivat'])\n row.append(t['isrt'])\n row.append(t['ivar'])\n row.append(t['isrr'])\n out.writerow(row)\n\nif __name__ == '__main__':\n if len(sys.argv[1:]) > 0:\n main(sys.argv[1:])\n else:\n files = glob.glob(\"*.xml\")\n if files:\n main(files)\n else:\n raw_input(\"no hay archivos xml\")\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
#encoding:UTF-8
from numpy import *
#----------------------------------------------------------------------
def differences(a, b):
    """Return one row [index, value_in_a, value_in_b] for each position where a and b differ."""
c = a[a!=b]
d = b[a!=b]
nums = nonzero(a!=b)[0]
return concatenate((mat(nums), c, d)).T
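
# Minimal usage sketch (illustrative only, not part of the original module): with two column
# matrices of equal shape, differences() returns one row per mismatch, laid out as
# [position, value_in_a, value_in_b].
if __name__ == '__main__':
    a = mat([1, 2, 3, 4]).T
    b = mat([1, 0, 3, 7]).T
    print(differences(a, b))  # rows for positions 1 and 3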
|
normal
|
{
"blob_id": "67a76f1f1dad4b7e73359f04ca8f599c8d32dc92",
"index": 2900,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef differences(a, b):\n \"\"\"\"\"\"\n c = a[a != b]\n d = b[a != b]\n nums = nonzero(a != b)[0]\n return concatenate((mat(nums), c, d)).T\n",
"step-3": "from numpy import *\n\n\ndef differences(a, b):\n \"\"\"\"\"\"\n c = a[a != b]\n d = b[a != b]\n nums = nonzero(a != b)[0]\n return concatenate((mat(nums), c, d)).T\n",
"step-4": "#encoding:UTF-8\n\nfrom numpy import *\n\n#----------------------------------------------------------------------\ndef differences(a, b):\n \"\"\"\"\"\"\n c = a[a!=b]\n d = b[a!=b]\n nums = nonzero(a!=b)[0]\n return concatenate((mat(nums), c, d)).T",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
#!/usr/bin/env python
#------------------------------------------------------------------------------
# imsrg_pairing.py
#
# author: H. Hergert
# version: 1.5.0
# date: Dec 6, 2016
#
# tested with Python v2.7
#
# Solves the pairing model for four particles in a basis of four doubly
# degenerate states by means of an In-Medium Similarity Renormalization
# Group (IMSRG) flow.
#
#------------------------------------------------------------------------------
import numpy as np
from numpy import array, dot, diag, reshape, transpose
from scipy.linalg import eigvalsh
from scipy.integrate import odeint, ode
from sys import argv
#-----------------------------------------------------------------------------------
# basis and index functions
#-----------------------------------------------------------------------------------
def construct_basis_2B(holes, particles):
basis = []
for i in holes:
for j in holes:
basis.append((i, j))
for i in holes:
for a in particles:
basis.append((i, a))
for a in particles:
for i in holes:
basis.append((a, i))
for a in particles:
for b in particles:
basis.append((a, b))
return basis
def construct_basis_ph2B(holes, particles):
basis = []
for i in holes:
for j in holes:
basis.append((i, j))
for i in holes:
for a in particles:
basis.append((i, a))
for a in particles:
for i in holes:
basis.append((a, i))
for a in particles:
for b in particles:
basis.append((a, b))
return basis
#
# We use dictionaries for the reverse lookup of state indices
#
def construct_index_2B(bas2B):
index = { }
for i, state in enumerate(bas2B):
index[state] = i
return index
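# In other words, idx2B[bas2B[i]] == i for every i, so matrix elements can be addressed
# directly by state tuples, e.g. Gamma[idx2B[(a,b)], idx2B[(i,j)]].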
#-----------------------------------------------------------------------------------
# transform matrices to particle-hole representation
#-----------------------------------------------------------------------------------
def ph_transform_2B(Gamma, bas2B, idx2B, basph2B, idxph2B):
dim = len(basph2B)
Gamma_ph = np.zeros((dim, dim))
for i1, (a,b) in enumerate(basph2B):
for i2, (c, d) in enumerate(basph2B):
Gamma_ph[i1, i2] -= Gamma[idx2B[(a,d)], idx2B[(c,b)]]
return Gamma_ph
def inverse_ph_transform_2B(Gamma_ph, bas2B, idx2B, basph2B, idxph2B):
dim = len(bas2B)
Gamma = np.zeros((dim, dim))
for i1, (a,b) in enumerate(bas2B):
for i2, (c, d) in enumerate(bas2B):
Gamma[i1, i2] -= Gamma_ph[idxph2B[(a,d)], idxph2B[(c,b)]]
return Gamma
#-----------------------------------------------------------------------------------
# commutator of matrices
#-----------------------------------------------------------------------------------
def commutator(a,b):
return dot(a,b) - dot(b,a)
#-----------------------------------------------------------------------------------
# norms of off-diagonal Hamiltonian pieces
#-----------------------------------------------------------------------------------
def calc_fod_norm(f, user_data):
particles = user_data["particles"]
holes = user_data["holes"]
norm = 0.0
for a in particles:
for i in holes:
norm += f[a,i]**2 + f[i,a]**2
return np.sqrt(norm)
def calc_Gammaod_norm(Gamma, user_data):
particles = user_data["particles"]
holes = user_data["holes"]
idx2B = user_data["idx2B"]
norm = 0.0
for a in particles:
for b in particles:
for i in holes:
for j in holes:
norm += Gamma[idx2B[(a,b)],idx2B[(i,j)]]**2 + Gamma[idx2B[(i,j)],idx2B[(a,b)]]**2
return np.sqrt(norm)
#-----------------------------------------------------------------------------------
# occupation number matrices
#-----------------------------------------------------------------------------------
def construct_occupation_1B(bas1B, holes, particles):
dim = len(bas1B)
occ = np.zeros(dim)
for i in holes:
occ[i] = 1.
return occ
# diagonal matrix: n_a - n_b
def construct_occupationA_2B(bas2B, occ1B):
dim = len(bas2B)
occ = np.zeros((dim,dim))
for i1, (i,j) in enumerate(bas2B):
occ[i1, i1] = occ1B[i] - occ1B[j]
return occ
# diagonal matrix: 1 - n_a - n_b
def construct_occupationB_2B(bas2B, occ1B):
dim = len(bas2B)
occ = np.zeros((dim,dim))
for i1, (i,j) in enumerate(bas2B):
occ[i1, i1] = 1. - occ1B[i] - occ1B[j]
return occ
# diagonal matrix: n_a * n_b
def construct_occupationC_2B(bas2B, occ1B):
dim = len(bas2B)
occ = np.zeros((dim,dim))
for i1, (i,j) in enumerate(bas2B):
occ[i1, i1] = occ1B[i] * occ1B[j]
return occ
#-----------------------------------------------------------------------------------
# generators
#-----------------------------------------------------------------------------------
def eta_brillouin(f, Gamma, user_data):
dim1B = user_data["dim1B"]
particles = user_data["particles"]
holes = user_data["holes"]
idx2B = user_data["idx2B"]
# one-body part of the generator
eta1B = np.zeros_like(f)
for a in particles:
for i in holes:
# (1-n_a)n_i - n_a(1-n_i) = n_i - n_a
eta1B[a, i] = f[a,i]
eta1B[i, a] = -f[a,i]
# two-body part of the generator
eta2B = np.zeros_like(Gamma)
for a in particles:
for b in particles:
for i in holes:
for j in holes:
val = Gamma[idx2B[(a,b)], idx2B[(i,j)]]
eta2B[idx2B[(a,b)],idx2B[(i,j)]] = val
eta2B[idx2B[(i,j)],idx2B[(a,b)]] = -val
return eta1B, eta2B
def eta_imtime(f, Gamma, user_data):
dim1B = user_data["dim1B"]
particles = user_data["particles"]
holes = user_data["holes"]
idx2B = user_data["idx2B"]
# one-body part of the generator
eta1B = np.zeros_like(f)
for a in particles:
for i in holes:
dE = f[a,a] - f[i,i] + Gamma[idx2B[(a,i)], idx2B[(a,i)]]
val = np.sign(dE)*f[a,i]
eta1B[a, i] = val
eta1B[i, a] = -val
# two-body part of the generator
eta2B = np.zeros_like(Gamma)
for a in particles:
for b in particles:
for i in holes:
for j in holes:
dE = (
f[a,a] + f[b,b] - f[i,i] - f[j,j]
+ Gamma[idx2B[(a,b)],idx2B[(a,b)]]
+ Gamma[idx2B[(i,j)],idx2B[(i,j)]]
- Gamma[idx2B[(a,i)],idx2B[(a,i)]]
- Gamma[idx2B[(a,j)],idx2B[(a,j)]]
- Gamma[idx2B[(b,i)],idx2B[(b,i)]]
- Gamma[idx2B[(b,j)],idx2B[(b,j)]]
)
val = np.sign(dE)*Gamma[idx2B[(a,b)], idx2B[(i,j)]]
eta2B[idx2B[(a,b)],idx2B[(i,j)]] = val
eta2B[idx2B[(i,j)],idx2B[(a,b)]] = -val
return eta1B, eta2B
def eta_white(f, Gamma, user_data):
dim1B = user_data["dim1B"]
particles = user_data["particles"]
holes = user_data["holes"]
idx2B = user_data["idx2B"]
# one-body part of the generator
eta1B = np.zeros_like(f)
for a in particles:
for i in holes:
denom = f[a,a] - f[i,i] + Gamma[idx2B[(a,i)], idx2B[(a,i)]]
val = f[a,i]/denom
eta1B[a, i] = val
eta1B[i, a] = -val
# two-body part of the generator
eta2B = np.zeros_like(Gamma)
for a in particles:
for b in particles:
for i in holes:
for j in holes:
denom = (
f[a,a] + f[b,b] - f[i,i] - f[j,j]
+ Gamma[idx2B[(a,b)],idx2B[(a,b)]]
+ Gamma[idx2B[(i,j)],idx2B[(i,j)]]
- Gamma[idx2B[(a,i)],idx2B[(a,i)]]
- Gamma[idx2B[(a,j)],idx2B[(a,j)]]
- Gamma[idx2B[(b,i)],idx2B[(b,i)]]
- Gamma[idx2B[(b,j)],idx2B[(b,j)]]
)
val = Gamma[idx2B[(a,b)], idx2B[(i,j)]] / denom
eta2B[idx2B[(a,b)],idx2B[(i,j)]] = val
eta2B[idx2B[(i,j)],idx2B[(a,b)]] = -val
return eta1B, eta2B
def eta_white_mp(f, Gamma, user_data):
dim1B = user_data["dim1B"]
particles = user_data["particles"]
holes = user_data["holes"]
idx2B = user_data["idx2B"]
# one-body part of the generator
eta1B = np.zeros_like(f)
for a in particles:
for i in holes:
denom = f[a,a] - f[i,i]
val = f[a,i]/denom
eta1B[a, i] = val
eta1B[i, a] = -val
# two-body part of the generator
eta2B = np.zeros_like(Gamma)
for a in particles:
for b in particles:
for i in holes:
for j in holes:
denom = (
f[a,a] + f[b,b] - f[i,i] - f[j,j]
)
val = Gamma[idx2B[(a,b)], idx2B[(i,j)]] / denom
eta2B[idx2B[(a,b)],idx2B[(i,j)]] = val
eta2B[idx2B[(i,j)],idx2B[(a,b)]] = -val
return eta1B, eta2B
def eta_white_atan(f, Gamma, user_data):
dim1B = user_data["dim1B"]
particles = user_data["particles"]
holes = user_data["holes"]
idx2B = user_data["idx2B"]
# one-body part of the generator
eta1B = np.zeros_like(f)
for a in particles:
for i in holes:
denom = f[a,a] - f[i,i] + Gamma[idx2B[(a,i)], idx2B[(a,i)]]
val = 0.5 * np.arctan(2 * f[a,i]/denom)
eta1B[a, i] = val
eta1B[i, a] = -val
# two-body part of the generator
eta2B = np.zeros_like(Gamma)
for a in particles:
for b in particles:
for i in holes:
for j in holes:
denom = (
f[a,a] + f[b,b] - f[i,i] - f[j,j]
+ Gamma[idx2B[(a,b)],idx2B[(a,b)]]
+ Gamma[idx2B[(i,j)],idx2B[(i,j)]]
- Gamma[idx2B[(a,i)],idx2B[(a,i)]]
- Gamma[idx2B[(a,j)],idx2B[(a,j)]]
- Gamma[idx2B[(b,i)],idx2B[(b,i)]]
- Gamma[idx2B[(b,j)],idx2B[(b,j)]]
)
val = 0.5 * np.arctan(2 * Gamma[idx2B[(a,b)], idx2B[(i,j)]] / denom)
eta2B[idx2B[(a,b)],idx2B[(i,j)]] = val
eta2B[idx2B[(i,j)],idx2B[(a,b)]] = -val
return eta1B, eta2B
def eta_wegner(f, Gamma, user_data):
dim1B = user_data["dim1B"]
holes = user_data["holes"]
particles = user_data["particles"]
bas2B = user_data["bas2B"]
basph2B = user_data["basph2B"]
idx2B = user_data["idx2B"]
idxph2B = user_data["idxph2B"]
occB_2B = user_data["occB_2B"]
occC_2B = user_data["occC_2B"]
occphA_2B = user_data["occphA_2B"]
# split Hamiltonian in diagonal and off-diagonal parts
fd = np.zeros_like(f)
fod = np.zeros_like(f)
Gammad = np.zeros_like(Gamma)
Gammaod = np.zeros_like(Gamma)
for a in particles:
for i in holes:
fod[a, i] = f[a,i]
fod[i, a] = f[i,a]
fd = f - fod
for a in particles:
for b in particles:
for i in holes:
for j in holes:
Gammaod[idx2B[(a,b)], idx2B[(i,j)]] = Gamma[idx2B[(a,b)], idx2B[(i,j)]]
Gammaod[idx2B[(i,j)], idx2B[(a,b)]] = Gamma[idx2B[(i,j)], idx2B[(a,b)]]
Gammad = Gamma - Gammaod
#############################
# one-body part of the generator
eta1B = np.zeros_like(f)
# 1B - 1B
eta1B += commutator(fd, fod)
# 1B - 2B
for p in range(dim1B):
for q in range(dim1B):
for i in holes:
for a in particles:
eta1B[p,q] += (
fd[i,a] * Gammaod[idx2B[(a, p)], idx2B[(i, q)]]
- fd[a,i] * Gammaod[idx2B[(i, p)], idx2B[(a, q)]]
- fod[i,a] * Gammad[idx2B[(a, p)], idx2B[(i, q)]]
+ fod[a,i] * Gammad[idx2B[(i, p)], idx2B[(a, q)]]
)
# 2B - 2B
# n_a n_b nn_c + nn_a nn_b n_c = n_a n_b + (1 - n_a - n_b) * n_c
GammaGamma = dot(Gammad, dot(occB_2B, Gammaod))
for p in range(dim1B):
for q in range(dim1B):
for i in holes:
eta1B[p,q] += 0.5*(
GammaGamma[idx2B[(i,p)], idx2B[(i,q)]]
- transpose(GammaGamma)[idx2B[(i,p)], idx2B[(i,q)]]
)
GammaGamma = dot(Gammad, dot(occC_2B, Gammaod))
for p in range(dim1B):
for q in range(dim1B):
for r in range(dim1B):
eta1B[p,q] += 0.5*(
GammaGamma[idx2B[(r,p)], idx2B[(r,q)]]
+ transpose(GammaGamma)[idx2B[(r,p)], idx2B[(r,q)]]
)
#############################
# two-body flow equation
eta2B = np.zeros_like(Gamma)
# 1B - 2B
for p in range(dim1B):
for q in range(dim1B):
for r in range(dim1B):
for s in range(dim1B):
for t in range(dim1B):
eta2B[idx2B[(p,q)],idx2B[(r,s)]] += (
fd[p,t] * Gammaod[idx2B[(t,q)],idx2B[(r,s)]]
+ fd[q,t] * Gammaod[idx2B[(p,t)],idx2B[(r,s)]]
- fd[t,r] * Gammaod[idx2B[(p,q)],idx2B[(t,s)]]
- fd[t,s] * Gammaod[idx2B[(p,q)],idx2B[(r,t)]]
- fod[p,t] * Gammad[idx2B[(t,q)],idx2B[(r,s)]]
- fod[q,t] * Gammad[idx2B[(p,t)],idx2B[(r,s)]]
+ fod[t,r] * Gammad[idx2B[(p,q)],idx2B[(t,s)]]
+ fod[t,s] * Gammad[idx2B[(p,q)],idx2B[(r,t)]]
)
# 2B - 2B - particle and hole ladders
# Gammad.occB.Gammaod
GammaGamma = dot(Gammad, dot(occB_2B, Gammaod))
eta2B += 0.5 * (GammaGamma - transpose(GammaGamma))
# 2B - 2B - particle-hole chain
# transform matrices to particle-hole representation and calculate
# Gammad_ph.occA_ph.Gammaod_ph
Gammad_ph = ph_transform_2B(Gammad, bas2B, idx2B, basph2B, idxph2B)
Gammaod_ph = ph_transform_2B(Gammaod, bas2B, idx2B, basph2B, idxph2B)
GammaGamma_ph = dot(Gammad_ph, dot(occphA_2B, Gammaod_ph))
# transform back to standard representation
GammaGamma = inverse_ph_transform_2B(GammaGamma_ph, bas2B, idx2B, basph2B, idxph2B)
# commutator / antisymmetrization
work = np.zeros_like(GammaGamma)
for i1, (i,j) in enumerate(bas2B):
for i2, (k,l) in enumerate(bas2B):
work[i1, i2] -= (
GammaGamma[i1, i2]
- GammaGamma[idx2B[(j,i)], i2]
- GammaGamma[i1, idx2B[(l,k)]]
+ GammaGamma[idx2B[(j,i)], idx2B[(l,k)]]
)
GammaGamma = work
eta2B += GammaGamma
return eta1B, eta2B
#-----------------------------------------------------------------------------------
# derivatives
#-----------------------------------------------------------------------------------
def flow_imsrg2(eta1B, eta2B, f, Gamma, user_data):
dim1B = user_data["dim1B"]
holes = user_data["holes"]
particles = user_data["particles"]
bas2B = user_data["bas2B"]
idx2B = user_data["idx2B"]
basph2B = user_data["basph2B"]
idxph2B = user_data["idxph2B"]
occB_2B = user_data["occB_2B"]
occC_2B = user_data["occC_2B"]
occphA_2B = user_data["occphA_2B"]
#############################
# zero-body flow equation
dE = 0.0
for i in holes:
for a in particles:
dE += eta1B[i,a] * f[a,i] - eta1B[a,i] * f[i,a]
for i in holes:
for j in holes:
for a in particles:
for b in particles:
dE += 0.5 * eta2B[idx2B[(i,j)], idx2B[(a,b)]] * Gamma[idx2B[(a,b)], idx2B[(i,j)]]
#############################
# one-body flow equation
df = np.zeros_like(f)
# 1B - 1B
df += commutator(eta1B, f)
# 1B - 2B
for p in range(dim1B):
for q in range(dim1B):
for i in holes:
for a in particles:
df[p,q] += (
eta1B[i,a] * Gamma[idx2B[(a, p)], idx2B[(i, q)]]
- eta1B[a,i] * Gamma[idx2B[(i, p)], idx2B[(a, q)]]
- f[i,a] * eta2B[idx2B[(a, p)], idx2B[(i, q)]]
+ f[a,i] * eta2B[idx2B[(i, p)], idx2B[(a, q)]]
)
# 2B - 2B
# n_a n_b nn_c + nn_a nn_b n_c = n_a n_b + (1 - n_a - n_b) * n_c
etaGamma = dot(eta2B, dot(occB_2B, Gamma))
for p in range(dim1B):
for q in range(dim1B):
for i in holes:
df[p,q] += 0.5*(
etaGamma[idx2B[(i,p)], idx2B[(i,q)]]
+ transpose(etaGamma)[idx2B[(i,p)], idx2B[(i,q)]]
)
etaGamma = dot(eta2B, dot(occC_2B, Gamma))
for p in range(dim1B):
for q in range(dim1B):
for r in range(dim1B):
df[p,q] += 0.5*(
etaGamma[idx2B[(r,p)], idx2B[(r,q)]]
+ transpose(etaGamma)[idx2B[(r,p)], idx2B[(r,q)]]
)
#############################
# two-body flow equation
dGamma = np.zeros_like(Gamma)
# 1B - 2B
for p in range(dim1B):
for q in range(dim1B):
for r in range(dim1B):
for s in range(dim1B):
for t in range(dim1B):
dGamma[idx2B[(p,q)],idx2B[(r,s)]] += (
eta1B[p,t] * Gamma[idx2B[(t,q)],idx2B[(r,s)]]
+ eta1B[q,t] * Gamma[idx2B[(p,t)],idx2B[(r,s)]]
- eta1B[t,r] * Gamma[idx2B[(p,q)],idx2B[(t,s)]]
- eta1B[t,s] * Gamma[idx2B[(p,q)],idx2B[(r,t)]]
- f[p,t] * eta2B[idx2B[(t,q)],idx2B[(r,s)]]
- f[q,t] * eta2B[idx2B[(p,t)],idx2B[(r,s)]]
+ f[t,r] * eta2B[idx2B[(p,q)],idx2B[(t,s)]]
+ f[t,s] * eta2B[idx2B[(p,q)],idx2B[(r,t)]]
)
# 2B - 2B - particle and hole ladders
# eta2B.occB.Gamma
etaGamma = dot(eta2B, dot(occB_2B, Gamma))
dGamma += 0.5 * (etaGamma + transpose(etaGamma))
# 2B - 2B - particle-hole chain
# transform matrices to particle-hole representation and calculate
# eta2B_ph.occA_ph.Gamma_ph
eta2B_ph = ph_transform_2B(eta2B, bas2B, idx2B, basph2B, idxph2B)
Gamma_ph = ph_transform_2B(Gamma, bas2B, idx2B, basph2B, idxph2B)
etaGamma_ph = dot(eta2B_ph, dot(occphA_2B, Gamma_ph))
# transform back to standard representation
etaGamma = inverse_ph_transform_2B(etaGamma_ph, bas2B, idx2B, basph2B, idxph2B)
# commutator / antisymmetrization
work = np.zeros_like(etaGamma)
for i1, (i,j) in enumerate(bas2B):
for i2, (k,l) in enumerate(bas2B):
work[i1, i2] -= (
etaGamma[i1, i2]
- etaGamma[idx2B[(j,i)], i2]
- etaGamma[i1, idx2B[(l,k)]]
+ etaGamma[idx2B[(j,i)], idx2B[(l,k)]]
)
etaGamma = work
dGamma += etaGamma
return dE, df, dGamma
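# Note: flow_imsrg2 evaluates the zero-, one- and two-body parts of the
# commutator [eta, H] in the IMSRG(2) truncation; the returned (dE, df, dGamma)
# are fed back to the ODE solver as the right-hand side dH/ds = [eta(s), H(s)].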
#-----------------------------------------------------------------------------------
# derivative wrapper
#-----------------------------------------------------------------------------------
def get_operator_from_y(y, dim1B, dim2B):
# reshape the solution vector into 0B, 1B, 2B pieces
ptr = 0
zero_body = y[ptr]
ptr += 1
one_body = reshape(y[ptr:ptr+dim1B*dim1B], (dim1B, dim1B))
ptr += dim1B*dim1B
two_body = reshape(y[ptr:ptr+dim2B*dim2B], (dim2B, dim2B))
return zero_body,one_body,two_body
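# Illustration (with the dimensions used below): for dim1B = 8 one has
# dim2B = 64, so the flattened vector y holds 1 + 8*8 + 64*64 = 4161 entries;
# get_operator_from_y simply inverts the np.append/reshape packing used in
# main() and derivative_wrapper to recover E (scalar), f (8x8) and Gamma (64x64).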
def derivative_wrapper(t, y, user_data):
dim1B = user_data["dim1B"]
dim2B = dim1B*dim1B
holes = user_data["holes"]
particles = user_data["particles"]
bas1B = user_data["bas1B"]
bas2B = user_data["bas2B"]
basph2B = user_data["basph2B"]
idx2B = user_data["idx2B"]
idxph2B = user_data["idxph2B"]
occA_2B = user_data["occA_2B"]
occB_2B = user_data["occB_2B"]
occC_2B = user_data["occC_2B"]
occphA_2B = user_data["occphA_2B"]
calc_eta = user_data["calc_eta"]
calc_rhs = user_data["calc_rhs"]
# extract operator pieces from solution vector
E, f, Gamma = get_operator_from_y(y, dim1B, dim2B)
# calculate the generator
eta1B, eta2B = calc_eta(f, Gamma, user_data)
# calculate the right-hand side
dE, df, dGamma = calc_rhs(eta1B, eta2B, f, Gamma, user_data)
# convert derivatives into linear array
dy = np.append([dE], np.append(reshape(df, -1), reshape(dGamma, -1)))
# share data
user_data["dE"] = dE
user_data["eta_norm"] = np.linalg.norm(eta1B,ord='fro')+np.linalg.norm(eta2B,ord='fro')
return dy
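# Note: derivative_wrapper has the call signature expected by scipy's ode
# solver, f(t, y, *f_args); the extra user_data argument is supplied via
# solver.set_f_params(user_data) in main(), and the dictionary doubles as a
# channel for passing dE/ds and ||eta|| back to the main loop for printing.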
#-----------------------------------------------------------------------------------
# pairing Hamiltonian
#-----------------------------------------------------------------------------------
def pairing_hamiltonian(delta, g, user_data):
bas1B = user_data["bas1B"]
bas2B = user_data["bas2B"]
idx2B = user_data["idx2B"]
dim = len(bas1B)
H1B = np.zeros((dim,dim))
for i in bas1B:
H1B[i,i] = delta*np.floor_divide(i, 2)
dim = len(bas2B)
H2B = np.zeros((dim, dim))
# spin up states have even indices, spin down the next odd index
for (i, j) in bas2B:
if (i % 2 == 0 and j == i+1):
for (k, l) in bas2B:
if (k % 2 == 0 and l == k+1):
H2B[idx2B[(i,j)],idx2B[(k,l)]] = -0.5*g
H2B[idx2B[(j,i)],idx2B[(k,l)]] = 0.5*g
H2B[idx2B[(i,j)],idx2B[(l,k)]] = 0.5*g
H2B[idx2B[(j,i)],idx2B[(l,k)]] = -0.5*g
return H1B, H2B
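# Example (for illustration): with delta = 1.0 the single-particle energies are
# H1B = diag(0, 0, 1, 1, 2, 2, 3, 3), i.e. four doubly degenerate levels, and
# the only nonzero two-body matrix elements connect the spin-paired states
# (0,1), (2,3), (4,5), (6,7) with strength -g/2 (up to antisymmetry signs).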
#-----------------------------------------------------------------------------------
# normal-ordered pairing Hamiltonian
#-----------------------------------------------------------------------------------
def normal_order(H1B, H2B, user_data):
bas1B = user_data["bas1B"]
bas2B = user_data["bas2B"]
idx2B = user_data["idx2B"]
particles = user_data["particles"]
holes = user_data["holes"]
# 0B part
E = 0.0
for i in holes:
E += H1B[i,i]
for i in holes:
for j in holes:
E += 0.5*H2B[idx2B[(i,j)],idx2B[(i,j)]]
# 1B part
f = H1B
for i in bas1B:
for j in bas1B:
for h in holes:
f[i,j] += H2B[idx2B[(i,h)],idx2B[(j,h)]]
# 2B part
Gamma = H2B
return E, f, Gamma
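# Note: these are the standard normal-ordering expressions with respect to the
# reference state,
#   E      = sum_i H1B[i,i] + 1/2 sum_ij <ij|H2B|ij>    (i, j in holes)
#   f[p,q] = H1B[p,q] + sum_h <ph|H2B|qh>               (h in holes)
#   Gamma  = H2B
# Beware that f and Gamma are aliases of H1B and H2B (no copy is made), so the
# hole-summation loop above modifies H1B in place.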
#-----------------------------------------------------------------------------------
# Perturbation theory
#-----------------------------------------------------------------------------------
def calc_mbpt2(f, Gamma, user_data):
DE2 = 0.0
particles = user_data["particles"]
holes = user_data["holes"]
idx2B = user_data["idx2B"]
for i in holes:
for j in holes:
for a in particles:
for b in particles:
denom = f[i,i] + f[j,j] - f[a,a] - f[b,b]
me = Gamma[idx2B[(a,b)],idx2B[(i,j)]]
DE2 += 0.25*me*me/denom
return DE2
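# Note: this is the standard second-order correction
#   DE(2) = 1/4 sum_{abij} |Gamma_{abij}|^2 / (f_ii + f_jj - f_aa - f_bb),
# evaluated with the flowing f and Gamma; as s grows the off-diagonal Gamma is
# suppressed, so DE(2) -> 0 and E(s) absorbs the correlation energy.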
def calc_mbpt3(f, Gamma, user_data):
particles = user_data["particles"]
holes = user_data["holes"]
idx2B = user_data["idx2B"]
# DE3 = 0.0
DE3pp = 0.0
DE3hh = 0.0
DE3ph = 0.0
for a in particles:
for b in particles:
for c in particles:
for d in particles:
for i in holes:
for j in holes:
denom = (f[i,i] + f[j,j] - f[a,a] - f[b,b])*(f[i,i] + f[j,j] - f[c,c] - f[d,d])
me = (Gamma[idx2B[(i,j)],idx2B[(a,b)]]*Gamma[idx2B[(a,b)],idx2B[(c,d)]]*
Gamma[idx2B[(c,d)],idx2B[(i,j)]])
DE3pp += 0.125*me/denom
for i in holes:
for j in holes:
for k in holes:
for l in holes:
for a in particles:
for b in particles:
denom = (f[i,i] + f[j,j] - f[a,a] - f[b,b])*(f[k,k] + f[l,l] - f[a,a] - f[b,b])
me = (Gamma[idx2B[(a,b)],idx2B[(k,l)]]*Gamma[idx2B[(k,l)],idx2B[(i,j)]]*
Gamma[idx2B[(i,j)],idx2B[(a,b)]])
DE3hh += 0.125*me/denom
for i in holes:
for j in holes:
for k in holes:
for a in particles:
for b in particles:
for c in particles:
denom = (f[i,i] + f[j,j] - f[a,a] - f[b,b])*(f[k,k] + f[j,j] - f[a,a] - f[c,c])
me = (Gamma[idx2B[(i,j)],idx2B[(a,b)]]*Gamma[idx2B[(k,b)],idx2B[(i,c)]]*
Gamma[idx2B[(a,c)],idx2B[(k,j)]])
DE3ph -= me/denom
return DE3pp+DE3hh+DE3ph
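# Note: the three contributions are the particle-particle (pp), hole-hole (hh)
# and particle-hole (ph) third-order diagrams, with symmetry factors 1/8, 1/8
# and an overall minus sign for the ph term; like DE(2) they are computed from
# the flowing f and Gamma and serve as a diagnostic of residual correlations.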
#------------------------------------------------------------------------------
# Main program
#------------------------------------------------------------------------------
def main():
# grab delta and g from the command line
delta = float(argv[1])
g = float(argv[2])
particles = 4
# setup shared data
dim1B = 8
# this defines the reference state
# 1st state
holes = [0,1,2,3]
particles = [4,5,6,7]
# 2nd state
# holes = [0,1,4,5]
# particles = [2,3,6,7]
# 3rd state
# holes = [0,1,6,7]
# particles = [2,3,4,5]
# basis definitions
bas1B = range(dim1B)
bas2B = construct_basis_2B(holes, particles)
basph2B = construct_basis_ph2B(holes, particles)
idx2B = construct_index_2B(bas2B)
idxph2B = construct_index_2B(basph2B)
# occupation number matrices
occ1B = construct_occupation_1B(bas1B, holes, particles)
occA_2B = construct_occupationA_2B(bas2B, occ1B)
occB_2B = construct_occupationB_2B(bas2B, occ1B)
occC_2B = construct_occupationC_2B(bas2B, occ1B)
occphA_2B = construct_occupationA_2B(basph2B, occ1B)
# store shared data in a dictionary, so we can avoid passing the basis
# lookups etc. as separate parameters all the time
user_data = {
"dim1B": dim1B,
"holes": holes,
"particles": particles,
"bas1B": bas1B,
"bas2B": bas2B,
"basph2B": basph2B,
"idx2B": idx2B,
"idxph2B": idxph2B,
"occ1B": occ1B,
"occA_2B": occA_2B,
"occB_2B": occB_2B,
"occC_2B": occC_2B,
"occphA_2B": occphA_2B,
"eta_norm": 0.0, # variables for sharing data between ODE solver
"dE": 0.0, # and main routine
"calc_eta": eta_white_atan, # specify the generator (function object)
"calc_rhs": flow_imsrg2 # specify the right-hand side and truncation
}
# set up initial Hamiltonian
H1B, H2B = pairing_hamiltonian(delta, g, user_data)
E, f, Gamma = normal_order(H1B, H2B, user_data)
# reshape Hamiltonian into a linear array (initial ODE vector)
y0 = np.append([E], np.append(reshape(f, -1), reshape(Gamma, -1)))
# integrate flow equations
solver = ode(derivative_wrapper,jac=None)
solver.set_integrator('vode', method='bdf', order=5, nsteps=1000)
solver.set_f_params(user_data)
solver.set_initial_value(y0, 0.)
sfinal = 50
ds = 0.1
print("%-8s %-14s %-14s %-14s %-14s %-14s %-14s %-14s %-14s"%(
"s", "E" , "DE(2)", "DE(3)", "E+DE", "dE/ds",
"||eta||", "||fod||", "||Gammaod||"))
print("-" * 148)
while solver.successful() and solver.t < sfinal:
ys = solver.integrate(sfinal, step=True)
dim2B = dim1B*dim1B
E, f, Gamma = get_operator_from_y(ys, dim1B, dim2B)
DE2 = calc_mbpt2(f, Gamma, user_data)
DE3 = calc_mbpt3(f, Gamma, user_data)
norm_fod = calc_fod_norm(f, user_data)
norm_Gammaod = calc_Gammaod_norm(Gamma, user_data)
print("%8.5f %14.8f %14.8f %14.8f %14.8f %14.8f %14.8f %14.8f %14.8f"%(
solver.t, E , DE2, DE3, E+DE2+DE3, user_data["dE"], user_data["eta_norm"], norm_fod, norm_Gammaod))
if abs(DE2/E) < 10e-8: break
return
#------------------------------------------------------------------------------
# make executable
#------------------------------------------------------------------------------
if __name__ == "__main__":
main()
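# Example invocation (script name assumed):
#   python imsrg_pairing.py 1.0 0.5
# where the first argument is the level spacing delta and the second the
# pairing strength g; the program prints one table row per ODE step and stops
# once the MBPT(2) correction DE(2)/E falls below the threshold in main().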
range(dim1B):\n dGamma[idx2B[p, q], idx2B[r, s]] += eta1B[p, t\n ] * Gamma[idx2B[t, q], idx2B[r, s]] + eta1B[q, t\n ] * Gamma[idx2B[p, t], idx2B[r, s]] - eta1B[t, r\n ] * Gamma[idx2B[p, q], idx2B[t, s]] - eta1B[t, s\n ] * Gamma[idx2B[p, q], idx2B[r, t]] - f[p, t\n ] * eta2B[idx2B[t, q], idx2B[r, s]] - f[q, t\n ] * eta2B[idx2B[p, t], idx2B[r, s]] + f[t, r\n ] * eta2B[idx2B[p, q], idx2B[t, s]] + f[t, s\n ] * eta2B[idx2B[p, q], idx2B[r, t]]\n etaGamma = dot(eta2B, dot(occB_2B, Gamma))\n dGamma += 0.5 * (etaGamma + transpose(etaGamma))\n eta2B_ph = ph_transform_2B(eta2B, bas2B, idx2B, basph2B, idxph2B)\n Gamma_ph = ph_transform_2B(Gamma, bas2B, idx2B, basph2B, idxph2B)\n etaGamma_ph = dot(eta2B_ph, dot(occphA_2B, Gamma_ph))\n etaGamma = inverse_ph_transform_2B(etaGamma_ph, bas2B, idx2B, basph2B,\n idxph2B)\n work = np.zeros_like(etaGamma)\n for i1, (i, j) in enumerate(bas2B):\n for i2, (k, l) in enumerate(bas2B):\n work[i1, i2] -= etaGamma[i1, i2] - etaGamma[idx2B[j, i], i2\n ] - etaGamma[i1, idx2B[l, k]] + etaGamma[idx2B[j, i], idx2B\n [l, k]]\n etaGamma = work\n dGamma += etaGamma\n return dE, df, dGamma\n\n\ndef get_operator_from_y(y, dim1B, dim2B):\n ptr = 0\n zero_body = y[ptr]\n ptr += 1\n one_body = reshape(y[ptr:ptr + dim1B * dim1B], (dim1B, dim1B))\n ptr += dim1B * dim1B\n two_body = reshape(y[ptr:ptr + dim2B * dim2B], (dim2B, dim2B))\n return zero_body, one_body, two_body\n\n\ndef derivative_wrapper(t, y, user_data):\n dim1B = user_data['dim1B']\n dim2B = dim1B * dim1B\n holes = user_data['holes']\n particles = user_data['particles']\n bas1B = user_data['bas1B']\n bas2B = user_data['bas2B']\n basph2B = user_data['basph2B']\n idx2B = user_data['idx2B']\n idxph2B = user_data['idxph2B']\n occA_2B = user_data['occA_2B']\n occB_2B = user_data['occB_2B']\n occC_2B = user_data['occC_2B']\n occphA_2B = user_data['occphA_2B']\n calc_eta = user_data['calc_eta']\n calc_rhs = user_data['calc_rhs']\n E, f, Gamma = get_operator_from_y(y, dim1B, dim2B)\n eta1B, eta2B = calc_eta(f, Gamma, user_data)\n dE, df, dGamma = calc_rhs(eta1B, eta2B, f, Gamma, user_data)\n dy = np.append([dE], np.append(reshape(df, -1), reshape(dGamma, -1)))\n user_data['dE'] = dE\n user_data['eta_norm'] = np.linalg.norm(eta1B, ord='fro') + np.linalg.norm(\n eta2B, ord='fro')\n return dy\n\n\ndef pairing_hamiltonian(delta, g, user_data):\n bas1B = user_data['bas1B']\n bas2B = user_data['bas2B']\n idx2B = user_data['idx2B']\n dim = len(bas1B)\n H1B = np.zeros((dim, dim))\n for i in bas1B:\n H1B[i, i] = delta * np.floor_divide(i, 2)\n dim = len(bas2B)\n H2B = np.zeros((dim, dim))\n for i, j in bas2B:\n if i % 2 == 0 and j == i + 1:\n for k, l in bas2B:\n if k % 2 == 0 and l == k + 1:\n H2B[idx2B[i, j], idx2B[k, l]] = -0.5 * g\n H2B[idx2B[j, i], idx2B[k, l]] = 0.5 * g\n H2B[idx2B[i, j], idx2B[l, k]] = 0.5 * g\n H2B[idx2B[j, i], idx2B[l, k]] = -0.5 * g\n return H1B, H2B\n\n\n<mask token>\n\n\ndef calc_mbpt2(f, Gamma, user_data):\n DE2 = 0.0\n particles = user_data['particles']\n holes = user_data['holes']\n idx2B = user_data['idx2B']\n for i in holes:\n for j in holes:\n for a in particles:\n for b in particles:\n denom = f[i, i] + f[j, j] - f[a, a] - f[b, b]\n me = Gamma[idx2B[a, b], idx2B[i, j]]\n DE2 += 0.25 * me * me / denom\n return DE2\n\n\n<mask token>\n\n\ndef main():\n delta = float(argv[1])\n g = float(argv[2])\n particles = 4\n dim1B = 8\n holes = [0, 1, 2, 3]\n particles = [4, 5, 6, 7]\n bas1B = range(dim1B)\n bas2B = construct_basis_2B(holes, particles)\n basph2B = construct_basis_ph2B(holes, particles)\n idx2B = 
construct_index_2B(bas2B)\n idxph2B = construct_index_2B(basph2B)\n occ1B = construct_occupation_1B(bas1B, holes, particles)\n occA_2B = construct_occupationA_2B(bas2B, occ1B)\n occB_2B = construct_occupationB_2B(bas2B, occ1B)\n occC_2B = construct_occupationC_2B(bas2B, occ1B)\n occphA_2B = construct_occupationA_2B(basph2B, occ1B)\n user_data = {'dim1B': dim1B, 'holes': holes, 'particles': particles,\n 'bas1B': bas1B, 'bas2B': bas2B, 'basph2B': basph2B, 'idx2B': idx2B,\n 'idxph2B': idxph2B, 'occ1B': occ1B, 'occA_2B': occA_2B, 'occB_2B':\n occB_2B, 'occC_2B': occC_2B, 'occphA_2B': occphA_2B, 'eta_norm': \n 0.0, 'dE': 0.0, 'calc_eta': eta_white_atan, 'calc_rhs': flow_imsrg2}\n H1B, H2B = pairing_hamiltonian(delta, g, user_data)\n E, f, Gamma = normal_order(H1B, H2B, user_data)\n y0 = np.append([E], np.append(reshape(f, -1), reshape(Gamma, -1)))\n solver = ode(derivative_wrapper, jac=None)\n solver.set_integrator('vode', method='bdf', order=5, nsteps=1000)\n solver.set_f_params(user_data)\n solver.set_initial_value(y0, 0.0)\n sfinal = 50\n ds = 0.1\n print(\n '%-8s %-14s %-14s %-14s %-14s %-14s %-14s %-14s %-14s'\n % ('s', 'E', 'DE(2)', 'DE(3)', 'E+DE', 'dE/ds', '||eta||',\n '||fod||', '||Gammaod||'))\n print('-' * 148)\n while solver.successful() and solver.t < sfinal:\n ys = solver.integrate(sfinal, step=True)\n dim2B = dim1B * dim1B\n E, f, Gamma = get_operator_from_y(ys, dim1B, dim2B)\n DE2 = calc_mbpt2(f, Gamma, user_data)\n DE3 = calc_mbpt3(f, Gamma, user_data)\n norm_fod = calc_fod_norm(f, user_data)\n norm_Gammaod = calc_Gammaod_norm(Gamma, user_data)\n print(\n '%8.5f %14.8f %14.8f %14.8f %14.8f %14.8f %14.8f %14.8f %14.8f'\n % (solver.t, E, DE2, DE3, E + DE2 + DE3, user_data['dE'],\n user_data['eta_norm'], norm_fod, norm_Gammaod))\n if abs(DE2 / E) < 1e-07:\n break\n return\n\n\n<mask token>\n",
"step-5": "#!/usr/bin/env python\n\n#------------------------------------------------------------------------------\n# imsrg_pairing.py\n#\n# author: H. Hergert \n# version: 1.5.0\n# date: Dec 6, 2016\n# \n# tested with Python v2.7\n# \n# Solves the pairing model for four particles in a basis of four doubly \n# degenerate states by means of an In-Medium Similarity Renormalization \n# Group (IMSRG) flow.\n#\n#------------------------------------------------------------------------------\n\nimport numpy as np\nfrom numpy import array, dot, diag, reshape, transpose\nfrom scipy.linalg import eigvalsh\nfrom scipy.integrate import odeint, ode\n\nfrom sys import argv\n\n#-----------------------------------------------------------------------------------\n# basis and index functions\n#-----------------------------------------------------------------------------------\n\ndef construct_basis_2B(holes, particles):\n basis = []\n for i in holes:\n for j in holes:\n basis.append((i, j))\n\n for i in holes:\n for a in particles:\n basis.append((i, a))\n\n for a in particles:\n for i in holes:\n basis.append((a, i))\n\n for a in particles:\n for b in particles:\n basis.append((a, b))\n\n return basis\n\n\ndef construct_basis_ph2B(holes, particles):\n basis = []\n for i in holes:\n for j in holes:\n basis.append((i, j))\n\n for i in holes:\n for a in particles:\n basis.append((i, a))\n\n for a in particles:\n for i in holes:\n basis.append((a, i))\n\n for a in particles:\n for b in particles:\n basis.append((a, b))\n\n return basis\n\n\n#\n# We use dictionaries for the reverse lookup of state indices\n#\ndef construct_index_2B(bas2B):\n index = { }\n for i, state in enumerate(bas2B):\n index[state] = i\n\n return index\n\n\n\n#-----------------------------------------------------------------------------------\n# transform matrices to particle-hole representation\n#-----------------------------------------------------------------------------------\ndef ph_transform_2B(Gamma, bas2B, idx2B, basph2B, idxph2B):\n dim = len(basph2B)\n Gamma_ph = np.zeros((dim, dim))\n\n for i1, (a,b) in enumerate(basph2B):\n for i2, (c, d) in enumerate(basph2B):\n Gamma_ph[i1, i2] -= Gamma[idx2B[(a,d)], idx2B[(c,b)]]\n\n return Gamma_ph\n\ndef inverse_ph_transform_2B(Gamma_ph, bas2B, idx2B, basph2B, idxph2B):\n dim = len(bas2B)\n Gamma = np.zeros((dim, dim))\n\n for i1, (a,b) in enumerate(bas2B):\n for i2, (c, d) in enumerate(bas2B):\n Gamma[i1, i2] -= Gamma_ph[idxph2B[(a,d)], idxph2B[(c,b)]]\n \n return Gamma\n\n#-----------------------------------------------------------------------------------\n# commutator of matrices\n#-----------------------------------------------------------------------------------\ndef commutator(a,b):\n return dot(a,b) - dot(b,a)\n\n#-----------------------------------------------------------------------------------\n# norms of off-diagonal Hamiltonian pieces\n#-----------------------------------------------------------------------------------\ndef calc_fod_norm(f, user_data):\n particles = user_data[\"particles\"]\n holes = user_data[\"holes\"]\n \n norm = 0.0\n for a in particles:\n for i in holes:\n norm += f[a,i]**2 + f[i,a]**2\n\n return np.sqrt(norm)\n\ndef calc_Gammaod_norm(Gamma, user_data):\n particles = user_data[\"particles\"]\n holes = user_data[\"holes\"]\n idx2B = user_data[\"idx2B\"]\n\n norm = 0.0\n for a in particles: \n for b in particles:\n for i in holes:\n for j in holes:\n norm += Gamma[idx2B[(a,b)],idx2B[(i,j)]]**2 + Gamma[idx2B[(i,j)],idx2B[(a,b)]]**2\n\n return 
np.sqrt(norm)\n\n#-----------------------------------------------------------------------------------\n# occupation number matrices\n#-----------------------------------------------------------------------------------\ndef construct_occupation_1B(bas1B, holes, particles):\n dim = len(bas1B)\n occ = np.zeros(dim)\n\n for i in holes:\n occ[i] = 1.\n\n return occ\n\n# diagonal matrix: n_a - n_b\ndef construct_occupationA_2B(bas2B, occ1B):\n dim = len(bas2B)\n occ = np.zeros((dim,dim))\n\n for i1, (i,j) in enumerate(bas2B):\n occ[i1, i1] = occ1B[i] - occ1B[j]\n\n return occ\n\n\n# diagonal matrix: 1 - n_a - n_b\ndef construct_occupationB_2B(bas2B, occ1B):\n dim = len(bas2B)\n occ = np.zeros((dim,dim))\n\n for i1, (i,j) in enumerate(bas2B):\n occ[i1, i1] = 1. - occ1B[i] - occ1B[j]\n\n return occ\n\n# diagonal matrix: n_a * n_b\ndef construct_occupationC_2B(bas2B, occ1B):\n dim = len(bas2B)\n occ = np.zeros((dim,dim))\n\n for i1, (i,j) in enumerate(bas2B):\n occ[i1, i1] = occ1B[i] * occ1B[j]\n\n return occ\n\n#-----------------------------------------------------------------------------------\n# generators\n#-----------------------------------------------------------------------------------\ndef eta_brillouin(f, Gamma, user_data):\n dim1B = user_data[\"dim1B\"]\n particles = user_data[\"particles\"]\n holes = user_data[\"holes\"]\n idx2B = user_data[\"idx2B\"]\n\n # one-body part of the generator\n eta1B = np.zeros_like(f)\n\n for a in particles:\n for i in holes:\n # (1-n_a)n_i - n_a(1-n_i) = n_i - n_a\n eta1B[a, i] = f[a,i]\n eta1B[i, a] = -f[a,i]\n\n # two-body part of the generator\n eta2B = np.zeros_like(Gamma)\n\n for a in particles:\n for b in particles:\n for i in holes:\n for j in holes:\n val = Gamma[idx2B[(a,b)], idx2B[(i,j)]]\n\n eta2B[idx2B[(a,b)],idx2B[(i,j)]] = val\n eta2B[idx2B[(i,j)],idx2B[(a,b)]] = -val\n\n return eta1B, eta2B\n\ndef eta_imtime(f, Gamma, user_data):\n dim1B = user_data[\"dim1B\"]\n particles = user_data[\"particles\"]\n holes = user_data[\"holes\"]\n idx2B = user_data[\"idx2B\"]\n\n # one-body part of the generator\n eta1B = np.zeros_like(f)\n\n for a in particles:\n for i in holes:\n dE = f[a,a] - f[i,i] + Gamma[idx2B[(a,i)], idx2B[(a,i)]]\n val = np.sign(dE)*f[a,i]\n eta1B[a, i] = val\n eta1B[i, a] = -val \n\n # two-body part of the generator\n eta2B = np.zeros_like(Gamma)\n\n for a in particles:\n for b in particles:\n for i in holes:\n for j in holes:\n dE = ( \n f[a,a] + f[b,b] - f[i,i] - f[j,j] \n + Gamma[idx2B[(a,b)],idx2B[(a,b)]] \n + Gamma[idx2B[(i,j)],idx2B[(i,j)]]\n - Gamma[idx2B[(a,i)],idx2B[(a,i)]] \n - Gamma[idx2B[(a,j)],idx2B[(a,j)]] \n - Gamma[idx2B[(b,i)],idx2B[(b,i)]] \n - Gamma[idx2B[(b,j)],idx2B[(b,j)]] \n )\n\n val = np.sign(dE)*Gamma[idx2B[(a,b)], idx2B[(i,j)]]\n\n eta2B[idx2B[(a,b)],idx2B[(i,j)]] = val\n eta2B[idx2B[(i,j)],idx2B[(a,b)]] = -val\n\n return eta1B, eta2B\n\n\ndef eta_white(f, Gamma, user_data):\n dim1B = user_data[\"dim1B\"]\n particles = user_data[\"particles\"]\n holes = user_data[\"holes\"]\n idx2B = user_data[\"idx2B\"]\n\n # one-body part of the generator\n eta1B = np.zeros_like(f)\n\n for a in particles:\n for i in holes:\n denom = f[a,a] - f[i,i] + Gamma[idx2B[(a,i)], idx2B[(a,i)]]\n val = f[a,i]/denom\n eta1B[a, i] = val\n eta1B[i, a] = -val \n\n # two-body part of the generator\n eta2B = np.zeros_like(Gamma)\n\n for a in particles:\n for b in particles:\n for i in holes:\n for j in holes:\n denom = ( \n f[a,a] + f[b,b] - f[i,i] - f[j,j] \n + Gamma[idx2B[(a,b)],idx2B[(a,b)]] \n + Gamma[idx2B[(i,j)],idx2B[(i,j)]]\n - 
Gamma[idx2B[(a,i)],idx2B[(a,i)]] \n - Gamma[idx2B[(a,j)],idx2B[(a,j)]] \n - Gamma[idx2B[(b,i)],idx2B[(b,i)]] \n - Gamma[idx2B[(b,j)],idx2B[(b,j)]] \n )\n\n val = Gamma[idx2B[(a,b)], idx2B[(i,j)]] / denom\n\n eta2B[idx2B[(a,b)],idx2B[(i,j)]] = val\n eta2B[idx2B[(i,j)],idx2B[(a,b)]] = -val\n\n return eta1B, eta2B\n\n\ndef eta_white_mp(f, Gamma, user_data):\n dim1B = user_data[\"dim1B\"]\n particles = user_data[\"particles\"]\n holes = user_data[\"holes\"]\n idx2B = user_data[\"idx2B\"]\n\n # one-body part of the generator\n eta1B = np.zeros_like(f)\n\n for a in particles:\n for i in holes:\n denom = f[a,a] - f[i,i]\n val = f[a,i]/denom\n eta1B[a, i] = val\n eta1B[i, a] = -val \n\n # two-body part of the generator\n eta2B = np.zeros_like(Gamma)\n\n for a in particles:\n for b in particles:\n for i in holes:\n for j in holes:\n denom = ( \n f[a,a] + f[b,b] - f[i,i] - f[j,j] \n )\n\n val = Gamma[idx2B[(a,b)], idx2B[(i,j)]] / denom\n\n eta2B[idx2B[(a,b)],idx2B[(i,j)]] = val\n eta2B[idx2B[(i,j)],idx2B[(a,b)]] = -val\n\n return eta1B, eta2B\n\ndef eta_white_atan(f, Gamma, user_data):\n dim1B = user_data[\"dim1B\"]\n particles = user_data[\"particles\"]\n holes = user_data[\"holes\"]\n idx2B = user_data[\"idx2B\"]\n\n # one-body part of the generator\n eta1B = np.zeros_like(f)\n\n for a in particles:\n for i in holes:\n denom = f[a,a] - f[i,i] + Gamma[idx2B[(a,i)], idx2B[(a,i)]]\n val = 0.5 * np.arctan(2 * f[a,i]/denom)\n eta1B[a, i] = val\n eta1B[i, a] = -val \n\n # two-body part of the generator\n eta2B = np.zeros_like(Gamma)\n\n for a in particles:\n for b in particles:\n for i in holes:\n for j in holes:\n denom = ( \n f[a,a] + f[b,b] - f[i,i] - f[j,j] \n + Gamma[idx2B[(a,b)],idx2B[(a,b)]] \n + Gamma[idx2B[(i,j)],idx2B[(i,j)]] \n - Gamma[idx2B[(a,i)],idx2B[(a,i)]] \n - Gamma[idx2B[(a,j)],idx2B[(a,j)]] \n - Gamma[idx2B[(b,i)],idx2B[(b,i)]] \n - Gamma[idx2B[(b,j)],idx2B[(b,j)]] \n )\n\n val = 0.5 * np.arctan(2 * Gamma[idx2B[(a,b)], idx2B[(i,j)]] / denom)\n\n eta2B[idx2B[(a,b)],idx2B[(i,j)]] = val\n eta2B[idx2B[(i,j)],idx2B[(a,b)]] = -val\n\n return eta1B, eta2B\n\n\ndef eta_wegner(f, Gamma, user_data):\n\n dim1B = user_data[\"dim1B\"]\n holes = user_data[\"holes\"]\n particles = user_data[\"particles\"]\n bas2B = user_data[\"bas2B\"]\n basph2B = user_data[\"basph2B\"]\n idx2B = user_data[\"idx2B\"]\n idxph2B = user_data[\"idxph2B\"]\n occB_2B = user_data[\"occB_2B\"]\n occC_2B = user_data[\"occC_2B\"]\n occphA_2B = user_data[\"occphA_2B\"]\n\n\n # split Hamiltonian in diagonal and off-diagonal parts\n fd = np.zeros_like(f)\n fod = np.zeros_like(f)\n Gammad = np.zeros_like(Gamma)\n Gammaod = np.zeros_like(Gamma)\n\n for a in particles:\n for i in holes:\n fod[a, i] = f[a,i]\n fod[i, a] = f[i,a]\n fd = f - fod\n\n for a in particles:\n for b in particles:\n for i in holes:\n for j in holes:\n Gammaod[idx2B[(a,b)], idx2B[(i,j)]] = Gamma[idx2B[(a,b)], idx2B[(i,j)]]\n Gammaod[idx2B[(i,j)], idx2B[(a,b)]] = Gamma[idx2B[(i,j)], idx2B[(a,b)]]\n Gammad = Gamma - Gammaod\n\n\n ############################# \n # one-body part of the generator\n eta1B = np.zeros_like(f)\n\n # 1B - 1B\n eta1B += commutator(fd, fod)\n\n # 1B - 2B\n for p in range(dim1B):\n for q in range(dim1B):\n for i in holes:\n for a in particles:\n eta1B[p,q] += (\n fd[i,a] * Gammaod[idx2B[(a, p)], idx2B[(i, q)]] \n - fd[a,i] * Gammaod[idx2B[(i, p)], idx2B[(a, q)]] \n - fod[i,a] * Gammad[idx2B[(a, p)], idx2B[(i, q)]] \n + fod[a,i] * Gammad[idx2B[(i, p)], idx2B[(a, q)]]\n )\n\n # 2B - 2B\n # n_a n_b nn_c + nn_a nn_b n_c = n_a n_b + (1 - n_a 
- n_b) * n_c\n GammaGamma = dot(Gammad, dot(occB_2B, Gammaod))\n for p in range(dim1B):\n for q in range(dim1B):\n for i in holes:\n eta1B[p,q] += 0.5*(\n GammaGamma[idx2B[(i,p)], idx2B[(i,q)]] \n - transpose(GammaGamma)[idx2B[(i,p)], idx2B[(i,q)]]\n )\n\n GammaGamma = dot(Gammad, dot(occC_2B, Gammaod))\n for p in range(dim1B):\n for q in range(dim1B):\n for r in range(dim1B):\n eta1B[p,q] += 0.5*(\n GammaGamma[idx2B[(r,p)], idx2B[(r,q)]] \n + transpose(GammaGamma)[idx2B[(r,p)], idx2B[(r,q)]] \n )\n\n\n ############################# \n # two-body flow equation \n eta2B = np.zeros_like(Gamma)\n\n # 1B - 2B\n for p in range(dim1B):\n for q in range(dim1B):\n for r in range(dim1B):\n for s in range(dim1B):\n for t in range(dim1B):\n eta2B[idx2B[(p,q)],idx2B[(r,s)]] += (\n fd[p,t] * Gammaod[idx2B[(t,q)],idx2B[(r,s)]] \n + fd[q,t] * Gammaod[idx2B[(p,t)],idx2B[(r,s)]] \n - fd[t,r] * Gammaod[idx2B[(p,q)],idx2B[(t,s)]] \n - fd[t,s] * Gammaod[idx2B[(p,q)],idx2B[(r,t)]]\n - fod[p,t] * Gammad[idx2B[(t,q)],idx2B[(r,s)]] \n - fod[q,t] * Gammad[idx2B[(p,t)],idx2B[(r,s)]] \n + fod[t,r] * Gammad[idx2B[(p,q)],idx2B[(t,s)]] \n + fod[t,s] * Gammad[idx2B[(p,q)],idx2B[(r,t)]]\n )\n\n \n # 2B - 2B - particle and hole ladders\n # Gammad.occB.Gammaod\n GammaGamma = dot(Gammad, dot(occB_2B, Gammaod))\n\n eta2B += 0.5 * (GammaGamma - transpose(GammaGamma))\n\n # 2B - 2B - particle-hole chain\n \n # transform matrices to particle-hole representation and calculate \n # Gammad_ph.occA_ph.Gammaod_ph\n Gammad_ph = ph_transform_2B(Gammad, bas2B, idx2B, basph2B, idxph2B)\n Gammaod_ph = ph_transform_2B(Gammaod, bas2B, idx2B, basph2B, idxph2B)\n\n GammaGamma_ph = dot(Gammad_ph, dot(occphA_2B, Gammaod_ph))\n\n # transform back to standard representation\n GammaGamma = inverse_ph_transform_2B(GammaGamma_ph, bas2B, idx2B, basph2B, idxph2B)\n\n # commutator / antisymmetrization\n work = np.zeros_like(GammaGamma)\n for i1, (i,j) in enumerate(bas2B):\n for i2, (k,l) in enumerate(bas2B):\n work[i1, i2] -= (\n GammaGamma[i1, i2] \n - GammaGamma[idx2B[(j,i)], i2] \n - GammaGamma[i1, idx2B[(l,k)]] \n + GammaGamma[idx2B[(j,i)], idx2B[(l,k)]]\n )\n GammaGamma = work\n\n eta2B += GammaGamma\n\n\n return eta1B, eta2B\n\n\n#-----------------------------------------------------------------------------------\n# derivatives \n#-----------------------------------------------------------------------------------\ndef flow_imsrg2(eta1B, eta2B, f, Gamma, user_data):\n\n dim1B = user_data[\"dim1B\"]\n holes = user_data[\"holes\"]\n particles = user_data[\"particles\"]\n bas2B = user_data[\"bas2B\"]\n idx2B = user_data[\"idx2B\"]\n basph2B = user_data[\"basph2B\"]\n idxph2B = user_data[\"idxph2B\"]\n occB_2B = user_data[\"occB_2B\"]\n occC_2B = user_data[\"occC_2B\"]\n occphA_2B = user_data[\"occphA_2B\"]\n\n ############################# \n # zero-body flow equation\n dE = 0.0\n\n for i in holes:\n for a in particles:\n dE += eta1B[i,a] * f[a,i] - eta1B[a,i] * f[i,a]\n\n for i in holes:\n for j in holes:\n for a in particles:\n for b in particles:\n dE += 0.5 * eta2B[idx2B[(i,j)], idx2B[(a,b)]] * Gamma[idx2B[(a,b)], idx2B[(i,j)]]\n\n\n ############################# \n # one-body flow equation \n df = np.zeros_like(f)\n\n # 1B - 1B\n df += commutator(eta1B, f)\n\n # 1B - 2B\n for p in range(dim1B):\n for q in range(dim1B):\n for i in holes:\n for a in particles:\n df[p,q] += (\n eta1B[i,a] * Gamma[idx2B[(a, p)], idx2B[(i, q)]] \n - eta1B[a,i] * Gamma[idx2B[(i, p)], idx2B[(a, q)]] \n - f[i,a] * eta2B[idx2B[(a, p)], idx2B[(i, q)]] \n + f[a,i] * 
eta2B[idx2B[(i, p)], idx2B[(a, q)]]\n )\n\n # 2B - 2B\n # n_a n_b nn_c + nn_a nn_b n_c = n_a n_b + (1 - n_a - n_b) * n_c\n etaGamma = dot(eta2B, dot(occB_2B, Gamma))\n for p in range(dim1B):\n for q in range(dim1B):\n for i in holes:\n df[p,q] += 0.5*(\n etaGamma[idx2B[(i,p)], idx2B[(i,q)]] \n + transpose(etaGamma)[idx2B[(i,p)], idx2B[(i,q)]]\n )\n\n etaGamma = dot(eta2B, dot(occC_2B, Gamma))\n for p in range(dim1B):\n for q in range(dim1B):\n for r in range(dim1B):\n df[p,q] += 0.5*(\n etaGamma[idx2B[(r,p)], idx2B[(r,q)]] \n + transpose(etaGamma)[idx2B[(r,p)], idx2B[(r,q)]] \n )\n\n\n ############################# \n # two-body flow equation \n dGamma = np.zeros_like(Gamma)\n\n # 1B - 2B\n for p in range(dim1B):\n for q in range(dim1B):\n for r in range(dim1B):\n for s in range(dim1B):\n for t in range(dim1B):\n dGamma[idx2B[(p,q)],idx2B[(r,s)]] += (\n eta1B[p,t] * Gamma[idx2B[(t,q)],idx2B[(r,s)]] \n + eta1B[q,t] * Gamma[idx2B[(p,t)],idx2B[(r,s)]] \n - eta1B[t,r] * Gamma[idx2B[(p,q)],idx2B[(t,s)]] \n - eta1B[t,s] * Gamma[idx2B[(p,q)],idx2B[(r,t)]]\n - f[p,t] * eta2B[idx2B[(t,q)],idx2B[(r,s)]] \n - f[q,t] * eta2B[idx2B[(p,t)],idx2B[(r,s)]] \n + f[t,r] * eta2B[idx2B[(p,q)],idx2B[(t,s)]] \n + f[t,s] * eta2B[idx2B[(p,q)],idx2B[(r,t)]]\n )\n\n \n # 2B - 2B - particle and hole ladders\n # eta2B.occB.Gamma\n etaGamma = dot(eta2B, dot(occB_2B, Gamma))\n\n dGamma += 0.5 * (etaGamma + transpose(etaGamma))\n\n # 2B - 2B - particle-hole chain\n \n # transform matrices to particle-hole representation and calculate \n # eta2B_ph.occA_ph.Gamma_ph\n eta2B_ph = ph_transform_2B(eta2B, bas2B, idx2B, basph2B, idxph2B)\n Gamma_ph = ph_transform_2B(Gamma, bas2B, idx2B, basph2B, idxph2B)\n\n etaGamma_ph = dot(eta2B_ph, dot(occphA_2B, Gamma_ph))\n\n # transform back to standard representation\n etaGamma = inverse_ph_transform_2B(etaGamma_ph, bas2B, idx2B, basph2B, idxph2B)\n\n # commutator / antisymmetrization\n work = np.zeros_like(etaGamma)\n for i1, (i,j) in enumerate(bas2B):\n for i2, (k,l) in enumerate(bas2B):\n work[i1, i2] -= (\n etaGamma[i1, i2] \n - etaGamma[idx2B[(j,i)], i2] \n - etaGamma[i1, idx2B[(l,k)]] \n + etaGamma[idx2B[(j,i)], idx2B[(l,k)]]\n )\n etaGamma = work\n\n dGamma += etaGamma\n\n\n return dE, df, dGamma\n\n\n#-----------------------------------------------------------------------------------\n# derivative wrapper\n#-----------------------------------------------------------------------------------\ndef get_operator_from_y(y, dim1B, dim2B):\n \n # reshape the solution vector into 0B, 1B, 2B pieces\n ptr = 0\n zero_body = y[ptr]\n\n ptr += 1\n one_body = reshape(y[ptr:ptr+dim1B*dim1B], (dim1B, dim1B))\n\n ptr += dim1B*dim1B\n two_body = reshape(y[ptr:ptr+dim2B*dim2B], (dim2B, dim2B))\n\n return zero_body,one_body,two_body\n\n\ndef derivative_wrapper(t, y, user_data):\n\n dim1B = user_data[\"dim1B\"]\n dim2B = dim1B*dim1B\n\n\n holes = user_data[\"holes\"]\n particles = user_data[\"particles\"]\n bas1B = user_data[\"bas1B\"]\n bas2B = user_data[\"bas2B\"]\n basph2B = user_data[\"basph2B\"]\n idx2B = user_data[\"idx2B\"]\n idxph2B = user_data[\"idxph2B\"]\n occA_2B = user_data[\"occA_2B\"]\n occB_2B = user_data[\"occB_2B\"]\n occC_2B = user_data[\"occC_2B\"]\n occphA_2B = user_data[\"occphA_2B\"]\n calc_eta = user_data[\"calc_eta\"]\n calc_rhs = user_data[\"calc_rhs\"]\n\n # extract operator pieces from solution vector\n E, f, Gamma = get_operator_from_y(y, dim1B, dim2B)\n\n\n # calculate the generator\n eta1B, eta2B = calc_eta(f, Gamma, user_data)\n\n # calculate the right-hand side\n dE, 
df, dGamma = calc_rhs(eta1B, eta2B, f, Gamma, user_data)\n\n # convert derivatives into linear array\n dy = np.append([dE], np.append(reshape(df, -1), reshape(dGamma, -1)))\n\n # share data\n user_data[\"dE\"] = dE\n user_data[\"eta_norm\"] = np.linalg.norm(eta1B,ord='fro')+np.linalg.norm(eta2B,ord='fro')\n \n return dy\n\n#-----------------------------------------------------------------------------------\n# pairing Hamiltonian\n#-----------------------------------------------------------------------------------\ndef pairing_hamiltonian(delta, g, user_data):\n bas1B = user_data[\"bas1B\"]\n bas2B = user_data[\"bas2B\"]\n idx2B = user_data[\"idx2B\"]\n\n dim = len(bas1B)\n H1B = np.zeros((dim,dim))\n\n for i in bas1B:\n H1B[i,i] = delta*np.floor_divide(i, 2)\n\n dim = len(bas2B)\n H2B = np.zeros((dim, dim))\n\n # spin up states have even indices, spin down the next odd index\n for (i, j) in bas2B:\n if (i % 2 == 0 and j == i+1):\n for (k, l) in bas2B:\n if (k % 2 == 0 and l == k+1):\n H2B[idx2B[(i,j)],idx2B[(k,l)]] = -0.5*g\n H2B[idx2B[(j,i)],idx2B[(k,l)]] = 0.5*g\n H2B[idx2B[(i,j)],idx2B[(l,k)]] = 0.5*g\n H2B[idx2B[(j,i)],idx2B[(l,k)]] = -0.5*g\n \n return H1B, H2B\n\n#-----------------------------------------------------------------------------------\n# normal-ordered pairing Hamiltonian\n#-----------------------------------------------------------------------------------\ndef normal_order(H1B, H2B, user_data):\n bas1B = user_data[\"bas1B\"]\n bas2B = user_data[\"bas2B\"]\n idx2B = user_data[\"idx2B\"]\n particles = user_data[\"particles\"]\n holes = user_data[\"holes\"]\n\n # 0B part\n E = 0.0\n for i in holes:\n E += H1B[i,i]\n\n for i in holes:\n for j in holes:\n E += 0.5*H2B[idx2B[(i,j)],idx2B[(i,j)]] \n\n # 1B part\n f = H1B\n for i in bas1B:\n for j in bas1B:\n for h in holes:\n f[i,j] += H2B[idx2B[(i,h)],idx2B[(j,h)]] \n\n # 2B part\n Gamma = H2B\n\n return E, f, Gamma\n\n#-----------------------------------------------------------------------------------\n# Perturbation theory\n#-----------------------------------------------------------------------------------\ndef calc_mbpt2(f, Gamma, user_data):\n DE2 = 0.0\n\n particles = user_data[\"particles\"]\n holes = user_data[\"holes\"]\n idx2B = user_data[\"idx2B\"]\n\n for i in holes:\n for j in holes:\n for a in particles:\n for b in particles:\n denom = f[i,i] + f[j,j] - f[a,a] - f[b,b]\n me = Gamma[idx2B[(a,b)],idx2B[(i,j)]]\n DE2 += 0.25*me*me/denom\n\n return DE2\n\ndef calc_mbpt3(f, Gamma, user_data):\n particles = user_data[\"particles\"]\n holes = user_data[\"holes\"]\n idx2B = user_data[\"idx2B\"]\n\n # DE3 = 0.0\n\n DE3pp = 0.0\n DE3hh = 0.0\n DE3ph = 0.0\n\n for a in particles:\n for b in particles:\n for c in particles:\n for d in particles:\n for i in holes:\n for j in holes:\n denom = (f[i,i] + f[j,j] - f[a,a] - f[b,b])*(f[i,i] + f[j,j] - f[c,c] - f[d,d])\n me = (Gamma[idx2B[(i,j)],idx2B[(a,b)]]*Gamma[idx2B[(a,b)],idx2B[(c,d)]]*\n Gamma[idx2B[(c,d)],idx2B[(i,j)]])\n DE3pp += 0.125*me/denom\n\n for i in holes:\n for j in holes:\n for k in holes:\n for l in holes:\n for a in particles:\n for b in particles:\n denom = (f[i,i] + f[j,j] - f[a,a] - f[b,b])*(f[k,k] + f[l,l] - f[a,a] - f[b,b])\n me = (Gamma[idx2B[(a,b)],idx2B[(k,l)]]*Gamma[idx2B[(k,l)],idx2B[(i,j)]]*\n Gamma[idx2B[(i,j)],idx2B[(a,b)]])\n DE3hh += 0.125*me/denom\n\n for i in holes:\n for j in holes:\n for k in holes:\n for a in particles:\n for b in particles:\n for c in particles:\n denom = (f[i,i] + f[j,j] - f[a,a] - f[b,b])*(f[k,k] + f[j,j] - f[a,a] - 
f[c,c])\n me = (Gamma[idx2B[(i,j)],idx2B[(a,b)]]*Gamma[idx2B[(k,b)],idx2B[(i,c)]]*\n Gamma[idx2B[(a,c)],idx2B[(k,j)]])\n DE3ph -= me/denom\n\n return DE3pp+DE3hh+DE3ph\n\n\n#------------------------------------------------------------------------------\n# Main program\n#------------------------------------------------------------------------------\ndef main():\n # grab delta and g from the command line\n delta = float(argv[1])\n g = float(argv[2])\n\n particles = 4\n\n # setup shared data\n dim1B = 8\n\n # this defines the reference state\n # 1st state\n holes = [0,1,2,3]\n particles = [4,5,6,7]\n\n # 2nd state\n # holes = [0,1,4,5]\n # particles = [2,3,6,7]\n\n # 3rd state\n # holes = [0,1,6,7]\n # particles = [2,3,4,5]\n\n # basis definitions\n bas1B = range(dim1B)\n bas2B = construct_basis_2B(holes, particles)\n basph2B = construct_basis_ph2B(holes, particles)\n\n idx2B = construct_index_2B(bas2B)\n idxph2B = construct_index_2B(basph2B)\n\n # occupation number matrices\n occ1B = construct_occupation_1B(bas1B, holes, particles)\n occA_2B = construct_occupationA_2B(bas2B, occ1B)\n occB_2B = construct_occupationB_2B(bas2B, occ1B)\n occC_2B = construct_occupationC_2B(bas2B, occ1B)\n\n occphA_2B = construct_occupationA_2B(basph2B, occ1B)\n\n # store shared data in a dictionary, so we can avoid passing the basis\n # lookups etc. as separate parameters all the time\n user_data = {\n \"dim1B\": dim1B, \n \"holes\": holes,\n \"particles\": particles,\n \"bas1B\": bas1B,\n \"bas2B\": bas2B,\n \"basph2B\": basph2B,\n \"idx2B\": idx2B,\n \"idxph2B\": idxph2B,\n \"occ1B\": occ1B,\n \"occA_2B\": occA_2B,\n \"occB_2B\": occB_2B,\n \"occC_2B\": occC_2B,\n \"occphA_2B\": occphA_2B,\n\n \"eta_norm\": 0.0, # variables for sharing data between ODE solver\n \"dE\": 0.0, # and main routine\n\n\n \"calc_eta\": eta_white_atan, # specify the generator (function object)\n \"calc_rhs\": flow_imsrg2 # specify the right-hand side and truncation\n }\n\n # set up initial Hamiltonian\n H1B, H2B = pairing_hamiltonian(delta, g, user_data)\n\n E, f, Gamma = normal_order(H1B, H2B, user_data) \n\n # reshape Hamiltonian into a linear array (initial ODE vector)\n y0 = np.append([E], np.append(reshape(f, -1), reshape(Gamma, -1)))\n\n # integrate flow equations \n solver = ode(derivative_wrapper,jac=None)\n solver.set_integrator('vode', method='bdf', order=5, nsteps=1000)\n solver.set_f_params(user_data)\n solver.set_initial_value(y0, 0.)\n\n sfinal = 50\n ds = 0.1\n\n print(\"%-8s %-14s %-14s %-14s %-14s %-14s %-14s %-14s %-14s\"%(\n \"s\", \"E\" , \"DE(2)\", \"DE(3)\", \"E+DE\", \"dE/ds\", \n \"||eta||\", \"||fod||\", \"||Gammaod||\"))\n print(\"-\" * 148)\n \n while solver.successful() and solver.t < sfinal:\n ys = solver.integrate(sfinal, step=True)\n \n dim2B = dim1B*dim1B\n E, f, Gamma = get_operator_from_y(ys, dim1B, dim2B)\n\n DE2 = calc_mbpt2(f, Gamma, user_data)\n DE3 = calc_mbpt3(f, Gamma, user_data)\n\n norm_fod = calc_fod_norm(f, user_data)\n norm_Gammaod = calc_Gammaod_norm(Gamma, user_data)\n\n print(\"%8.5f %14.8f %14.8f %14.8f %14.8f %14.8f %14.8f %14.8f %14.8f\"%(\n solver.t, E , DE2, DE3, E+DE2+DE3, user_data[\"dE\"], user_data[\"eta_norm\"], norm_fod, norm_Gammaod))\n if abs(DE2/E) < 10e-8: break\n\n return\n\n\n#------------------------------------------------------------------------------\n# make executable\n#------------------------------------------------------------------------------\nif __name__ == \"__main__\": \n main()\n",
"step-ids": [
17,
18,
20,
21,
29
]
}
|
[
17,
18,
20,
21,
29
] |
import unittest
from dispatcher.task import *
from mock import *
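# Unit tests for dispatcher.task.Task: each case builds a Task from a list of
# 'key=value' attribute strings and checks the result of is_good() validation.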
class TestTask(unittest.TestCase):
def test_init(self):
task_attr = ['filename=C:\\Users\\kcheng\\PycharmProjects\\first_project\\dummy_task2.py',
'frequency=1D', 'time=09:45', 'description=invalid time']
task = Task(task_attr)
self.assertEqual(task.basename(), 'dummy_task2.py')
self.assertEqual(task.frequency, str('1D').lower())
self.assertEqual(task.filename, 'C:\\Users\\kcheng\\PycharmProjects\\first_project\\dummy_task2.py')
self.assertEqual(task.time, '09:45')
self.assertTrue(task.is_good())
def test_init_bad_invalid_filename(self):
task_attr = ['filename=C:\\Users\\kcheng\\PycharmProjects\\first_project\\sadfsda.py',
'frequency=1D', 'time=09:45', 'description=invalid filename']
task = Task(task_attr)
self.assertFalse(task.is_good())
task_attr = ['filename=',
'frequency=1D', 'time=09:45', 'description=invalid filename']
task = Task(task_attr)
self.assertFalse(task.is_good())
def test_init_bad_invalid_time(self):
task_attr = ['filename=C:\\Users\\kcheng\\PycharmProjects\\first_project\\dummy_task2.py',
'frequency=1D', 'time=0ssss9:45', 'description=invalid time']
task = Task(task_attr)
self.assertFalse(task.is_good())
task_attr = ['filename=C:\\Users\\kcheng\\PycharmProjects\\first_project\\dummy_task2.py',
'frequency=1D', 'time=09:45:924355435', 'description=invalid time']
task = Task(task_attr)
self.assertFalse(task.is_good())
task_attr = ['filename=C:\\Users\\kcheng\\PycharmProjects\\first_project\\dummy_task2.py',
'frequency=1D', 'time=09924355435', 'description=invalid time']
task = Task(task_attr)
self.assertFalse(task.is_good())
def test_init_good_empty_time(self):
task_attr = ['filename=C:\\Users\\kcheng\\PycharmProjects\\first_project\\dummy_task2.py',
'frequency=1D', 'time=', 'description=']
task = Task(task_attr)
self.assertTrue(task.is_good())
def test_init_bad_invalid_frequency(self):
task_attr = ['filename=C:\\Users\\kcheng\\PycharmProjects\\first_project\\dummy_task2.py',
'frequency=1Dhhhh', 'time=', 'description=invalid frequency']
task = Task(task_attr)
self.assertFalse(task.is_good())
task_attr = ['filename=C:\\Users\\kcheng\\PycharmProjects\\first_project\\dummy_task2.py',
'frequency=D', 'time=', 'description=invalid frequency']
task = Task(task_attr)
self.assertFalse(task.is_good())
task_attr = ['filename=C:\\Users\\kcheng\\PycharmProjects\\first_project\\dummy_task2.py',
'frequency=2S', 'time=', 'description=invalid frequency']
task = Task(task_attr)
self.assertFalse(task.is_good())
task_attr = ['filename=C:\\Users\\kcheng\\PycharmProjects\\first_project\\dummy_task2.py',
'frequency=', 'time=', 'description=invalid frequency']
task = Task(task_attr)
self.assertFalse(task.is_good())
def test_init_good_zero_frequency(self):
task_attr = ['filename=C:\\Users\\kcheng\\PycharmProjects\\first_project\\dummy_task2.py',
'frequency=0minute', 'time=', 'description=invalid frequency']
task = Task(task_attr)
self.assertTrue(task.is_good())
if __name__ == '__main__':
unittest.main()
|
normal
|
{
"blob_id": "86c053b7d4c752182965755ad5b6ba6937ce6f86",
"index": 5984,
"step-1": "<mask token>\n\n\nclass TestTask(unittest.TestCase):\n <mask token>\n\n def test_init_bad_invalid_filename(self):\n task_attr = [\n 'filename=C:\\\\Users\\\\kcheng\\\\PycharmProjects\\\\first_project\\\\sadfsda.py'\n , 'frequency=1D', 'time=09:45', 'description=invalid filename']\n task = Task(task_attr)\n self.assertFalse(task.is_good())\n task_attr = ['filename=', 'frequency=1D', 'time=09:45',\n 'description=invalid filename']\n task = Task(task_attr)\n self.assertFalse(task.is_good())\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass TestTask(unittest.TestCase):\n\n def test_init(self):\n task_attr = [\n 'filename=C:\\\\Users\\\\kcheng\\\\PycharmProjects\\\\first_project\\\\dummy_task2.py'\n , 'frequency=1D', 'time=09:45', 'description=invalid time']\n task = Task(task_attr)\n self.assertEqual(task.basename(), 'dummy_task2.py')\n self.assertEqual(task.frequency, str('1D').lower())\n self.assertEqual(task.filename,\n 'C:\\\\Users\\\\kcheng\\\\PycharmProjects\\\\first_project\\\\dummy_task2.py'\n )\n self.assertEqual(task.time, '09:45')\n self.assertTrue(task.is_good())\n\n def test_init_bad_invalid_filename(self):\n task_attr = [\n 'filename=C:\\\\Users\\\\kcheng\\\\PycharmProjects\\\\first_project\\\\sadfsda.py'\n , 'frequency=1D', 'time=09:45', 'description=invalid filename']\n task = Task(task_attr)\n self.assertFalse(task.is_good())\n task_attr = ['filename=', 'frequency=1D', 'time=09:45',\n 'description=invalid filename']\n task = Task(task_attr)\n self.assertFalse(task.is_good())\n\n def test_init_bad_invalid_time(self):\n task_attr = [\n 'filename=C:\\\\Users\\\\kcheng\\\\PycharmProjects\\\\first_project\\\\dummy_task2.py'\n , 'frequency=1D', 'time=0ssss9:45', 'description=invalid time']\n task = Task(task_attr)\n self.assertFalse(task.is_good())\n task_attr = [\n 'filename=C:\\\\Users\\\\kcheng\\\\PycharmProjects\\\\first_project\\\\dummy_task2.py'\n , 'frequency=1D', 'time=09:45:924355435',\n 'description=invalid time']\n task = Task(task_attr)\n self.assertFalse(task.is_good())\n task_attr = [\n 'filename=C:\\\\Users\\\\kcheng\\\\PycharmProjects\\\\first_project\\\\dummy_task2.py'\n , 'frequency=1D', 'time=09924355435', 'description=invalid time']\n task = Task(task_attr)\n self.assertFalse(task.is_good())\n <mask token>\n <mask token>\n\n def test_init_good_zero_frequency(self):\n task_attr = [\n 'filename=C:\\\\Users\\\\kcheng\\\\PycharmProjects\\\\first_project\\\\dummy_task2.py'\n , 'frequency=0minute', 'time=', 'description=invalid frequency']\n task = Task(task_attr)\n self.assertTrue(task.is_good())\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass TestTask(unittest.TestCase):\n\n def test_init(self):\n task_attr = [\n 'filename=C:\\\\Users\\\\kcheng\\\\PycharmProjects\\\\first_project\\\\dummy_task2.py'\n , 'frequency=1D', 'time=09:45', 'description=invalid time']\n task = Task(task_attr)\n self.assertEqual(task.basename(), 'dummy_task2.py')\n self.assertEqual(task.frequency, str('1D').lower())\n self.assertEqual(task.filename,\n 'C:\\\\Users\\\\kcheng\\\\PycharmProjects\\\\first_project\\\\dummy_task2.py'\n )\n self.assertEqual(task.time, '09:45')\n self.assertTrue(task.is_good())\n\n def test_init_bad_invalid_filename(self):\n task_attr = [\n 'filename=C:\\\\Users\\\\kcheng\\\\PycharmProjects\\\\first_project\\\\sadfsda.py'\n , 'frequency=1D', 'time=09:45', 'description=invalid filename']\n task = Task(task_attr)\n self.assertFalse(task.is_good())\n task_attr = ['filename=', 'frequency=1D', 'time=09:45',\n 'description=invalid filename']\n task = Task(task_attr)\n self.assertFalse(task.is_good())\n\n def test_init_bad_invalid_time(self):\n task_attr = [\n 'filename=C:\\\\Users\\\\kcheng\\\\PycharmProjects\\\\first_project\\\\dummy_task2.py'\n , 'frequency=1D', 'time=0ssss9:45', 'description=invalid time']\n task = Task(task_attr)\n self.assertFalse(task.is_good())\n task_attr = [\n 'filename=C:\\\\Users\\\\kcheng\\\\PycharmProjects\\\\first_project\\\\dummy_task2.py'\n , 'frequency=1D', 'time=09:45:924355435',\n 'description=invalid time']\n task = Task(task_attr)\n self.assertFalse(task.is_good())\n task_attr = [\n 'filename=C:\\\\Users\\\\kcheng\\\\PycharmProjects\\\\first_project\\\\dummy_task2.py'\n , 'frequency=1D', 'time=09924355435', 'description=invalid time']\n task = Task(task_attr)\n self.assertFalse(task.is_good())\n <mask token>\n\n def test_init_bad_invalid_frequency(self):\n task_attr = [\n 'filename=C:\\\\Users\\\\kcheng\\\\PycharmProjects\\\\first_project\\\\dummy_task2.py'\n , 'frequency=1Dhhhh', 'time=', 'description=invalid frequency']\n task = Task(task_attr)\n self.assertFalse(task.is_good())\n task_attr = [\n 'filename=C:\\\\Users\\\\kcheng\\\\PycharmProjects\\\\first_project\\\\dummy_task2.py'\n , 'frequency=D', 'time=', 'description=invalid frequency']\n task = Task(task_attr)\n self.assertFalse(task.is_good())\n task_attr = [\n 'filename=C:\\\\Users\\\\kcheng\\\\PycharmProjects\\\\first_project\\\\dummy_task2.py'\n , 'frequency=2S', 'time=', 'description=invalid frequency']\n task = Task(task_attr)\n self.assertFalse(task.is_good())\n task_attr = [\n 'filename=C:\\\\Users\\\\kcheng\\\\PycharmProjects\\\\first_project\\\\dummy_task2.py'\n , 'frequency=', 'time=', 'description=invalid frequency']\n task = Task(task_attr)\n self.assertFalse(task.is_good())\n\n def test_init_good_zero_frequency(self):\n task_attr = [\n 'filename=C:\\\\Users\\\\kcheng\\\\PycharmProjects\\\\first_project\\\\dummy_task2.py'\n , 'frequency=0minute', 'time=', 'description=invalid frequency']\n task = Task(task_attr)\n self.assertTrue(task.is_good())\n\n\n<mask token>\n",
"step-4": "<mask token>\n\n\nclass TestTask(unittest.TestCase):\n\n def test_init(self):\n task_attr = [\n 'filename=C:\\\\Users\\\\kcheng\\\\PycharmProjects\\\\first_project\\\\dummy_task2.py'\n , 'frequency=1D', 'time=09:45', 'description=invalid time']\n task = Task(task_attr)\n self.assertEqual(task.basename(), 'dummy_task2.py')\n self.assertEqual(task.frequency, str('1D').lower())\n self.assertEqual(task.filename,\n 'C:\\\\Users\\\\kcheng\\\\PycharmProjects\\\\first_project\\\\dummy_task2.py'\n )\n self.assertEqual(task.time, '09:45')\n self.assertTrue(task.is_good())\n\n def test_init_bad_invalid_filename(self):\n task_attr = [\n 'filename=C:\\\\Users\\\\kcheng\\\\PycharmProjects\\\\first_project\\\\sadfsda.py'\n , 'frequency=1D', 'time=09:45', 'description=invalid filename']\n task = Task(task_attr)\n self.assertFalse(task.is_good())\n task_attr = ['filename=', 'frequency=1D', 'time=09:45',\n 'description=invalid filename']\n task = Task(task_attr)\n self.assertFalse(task.is_good())\n\n def test_init_bad_invalid_time(self):\n task_attr = [\n 'filename=C:\\\\Users\\\\kcheng\\\\PycharmProjects\\\\first_project\\\\dummy_task2.py'\n , 'frequency=1D', 'time=0ssss9:45', 'description=invalid time']\n task = Task(task_attr)\n self.assertFalse(task.is_good())\n task_attr = [\n 'filename=C:\\\\Users\\\\kcheng\\\\PycharmProjects\\\\first_project\\\\dummy_task2.py'\n , 'frequency=1D', 'time=09:45:924355435',\n 'description=invalid time']\n task = Task(task_attr)\n self.assertFalse(task.is_good())\n task_attr = [\n 'filename=C:\\\\Users\\\\kcheng\\\\PycharmProjects\\\\first_project\\\\dummy_task2.py'\n , 'frequency=1D', 'time=09924355435', 'description=invalid time']\n task = Task(task_attr)\n self.assertFalse(task.is_good())\n\n def test_init_good_empty_time(self):\n task_attr = [\n 'filename=C:\\\\Users\\\\kcheng\\\\PycharmProjects\\\\first_project\\\\dummy_task2.py'\n , 'frequency=1D', 'time=', 'description=']\n task = Task(task_attr)\n self.assertTrue(task.is_good())\n\n def test_init_bad_invalid_frequency(self):\n task_attr = [\n 'filename=C:\\\\Users\\\\kcheng\\\\PycharmProjects\\\\first_project\\\\dummy_task2.py'\n , 'frequency=1Dhhhh', 'time=', 'description=invalid frequency']\n task = Task(task_attr)\n self.assertFalse(task.is_good())\n task_attr = [\n 'filename=C:\\\\Users\\\\kcheng\\\\PycharmProjects\\\\first_project\\\\dummy_task2.py'\n , 'frequency=D', 'time=', 'description=invalid frequency']\n task = Task(task_attr)\n self.assertFalse(task.is_good())\n task_attr = [\n 'filename=C:\\\\Users\\\\kcheng\\\\PycharmProjects\\\\first_project\\\\dummy_task2.py'\n , 'frequency=2S', 'time=', 'description=invalid frequency']\n task = Task(task_attr)\n self.assertFalse(task.is_good())\n task_attr = [\n 'filename=C:\\\\Users\\\\kcheng\\\\PycharmProjects\\\\first_project\\\\dummy_task2.py'\n , 'frequency=', 'time=', 'description=invalid frequency']\n task = Task(task_attr)\n self.assertFalse(task.is_good())\n\n def test_init_good_zero_frequency(self):\n task_attr = [\n 'filename=C:\\\\Users\\\\kcheng\\\\PycharmProjects\\\\first_project\\\\dummy_task2.py'\n , 'frequency=0minute', 'time=', 'description=invalid frequency']\n task = Task(task_attr)\n self.assertTrue(task.is_good())\n\n\n<mask token>\n",
"step-5": "import unittest\nfrom dispatcher.task import *\nfrom mock import *\n\nclass TestTask(unittest.TestCase):\n def test_init(self):\n task_attr = ['filename=C:\\\\Users\\\\kcheng\\\\PycharmProjects\\\\first_project\\\\dummy_task2.py',\n 'frequency=1D', 'time=09:45', 'description=invalid time']\n task = Task(task_attr)\n self.assertEqual(task.basename(), 'dummy_task2.py')\n self.assertEqual(task.frequency, str('1D').lower())\n self.assertEqual(task.filename, 'C:\\\\Users\\\\kcheng\\\\PycharmProjects\\\\first_project\\\\dummy_task2.py')\n self.assertEqual(task.time, '09:45')\n self.assertTrue(task.is_good())\n\n def test_init_bad_invalid_filename(self):\n task_attr = ['filename=C:\\\\Users\\\\kcheng\\\\PycharmProjects\\\\first_project\\\\sadfsda.py',\n 'frequency=1D', 'time=09:45', 'description=invalid filename']\n task = Task(task_attr)\n self.assertFalse(task.is_good())\n\n task_attr = ['filename=',\n 'frequency=1D', 'time=09:45', 'description=invalid filename']\n task = Task(task_attr)\n self.assertFalse(task.is_good())\n\n def test_init_bad_invalid_time(self):\n task_attr = ['filename=C:\\\\Users\\\\kcheng\\\\PycharmProjects\\\\first_project\\\\dummy_task2.py',\n 'frequency=1D', 'time=0ssss9:45', 'description=invalid time']\n task = Task(task_attr)\n self.assertFalse(task.is_good())\n\n task_attr = ['filename=C:\\\\Users\\\\kcheng\\\\PycharmProjects\\\\first_project\\\\dummy_task2.py',\n 'frequency=1D', 'time=09:45:924355435', 'description=invalid time']\n task = Task(task_attr)\n self.assertFalse(task.is_good())\n\n task_attr = ['filename=C:\\\\Users\\\\kcheng\\\\PycharmProjects\\\\first_project\\\\dummy_task2.py',\n 'frequency=1D', 'time=09924355435', 'description=invalid time']\n task = Task(task_attr)\n self.assertFalse(task.is_good())\n\n def test_init_good_empty_time(self):\n task_attr = ['filename=C:\\\\Users\\\\kcheng\\\\PycharmProjects\\\\first_project\\\\dummy_task2.py',\n 'frequency=1D', 'time=', 'description=']\n task = Task(task_attr)\n self.assertTrue(task.is_good())\n\n def test_init_bad_invalid_frequency(self):\n task_attr = ['filename=C:\\\\Users\\\\kcheng\\\\PycharmProjects\\\\first_project\\\\dummy_task2.py',\n 'frequency=1Dhhhh', 'time=', 'description=invalid frequency']\n task = Task(task_attr)\n self.assertFalse(task.is_good())\n\n task_attr = ['filename=C:\\\\Users\\\\kcheng\\\\PycharmProjects\\\\first_project\\\\dummy_task2.py',\n 'frequency=D', 'time=', 'description=invalid frequency']\n task = Task(task_attr)\n self.assertFalse(task.is_good())\n\n task_attr = ['filename=C:\\\\Users\\\\kcheng\\\\PycharmProjects\\\\first_project\\\\dummy_task2.py',\n 'frequency=2S', 'time=', 'description=invalid frequency']\n task = Task(task_attr)\n self.assertFalse(task.is_good())\n\n task_attr = ['filename=C:\\\\Users\\\\kcheng\\\\PycharmProjects\\\\first_project\\\\dummy_task2.py',\n 'frequency=', 'time=', 'description=invalid frequency']\n task = Task(task_attr)\n self.assertFalse(task.is_good())\n\n def test_init_good_zero_frequency(self):\n task_attr = ['filename=C:\\\\Users\\\\kcheng\\\\PycharmProjects\\\\first_project\\\\dummy_task2.py',\n 'frequency=0minute', 'time=', 'description=invalid frequency']\n task = Task(task_attr)\n self.assertTrue(task.is_good())\n\nif __name__ == '__main__':\n unittest.main()",
"step-ids": [
2,
5,
6,
7,
10
]
}
|
[
2,
5,
6,
7,
10
] |
import os
import json
from google.appengine.ext import webapp
from generic import JsonRpcService
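# JSON-RPC service handler; its json_create method simply returns a greeting string.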
class ViewService(JsonRpcService):
def json_create(self):
return "Hello, World!"
|
normal
|
{
"blob_id": "1b091d139635e90fb53b3fecc09bb879514c7b38",
"index": 7352,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass ViewService(JsonRpcService):\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass ViewService(JsonRpcService):\n\n def json_create(self):\n return 'Hello, World!'\n",
"step-4": "import os\nimport json\nfrom google.appengine.ext import webapp\nfrom generic import JsonRpcService\n\n\nclass ViewService(JsonRpcService):\n\n def json_create(self):\n return 'Hello, World!'\n",
"step-5": "import os\nimport json\n\nfrom google.appengine.ext import webapp\nfrom generic import JsonRpcService\n\nclass ViewService(JsonRpcService):\n def json_create(self):\n return \"Hello, World!\"\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
# load the dependencies
from airflow import DAG
from datetime import date, timedelta, datetime
# default_args are the default arguments applied to the DAG and all inherited tasks
DAG_DEFAULT_ARGS = {
'owner': 'airflow',
'depends_on_past': False,
'retries': 1,
'retry_delay': timedelta(minutes=1)
}
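# Instantiate a daily DAG named 'twitter_dag_v1'; the body is empty (None), so no tasks are attached yet.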
with DAG('twitter_dag_v1', start_date=datetime(2018, 10, 1), schedule_interval="@daily", default_args=DAG_DEFAULT_ARGS, catchup=False) as dag:
None
|
normal
|
{
"blob_id": "436cc06778bf9ac9e04a897f4a4db90c595d943c",
"index": 5969,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nwith DAG('twitter_dag_v1', start_date=datetime(2018, 10, 1),\n schedule_interval='@daily', default_args=DAG_DEFAULT_ARGS, catchup=False\n ) as dag:\n None\n",
"step-3": "<mask token>\nDAG_DEFAULT_ARGS = {'owner': 'airflow', 'depends_on_past': False, 'retries':\n 1, 'retry_delay': timedelta(minutes=1)}\nwith DAG('twitter_dag_v1', start_date=datetime(2018, 10, 1),\n schedule_interval='@daily', default_args=DAG_DEFAULT_ARGS, catchup=False\n ) as dag:\n None\n",
"step-4": "from airflow import DAG\nfrom datetime import date, timedelta, datetime\nDAG_DEFAULT_ARGS = {'owner': 'airflow', 'depends_on_past': False, 'retries':\n 1, 'retry_delay': timedelta(minutes=1)}\nwith DAG('twitter_dag_v1', start_date=datetime(2018, 10, 1),\n schedule_interval='@daily', default_args=DAG_DEFAULT_ARGS, catchup=False\n ) as dag:\n None\n",
"step-5": "# load the dependencies\nfrom airflow import DAG\nfrom datetime import date, timedelta, datetime\n\n# default_args are the default arguments applied to the DAG and all inherited tasks\nDAG_DEFAULT_ARGS = {\n\t'owner': 'airflow',\n\t'depends_on_past': False,\n\t'retries': 1,\n\t'retry_delay': timedelta(minutes=1)\n}\n\nwith DAG('twitter_dag_v1', start_date=datetime(2018, 10, 1), schedule_interval=\"@daily\", default_args=DAG_DEFAULT_ARGS, catchup=False) as dag:\n\tNone\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
print(1)
print(2)
print("Jenkins")
print("Jenkins2")
print("Jenkins3")
print("Jenkins44")
print("Jenkins55khlk")
print("3333333")
print("44444444")
print("jhjhj")
|
normal
|
{
"blob_id": "77a82f99ab10e3d53e3f8466d43b67e8b87c1588",
"index": 2418,
"step-1": "<mask token>\n",
"step-2": "print(1)\nprint(2)\nprint('Jenkins')\nprint('Jenkins2')\nprint('Jenkins3')\nprint('Jenkins44')\nprint('Jenkins55khlk')\nprint('3333333')\nprint('44444444')\nprint('jhjhj')\n",
"step-3": "print(1)\nprint(2)\nprint(\"Jenkins\")\nprint(\"Jenkins2\")\nprint(\"Jenkins3\")\nprint(\"Jenkins44\")\nprint(\"Jenkins55khlk\")\n\nprint(\"3333333\")\nprint(\"44444444\")\n\nprint(\"jhjhj\")\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
__author__ = "Yong Peng"
__version__ = "1.0"
import time
import re
import getpass
from netmiko import (
ConnectHandler,
NetmikoTimeoutException,
NetmikoAuthenticationException,
)
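# Read the target switch hostnames/IPs from device_list.txt, one per line, skipping blank lines.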
with open('./device_list.txt','r') as f:
device_list = [i.strip() for i in f.readlines() if len(i.strip()) != 0] # read the device list.
print("Data will be collected on below switches:")
for device in device_list:
print(device)
go = input("\nPress y to continue: ")
if go != "y" and go != "Y":
exit(2)
u_id = input("Please input login ID:")
factor_1 = getpass.getpass("ID Password for login:")
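# factor_1 is the static account password; a per-device DUO code (factor_2) is appended
# to it at connect time to form the complete two-factor password.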
# cmd_4_IOS = ['show version | in from','show stack','show flash',\
# 'show license', 'show boot-preference',\
# 'show ip bgp summ', 'show interface brief',\
# 'show ip inter', 'show vlan',\
# 'show vlan brief', 'show lag', 'show lag brief',\
# 'show lldp neighbor', 'show 802-1w', 'show ip route',\
# 'show run']
# cmd_4_IOS = ['show version | in from', 'show flash | in Pri Code|Sec Code']
# cmd_4_IOS = ['show vlan brief', 'show ip interface', 'show version | in from', 'show ip osp inter brief',
# 'show run']
# cmd_4_IOS = ['show vlan id 464']
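# The commands to run are read from temp.txt, one command per line.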
with open("temp.txt",'r') as f:
cmd_4_IOS = [i.strip() for i in f.readlines()]
def send_show_command(device, commands):
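    # Run the given commands on the device over SSH and write all output to c:/script/output/<host>.txt.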
OutputPath = 'c:/script/output/' + str(device['host']) + '.txt'
result = open(OutputPath, 'w')
flag = True
try:
with ConnectHandler(**device) as ssh:
ssh.enable()
for command in commands:
output = ssh.send_command(command, strip_command=False, strip_prompt=False)
result.write(output + "\n" + 30 * '+' + "\n" + "\n")
except Exception as error:
print(error)
flag = False
result.close()
if flag:
        print("Data collection on %s is done. \n \n" % (device['host']))
    else:
        print("Data collection for %s is NOT done. \n \n" % (device['host']))
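# Build per-device connection parameters; each login uses the static password plus a fresh DUO passcode.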
switch = {}
for i in device_list:
switch["device_type"] = "ruckus_fastiron"
switch["host"] = i
switch["username"] = u_id
factor_2 = input("Trying to login to %s, enter DUO Code:"%(i))
switch["password"] = str(factor_1) + str(factor_2)
    switch['secret'] = ''
switch['port'] = 22
send_show_command(switch, cmd_4_IOS)
print("All collection is done.")
|
normal
|
{
"blob_id": "31a0c9a143a06ac86c8e8616fb273a0af844a352",
"index": 6895,
"step-1": "<mask token>\n\n\ndef send_show_command(device, commands):\n OutputPath = 'c:/script/output/' + str(device['host']) + '.txt'\n result = open(OutputPath, 'w')\n flag = True\n try:\n with ConnectHandler(**device) as ssh:\n ssh.enable()\n for command in commands:\n output = ssh.send_command(command, strip_command=False,\n strip_prompt=False)\n result.write(output + '\\n' + 30 * '+' + '\\n' + '\\n')\n except Exception as error:\n print(error)\n flag = False\n result.close()\n if flag:\n print('Data collection on %s is done. \\n \\n' % i)\n else:\n print('Data collection for %s is NOT done. \\n \\n' % i)\n\n\n<mask token>\n",
"step-2": "<mask token>\nwith open('./device_list.txt', 'r') as f:\n device_list = [i.strip() for i in f.readlines() if len(i.strip()) != 0]\nprint('Data will be collected on below switches:')\nfor device in device_list:\n print(device)\n<mask token>\nif go != 'y' and go != 'Y':\n exit(2)\n<mask token>\nwith open('temp.txt', 'r') as f:\n cmd_4_IOS = [i.strip() for i in f.readlines()]\n\n\ndef send_show_command(device, commands):\n OutputPath = 'c:/script/output/' + str(device['host']) + '.txt'\n result = open(OutputPath, 'w')\n flag = True\n try:\n with ConnectHandler(**device) as ssh:\n ssh.enable()\n for command in commands:\n output = ssh.send_command(command, strip_command=False,\n strip_prompt=False)\n result.write(output + '\\n' + 30 * '+' + '\\n' + '\\n')\n except Exception as error:\n print(error)\n flag = False\n result.close()\n if flag:\n print('Data collection on %s is done. \\n \\n' % i)\n else:\n print('Data collection for %s is NOT done. \\n \\n' % i)\n\n\n<mask token>\nfor i in device_list:\n switch['device_type'] = 'ruckus_fastiron'\n switch['host'] = i\n switch['username'] = u_id\n factor_2 = input('Trying to login to %s, enter DUO Code:' % i)\n switch['password'] = str(factor_1) + str(factor_2)\n switch['secret'] = '',\n switch['port'] = 22\n send_show_command(switch, cmd_4_IOS)\nprint('All collection is done.')\n",
"step-3": "__author__ = 'Yong Peng'\n__version__ = '1.0'\n<mask token>\nwith open('./device_list.txt', 'r') as f:\n device_list = [i.strip() for i in f.readlines() if len(i.strip()) != 0]\nprint('Data will be collected on below switches:')\nfor device in device_list:\n print(device)\ngo = input(\"\"\"\nPress y to continue: \"\"\")\nif go != 'y' and go != 'Y':\n exit(2)\nu_id = input('Please input login ID:')\nfactor_1 = getpass.getpass('ID Password for login:')\nwith open('temp.txt', 'r') as f:\n cmd_4_IOS = [i.strip() for i in f.readlines()]\n\n\ndef send_show_command(device, commands):\n OutputPath = 'c:/script/output/' + str(device['host']) + '.txt'\n result = open(OutputPath, 'w')\n flag = True\n try:\n with ConnectHandler(**device) as ssh:\n ssh.enable()\n for command in commands:\n output = ssh.send_command(command, strip_command=False,\n strip_prompt=False)\n result.write(output + '\\n' + 30 * '+' + '\\n' + '\\n')\n except Exception as error:\n print(error)\n flag = False\n result.close()\n if flag:\n print('Data collection on %s is done. \\n \\n' % i)\n else:\n print('Data collection for %s is NOT done. \\n \\n' % i)\n\n\nswitch = {}\nfor i in device_list:\n switch['device_type'] = 'ruckus_fastiron'\n switch['host'] = i\n switch['username'] = u_id\n factor_2 = input('Trying to login to %s, enter DUO Code:' % i)\n switch['password'] = str(factor_1) + str(factor_2)\n switch['secret'] = '',\n switch['port'] = 22\n send_show_command(switch, cmd_4_IOS)\nprint('All collection is done.')\n",
"step-4": "__author__ = 'Yong Peng'\n__version__ = '1.0'\nimport time\nimport re\nimport getpass\nfrom netmiko import ConnectHandler, NetmikoTimeoutException, NetmikoAuthenticationException\nwith open('./device_list.txt', 'r') as f:\n device_list = [i.strip() for i in f.readlines() if len(i.strip()) != 0]\nprint('Data will be collected on below switches:')\nfor device in device_list:\n print(device)\ngo = input(\"\"\"\nPress y to continue: \"\"\")\nif go != 'y' and go != 'Y':\n exit(2)\nu_id = input('Please input login ID:')\nfactor_1 = getpass.getpass('ID Password for login:')\nwith open('temp.txt', 'r') as f:\n cmd_4_IOS = [i.strip() for i in f.readlines()]\n\n\ndef send_show_command(device, commands):\n OutputPath = 'c:/script/output/' + str(device['host']) + '.txt'\n result = open(OutputPath, 'w')\n flag = True\n try:\n with ConnectHandler(**device) as ssh:\n ssh.enable()\n for command in commands:\n output = ssh.send_command(command, strip_command=False,\n strip_prompt=False)\n result.write(output + '\\n' + 30 * '+' + '\\n' + '\\n')\n except Exception as error:\n print(error)\n flag = False\n result.close()\n if flag:\n print('Data collection on %s is done. \\n \\n' % i)\n else:\n print('Data collection for %s is NOT done. \\n \\n' % i)\n\n\nswitch = {}\nfor i in device_list:\n switch['device_type'] = 'ruckus_fastiron'\n switch['host'] = i\n switch['username'] = u_id\n factor_2 = input('Trying to login to %s, enter DUO Code:' % i)\n switch['password'] = str(factor_1) + str(factor_2)\n switch['secret'] = '',\n switch['port'] = 22\n send_show_command(switch, cmd_4_IOS)\nprint('All collection is done.')\n",
"step-5": "\n__author__ = \"Yong Peng\"\n__version__ = \"1.0\"\n\n\nimport time\nimport re\nimport getpass\nfrom netmiko import (\n ConnectHandler,\n NetmikoTimeoutException,\n NetmikoAuthenticationException,\n)\n\nwith open('./device_list.txt','r') as f:\n device_list = [i.strip() for i in f.readlines() if len(i.strip()) != 0] # read the device list.\n\n\nprint(\"Data will be collected on below switches:\")\nfor device in device_list:\n print(device)\n\ngo = input(\"\\nPress y to continue: \")\n\nif go != \"y\" and go != \"Y\":\n exit(2)\n\nu_id = input(\"Please input login ID:\")\nfactor_1 = getpass.getpass(\"ID Password for login:\")\n\n\n# cmd_4_IOS = ['show version | in from','show stack','show flash',\\\n# 'show license', 'show boot-preference',\\\n# 'show ip bgp summ', 'show interface brief',\\\n# 'show ip inter', 'show vlan',\\\n# 'show vlan brief', 'show lag', 'show lag brief',\\\n# 'show lldp neighbor', 'show 802-1w', 'show ip route',\\\n# 'show run']\n# cmd_4_IOS = ['show version | in from', 'show flash | in Pri Code|Sec Code']\n# cmd_4_IOS = ['show vlan brief', 'show ip interface', 'show version | in from', 'show ip osp inter brief',\n# 'show run']n\n# cmd_4_IOS = ['show vlan id 464']\nwith open(\"temp.txt\",'r') as f:\n cmd_4_IOS = [i.strip() for i in f.readlines()]\n\ndef send_show_command(device, commands):\n OutputPath = 'c:/script/output/' + str(device['host']) + '.txt'\n result = open(OutputPath, 'w')\n flag = True\n try:\n with ConnectHandler(**device) as ssh:\n ssh.enable()\n for command in commands:\n output = ssh.send_command(command, strip_command=False, strip_prompt=False)\n result.write(output + \"\\n\" + 30 * '+' + \"\\n\" + \"\\n\")\n\n except Exception as error:\n print(error)\n flag = False\n result.close()\n if flag:\n print(\"Data collection on %s is done. \\n \\n\" % (i))\n else:\n print(\"Data collection for %s is NOT done. \\n \\n\" % (i))\n\nswitch = {}\nfor i in device_list:\n switch[\"device_type\"] = \"ruckus_fastiron\"\n switch[\"host\"] = i\n switch[\"username\"] = u_id\n factor_2 = input(\"Trying to login to %s, enter DUO Code:\"%(i))\n switch[\"password\"] = str(factor_1) + str(factor_2)\n switch['secret'] = '',\n switch['port'] = 22\n send_show_command(switch, cmd_4_IOS)\n\nprint(\"All collection is done.\")",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
import csv
import os
events = {}
eventTypes = set()
eventIndices = {}
i = 0
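# The first four rows of the CSV export are headers; the counter below skips them.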
with open('Civ VI Modding Companion - Events.csv', newline='') as csvfile:
reader = csv.reader(csvfile, delimiter=',', quotechar='|')
for row in reader:
if i < 4:
i += 1
continue
eventName = row[3]
eventType = "GameEvents" if len(row[10]) > 0 else "Events"
argumentName = row[4]
argumentType = row[5][1:]
try:
events[eventName]
except Exception:
events[eventName] = {'eventType': eventType, 'arguments': []}
eventTypes.add(eventType)
if argumentName:
argumentText = '`' + argumentName
if argumentType:
argumentText += ' [' + argumentType + ']'
argumentText += '`'
# argument = {'argumentName': argumentName, 'argumentType': argumentType, 'argumentText': argumentText}
events[eventName]['arguments'].append(argumentText)
for eventType in eventTypes:
filename = '../EventObjects/' + eventType + '.md'
os.makedirs(os.path.dirname(filename), exist_ok=True)
f = open(filename, "w")
eventIndices[eventType] = f
f.write('## Static Events\n')
f.write('Events can be subscribed by using `' + eventType + '.SomeEvent.Add(SomeFunction)`.\n')
f.write('\n')
f.write('| Name | Parameters |\n')
f.write('|:---- |:--------- |\n')
for eventName in events:
event = events[eventName]
eventType = event['eventType']
eventIndex = eventIndices[eventType]
arguments = event['arguments']
# -----------------------
# Create Index Entry
# -----------------------
indexEntry = '| [[' + eventType + "." + eventName + ']] | '
if len(arguments) > 0:
indexEntry += "<br/>".join(arguments)
indexEntry += ' |\n'
eventIndex.write(indexEntry)
# -----------------------
# Create Event File
# -----------------------
fullName = eventType + '.' + eventName
filename = '../EventObjects/' + eventType + '/' + eventType + "." + eventName + '.md'
os.makedirs(os.path.dirname(filename), exist_ok=True)
f = open(filename, "w")
f.write('# ' + fullName + "\n")
f.write('## Description\n')
f.write('TBD\n')
f.write('\n')
f.write('## Usage\n')
argumentsText = (", ".join(arguments))
argumentsText = argumentsText.replace('`', '')
f.write('> `' + fullName + '(' + argumentsText + ')`\n\n')
f.write('Regular event: you can subscribe to it through `' + fullName + '.Add(<function handler>)`\n')
f.write('\n')
f.write('### Parameters\n')
argumentsList = "\n- ".join(arguments)
if len(argumentsList) > 0:
argumentsList = '- ' + argumentsList
f.write(argumentsList)
|
normal
|
{
"blob_id": "5ce98ae241c0982eeb1027ffcff5b770f94ff1a3",
"index": 77,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nwith open('Civ VI Modding Companion - Events.csv', newline='') as csvfile:\n reader = csv.reader(csvfile, delimiter=',', quotechar='|')\n for row in reader:\n if i < 4:\n i += 1\n continue\n eventName = row[3]\n eventType = 'GameEvents' if len(row[10]) > 0 else 'Events'\n argumentName = row[4]\n argumentType = row[5][1:]\n try:\n events[eventName]\n except Exception:\n events[eventName] = {'eventType': eventType, 'arguments': []}\n eventTypes.add(eventType)\n if argumentName:\n argumentText = '`' + argumentName\n if argumentType:\n argumentText += ' [' + argumentType + ']'\n argumentText += '`'\n events[eventName]['arguments'].append(argumentText)\nfor eventType in eventTypes:\n filename = '../EventObjects/' + eventType + '.md'\n os.makedirs(os.path.dirname(filename), exist_ok=True)\n f = open(filename, 'w')\n eventIndices[eventType] = f\n f.write('## Static Events\\n')\n f.write('Events can be subscribed by using `' + eventType +\n '.SomeEvent.Add(SomeFunction)`.\\n')\n f.write('\\n')\n f.write('| Name | Parameters |\\n')\n f.write('|:---- |:--------- |\\n')\nfor eventName in events:\n event = events[eventName]\n eventType = event['eventType']\n eventIndex = eventIndices[eventType]\n arguments = event['arguments']\n indexEntry = '| [[' + eventType + '.' + eventName + ']] | '\n if len(arguments) > 0:\n indexEntry += '<br/>'.join(arguments)\n indexEntry += ' |\\n'\n eventIndex.write(indexEntry)\n fullName = eventType + '.' + eventName\n filename = ('../EventObjects/' + eventType + '/' + eventType + '.' +\n eventName + '.md')\n os.makedirs(os.path.dirname(filename), exist_ok=True)\n f = open(filename, 'w')\n f.write('# ' + fullName + '\\n')\n f.write('## Description\\n')\n f.write('TBD\\n')\n f.write('\\n')\n f.write('## Usage\\n')\n argumentsText = ', '.join(arguments)\n argumentsText = argumentsText.replace('`', '')\n f.write('> `' + fullName + '(' + argumentsText + ')`\\n\\n')\n f.write('Regular event: you can subscribe to it through `' + fullName +\n \"\"\".Add(<function handler>)`\n\"\"\")\n f.write('\\n')\n f.write('### Parameters\\n')\n argumentsList = '\\n- '.join(arguments)\n if len(argumentsList) > 0:\n argumentsList = '- ' + argumentsList\n f.write(argumentsList)\n",
"step-3": "<mask token>\nevents = {}\neventTypes = set()\neventIndices = {}\ni = 0\nwith open('Civ VI Modding Companion - Events.csv', newline='') as csvfile:\n reader = csv.reader(csvfile, delimiter=',', quotechar='|')\n for row in reader:\n if i < 4:\n i += 1\n continue\n eventName = row[3]\n eventType = 'GameEvents' if len(row[10]) > 0 else 'Events'\n argumentName = row[4]\n argumentType = row[5][1:]\n try:\n events[eventName]\n except Exception:\n events[eventName] = {'eventType': eventType, 'arguments': []}\n eventTypes.add(eventType)\n if argumentName:\n argumentText = '`' + argumentName\n if argumentType:\n argumentText += ' [' + argumentType + ']'\n argumentText += '`'\n events[eventName]['arguments'].append(argumentText)\nfor eventType in eventTypes:\n filename = '../EventObjects/' + eventType + '.md'\n os.makedirs(os.path.dirname(filename), exist_ok=True)\n f = open(filename, 'w')\n eventIndices[eventType] = f\n f.write('## Static Events\\n')\n f.write('Events can be subscribed by using `' + eventType +\n '.SomeEvent.Add(SomeFunction)`.\\n')\n f.write('\\n')\n f.write('| Name | Parameters |\\n')\n f.write('|:---- |:--------- |\\n')\nfor eventName in events:\n event = events[eventName]\n eventType = event['eventType']\n eventIndex = eventIndices[eventType]\n arguments = event['arguments']\n indexEntry = '| [[' + eventType + '.' + eventName + ']] | '\n if len(arguments) > 0:\n indexEntry += '<br/>'.join(arguments)\n indexEntry += ' |\\n'\n eventIndex.write(indexEntry)\n fullName = eventType + '.' + eventName\n filename = ('../EventObjects/' + eventType + '/' + eventType + '.' +\n eventName + '.md')\n os.makedirs(os.path.dirname(filename), exist_ok=True)\n f = open(filename, 'w')\n f.write('# ' + fullName + '\\n')\n f.write('## Description\\n')\n f.write('TBD\\n')\n f.write('\\n')\n f.write('## Usage\\n')\n argumentsText = ', '.join(arguments)\n argumentsText = argumentsText.replace('`', '')\n f.write('> `' + fullName + '(' + argumentsText + ')`\\n\\n')\n f.write('Regular event: you can subscribe to it through `' + fullName +\n \"\"\".Add(<function handler>)`\n\"\"\")\n f.write('\\n')\n f.write('### Parameters\\n')\n argumentsList = '\\n- '.join(arguments)\n if len(argumentsList) > 0:\n argumentsList = '- ' + argumentsList\n f.write(argumentsList)\n",
"step-4": "import csv\nimport os\nevents = {}\neventTypes = set()\neventIndices = {}\ni = 0\nwith open('Civ VI Modding Companion - Events.csv', newline='') as csvfile:\n reader = csv.reader(csvfile, delimiter=',', quotechar='|')\n for row in reader:\n if i < 4:\n i += 1\n continue\n eventName = row[3]\n eventType = 'GameEvents' if len(row[10]) > 0 else 'Events'\n argumentName = row[4]\n argumentType = row[5][1:]\n try:\n events[eventName]\n except Exception:\n events[eventName] = {'eventType': eventType, 'arguments': []}\n eventTypes.add(eventType)\n if argumentName:\n argumentText = '`' + argumentName\n if argumentType:\n argumentText += ' [' + argumentType + ']'\n argumentText += '`'\n events[eventName]['arguments'].append(argumentText)\nfor eventType in eventTypes:\n filename = '../EventObjects/' + eventType + '.md'\n os.makedirs(os.path.dirname(filename), exist_ok=True)\n f = open(filename, 'w')\n eventIndices[eventType] = f\n f.write('## Static Events\\n')\n f.write('Events can be subscribed by using `' + eventType +\n '.SomeEvent.Add(SomeFunction)`.\\n')\n f.write('\\n')\n f.write('| Name | Parameters |\\n')\n f.write('|:---- |:--------- |\\n')\nfor eventName in events:\n event = events[eventName]\n eventType = event['eventType']\n eventIndex = eventIndices[eventType]\n arguments = event['arguments']\n indexEntry = '| [[' + eventType + '.' + eventName + ']] | '\n if len(arguments) > 0:\n indexEntry += '<br/>'.join(arguments)\n indexEntry += ' |\\n'\n eventIndex.write(indexEntry)\n fullName = eventType + '.' + eventName\n filename = ('../EventObjects/' + eventType + '/' + eventType + '.' +\n eventName + '.md')\n os.makedirs(os.path.dirname(filename), exist_ok=True)\n f = open(filename, 'w')\n f.write('# ' + fullName + '\\n')\n f.write('## Description\\n')\n f.write('TBD\\n')\n f.write('\\n')\n f.write('## Usage\\n')\n argumentsText = ', '.join(arguments)\n argumentsText = argumentsText.replace('`', '')\n f.write('> `' + fullName + '(' + argumentsText + ')`\\n\\n')\n f.write('Regular event: you can subscribe to it through `' + fullName +\n \"\"\".Add(<function handler>)`\n\"\"\")\n f.write('\\n')\n f.write('### Parameters\\n')\n argumentsList = '\\n- '.join(arguments)\n if len(argumentsList) > 0:\n argumentsList = '- ' + argumentsList\n f.write(argumentsList)\n",
"step-5": "import csv\nimport os\n\nevents = {}\neventTypes = set()\neventIndices = {}\n\ni = 0\n\nwith open('Civ VI Modding Companion - Events.csv', newline='') as csvfile:\n\treader = csv.reader(csvfile, delimiter=',', quotechar='|')\n\tfor row in reader:\n\n\t\tif i < 4:\n\t\t\ti += 1\n\t\t\tcontinue\n\n\t\teventName = row[3]\n\t\teventType = \"GameEvents\" if len(row[10]) > 0 else \"Events\"\n\n\t\targumentName = row[4]\n\t\targumentType = row[5][1:]\n\n\t\ttry:\n\t\t\tevents[eventName]\n\t\texcept Exception:\n\t\t\tevents[eventName] = {'eventType': eventType, 'arguments': []}\n\n\t\teventTypes.add(eventType)\n\n\t\tif argumentName:\n\t\t\targumentText = '`' + argumentName\n\t\t\tif argumentType:\n\t\t\t\targumentText += ' [' + argumentType + ']'\n\t\t\targumentText += '`'\n\n\t\t\t# argument = {'argumentName': argumentName, 'argumentType': argumentType, 'argumentText': argumentText}\n\t\t\tevents[eventName]['arguments'].append(argumentText)\n\nfor eventType in eventTypes:\n\n\tfilename = '../EventObjects/' + eventType + '.md'\n\n\tos.makedirs(os.path.dirname(filename), exist_ok=True)\n\tf = open(filename, \"w\")\n\teventIndices[eventType] = f\n\n\tf.write('## Static Events\\n')\n\tf.write('Events can be subscribed by using `' + eventType + '.SomeEvent.Add(SomeFunction)`.\\n')\n\tf.write('\\n')\n\tf.write('| Name | Parameters |\\n')\n\tf.write('|:---- |:--------- |\\n')\n\nfor eventName in events:\n\n\tevent\t\t= events[eventName]\n\teventType\t= event['eventType']\n\teventIndex\t= eventIndices[eventType]\n\n\targuments\t= event['arguments']\n\n\t# -----------------------\n\t# Create Index Entry\n\t# -----------------------\n\tindexEntry = '| [[' + eventType + \".\" + eventName + ']] | '\n\n\tif len(arguments) > 0:\n\t\tindexEntry += \"<br/>\".join(arguments)\n\n\tindexEntry += ' |\\n'\n\teventIndex.write(indexEntry)\n\t# -----------------------\n\t# Create Event File\n\t# -----------------------\n\tfullName = eventType + '.' + eventName\n\n\tfilename = '../EventObjects/' + eventType + '/' + eventType + \".\" + eventName + '.md'\n\tos.makedirs(os.path.dirname(filename), exist_ok=True)\n\tf = open(filename, \"w\")\n\n\tf.write('# ' + fullName + \"\\n\")\n\tf.write('## Description\\n')\n\tf.write('TBD\\n')\n\tf.write('\\n')\n\tf.write('## Usage\\n')\n\n\targumentsText = (\", \".join(arguments))\n\targumentsText = argumentsText.replace('`', '')\n\n\tf.write('> `' + fullName + '(' + argumentsText + ')`\\n\\n')\n\tf.write('Regular event: you can subscribe to it through `' + fullName + '.Add(<function handler>)`\\n')\n\tf.write('\\n')\n\tf.write('### Parameters\\n')\n\n\targumentsList = \"\\n- \".join(arguments)\n\tif len(argumentsList) > 0:\n\t\targumentsList = '- ' + argumentsList\n\n\tf.write(argumentsList)\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
from openerp import models, fields, api, _
class priority_customer(models.Model):
_inherit = 'res.partner'
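    # Extra fields to flag priority partners and record their registration and liability card details.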
is_priority = fields.Boolean("Is Priority Partner:?")
registration_date = fields.Date("Registration Date:")
liability_card_number = fields.Char("Liability Card Number:")
|
normal
|
{
"blob_id": "f2bb00d06023ef7b3ea3dc33f7ec00d1f48d46ae",
"index": 8477,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass priority_customer(models.Model):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass priority_customer(models.Model):\n _inherit = 'res.partner'\n is_priority = fields.Boolean('Is Priority Partner:?')\n registration_date = fields.Date('Registration Date:')\n liability_card_number = fields.Char('Liability Card Number:')\n",
"step-4": "from openerp import models, fields, api, _\n\n\nclass priority_customer(models.Model):\n _inherit = 'res.partner'\n is_priority = fields.Boolean('Is Priority Partner:?')\n registration_date = fields.Date('Registration Date:')\n liability_card_number = fields.Char('Liability Card Number:')\n",
"step-5": "from openerp import models, fields, api, _\n\n\nclass priority_customer(models.Model):\n\n _inherit = 'res.partner'\n\n is_priority = fields.Boolean(\"Is Priority Partner:?\")\n registration_date = fields.Date(\"Registration Date:\")\n liability_card_number = fields.Char(\"Liability Card Number:\")\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
"""
Copyright (c) 2017 Cyberhaven
Copyright (c) 2017 Dependable Systems Laboratory, EPFL
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import glob
import grp
import logging
import os
import pwd
import re
import socket
import time
from threading import Thread
import psutil
from psutil import NoSuchProcess
from pyftpdlib.authorizers import DummyAuthorizer
from pyftpdlib.handlers import FTPHandler
from pyftpdlib.servers import FTPServer
import sh
from sh import ErrorReturnCode
from s2e_env import CONSTANTS
from s2e_env.command import EnvCommand, CommandError
from s2e_env.utils import repos
from s2e_env.utils.images import ImageDownloader, get_image_templates, get_app_templates, get_all_images, \
translate_image_name
logger = logging.getLogger('image_build')
def _get_user_groups(user_name):
"""
Get a list of groups for the user ``user_name``.
"""
groups = [g.gr_name for g in grp.getgrall() if user_name in g.gr_mem]
gid = pwd.getpwnam(user_name).pw_gid
groups.append(grp.getgrgid(gid).gr_name)
return groups
def _get_user_name():
"""
Get the current user.
"""
return pwd.getpwuid(os.getuid())[0]
def _user_belongs_to(group_name):
"""
Check that the current user belongs to the ``group_name`` group.
"""
user_name = _get_user_name()
groups = _get_user_groups(user_name)
return group_name in groups
def _raise_group_error(group_name):
raise CommandError(f'You must belong to the {group_name} group in order to build '
'images. Please run the following command, then logout '
'and login:\n\n'
f'\tsudo usermod -a -G {group_name} $(whoami)')
def _check_groups_docker():
"""
Check that the current user belongs to the required groups to both run S2E and build S2E images.
"""
if not _user_belongs_to('docker'):
_raise_group_error('docker')
def _check_groups_kvm():
    """Being a member of the libvirtd or kvm group is required only when using KVM to build images."""
if not _user_belongs_to('libvirtd') and not _user_belongs_to('kvm'):
_raise_group_error('kvm')
def _check_virtualbox():
"""
Check if VirtualBox is running. VirtualBox conflicts with S2E's requirement for KVM, so VirtualBox must
*not* be running together with S2E.
"""
# Adapted from https://github.com/giampaolo/psutil/issues/132#issuecomment-44017679
# to avoid race conditions
for proc in psutil.process_iter():
try:
if proc.name() == 'VBoxHeadless':
raise CommandError('S2E uses KVM to build images. VirtualBox '
'is currently running, which is not '
'compatible with KVM. Please close all '
'VirtualBox VMs and try again.')
except NoSuchProcess:
pass
def _check_vmware():
"""
Check if VMWare is running. VMware conflicts with S2E's requirement for KVM, so VMWare must
*not* be running together with S2E.
"""
for proc in psutil.process_iter():
try:
if proc.name() == 'vmware-vmx':
raise CommandError('S2E uses KVM to build images. VMware '
'is currently running, which is not '
'compatible with KVM. Please close all '
'VMware VMs and try again.')
except NoSuchProcess:
pass
def _check_kvm():
"""
Check that the KVM interface exists. This is required by libs2e to communicate with QEMU.
"""
if not os.path.exists(os.path.join(os.sep, 'dev', 'kvm')):
raise CommandError('KVM interface not found - check that /dev/kvm '
'exists. Alternatively, you can disable KVM (-n '
'option) or download pre-built images (-d option)')
def _check_vmlinux():
"""
Check that /boot/vmlinux* files are readable. This is important for guestfish.
"""
try:
for f in glob.glob(os.path.join(os.sep, 'boot', 'vmlinu*')):
with open(f, 'rb'):
pass
except IOError:
raise CommandError('Make sure that the kernels in /boot are readable. '
'This is required for guestfish. Please run the '
'following command:\n\n'
'sudo chmod ugo+r /boot/vmlinu*') from None
# pylint: disable=no-member
def _check_cow(image_dir):
"""
Check that the file system that stores guest images supports copy-on-write.
"""
try:
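        # cp --reflink=always only succeeds on file systems with copy-on-write support (e.g. XFS with reflink=1, BTRFS).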
src = f'{image_dir}/.cowcheck'
dst = f'{image_dir}/.cowcheck1'
sh.touch(src)
sh.cp('--reflink=always', src, dst)
return True
except Exception:
warn_msg = f"""
Copy-on-write check failed.
The file system where images are stored ({image_dir}) does not support copy-on-write.
It is recommended to use an XFS or BTRFS file system with copy-on-write enabled as a storage
location for S2E images, as this can save up to 60% of disk space. The building process checkpoints
intermediate build steps with cp --reflink=auto to make use of copy-on-write if it is available.
How to upgrade:
1. Create an XFS or BTRFS partition large enough to store the images that you need (~300 GB for all images).
Make sure you use reflink=1 to enable copy-on-write when running mkfs.xfs.
2. Create a directory for guest images on that partition (e.g., /mnt/disk1/images)
3. Delete the "images" folder in your S2E environment
4. Create in your S2E environment a symbolic link called "images" to the directory you created in step 2
"""
logger.warning(re.sub(r'^ {8}', '', warn_msg, flags=re.MULTILINE))
return False
finally:
sh.rm('-f', src)
sh.rm('-f', dst)
def _raise_invalid_image(image_name):
raise CommandError(f'Invalid image name: {image_name}. Run ``s2e image_build`` '
'to list available images')
def _get_base_image_and_app(image_name):
x = image_name.split('/')
if len(x) == 1:
return x[0], None
if len(x) == 2:
return x
raise CommandError(f'Invalid image name {image_name}')
def _has_app_image(image_names):
for name in image_names:
if '/' in name:
return True
return False
def _check_product_keys(image_descriptors, image_names):
missing_keys = []
for image_name in image_names:
image = image_descriptors[image_name]
if 'product_key' in image:
if not image['product_key']:
missing_keys.append(image_name)
ios = image_descriptors[image_name].get('os', {})
if 'product_key' in ios:
if not ios['product_key']:
missing_keys.append(image_name)
if missing_keys:
logger.error('The following images require a product key:')
for image in missing_keys:
logger.error(' * %s', image)
raise CommandError('Please update images.json and/or apps.json.')
def _check_iso(templates, app_templates, iso_dir, image_names):
for image_name in image_names:
base_image, app_name = _get_base_image_and_app(image_name)
descriptors = [templates[base_image]]
if app_name:
descriptors.append(app_templates[app_name])
for desc in descriptors:
iso = desc.get('iso', {})
if iso.get('url', ''):
continue
name = iso.get('name', '')
if not name:
continue
if not iso_dir:
raise CommandError(
'Please use the --iso-dir option to specify the path '
f'to a folder that contains {name}'
)
path = os.path.join(iso_dir, name)
if not os.path.exists(path):
raise CommandError(f'The image {image_name} requires {path}, which could not be found')
def _is_port_available(port):
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
try:
s.bind(("127.0.0.1", port))
return True
except socket.error:
return False
finally:
s.close()
def _start_ftp_server(image_path, port):
authorizer = DummyAuthorizer()
authorizer.add_anonymous(image_path, perm='elradfmwMT')
handler = FTPHandler
handler.authorizer = authorizer
handler.masquerade_address = '10.0.2.2'
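    # 10.0.2.2 is the host address as seen from the guest on QEMU's user-mode (slirp) network.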
# QEMU slirp won't let the guest reconnect if timeout happens, so we disable it
handler.timeout = None
server = FTPServer(("127.0.0.1", port), handler)
thread = Thread(target=_run_ftp_server, args=[server])
thread.daemon = True
thread.start()
time.sleep(1)
return server
def _run_ftp_server(server):
try:
server.serve_forever()
finally:
logger.info('FTP server terminated')
server.close_all()
def _get_archive_rules(image_path, rule_names):
if _has_app_image(rule_names):
raise CommandError('Building archives of app images is not supported yet')
archive_rules = []
for r in rule_names:
archive_rules.append(os.path.join(image_path, f'{r}.tar.xz'))
logger.info('The following archives will be built:')
for a in archive_rules:
logger.info(' * %s', a)
return archive_rules
def _download_images(image_path, image_names, templates):
if _has_app_image(image_names):
raise CommandError('Downloading of app images is not supported yet')
image_downloader = ImageDownloader(templates)
image_downloader.download_images(image_names, image_path)
logger.info('Successfully downloaded images: %s', ', '.join(image_names))
class Command(EnvCommand):
"""
Builds an image.
"""
help = 'Build an image.'
def __init__(self):
super().__init__()
self._headless = True
self._use_kvm = True
self._num_cores = 1
self._has_cow = False
def add_arguments(self, parser):
super().add_arguments(parser)
parser.add_argument('name',
help='The name of the image to build. If empty,'
' shows available images', nargs='*')
parser.add_argument('-g', '--gui', action='store_true',
help='Display QEMU GUI during image build')
parser.add_argument('-c', '--cores', required=False, default=2,
type=int,
help='The number of cores used when building the '
'VM image. Defaults to 2')
parser.add_argument('-x', '--clean', action='store_true',
help='Deletes all images and rebuild them from '
'scratch')
parser.add_argument('-a', '--archive', action='store_true',
help='Creates an archive for the specified image')
parser.add_argument('-p', '--ftp-port', required=False, default=15468, type=int,
help='Port for the internal FTP server to receive files from guest VMs during build')
parser.add_argument('-d', '--download', action='store_true',
help='Download image from the repository instead '
'of building it')
parser.add_argument('-i', '--iso-dir',
help='Path to folder that stores ISO files of Windows images')
parser.add_argument('-n', '--no-kvm', action='store_true',
help='Disable KVM during image build')
def handle(self, *args, **options):
        # If the user asked for a GUI, disable headless mode
if options['gui']:
self._headless = False
# If KVM has been explicitly disabled, don't use it during the build
if options['no_kvm']:
self._use_kvm = False
self._num_cores = options['cores']
# The path could have been deleted by a previous clean
if not os.path.exists(self.image_path()):
os.makedirs(self.image_path())
img_build_dir = self.source_path(CONSTANTS['repos']['images']['build'])
if options['clean']:
self._invoke_make(img_build_dir, ['clean'])
return
image_names = options['name']
templates = get_image_templates(img_build_dir)
app_templates = get_app_templates(img_build_dir)
images, image_groups, image_descriptors = get_all_images(templates, app_templates)
if not image_names:
self._print_image_list(images, image_groups, image_descriptors)
print('\nRun ``s2e image_build <name>`` to build an image. '
'Note that you must run ``s2e build`` **before** building '
'an image')
return
image_names = translate_image_name(images, image_groups, image_names)
logger.info('The following images will be built:')
for image in image_names:
logger.info(' * %s', image)
if options['download']:
_download_images(self.image_path(), image_names, templates)
return
rule_names = image_names
if options['archive']:
rule_names = _get_archive_rules(self.image_path(), image_names)
iso_dir = os.path.abspath(options['iso_dir']) if options['iso_dir'] else None
# Check for optional product keys and iso directories.
# These may or may not be required, depending on the set of images.
_check_product_keys(image_descriptors, image_names)
_check_iso(templates, app_templates, iso_dir, image_names)
if self._use_kvm:
_check_kvm()
_check_groups_kvm()
_check_groups_docker()
_check_vmlinux()
self._has_cow = _check_cow(self.image_path())
if self._use_kvm:
_check_virtualbox()
_check_vmware()
if not _is_port_available(options['ftp_port']):
raise CommandError(f'localhost:{options["ftp_port"]} is not available. Check that the port is free or '
'specify a port with --ftp-port')
# Clone kernel if needed.
# This is necessary if the s2e env has been initialized with -b flag.
self._clone_kernel()
server = _start_ftp_server(self.image_path(), options['ftp_port'])
self._invoke_make(img_build_dir, rule_names, options['ftp_port'], iso_dir)
logger.success('Built image(s) \'%s\'', ' '.join(image_names))
server.close_all()
def _invoke_make(self, img_build_dir, rule_names, ftp_port=0, iso_dir=''):
env = os.environ.copy()
env['S2E_INSTALL_ROOT'] = self.install_path()
env['S2E_LINUX_KERNELS_ROOT'] = \
self.source_path(CONSTANTS['repos']['images']['linux'])
env['OUTDIR'] = self.image_path()
env['QEMU_FTP_PORT'] = str(ftp_port)
env['ISODIR'] = iso_dir if iso_dir else ''
env['DEBUG_INTERMEDIATE_RULES'] = '1' if self._has_cow else '0'
logger.debug('Invoking makefile with:')
logger.debug('export S2E_INSTALL_ROOT=%s', env['S2E_INSTALL_ROOT'])
logger.debug('export S2E_LINUX_KERNELS_ROOT=%s', env['S2E_LINUX_KERNELS_ROOT'])
logger.debug('export OUTDIR=%s', env['OUTDIR'])
logger.debug('export ISODIR=%s', env.get('ISODIR', ''))
logger.debug('export DEBUG_INTERMEDIATE_RULES=%s', env.get('DEBUG_INTERMEDIATE_RULES', ''))
if self._headless:
logger.warning('Image creation will run in headless mode. '
'Use --gui to see graphic output for debugging')
else:
env['GRAPHICS'] = ''
if not self._use_kvm:
env['QEMU_KVM'] = ''
logger.warning('Image build without KVM. This will be slow')
try:
make = sh.Command('make').bake(file=os.path.join(img_build_dir,
'Makefile'),
directory=self.image_path(),
_env=env, _fg=True)
make_image = make.bake(j=self._num_cores, r=True, warn_undefined_variables=True)
make_image(sorted(rule_names))
except ErrorReturnCode as e:
raise CommandError(e) from e
def _clone_kernel(self):
kernels_root = self.source_path(CONSTANTS['repos']['images']['linux'])
if os.path.exists(kernels_root):
logger.info('Kernel repository already exists in %s', kernels_root)
return
logger.info('Cloning kernels repository to %s', kernels_root)
kernels_repo = CONSTANTS['repos']['images']['linux']
repos.git_clone_to_source(self.env_path(), kernels_repo)
def _print_image_list(self, images, image_groups, image_descriptors):
img_build_dir = self.source_path(CONSTANTS['repos']['images']['build'])
templates = get_image_templates(img_build_dir)
if not templates:
images_json_path = os.path.join(img_build_dir, 'images.json')
raise CommandError('No images available to build. Make sure that '
f'{images_json_path} exists and is valid')
def get_max_len(lst):
ret = 0
for item in lst:
if len(item) > ret:
ret = len(item)
return ret
print('Available image groups:')
max_group_len = get_max_len(image_groups)
for group in image_groups:
print(f' * {group:{max_group_len}} - Build {group} images')
print('\nAvailable images:')
max_image_len = get_max_len(images)
for image in sorted(images):
print(f' * {image:{max_image_len}} - {image_descriptors[image]["name"]}')
def _print_apps_list(self):
img_build_dir = self.source_path(CONSTANTS['repos']['images']['build'])
app_templates = get_app_templates(img_build_dir)
if not app_templates:
apps_json_path = os.path.join(img_build_dir, 'apps.json')
raise CommandError('No apps available to build. Make sure that '
f'{apps_json_path} exists and is valid')
print('Available applications:')
for app_template, desc in sorted(app_templates.items()):
for base_image in desc['base_images']:
print(f' * {base_image}/{app_template} - {desc["name"]}')
|
normal
|
{
"blob_id": "e5921edef3d3c56a73f2674f483ea4d1f3577629",
"index": 5186,
"step-1": "<mask token>\n\n\ndef _get_user_name():\n \"\"\"\n Get the current user.\n \"\"\"\n return pwd.getpwuid(os.getuid())[0]\n\n\ndef _user_belongs_to(group_name):\n \"\"\"\n Check that the current user belongs to the ``group_name`` group.\n \"\"\"\n user_name = _get_user_name()\n groups = _get_user_groups(user_name)\n return group_name in groups\n\n\n<mask token>\n\n\ndef _check_vmware():\n \"\"\"\n Check if VMWare is running. VMware conflicts with S2E's requirement for KVM, so VMWare must\n *not* be running together with S2E.\n \"\"\"\n for proc in psutil.process_iter():\n try:\n if proc.name() == 'vmware-vmx':\n raise CommandError(\n 'S2E uses KVM to build images. VMware is currently running, which is not compatible with KVM. Please close all VMware VMs and try again.'\n )\n except NoSuchProcess:\n pass\n\n\ndef _check_kvm():\n \"\"\"\n Check that the KVM interface exists. This is required by libs2e to communicate with QEMU.\n \"\"\"\n if not os.path.exists(os.path.join(os.sep, 'dev', 'kvm')):\n raise CommandError(\n 'KVM interface not found - check that /dev/kvm exists. Alternatively, you can disable KVM (-n option) or download pre-built images (-d option)'\n )\n\n\n<mask token>\n\n\ndef _get_base_image_and_app(image_name):\n x = image_name.split('/')\n if len(x) == 1:\n return x[0], None\n if len(x) == 2:\n return x\n raise CommandError(f'Invalid image name {image_name}')\n\n\n<mask token>\n\n\ndef _check_product_keys(image_descriptors, image_names):\n missing_keys = []\n for image_name in image_names:\n image = image_descriptors[image_name]\n if 'product_key' in image:\n if not image['product_key']:\n missing_keys.append(image_name)\n ios = image_descriptors[image_name].get('os', {})\n if 'product_key' in ios:\n if not ios['product_key']:\n missing_keys.append(image_name)\n if missing_keys:\n logger.error('The following images require a product key:')\n for image in missing_keys:\n logger.error(' * %s', image)\n raise CommandError('Please update images.json and/or apps.json.')\n\n\ndef _check_iso(templates, app_templates, iso_dir, image_names):\n for image_name in image_names:\n base_image, app_name = _get_base_image_and_app(image_name)\n descriptors = [templates[base_image]]\n if app_name:\n descriptors.append(app_templates[app_name])\n for desc in descriptors:\n iso = desc.get('iso', {})\n if iso.get('url', ''):\n continue\n name = iso.get('name', '')\n if not name:\n continue\n if not iso_dir:\n raise CommandError(\n f'Please use the --iso-dir option to specify the path to a folder that contains {name}'\n )\n path = os.path.join(iso_dir, name)\n if not os.path.exists(path):\n raise CommandError(\n f'The image {image_name} requires {path}, which could not be found'\n )\n\n\n<mask token>\n\n\nclass Command(EnvCommand):\n \"\"\"\n Builds an image.\n \"\"\"\n help = 'Build an image.'\n\n def __init__(self):\n super().__init__()\n self._headless = True\n self._use_kvm = True\n self._num_cores = 1\n self._has_cow = False\n\n def add_arguments(self, parser):\n super().add_arguments(parser)\n parser.add_argument('name', help=\n 'The name of the image to build. If empty, shows available images',\n nargs='*')\n parser.add_argument('-g', '--gui', action='store_true', help=\n 'Display QEMU GUI during image build')\n parser.add_argument('-c', '--cores', required=False, default=2,\n type=int, help=\n 'The number of cores used when building the VM image. 
Defaults to 2'\n )\n parser.add_argument('-x', '--clean', action='store_true', help=\n 'Deletes all images and rebuild them from scratch')\n parser.add_argument('-a', '--archive', action='store_true', help=\n 'Creates an archive for the specified image')\n parser.add_argument('-p', '--ftp-port', required=False, default=\n 15468, type=int, help=\n 'Port for the internal FTP server to receive files from guest VMs during build'\n )\n parser.add_argument('-d', '--download', action='store_true', help=\n 'Download image from the repository instead of building it')\n parser.add_argument('-i', '--iso-dir', help=\n 'Path to folder that stores ISO files of Windows images')\n parser.add_argument('-n', '--no-kvm', action='store_true', help=\n 'Disable KVM during image build')\n\n def handle(self, *args, **options):\n if options['gui']:\n self._headless = False\n if options['no_kvm']:\n self._use_kvm = False\n self._num_cores = options['cores']\n if not os.path.exists(self.image_path()):\n os.makedirs(self.image_path())\n img_build_dir = self.source_path(CONSTANTS['repos']['images']['build'])\n if options['clean']:\n self._invoke_make(img_build_dir, ['clean'])\n return\n image_names = options['name']\n templates = get_image_templates(img_build_dir)\n app_templates = get_app_templates(img_build_dir)\n images, image_groups, image_descriptors = get_all_images(templates,\n app_templates)\n if not image_names:\n self._print_image_list(images, image_groups, image_descriptors)\n print(\n \"\"\"\nRun ``s2e image_build <name>`` to build an image. Note that you must run ``s2e build`` **before** building an image\"\"\"\n )\n return\n image_names = translate_image_name(images, image_groups, image_names)\n logger.info('The following images will be built:')\n for image in image_names:\n logger.info(' * %s', image)\n if options['download']:\n _download_images(self.image_path(), image_names, templates)\n return\n rule_names = image_names\n if options['archive']:\n rule_names = _get_archive_rules(self.image_path(), image_names)\n iso_dir = os.path.abspath(options['iso_dir']) if options['iso_dir'\n ] else None\n _check_product_keys(image_descriptors, image_names)\n _check_iso(templates, app_templates, iso_dir, image_names)\n if self._use_kvm:\n _check_kvm()\n _check_groups_kvm()\n _check_groups_docker()\n _check_vmlinux()\n self._has_cow = _check_cow(self.image_path())\n if self._use_kvm:\n _check_virtualbox()\n _check_vmware()\n if not _is_port_available(options['ftp_port']):\n raise CommandError(\n f\"localhost:{options['ftp_port']} is not available. 
Check that the port is free or specify a port with --ftp-port\"\n )\n self._clone_kernel()\n server = _start_ftp_server(self.image_path(), options['ftp_port'])\n self._invoke_make(img_build_dir, rule_names, options['ftp_port'],\n iso_dir)\n logger.success(\"Built image(s) '%s'\", ' '.join(image_names))\n server.close_all()\n\n def _invoke_make(self, img_build_dir, rule_names, ftp_port=0, iso_dir=''):\n env = os.environ.copy()\n env['S2E_INSTALL_ROOT'] = self.install_path()\n env['S2E_LINUX_KERNELS_ROOT'] = self.source_path(CONSTANTS['repos']\n ['images']['linux'])\n env['OUTDIR'] = self.image_path()\n env['QEMU_FTP_PORT'] = str(ftp_port)\n env['ISODIR'] = iso_dir if iso_dir else ''\n env['DEBUG_INTERMEDIATE_RULES'] = '1' if self._has_cow else '0'\n logger.debug('Invoking makefile with:')\n logger.debug('export S2E_INSTALL_ROOT=%s', env['S2E_INSTALL_ROOT'])\n logger.debug('export S2E_LINUX_KERNELS_ROOT=%s', env[\n 'S2E_LINUX_KERNELS_ROOT'])\n logger.debug('export OUTDIR=%s', env['OUTDIR'])\n logger.debug('export ISODIR=%s', env.get('ISODIR', ''))\n logger.debug('export DEBUG_INTERMEDIATE_RULES=%s', env.get(\n 'DEBUG_INTERMEDIATE_RULES', ''))\n if self._headless:\n logger.warning(\n 'Image creation will run in headless mode. Use --gui to see graphic output for debugging'\n )\n else:\n env['GRAPHICS'] = ''\n if not self._use_kvm:\n env['QEMU_KVM'] = ''\n logger.warning('Image build without KVM. This will be slow')\n try:\n make = sh.Command('make').bake(file=os.path.join(img_build_dir,\n 'Makefile'), directory=self.image_path(), _env=env, _fg=True)\n make_image = make.bake(j=self._num_cores, r=True,\n warn_undefined_variables=True)\n make_image(sorted(rule_names))\n except ErrorReturnCode as e:\n raise CommandError(e) from e\n\n def _clone_kernel(self):\n kernels_root = self.source_path(CONSTANTS['repos']['images']['linux'])\n if os.path.exists(kernels_root):\n logger.info('Kernel repository already exists in %s', kernels_root)\n return\n logger.info('Cloning kernels repository to %s', kernels_root)\n kernels_repo = CONSTANTS['repos']['images']['linux']\n repos.git_clone_to_source(self.env_path(), kernels_repo)\n\n def _print_image_list(self, images, image_groups, image_descriptors):\n img_build_dir = self.source_path(CONSTANTS['repos']['images']['build'])\n templates = get_image_templates(img_build_dir)\n if not templates:\n images_json_path = os.path.join(img_build_dir, 'images.json')\n raise CommandError(\n f'No images available to build. Make sure that {images_json_path} exists and is valid'\n )\n\n def get_max_len(lst):\n ret = 0\n for item in lst:\n if len(item) > ret:\n ret = len(item)\n return ret\n print('Available image groups:')\n max_group_len = get_max_len(image_groups)\n for group in image_groups:\n print(f' * {group:{max_group_len}} - Build {group} images')\n print('\\nAvailable images:')\n max_image_len = get_max_len(images)\n for image in sorted(images):\n print(\n f\" * {image:{max_image_len}} - {image_descriptors[image]['name']}\"\n )\n\n def _print_apps_list(self):\n img_build_dir = self.source_path(CONSTANTS['repos']['images']['build'])\n app_templates = get_app_templates(img_build_dir)\n if not app_templates:\n apps_json_path = os.path.join(img_build_dir, 'apps.json')\n raise CommandError(\n f'No apps available to build. 
Make sure that {apps_json_path} exists and is valid'\n )\n print('Available applications:')\n for app_template, desc in sorted(app_templates.items()):\n for base_image in desc['base_images']:\n print(f\" * {base_image}/{app_template} - {desc['name']}\")\n",
"step-2": "<mask token>\n\n\ndef _get_user_name():\n \"\"\"\n Get the current user.\n \"\"\"\n return pwd.getpwuid(os.getuid())[0]\n\n\ndef _user_belongs_to(group_name):\n \"\"\"\n Check that the current user belongs to the ``group_name`` group.\n \"\"\"\n user_name = _get_user_name()\n groups = _get_user_groups(user_name)\n return group_name in groups\n\n\n<mask token>\n\n\ndef _check_vmware():\n \"\"\"\n Check if VMWare is running. VMware conflicts with S2E's requirement for KVM, so VMWare must\n *not* be running together with S2E.\n \"\"\"\n for proc in psutil.process_iter():\n try:\n if proc.name() == 'vmware-vmx':\n raise CommandError(\n 'S2E uses KVM to build images. VMware is currently running, which is not compatible with KVM. Please close all VMware VMs and try again.'\n )\n except NoSuchProcess:\n pass\n\n\ndef _check_kvm():\n \"\"\"\n Check that the KVM interface exists. This is required by libs2e to communicate with QEMU.\n \"\"\"\n if not os.path.exists(os.path.join(os.sep, 'dev', 'kvm')):\n raise CommandError(\n 'KVM interface not found - check that /dev/kvm exists. Alternatively, you can disable KVM (-n option) or download pre-built images (-d option)'\n )\n\n\ndef _check_vmlinux():\n \"\"\"\n Check that /boot/vmlinux* files are readable. This is important for guestfish.\n \"\"\"\n try:\n for f in glob.glob(os.path.join(os.sep, 'boot', 'vmlinu*')):\n with open(f, 'rb'):\n pass\n except IOError:\n raise CommandError(\n \"\"\"Make sure that the kernels in /boot are readable. This is required for guestfish. Please run the following command:\n\nsudo chmod ugo+r /boot/vmlinu*\"\"\"\n ) from None\n\n\n<mask token>\n\n\ndef _raise_invalid_image(image_name):\n raise CommandError(\n f'Invalid image name: {image_name}. Run ``s2e image_build`` to list available images'\n )\n\n\ndef _get_base_image_and_app(image_name):\n x = image_name.split('/')\n if len(x) == 1:\n return x[0], None\n if len(x) == 2:\n return x\n raise CommandError(f'Invalid image name {image_name}')\n\n\ndef _has_app_image(image_names):\n for name in image_names:\n if '/' in name:\n return True\n return False\n\n\ndef _check_product_keys(image_descriptors, image_names):\n missing_keys = []\n for image_name in image_names:\n image = image_descriptors[image_name]\n if 'product_key' in image:\n if not image['product_key']:\n missing_keys.append(image_name)\n ios = image_descriptors[image_name].get('os', {})\n if 'product_key' in ios:\n if not ios['product_key']:\n missing_keys.append(image_name)\n if missing_keys:\n logger.error('The following images require a product key:')\n for image in missing_keys:\n logger.error(' * %s', image)\n raise CommandError('Please update images.json and/or apps.json.')\n\n\ndef _check_iso(templates, app_templates, iso_dir, image_names):\n for image_name in image_names:\n base_image, app_name = _get_base_image_and_app(image_name)\n descriptors = [templates[base_image]]\n if app_name:\n descriptors.append(app_templates[app_name])\n for desc in descriptors:\n iso = desc.get('iso', {})\n if iso.get('url', ''):\n continue\n name = iso.get('name', '')\n if not name:\n continue\n if not iso_dir:\n raise CommandError(\n f'Please use the --iso-dir option to specify the path to a folder that contains {name}'\n )\n path = os.path.join(iso_dir, name)\n if not os.path.exists(path):\n raise CommandError(\n f'The image {image_name} requires {path}, which could not be found'\n )\n\n\n<mask token>\n\n\ndef _start_ftp_server(image_path, port):\n authorizer = DummyAuthorizer()\n 
authorizer.add_anonymous(image_path, perm='elradfmwMT')\n handler = FTPHandler\n handler.authorizer = authorizer\n handler.masquerade_address = '10.0.2.2'\n handler.timeout = None\n server = FTPServer(('127.0.0.1', port), handler)\n thread = Thread(target=_run_ftp_server, args=[server])\n thread.daemon = True\n thread.start()\n time.sleep(1)\n return server\n\n\n<mask token>\n\n\nclass Command(EnvCommand):\n \"\"\"\n Builds an image.\n \"\"\"\n help = 'Build an image.'\n\n def __init__(self):\n super().__init__()\n self._headless = True\n self._use_kvm = True\n self._num_cores = 1\n self._has_cow = False\n\n def add_arguments(self, parser):\n super().add_arguments(parser)\n parser.add_argument('name', help=\n 'The name of the image to build. If empty, shows available images',\n nargs='*')\n parser.add_argument('-g', '--gui', action='store_true', help=\n 'Display QEMU GUI during image build')\n parser.add_argument('-c', '--cores', required=False, default=2,\n type=int, help=\n 'The number of cores used when building the VM image. Defaults to 2'\n )\n parser.add_argument('-x', '--clean', action='store_true', help=\n 'Deletes all images and rebuild them from scratch')\n parser.add_argument('-a', '--archive', action='store_true', help=\n 'Creates an archive for the specified image')\n parser.add_argument('-p', '--ftp-port', required=False, default=\n 15468, type=int, help=\n 'Port for the internal FTP server to receive files from guest VMs during build'\n )\n parser.add_argument('-d', '--download', action='store_true', help=\n 'Download image from the repository instead of building it')\n parser.add_argument('-i', '--iso-dir', help=\n 'Path to folder that stores ISO files of Windows images')\n parser.add_argument('-n', '--no-kvm', action='store_true', help=\n 'Disable KVM during image build')\n\n def handle(self, *args, **options):\n if options['gui']:\n self._headless = False\n if options['no_kvm']:\n self._use_kvm = False\n self._num_cores = options['cores']\n if not os.path.exists(self.image_path()):\n os.makedirs(self.image_path())\n img_build_dir = self.source_path(CONSTANTS['repos']['images']['build'])\n if options['clean']:\n self._invoke_make(img_build_dir, ['clean'])\n return\n image_names = options['name']\n templates = get_image_templates(img_build_dir)\n app_templates = get_app_templates(img_build_dir)\n images, image_groups, image_descriptors = get_all_images(templates,\n app_templates)\n if not image_names:\n self._print_image_list(images, image_groups, image_descriptors)\n print(\n \"\"\"\nRun ``s2e image_build <name>`` to build an image. 
Note that you must run ``s2e build`` **before** building an image\"\"\"\n )\n return\n image_names = translate_image_name(images, image_groups, image_names)\n logger.info('The following images will be built:')\n for image in image_names:\n logger.info(' * %s', image)\n if options['download']:\n _download_images(self.image_path(), image_names, templates)\n return\n rule_names = image_names\n if options['archive']:\n rule_names = _get_archive_rules(self.image_path(), image_names)\n iso_dir = os.path.abspath(options['iso_dir']) if options['iso_dir'\n ] else None\n _check_product_keys(image_descriptors, image_names)\n _check_iso(templates, app_templates, iso_dir, image_names)\n if self._use_kvm:\n _check_kvm()\n _check_groups_kvm()\n _check_groups_docker()\n _check_vmlinux()\n self._has_cow = _check_cow(self.image_path())\n if self._use_kvm:\n _check_virtualbox()\n _check_vmware()\n if not _is_port_available(options['ftp_port']):\n raise CommandError(\n f\"localhost:{options['ftp_port']} is not available. Check that the port is free or specify a port with --ftp-port\"\n )\n self._clone_kernel()\n server = _start_ftp_server(self.image_path(), options['ftp_port'])\n self._invoke_make(img_build_dir, rule_names, options['ftp_port'],\n iso_dir)\n logger.success(\"Built image(s) '%s'\", ' '.join(image_names))\n server.close_all()\n\n def _invoke_make(self, img_build_dir, rule_names, ftp_port=0, iso_dir=''):\n env = os.environ.copy()\n env['S2E_INSTALL_ROOT'] = self.install_path()\n env['S2E_LINUX_KERNELS_ROOT'] = self.source_path(CONSTANTS['repos']\n ['images']['linux'])\n env['OUTDIR'] = self.image_path()\n env['QEMU_FTP_PORT'] = str(ftp_port)\n env['ISODIR'] = iso_dir if iso_dir else ''\n env['DEBUG_INTERMEDIATE_RULES'] = '1' if self._has_cow else '0'\n logger.debug('Invoking makefile with:')\n logger.debug('export S2E_INSTALL_ROOT=%s', env['S2E_INSTALL_ROOT'])\n logger.debug('export S2E_LINUX_KERNELS_ROOT=%s', env[\n 'S2E_LINUX_KERNELS_ROOT'])\n logger.debug('export OUTDIR=%s', env['OUTDIR'])\n logger.debug('export ISODIR=%s', env.get('ISODIR', ''))\n logger.debug('export DEBUG_INTERMEDIATE_RULES=%s', env.get(\n 'DEBUG_INTERMEDIATE_RULES', ''))\n if self._headless:\n logger.warning(\n 'Image creation will run in headless mode. Use --gui to see graphic output for debugging'\n )\n else:\n env['GRAPHICS'] = ''\n if not self._use_kvm:\n env['QEMU_KVM'] = ''\n logger.warning('Image build without KVM. This will be slow')\n try:\n make = sh.Command('make').bake(file=os.path.join(img_build_dir,\n 'Makefile'), directory=self.image_path(), _env=env, _fg=True)\n make_image = make.bake(j=self._num_cores, r=True,\n warn_undefined_variables=True)\n make_image(sorted(rule_names))\n except ErrorReturnCode as e:\n raise CommandError(e) from e\n\n def _clone_kernel(self):\n kernels_root = self.source_path(CONSTANTS['repos']['images']['linux'])\n if os.path.exists(kernels_root):\n logger.info('Kernel repository already exists in %s', kernels_root)\n return\n logger.info('Cloning kernels repository to %s', kernels_root)\n kernels_repo = CONSTANTS['repos']['images']['linux']\n repos.git_clone_to_source(self.env_path(), kernels_repo)\n\n def _print_image_list(self, images, image_groups, image_descriptors):\n img_build_dir = self.source_path(CONSTANTS['repos']['images']['build'])\n templates = get_image_templates(img_build_dir)\n if not templates:\n images_json_path = os.path.join(img_build_dir, 'images.json')\n raise CommandError(\n f'No images available to build. 
Make sure that {images_json_path} exists and is valid'\n )\n\n def get_max_len(lst):\n ret = 0\n for item in lst:\n if len(item) > ret:\n ret = len(item)\n return ret\n print('Available image groups:')\n max_group_len = get_max_len(image_groups)\n for group in image_groups:\n print(f' * {group:{max_group_len}} - Build {group} images')\n print('\\nAvailable images:')\n max_image_len = get_max_len(images)\n for image in sorted(images):\n print(\n f\" * {image:{max_image_len}} - {image_descriptors[image]['name']}\"\n )\n\n def _print_apps_list(self):\n img_build_dir = self.source_path(CONSTANTS['repos']['images']['build'])\n app_templates = get_app_templates(img_build_dir)\n if not app_templates:\n apps_json_path = os.path.join(img_build_dir, 'apps.json')\n raise CommandError(\n f'No apps available to build. Make sure that {apps_json_path} exists and is valid'\n )\n print('Available applications:')\n for app_template, desc in sorted(app_templates.items()):\n for base_image in desc['base_images']:\n print(f\" * {base_image}/{app_template} - {desc['name']}\")\n",
"step-3": "<mask token>\n\n\ndef _get_user_name():\n \"\"\"\n Get the current user.\n \"\"\"\n return pwd.getpwuid(os.getuid())[0]\n\n\ndef _user_belongs_to(group_name):\n \"\"\"\n Check that the current user belongs to the ``group_name`` group.\n \"\"\"\n user_name = _get_user_name()\n groups = _get_user_groups(user_name)\n return group_name in groups\n\n\n<mask token>\n\n\ndef _check_groups_kvm():\n \"\"\"Being member of KVM is required only when using KVM to build images\"\"\"\n if not _user_belongs_to('libvirtd') and not _user_belongs_to('kvm'):\n _raise_group_error('kvm')\n\n\n<mask token>\n\n\ndef _check_vmware():\n \"\"\"\n Check if VMWare is running. VMware conflicts with S2E's requirement for KVM, so VMWare must\n *not* be running together with S2E.\n \"\"\"\n for proc in psutil.process_iter():\n try:\n if proc.name() == 'vmware-vmx':\n raise CommandError(\n 'S2E uses KVM to build images. VMware is currently running, which is not compatible with KVM. Please close all VMware VMs and try again.'\n )\n except NoSuchProcess:\n pass\n\n\ndef _check_kvm():\n \"\"\"\n Check that the KVM interface exists. This is required by libs2e to communicate with QEMU.\n \"\"\"\n if not os.path.exists(os.path.join(os.sep, 'dev', 'kvm')):\n raise CommandError(\n 'KVM interface not found - check that /dev/kvm exists. Alternatively, you can disable KVM (-n option) or download pre-built images (-d option)'\n )\n\n\ndef _check_vmlinux():\n \"\"\"\n Check that /boot/vmlinux* files are readable. This is important for guestfish.\n \"\"\"\n try:\n for f in glob.glob(os.path.join(os.sep, 'boot', 'vmlinu*')):\n with open(f, 'rb'):\n pass\n except IOError:\n raise CommandError(\n \"\"\"Make sure that the kernels in /boot are readable. This is required for guestfish. Please run the following command:\n\nsudo chmod ugo+r /boot/vmlinu*\"\"\"\n ) from None\n\n\ndef _check_cow(image_dir):\n \"\"\"\n Check that the file system that stores guest images supports copy-on-write.\n \"\"\"\n try:\n src = f'{image_dir}/.cowcheck'\n dst = f'{image_dir}/.cowcheck1'\n sh.touch(src)\n sh.cp('--reflink=always', src, dst)\n return True\n except Exception:\n warn_msg = f\"\"\"\n Copy-on-write check failed.\n The file system where images are stored ({image_dir}) does not support copy-on-write.\n It is recommended to use an XFS or BTRFS file system with copy-on-write enabled as a storage\n location for S2E images, as this can save up to 60% of disk space. The building process checkpoints\n intermediate build steps with cp --reflink=auto to make use of copy-on-write if it is available.\n\n How to upgrade:\n 1. Create an XFS or BTRFS partition large enough to store the images that you need (~300 GB for all images).\n Make sure you use reflink=1 to enable copy-on-write when running mkfs.xfs.\n 2. Create a directory for guest images on that partition (e.g., /mnt/disk1/images)\n 3. Delete the \"images\" folder in your S2E environment\n 4. Create in your S2E environment a symbolic link called \"images\" to the directory you created in step 2\n \"\"\"\n logger.warning(re.sub('^ {8}', '', warn_msg, flags=re.MULTILINE))\n return False\n finally:\n sh.rm('-f', src)\n sh.rm('-f', dst)\n\n\ndef _raise_invalid_image(image_name):\n raise CommandError(\n f'Invalid image name: {image_name}. 
Run ``s2e image_build`` to list available images'\n )\n\n\ndef _get_base_image_and_app(image_name):\n x = image_name.split('/')\n if len(x) == 1:\n return x[0], None\n if len(x) == 2:\n return x\n raise CommandError(f'Invalid image name {image_name}')\n\n\ndef _has_app_image(image_names):\n for name in image_names:\n if '/' in name:\n return True\n return False\n\n\ndef _check_product_keys(image_descriptors, image_names):\n missing_keys = []\n for image_name in image_names:\n image = image_descriptors[image_name]\n if 'product_key' in image:\n if not image['product_key']:\n missing_keys.append(image_name)\n ios = image_descriptors[image_name].get('os', {})\n if 'product_key' in ios:\n if not ios['product_key']:\n missing_keys.append(image_name)\n if missing_keys:\n logger.error('The following images require a product key:')\n for image in missing_keys:\n logger.error(' * %s', image)\n raise CommandError('Please update images.json and/or apps.json.')\n\n\ndef _check_iso(templates, app_templates, iso_dir, image_names):\n for image_name in image_names:\n base_image, app_name = _get_base_image_and_app(image_name)\n descriptors = [templates[base_image]]\n if app_name:\n descriptors.append(app_templates[app_name])\n for desc in descriptors:\n iso = desc.get('iso', {})\n if iso.get('url', ''):\n continue\n name = iso.get('name', '')\n if not name:\n continue\n if not iso_dir:\n raise CommandError(\n f'Please use the --iso-dir option to specify the path to a folder that contains {name}'\n )\n path = os.path.join(iso_dir, name)\n if not os.path.exists(path):\n raise CommandError(\n f'The image {image_name} requires {path}, which could not be found'\n )\n\n\n<mask token>\n\n\ndef _start_ftp_server(image_path, port):\n authorizer = DummyAuthorizer()\n authorizer.add_anonymous(image_path, perm='elradfmwMT')\n handler = FTPHandler\n handler.authorizer = authorizer\n handler.masquerade_address = '10.0.2.2'\n handler.timeout = None\n server = FTPServer(('127.0.0.1', port), handler)\n thread = Thread(target=_run_ftp_server, args=[server])\n thread.daemon = True\n thread.start()\n time.sleep(1)\n return server\n\n\n<mask token>\n\n\ndef _get_archive_rules(image_path, rule_names):\n if _has_app_image(rule_names):\n raise CommandError(\n 'Building archives of app images is not supported yet')\n archive_rules = []\n for r in rule_names:\n archive_rules.append(os.path.join(image_path, f'{r}.tar.xz'))\n logger.info('The following archives will be built:')\n for a in archive_rules:\n logger.info(' * %s', a)\n return archive_rules\n\n\n<mask token>\n\n\nclass Command(EnvCommand):\n \"\"\"\n Builds an image.\n \"\"\"\n help = 'Build an image.'\n\n def __init__(self):\n super().__init__()\n self._headless = True\n self._use_kvm = True\n self._num_cores = 1\n self._has_cow = False\n\n def add_arguments(self, parser):\n super().add_arguments(parser)\n parser.add_argument('name', help=\n 'The name of the image to build. If empty, shows available images',\n nargs='*')\n parser.add_argument('-g', '--gui', action='store_true', help=\n 'Display QEMU GUI during image build')\n parser.add_argument('-c', '--cores', required=False, default=2,\n type=int, help=\n 'The number of cores used when building the VM image. 
Defaults to 2'\n )\n parser.add_argument('-x', '--clean', action='store_true', help=\n 'Deletes all images and rebuild them from scratch')\n parser.add_argument('-a', '--archive', action='store_true', help=\n 'Creates an archive for the specified image')\n parser.add_argument('-p', '--ftp-port', required=False, default=\n 15468, type=int, help=\n 'Port for the internal FTP server to receive files from guest VMs during build'\n )\n parser.add_argument('-d', '--download', action='store_true', help=\n 'Download image from the repository instead of building it')\n parser.add_argument('-i', '--iso-dir', help=\n 'Path to folder that stores ISO files of Windows images')\n parser.add_argument('-n', '--no-kvm', action='store_true', help=\n 'Disable KVM during image build')\n\n def handle(self, *args, **options):\n if options['gui']:\n self._headless = False\n if options['no_kvm']:\n self._use_kvm = False\n self._num_cores = options['cores']\n if not os.path.exists(self.image_path()):\n os.makedirs(self.image_path())\n img_build_dir = self.source_path(CONSTANTS['repos']['images']['build'])\n if options['clean']:\n self._invoke_make(img_build_dir, ['clean'])\n return\n image_names = options['name']\n templates = get_image_templates(img_build_dir)\n app_templates = get_app_templates(img_build_dir)\n images, image_groups, image_descriptors = get_all_images(templates,\n app_templates)\n if not image_names:\n self._print_image_list(images, image_groups, image_descriptors)\n print(\n \"\"\"\nRun ``s2e image_build <name>`` to build an image. Note that you must run ``s2e build`` **before** building an image\"\"\"\n )\n return\n image_names = translate_image_name(images, image_groups, image_names)\n logger.info('The following images will be built:')\n for image in image_names:\n logger.info(' * %s', image)\n if options['download']:\n _download_images(self.image_path(), image_names, templates)\n return\n rule_names = image_names\n if options['archive']:\n rule_names = _get_archive_rules(self.image_path(), image_names)\n iso_dir = os.path.abspath(options['iso_dir']) if options['iso_dir'\n ] else None\n _check_product_keys(image_descriptors, image_names)\n _check_iso(templates, app_templates, iso_dir, image_names)\n if self._use_kvm:\n _check_kvm()\n _check_groups_kvm()\n _check_groups_docker()\n _check_vmlinux()\n self._has_cow = _check_cow(self.image_path())\n if self._use_kvm:\n _check_virtualbox()\n _check_vmware()\n if not _is_port_available(options['ftp_port']):\n raise CommandError(\n f\"localhost:{options['ftp_port']} is not available. 
Check that the port is free or specify a port with --ftp-port\"\n )\n self._clone_kernel()\n server = _start_ftp_server(self.image_path(), options['ftp_port'])\n self._invoke_make(img_build_dir, rule_names, options['ftp_port'],\n iso_dir)\n logger.success(\"Built image(s) '%s'\", ' '.join(image_names))\n server.close_all()\n\n def _invoke_make(self, img_build_dir, rule_names, ftp_port=0, iso_dir=''):\n env = os.environ.copy()\n env['S2E_INSTALL_ROOT'] = self.install_path()\n env['S2E_LINUX_KERNELS_ROOT'] = self.source_path(CONSTANTS['repos']\n ['images']['linux'])\n env['OUTDIR'] = self.image_path()\n env['QEMU_FTP_PORT'] = str(ftp_port)\n env['ISODIR'] = iso_dir if iso_dir else ''\n env['DEBUG_INTERMEDIATE_RULES'] = '1' if self._has_cow else '0'\n logger.debug('Invoking makefile with:')\n logger.debug('export S2E_INSTALL_ROOT=%s', env['S2E_INSTALL_ROOT'])\n logger.debug('export S2E_LINUX_KERNELS_ROOT=%s', env[\n 'S2E_LINUX_KERNELS_ROOT'])\n logger.debug('export OUTDIR=%s', env['OUTDIR'])\n logger.debug('export ISODIR=%s', env.get('ISODIR', ''))\n logger.debug('export DEBUG_INTERMEDIATE_RULES=%s', env.get(\n 'DEBUG_INTERMEDIATE_RULES', ''))\n if self._headless:\n logger.warning(\n 'Image creation will run in headless mode. Use --gui to see graphic output for debugging'\n )\n else:\n env['GRAPHICS'] = ''\n if not self._use_kvm:\n env['QEMU_KVM'] = ''\n logger.warning('Image build without KVM. This will be slow')\n try:\n make = sh.Command('make').bake(file=os.path.join(img_build_dir,\n 'Makefile'), directory=self.image_path(), _env=env, _fg=True)\n make_image = make.bake(j=self._num_cores, r=True,\n warn_undefined_variables=True)\n make_image(sorted(rule_names))\n except ErrorReturnCode as e:\n raise CommandError(e) from e\n\n def _clone_kernel(self):\n kernels_root = self.source_path(CONSTANTS['repos']['images']['linux'])\n if os.path.exists(kernels_root):\n logger.info('Kernel repository already exists in %s', kernels_root)\n return\n logger.info('Cloning kernels repository to %s', kernels_root)\n kernels_repo = CONSTANTS['repos']['images']['linux']\n repos.git_clone_to_source(self.env_path(), kernels_repo)\n\n def _print_image_list(self, images, image_groups, image_descriptors):\n img_build_dir = self.source_path(CONSTANTS['repos']['images']['build'])\n templates = get_image_templates(img_build_dir)\n if not templates:\n images_json_path = os.path.join(img_build_dir, 'images.json')\n raise CommandError(\n f'No images available to build. Make sure that {images_json_path} exists and is valid'\n )\n\n def get_max_len(lst):\n ret = 0\n for item in lst:\n if len(item) > ret:\n ret = len(item)\n return ret\n print('Available image groups:')\n max_group_len = get_max_len(image_groups)\n for group in image_groups:\n print(f' * {group:{max_group_len}} - Build {group} images')\n print('\\nAvailable images:')\n max_image_len = get_max_len(images)\n for image in sorted(images):\n print(\n f\" * {image:{max_image_len}} - {image_descriptors[image]['name']}\"\n )\n\n def _print_apps_list(self):\n img_build_dir = self.source_path(CONSTANTS['repos']['images']['build'])\n app_templates = get_app_templates(img_build_dir)\n if not app_templates:\n apps_json_path = os.path.join(img_build_dir, 'apps.json')\n raise CommandError(\n f'No apps available to build. 
Make sure that {apps_json_path} exists and is valid'\n )\n print('Available applications:')\n for app_template, desc in sorted(app_templates.items()):\n for base_image in desc['base_images']:\n print(f\" * {base_image}/{app_template} - {desc['name']}\")\n",
"step-4": "<mask token>\n\n\ndef _get_user_groups(user_name):\n \"\"\"\n Get a list of groups for the user ``user_name``.\n \"\"\"\n groups = [g.gr_name for g in grp.getgrall() if user_name in g.gr_mem]\n gid = pwd.getpwnam(user_name).pw_gid\n groups.append(grp.getgrgid(gid).gr_name)\n return groups\n\n\ndef _get_user_name():\n \"\"\"\n Get the current user.\n \"\"\"\n return pwd.getpwuid(os.getuid())[0]\n\n\ndef _user_belongs_to(group_name):\n \"\"\"\n Check that the current user belongs to the ``group_name`` group.\n \"\"\"\n user_name = _get_user_name()\n groups = _get_user_groups(user_name)\n return group_name in groups\n\n\ndef _raise_group_error(group_name):\n raise CommandError(\n f\"\"\"You must belong to the {group_name} group in order to build images. Please run the following command, then logout and login:\n\n\tsudo usermod -a -G {group_name} $(whoami)\"\"\"\n )\n\n\ndef _check_groups_docker():\n \"\"\"\n Check that the current user belongs to the required groups to both run S2E and build S2E images.\n \"\"\"\n if not _user_belongs_to('docker'):\n _raise_group_error('docker')\n\n\ndef _check_groups_kvm():\n \"\"\"Being member of KVM is required only when using KVM to build images\"\"\"\n if not _user_belongs_to('libvirtd') and not _user_belongs_to('kvm'):\n _raise_group_error('kvm')\n\n\ndef _check_virtualbox():\n \"\"\"\n Check if VirtualBox is running. VirtualBox conflicts with S2E's requirement for KVM, so VirtualBox must\n *not* be running together with S2E.\n \"\"\"\n for proc in psutil.process_iter():\n try:\n if proc.name() == 'VBoxHeadless':\n raise CommandError(\n 'S2E uses KVM to build images. VirtualBox is currently running, which is not compatible with KVM. Please close all VirtualBox VMs and try again.'\n )\n except NoSuchProcess:\n pass\n\n\ndef _check_vmware():\n \"\"\"\n Check if VMWare is running. VMware conflicts with S2E's requirement for KVM, so VMWare must\n *not* be running together with S2E.\n \"\"\"\n for proc in psutil.process_iter():\n try:\n if proc.name() == 'vmware-vmx':\n raise CommandError(\n 'S2E uses KVM to build images. VMware is currently running, which is not compatible with KVM. Please close all VMware VMs and try again.'\n )\n except NoSuchProcess:\n pass\n\n\ndef _check_kvm():\n \"\"\"\n Check that the KVM interface exists. This is required by libs2e to communicate with QEMU.\n \"\"\"\n if not os.path.exists(os.path.join(os.sep, 'dev', 'kvm')):\n raise CommandError(\n 'KVM interface not found - check that /dev/kvm exists. Alternatively, you can disable KVM (-n option) or download pre-built images (-d option)'\n )\n\n\ndef _check_vmlinux():\n \"\"\"\n Check that /boot/vmlinux* files are readable. This is important for guestfish.\n \"\"\"\n try:\n for f in glob.glob(os.path.join(os.sep, 'boot', 'vmlinu*')):\n with open(f, 'rb'):\n pass\n except IOError:\n raise CommandError(\n \"\"\"Make sure that the kernels in /boot are readable. This is required for guestfish. 
Please run the following command:\n\nsudo chmod ugo+r /boot/vmlinu*\"\"\"\n ) from None\n\n\ndef _check_cow(image_dir):\n \"\"\"\n Check that the file system that stores guest images supports copy-on-write.\n \"\"\"\n try:\n src = f'{image_dir}/.cowcheck'\n dst = f'{image_dir}/.cowcheck1'\n sh.touch(src)\n sh.cp('--reflink=always', src, dst)\n return True\n except Exception:\n warn_msg = f\"\"\"\n Copy-on-write check failed.\n The file system where images are stored ({image_dir}) does not support copy-on-write.\n It is recommended to use an XFS or BTRFS file system with copy-on-write enabled as a storage\n location for S2E images, as this can save up to 60% of disk space. The building process checkpoints\n intermediate build steps with cp --reflink=auto to make use of copy-on-write if it is available.\n\n How to upgrade:\n 1. Create an XFS or BTRFS partition large enough to store the images that you need (~300 GB for all images).\n Make sure you use reflink=1 to enable copy-on-write when running mkfs.xfs.\n 2. Create a directory for guest images on that partition (e.g., /mnt/disk1/images)\n 3. Delete the \"images\" folder in your S2E environment\n 4. Create in your S2E environment a symbolic link called \"images\" to the directory you created in step 2\n \"\"\"\n logger.warning(re.sub('^ {8}', '', warn_msg, flags=re.MULTILINE))\n return False\n finally:\n sh.rm('-f', src)\n sh.rm('-f', dst)\n\n\ndef _raise_invalid_image(image_name):\n raise CommandError(\n f'Invalid image name: {image_name}. Run ``s2e image_build`` to list available images'\n )\n\n\ndef _get_base_image_and_app(image_name):\n x = image_name.split('/')\n if len(x) == 1:\n return x[0], None\n if len(x) == 2:\n return x\n raise CommandError(f'Invalid image name {image_name}')\n\n\ndef _has_app_image(image_names):\n for name in image_names:\n if '/' in name:\n return True\n return False\n\n\ndef _check_product_keys(image_descriptors, image_names):\n missing_keys = []\n for image_name in image_names:\n image = image_descriptors[image_name]\n if 'product_key' in image:\n if not image['product_key']:\n missing_keys.append(image_name)\n ios = image_descriptors[image_name].get('os', {})\n if 'product_key' in ios:\n if not ios['product_key']:\n missing_keys.append(image_name)\n if missing_keys:\n logger.error('The following images require a product key:')\n for image in missing_keys:\n logger.error(' * %s', image)\n raise CommandError('Please update images.json and/or apps.json.')\n\n\ndef _check_iso(templates, app_templates, iso_dir, image_names):\n for image_name in image_names:\n base_image, app_name = _get_base_image_and_app(image_name)\n descriptors = [templates[base_image]]\n if app_name:\n descriptors.append(app_templates[app_name])\n for desc in descriptors:\n iso = desc.get('iso', {})\n if iso.get('url', ''):\n continue\n name = iso.get('name', '')\n if not name:\n continue\n if not iso_dir:\n raise CommandError(\n f'Please use the --iso-dir option to specify the path to a folder that contains {name}'\n )\n path = os.path.join(iso_dir, name)\n if not os.path.exists(path):\n raise CommandError(\n f'The image {image_name} requires {path}, which could not be found'\n )\n\n\ndef _is_port_available(port):\n s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n try:\n s.bind(('127.0.0.1', port))\n return True\n except socket.error:\n return False\n finally:\n s.close()\n\n\ndef _start_ftp_server(image_path, port):\n authorizer = DummyAuthorizer()\n authorizer.add_anonymous(image_path, perm='elradfmwMT')\n handler = FTPHandler\n 
handler.authorizer = authorizer\n handler.masquerade_address = '10.0.2.2'\n handler.timeout = None\n server = FTPServer(('127.0.0.1', port), handler)\n thread = Thread(target=_run_ftp_server, args=[server])\n thread.daemon = True\n thread.start()\n time.sleep(1)\n return server\n\n\ndef _run_ftp_server(server):\n try:\n server.serve_forever()\n finally:\n logger.info('FTP server terminated')\n server.close_all()\n\n\ndef _get_archive_rules(image_path, rule_names):\n if _has_app_image(rule_names):\n raise CommandError(\n 'Building archives of app images is not supported yet')\n archive_rules = []\n for r in rule_names:\n archive_rules.append(os.path.join(image_path, f'{r}.tar.xz'))\n logger.info('The following archives will be built:')\n for a in archive_rules:\n logger.info(' * %s', a)\n return archive_rules\n\n\n<mask token>\n\n\nclass Command(EnvCommand):\n \"\"\"\n Builds an image.\n \"\"\"\n help = 'Build an image.'\n\n def __init__(self):\n super().__init__()\n self._headless = True\n self._use_kvm = True\n self._num_cores = 1\n self._has_cow = False\n\n def add_arguments(self, parser):\n super().add_arguments(parser)\n parser.add_argument('name', help=\n 'The name of the image to build. If empty, shows available images',\n nargs='*')\n parser.add_argument('-g', '--gui', action='store_true', help=\n 'Display QEMU GUI during image build')\n parser.add_argument('-c', '--cores', required=False, default=2,\n type=int, help=\n 'The number of cores used when building the VM image. Defaults to 2'\n )\n parser.add_argument('-x', '--clean', action='store_true', help=\n 'Deletes all images and rebuild them from scratch')\n parser.add_argument('-a', '--archive', action='store_true', help=\n 'Creates an archive for the specified image')\n parser.add_argument('-p', '--ftp-port', required=False, default=\n 15468, type=int, help=\n 'Port for the internal FTP server to receive files from guest VMs during build'\n )\n parser.add_argument('-d', '--download', action='store_true', help=\n 'Download image from the repository instead of building it')\n parser.add_argument('-i', '--iso-dir', help=\n 'Path to folder that stores ISO files of Windows images')\n parser.add_argument('-n', '--no-kvm', action='store_true', help=\n 'Disable KVM during image build')\n\n def handle(self, *args, **options):\n if options['gui']:\n self._headless = False\n if options['no_kvm']:\n self._use_kvm = False\n self._num_cores = options['cores']\n if not os.path.exists(self.image_path()):\n os.makedirs(self.image_path())\n img_build_dir = self.source_path(CONSTANTS['repos']['images']['build'])\n if options['clean']:\n self._invoke_make(img_build_dir, ['clean'])\n return\n image_names = options['name']\n templates = get_image_templates(img_build_dir)\n app_templates = get_app_templates(img_build_dir)\n images, image_groups, image_descriptors = get_all_images(templates,\n app_templates)\n if not image_names:\n self._print_image_list(images, image_groups, image_descriptors)\n print(\n \"\"\"\nRun ``s2e image_build <name>`` to build an image. 
Note that you must run ``s2e build`` **before** building an image\"\"\"\n )\n return\n image_names = translate_image_name(images, image_groups, image_names)\n logger.info('The following images will be built:')\n for image in image_names:\n logger.info(' * %s', image)\n if options['download']:\n _download_images(self.image_path(), image_names, templates)\n return\n rule_names = image_names\n if options['archive']:\n rule_names = _get_archive_rules(self.image_path(), image_names)\n iso_dir = os.path.abspath(options['iso_dir']) if options['iso_dir'\n ] else None\n _check_product_keys(image_descriptors, image_names)\n _check_iso(templates, app_templates, iso_dir, image_names)\n if self._use_kvm:\n _check_kvm()\n _check_groups_kvm()\n _check_groups_docker()\n _check_vmlinux()\n self._has_cow = _check_cow(self.image_path())\n if self._use_kvm:\n _check_virtualbox()\n _check_vmware()\n if not _is_port_available(options['ftp_port']):\n raise CommandError(\n f\"localhost:{options['ftp_port']} is not available. Check that the port is free or specify a port with --ftp-port\"\n )\n self._clone_kernel()\n server = _start_ftp_server(self.image_path(), options['ftp_port'])\n self._invoke_make(img_build_dir, rule_names, options['ftp_port'],\n iso_dir)\n logger.success(\"Built image(s) '%s'\", ' '.join(image_names))\n server.close_all()\n\n def _invoke_make(self, img_build_dir, rule_names, ftp_port=0, iso_dir=''):\n env = os.environ.copy()\n env['S2E_INSTALL_ROOT'] = self.install_path()\n env['S2E_LINUX_KERNELS_ROOT'] = self.source_path(CONSTANTS['repos']\n ['images']['linux'])\n env['OUTDIR'] = self.image_path()\n env['QEMU_FTP_PORT'] = str(ftp_port)\n env['ISODIR'] = iso_dir if iso_dir else ''\n env['DEBUG_INTERMEDIATE_RULES'] = '1' if self._has_cow else '0'\n logger.debug('Invoking makefile with:')\n logger.debug('export S2E_INSTALL_ROOT=%s', env['S2E_INSTALL_ROOT'])\n logger.debug('export S2E_LINUX_KERNELS_ROOT=%s', env[\n 'S2E_LINUX_KERNELS_ROOT'])\n logger.debug('export OUTDIR=%s', env['OUTDIR'])\n logger.debug('export ISODIR=%s', env.get('ISODIR', ''))\n logger.debug('export DEBUG_INTERMEDIATE_RULES=%s', env.get(\n 'DEBUG_INTERMEDIATE_RULES', ''))\n if self._headless:\n logger.warning(\n 'Image creation will run in headless mode. Use --gui to see graphic output for debugging'\n )\n else:\n env['GRAPHICS'] = ''\n if not self._use_kvm:\n env['QEMU_KVM'] = ''\n logger.warning('Image build without KVM. This will be slow')\n try:\n make = sh.Command('make').bake(file=os.path.join(img_build_dir,\n 'Makefile'), directory=self.image_path(), _env=env, _fg=True)\n make_image = make.bake(j=self._num_cores, r=True,\n warn_undefined_variables=True)\n make_image(sorted(rule_names))\n except ErrorReturnCode as e:\n raise CommandError(e) from e\n\n def _clone_kernel(self):\n kernels_root = self.source_path(CONSTANTS['repos']['images']['linux'])\n if os.path.exists(kernels_root):\n logger.info('Kernel repository already exists in %s', kernels_root)\n return\n logger.info('Cloning kernels repository to %s', kernels_root)\n kernels_repo = CONSTANTS['repos']['images']['linux']\n repos.git_clone_to_source(self.env_path(), kernels_repo)\n\n def _print_image_list(self, images, image_groups, image_descriptors):\n img_build_dir = self.source_path(CONSTANTS['repos']['images']['build'])\n templates = get_image_templates(img_build_dir)\n if not templates:\n images_json_path = os.path.join(img_build_dir, 'images.json')\n raise CommandError(\n f'No images available to build. 
Make sure that {images_json_path} exists and is valid'\n )\n\n def get_max_len(lst):\n ret = 0\n for item in lst:\n if len(item) > ret:\n ret = len(item)\n return ret\n print('Available image groups:')\n max_group_len = get_max_len(image_groups)\n for group in image_groups:\n print(f' * {group:{max_group_len}} - Build {group} images')\n print('\\nAvailable images:')\n max_image_len = get_max_len(images)\n for image in sorted(images):\n print(\n f\" * {image:{max_image_len}} - {image_descriptors[image]['name']}\"\n )\n\n def _print_apps_list(self):\n img_build_dir = self.source_path(CONSTANTS['repos']['images']['build'])\n app_templates = get_app_templates(img_build_dir)\n if not app_templates:\n apps_json_path = os.path.join(img_build_dir, 'apps.json')\n raise CommandError(\n f'No apps available to build. Make sure that {apps_json_path} exists and is valid'\n )\n print('Available applications:')\n for app_template, desc in sorted(app_templates.items()):\n for base_image in desc['base_images']:\n print(f\" * {base_image}/{app_template} - {desc['name']}\")\n",
"step-5": "\"\"\"\nCopyright (c) 2017 Cyberhaven\nCopyright (c) 2017 Dependable Systems Laboratory, EPFL\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in all\ncopies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\nSOFTWARE.\n\"\"\"\n\n\nimport glob\nimport grp\nimport logging\nimport os\nimport pwd\nimport re\nimport socket\nimport time\n\nfrom threading import Thread\n\nimport psutil\nfrom psutil import NoSuchProcess\n\nfrom pyftpdlib.authorizers import DummyAuthorizer\nfrom pyftpdlib.handlers import FTPHandler\nfrom pyftpdlib.servers import FTPServer\n\nimport sh\nfrom sh import ErrorReturnCode\n\nfrom s2e_env import CONSTANTS\nfrom s2e_env.command import EnvCommand, CommandError\nfrom s2e_env.utils import repos\nfrom s2e_env.utils.images import ImageDownloader, get_image_templates, get_app_templates, get_all_images, \\\n translate_image_name\n\n\nlogger = logging.getLogger('image_build')\n\n\ndef _get_user_groups(user_name):\n \"\"\"\n Get a list of groups for the user ``user_name``.\n \"\"\"\n groups = [g.gr_name for g in grp.getgrall() if user_name in g.gr_mem]\n gid = pwd.getpwnam(user_name).pw_gid\n groups.append(grp.getgrgid(gid).gr_name)\n\n return groups\n\n\ndef _get_user_name():\n \"\"\"\n Get the current user.\n \"\"\"\n return pwd.getpwuid(os.getuid())[0]\n\n\ndef _user_belongs_to(group_name):\n \"\"\"\n Check that the current user belongs to the ``group_name`` group.\n \"\"\"\n user_name = _get_user_name()\n groups = _get_user_groups(user_name)\n return group_name in groups\n\n\ndef _raise_group_error(group_name):\n raise CommandError(f'You must belong to the {group_name} group in order to build '\n 'images. Please run the following command, then logout '\n 'and login:\\n\\n'\n f'\\tsudo usermod -a -G {group_name} $(whoami)')\n\n\ndef _check_groups_docker():\n \"\"\"\n Check that the current user belongs to the required groups to both run S2E and build S2E images.\n \"\"\"\n if not _user_belongs_to('docker'):\n _raise_group_error('docker')\n\n\ndef _check_groups_kvm():\n \"\"\"Being member of KVM is required only when using KVM to build images\"\"\"\n if not _user_belongs_to('libvirtd') and not _user_belongs_to('kvm'):\n _raise_group_error('kvm')\n\n\ndef _check_virtualbox():\n \"\"\"\n Check if VirtualBox is running. 
VirtualBox conflicts with S2E's requirement for KVM, so VirtualBox must\n *not* be running together with S2E.\n \"\"\"\n # Adapted from https://github.com/giampaolo/psutil/issues/132#issuecomment-44017679\n # to avoid race conditions\n for proc in psutil.process_iter():\n try:\n if proc.name() == 'VBoxHeadless':\n raise CommandError('S2E uses KVM to build images. VirtualBox '\n 'is currently running, which is not '\n 'compatible with KVM. Please close all '\n 'VirtualBox VMs and try again.')\n except NoSuchProcess:\n pass\n\n\ndef _check_vmware():\n \"\"\"\n Check if VMWare is running. VMware conflicts with S2E's requirement for KVM, so VMWare must\n *not* be running together with S2E.\n \"\"\"\n for proc in psutil.process_iter():\n try:\n if proc.name() == 'vmware-vmx':\n raise CommandError('S2E uses KVM to build images. VMware '\n 'is currently running, which is not '\n 'compatible with KVM. Please close all '\n 'VMware VMs and try again.')\n except NoSuchProcess:\n pass\n\n\ndef _check_kvm():\n \"\"\"\n Check that the KVM interface exists. This is required by libs2e to communicate with QEMU.\n \"\"\"\n if not os.path.exists(os.path.join(os.sep, 'dev', 'kvm')):\n raise CommandError('KVM interface not found - check that /dev/kvm '\n 'exists. Alternatively, you can disable KVM (-n '\n 'option) or download pre-built images (-d option)')\n\n\ndef _check_vmlinux():\n \"\"\"\n Check that /boot/vmlinux* files are readable. This is important for guestfish.\n \"\"\"\n try:\n for f in glob.glob(os.path.join(os.sep, 'boot', 'vmlinu*')):\n with open(f, 'rb'):\n pass\n except IOError:\n raise CommandError('Make sure that the kernels in /boot are readable. '\n 'This is required for guestfish. Please run the '\n 'following command:\\n\\n'\n 'sudo chmod ugo+r /boot/vmlinu*') from None\n\n\n# pylint: disable=no-member\ndef _check_cow(image_dir):\n \"\"\"\n Check that the file system that stores guest images supports copy-on-write.\n \"\"\"\n try:\n src = f'{image_dir}/.cowcheck'\n dst = f'{image_dir}/.cowcheck1'\n sh.touch(src)\n sh.cp('--reflink=always', src, dst)\n return True\n except Exception:\n warn_msg = f\"\"\"\n Copy-on-write check failed.\n The file system where images are stored ({image_dir}) does not support copy-on-write.\n It is recommended to use an XFS or BTRFS file system with copy-on-write enabled as a storage\n location for S2E images, as this can save up to 60% of disk space. The building process checkpoints\n intermediate build steps with cp --reflink=auto to make use of copy-on-write if it is available.\n\n How to upgrade:\n 1. Create an XFS or BTRFS partition large enough to store the images that you need (~300 GB for all images).\n Make sure you use reflink=1 to enable copy-on-write when running mkfs.xfs.\n 2. Create a directory for guest images on that partition (e.g., /mnt/disk1/images)\n 3. Delete the \"images\" folder in your S2E environment\n 4. Create in your S2E environment a symbolic link called \"images\" to the directory you created in step 2\n \"\"\"\n logger.warning(re.sub(r'^ {8}', '', warn_msg, flags=re.MULTILINE))\n return False\n finally:\n sh.rm('-f', src)\n sh.rm('-f', dst)\n\n\ndef _raise_invalid_image(image_name):\n raise CommandError(f'Invalid image name: {image_name}. 
Run ``s2e image_build`` '\n 'to list available images')\n\n\ndef _get_base_image_and_app(image_name):\n x = image_name.split('/')\n if len(x) == 1:\n return x[0], None\n if len(x) == 2:\n return x\n raise CommandError(f'Invalid image name {image_name}')\n\n\ndef _has_app_image(image_names):\n for name in image_names:\n if '/' in name:\n return True\n return False\n\n\ndef _check_product_keys(image_descriptors, image_names):\n missing_keys = []\n\n for image_name in image_names:\n image = image_descriptors[image_name]\n\n if 'product_key' in image:\n if not image['product_key']:\n missing_keys.append(image_name)\n\n ios = image_descriptors[image_name].get('os', {})\n if 'product_key' in ios:\n if not ios['product_key']:\n missing_keys.append(image_name)\n\n if missing_keys:\n logger.error('The following images require a product key:')\n for image in missing_keys:\n logger.error(' * %s', image)\n\n raise CommandError('Please update images.json and/or apps.json.')\n\n\ndef _check_iso(templates, app_templates, iso_dir, image_names):\n for image_name in image_names:\n base_image, app_name = _get_base_image_and_app(image_name)\n\n descriptors = [templates[base_image]]\n if app_name:\n descriptors.append(app_templates[app_name])\n\n for desc in descriptors:\n iso = desc.get('iso', {})\n if iso.get('url', ''):\n continue\n\n name = iso.get('name', '')\n if not name:\n continue\n\n if not iso_dir:\n raise CommandError(\n 'Please use the --iso-dir option to specify the path '\n f'to a folder that contains {name}'\n )\n\n path = os.path.join(iso_dir, name)\n if not os.path.exists(path):\n raise CommandError(f'The image {image_name} requires {path}, which could not be found')\n\n\ndef _is_port_available(port):\n s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n\n try:\n s.bind((\"127.0.0.1\", port))\n return True\n except socket.error:\n return False\n finally:\n s.close()\n\n\ndef _start_ftp_server(image_path, port):\n authorizer = DummyAuthorizer()\n authorizer.add_anonymous(image_path, perm='elradfmwMT')\n handler = FTPHandler\n handler.authorizer = authorizer\n handler.masquerade_address = '10.0.2.2'\n # QEMU slirp won't let the guest reconnect if timeout happens, so we disable it\n handler.timeout = None\n\n server = FTPServer((\"127.0.0.1\", port), handler)\n\n thread = Thread(target=_run_ftp_server, args=[server])\n thread.daemon = True\n thread.start()\n time.sleep(1)\n\n return server\n\n\ndef _run_ftp_server(server):\n try:\n server.serve_forever()\n finally:\n logger.info('FTP server terminated')\n server.close_all()\n\n\ndef _get_archive_rules(image_path, rule_names):\n if _has_app_image(rule_names):\n raise CommandError('Building archives of app images is not supported yet')\n\n archive_rules = []\n for r in rule_names:\n archive_rules.append(os.path.join(image_path, f'{r}.tar.xz'))\n\n logger.info('The following archives will be built:')\n for a in archive_rules:\n logger.info(' * %s', a)\n\n return archive_rules\n\n\ndef _download_images(image_path, image_names, templates):\n if _has_app_image(image_names):\n raise CommandError('Downloading of app images is not supported yet')\n\n image_downloader = ImageDownloader(templates)\n image_downloader.download_images(image_names, image_path)\n\n logger.info('Successfully downloaded images: %s', ', '.join(image_names))\n\n\nclass Command(EnvCommand):\n \"\"\"\n Builds an image.\n \"\"\"\n\n help = 'Build an image.'\n\n def __init__(self):\n super().__init__()\n\n self._headless = True\n self._use_kvm = True\n self._num_cores = 1\n 
self._has_cow = False\n\n def add_arguments(self, parser):\n super().add_arguments(parser)\n\n parser.add_argument('name',\n help='The name of the image to build. If empty,'\n ' shows available images', nargs='*')\n parser.add_argument('-g', '--gui', action='store_true',\n help='Display QEMU GUI during image build')\n parser.add_argument('-c', '--cores', required=False, default=2,\n type=int,\n help='The number of cores used when building the '\n 'VM image. Defaults to 2')\n parser.add_argument('-x', '--clean', action='store_true',\n help='Deletes all images and rebuild them from '\n 'scratch')\n parser.add_argument('-a', '--archive', action='store_true',\n help='Creates an archive for the specified image')\n parser.add_argument('-p', '--ftp-port', required=False, default=15468, type=int,\n help='Port for the internal FTP server to receive files from guest VMs during build')\n parser.add_argument('-d', '--download', action='store_true',\n help='Download image from the repository instead '\n 'of building it')\n parser.add_argument('-i', '--iso-dir',\n help='Path to folder that stores ISO files of Windows images')\n parser.add_argument('-n', '--no-kvm', action='store_true',\n help='Disable KVM during image build')\n\n def handle(self, *args, **options):\n # If DISPLAY is missing, don't use headless mode\n if options['gui']:\n self._headless = False\n\n # If KVM has been explicitly disabled, don't use it during the build\n if options['no_kvm']:\n self._use_kvm = False\n\n self._num_cores = options['cores']\n\n # The path could have been deleted by a previous clean\n if not os.path.exists(self.image_path()):\n os.makedirs(self.image_path())\n\n img_build_dir = self.source_path(CONSTANTS['repos']['images']['build'])\n\n if options['clean']:\n self._invoke_make(img_build_dir, ['clean'])\n return\n\n image_names = options['name']\n templates = get_image_templates(img_build_dir)\n app_templates = get_app_templates(img_build_dir)\n images, image_groups, image_descriptors = get_all_images(templates, app_templates)\n\n if not image_names:\n self._print_image_list(images, image_groups, image_descriptors)\n print('\\nRun ``s2e image_build <name>`` to build an image. '\n 'Note that you must run ``s2e build`` **before** building '\n 'an image')\n return\n\n image_names = translate_image_name(images, image_groups, image_names)\n logger.info('The following images will be built:')\n for image in image_names:\n logger.info(' * %s', image)\n\n if options['download']:\n _download_images(self.image_path(), image_names, templates)\n return\n\n rule_names = image_names\n\n if options['archive']:\n rule_names = _get_archive_rules(self.image_path(), image_names)\n\n iso_dir = os.path.abspath(options['iso_dir']) if options['iso_dir'] else None\n\n # Check for optional product keys and iso directories.\n # These may or may not be required, depending on the set of images.\n _check_product_keys(image_descriptors, image_names)\n _check_iso(templates, app_templates, iso_dir, image_names)\n\n if self._use_kvm:\n _check_kvm()\n _check_groups_kvm()\n\n _check_groups_docker()\n _check_vmlinux()\n\n self._has_cow = _check_cow(self.image_path())\n\n if self._use_kvm:\n _check_virtualbox()\n _check_vmware()\n\n if not _is_port_available(options['ftp_port']):\n raise CommandError(f'localhost:{options[\"ftp_port\"]} is not available. 
Check that the port is free or '\n 'specify a port with --ftp-port')\n\n # Clone kernel if needed.\n # This is necessary if the s2e env has been initialized with -b flag.\n self._clone_kernel()\n\n server = _start_ftp_server(self.image_path(), options['ftp_port'])\n\n self._invoke_make(img_build_dir, rule_names, options['ftp_port'], iso_dir)\n\n logger.success('Built image(s) \\'%s\\'', ' '.join(image_names))\n\n server.close_all()\n\n def _invoke_make(self, img_build_dir, rule_names, ftp_port=0, iso_dir=''):\n env = os.environ.copy()\n env['S2E_INSTALL_ROOT'] = self.install_path()\n env['S2E_LINUX_KERNELS_ROOT'] = \\\n self.source_path(CONSTANTS['repos']['images']['linux'])\n env['OUTDIR'] = self.image_path()\n env['QEMU_FTP_PORT'] = str(ftp_port)\n env['ISODIR'] = iso_dir if iso_dir else ''\n env['DEBUG_INTERMEDIATE_RULES'] = '1' if self._has_cow else '0'\n\n logger.debug('Invoking makefile with:')\n logger.debug('export S2E_INSTALL_ROOT=%s', env['S2E_INSTALL_ROOT'])\n logger.debug('export S2E_LINUX_KERNELS_ROOT=%s', env['S2E_LINUX_KERNELS_ROOT'])\n logger.debug('export OUTDIR=%s', env['OUTDIR'])\n logger.debug('export ISODIR=%s', env.get('ISODIR', ''))\n logger.debug('export DEBUG_INTERMEDIATE_RULES=%s', env.get('DEBUG_INTERMEDIATE_RULES', ''))\n\n if self._headless:\n logger.warning('Image creation will run in headless mode. '\n 'Use --gui to see graphic output for debugging')\n else:\n env['GRAPHICS'] = ''\n\n if not self._use_kvm:\n env['QEMU_KVM'] = ''\n logger.warning('Image build without KVM. This will be slow')\n\n try:\n make = sh.Command('make').bake(file=os.path.join(img_build_dir,\n 'Makefile'),\n directory=self.image_path(),\n _env=env, _fg=True)\n\n make_image = make.bake(j=self._num_cores, r=True, warn_undefined_variables=True)\n make_image(sorted(rule_names))\n except ErrorReturnCode as e:\n raise CommandError(e) from e\n\n def _clone_kernel(self):\n kernels_root = self.source_path(CONSTANTS['repos']['images']['linux'])\n if os.path.exists(kernels_root):\n logger.info('Kernel repository already exists in %s', kernels_root)\n return\n\n logger.info('Cloning kernels repository to %s', kernels_root)\n\n kernels_repo = CONSTANTS['repos']['images']['linux']\n repos.git_clone_to_source(self.env_path(), kernels_repo)\n\n def _print_image_list(self, images, image_groups, image_descriptors):\n img_build_dir = self.source_path(CONSTANTS['repos']['images']['build'])\n templates = get_image_templates(img_build_dir)\n if not templates:\n images_json_path = os.path.join(img_build_dir, 'images.json')\n raise CommandError('No images available to build. Make sure that '\n f'{images_json_path} exists and is valid')\n\n def get_max_len(lst):\n ret = 0\n for item in lst:\n if len(item) > ret:\n ret = len(item)\n return ret\n\n print('Available image groups:')\n max_group_len = get_max_len(image_groups)\n for group in image_groups:\n print(f' * {group:{max_group_len}} - Build {group} images')\n\n print('\\nAvailable images:')\n max_image_len = get_max_len(images)\n for image in sorted(images):\n print(f' * {image:{max_image_len}} - {image_descriptors[image][\"name\"]}')\n\n def _print_apps_list(self):\n img_build_dir = self.source_path(CONSTANTS['repos']['images']['build'])\n app_templates = get_app_templates(img_build_dir)\n if not app_templates:\n apps_json_path = os.path.join(img_build_dir, 'apps.json')\n raise CommandError('No apps available to build. 
Make sure that '\n f'{apps_json_path} exists and is valid')\n\n print('Available applications:')\n for app_template, desc in sorted(app_templates.items()):\n for base_image in desc['base_images']:\n print(f' * {base_image}/{app_template} - {desc[\"name\"]}')\n",
"step-ids": [
17,
21,
24,
30,
34
]
}
|
[
17,
21,
24,
30,
34
] |
#!usr/bin/env python
# -*- coding:utf-8 -*-
"""
@author: Jack
@datetime: 2018/8/31 13:32
@E-mail: [email protected]
"""
def isValid(s):
stack = []
for ss in s:
if ss in '([{':
stack.append(ss)
if ss in ')]}':
if len(stack) <= 0:
return False
else:
compare = stack.pop()
if (compare == '(' and ss != ')') or (compare == '[' and ss != ']') or (compare == '{' and ss != '}'):
return False
if len(stack) == 0:
return True
else:
return False
if __name__ == '__main__':
print isValid("{[]}")
|
normal
|
{
"blob_id": "607f0aac0d6d2c05737f59803befcff37d559398",
"index": 5117,
"step-1": "#!usr/bin/env python\n# -*- coding:utf-8 -*-\n\"\"\"\n@author: Jack\n@datetime: 2018/8/31 13:32\n@E-mail: [email protected]\n\"\"\"\n\n\ndef isValid(s):\n stack = []\n for ss in s:\n if ss in '([{':\n stack.append(ss)\n if ss in ')]}':\n if len(stack) <= 0:\n return False\n else:\n compare = stack.pop()\n if (compare == '(' and ss != ')') or (compare == '[' and ss != ']') or (compare == '{' and ss != '}'):\n return False\n if len(stack) == 0:\n return True\n else:\n return False\n\n\nif __name__ == '__main__':\n print isValid(\"{[]}\")\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
import json
import os
import django
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "dangjianyun.settings")  # "dangjianyun" is the Django project (settings module) name
django.setup()
from dangjiansite.djfuncs import *
import datetime
import requests
import time
import urllib3
import base64
import csv
import random
from bs4 import BeautifulSoup
from dangjiansite.models import *
class Runner():
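    """Automation client for the dangjianwang mobile API: logs in with one account and performs
    the daily credit-earning actions implemented below (canned comments/thumbs, help-board answers,
    viewpoint posts, timed study sessions and online quizzes)."""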
# def __init__(self, appid='TJZHDJ01', username='024549', password='Aa1234'):
def __init__(self, appid='TJZHDJ01', username='', password=''):
        urllib3.disable_warnings()  # suppress SSL certificate warnings (verify=False is used on every request)
self.currentTime = datetime.datetime.now().strftime("%H:%M:%S")
self.username = username
self.password = password
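        # Note: the '.format(username)' calls below are no-ops (the strings contain no '{}'
        # placeholder), so every account currently shares the same ./lib, ./log and ./err folders.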
self.thumbedFilePath = './lib/'.format(username)
self.logFilePath = './log/'.format(username)
self.errFilePath = './err/'.format(username)
# self.thumbedFileList = self.getThumbFromFile()
self.thumbedFileList = []
self.debug = True
self.session = requests.session()
        self.appid = appid  # presumably the id of the app install on this device; test on another device to confirm
self.headers ={
'User-Agent': 'Dalvik/2.1.0 (Linux; U; Android 6.0; HUAWEI MLA-AL10 Build/HUAWEIMLA-AL10)',
'header_version': '80',
'system': 'android',
'Connection': 'Keep-Alive',
'Host': 'mapi.dangjianwang.com',
}
self.token = self.getToken()
time.sleep(0.1)
self.thumbPageList = self.getPages(urls=[
'https://mapi.dangjianwang.com/v3_1/Learn/List',
'https://mapi.dangjianwang.com/v3_1/Activities/List',
'https://mapi.dangjianwang.com/v3_1/Hotspots/Hotlist'
])
self.thumbPages = [i[1] for i in self.thumbPageList]
time.sleep(0.1)
self.helpPageList = self.getPages(urls=['https://mapi.dangjianwang.com/v3_1/Help/List', ])
self.helpPages = [i[1] for i in self.helpPageList]
self.helpResults = {}
time.sleep(0.1)
self.studyPageList = self.getPagesII(urls=['https://mapi.dangjianwang.com/v3_1/Study/MaterialCollList'])
self.studyPages = [i[1] for i in self.studyPageList]
time.sleep(0.1)
self.studyRsults = {}
self.thumbedPages = []
self.thumbResults = {}
self.helpedPages = []
        self.multiThumbed = []  # TODO: consider writing these to a file at the end
self.viewsResults = []
self.examC19Info = []
self.examlist = []
self.qaList = []
def getCurrentTime(self):
return datetime.datetime.now().strftime("%H:%M:%S")
def writeErr2File(self, err):
path = self.logFilePath
fullPath = '{}{}err.txt'.format(path, self.username)
if not os.path.exists(path):
os.mkdir(path)
with open(fullPath, 'a') as f:
f.write('{}:{}\n'.format(self.currentTime, err))
print('err已经写入{}'.format(fullPath))
def writeLog2File(self, log):
path = self.logFilePath
fullPath = '{}{}logs.txt'.format(path, self.username)
if not os.path.exists(path):
os.mkdir(path)
with open(fullPath, 'a') as f:
f.write('{}:{}\n'.format(self.currentTime, log))
print('log已经写入{}'.format(fullPath))
def writeThumb2File(self, id):
path = self.thumbedFilePath
fullPath = '{}{}thumbs.txt'.format(path, self.username)
if not os.path.exists(path):
os.mkdir(path)
with open(fullPath, 'a') as f:
f.write(',{}'.format(id))
print('点赞记录已经写入{}'.format(fullPath))
def getThumbFromFile(self):
'''
        :return: list of ids read from the thumbs file
'''
path = self.thumbedFilePath
inFileList = []
fullPath = '{}{}thumbs.txt'.format(path, self.username)
if not os.path.exists(fullPath):
            return []  # no thumbs file yet; return an empty id list (was returning the path string, contradicting the docstring)
with open(fullPath, 'r') as f:
inFileList.extend(list(set(f.readlines()[0].split(','))))
# print('getThumbFormFile', inFileList)
with open(fullPath, 'w') as f1:
f1.write(','.join(sorted(inFileList)))
return inFileList
def getExcuteTimes(self):
'''
        Return a dict of how many more times each automated action (thumb, help, view, exam, study) should run today.
:return:
'''
excuteTimes = {}
credInfo = self.getCredItinfo()
print(credInfo)
currentScore = credInfo[0]
        # remaining comment/thumb actions
thumbScore = credInfo[1]['信息评论'].split('/')[0]
thumbExcuteTimes = 10 - int(thumbScore)
excuteTimes.update({'thumb': thumbExcuteTimes})
        # remaining help-board answers
helpScore = credInfo[1]['互助广场回答'].split('/')[0]
helpExctuteTimes = 2 - int(helpScore)
excuteTimes.update({'help': helpExctuteTimes})
        # remaining viewpoint posts
viewScore = credInfo[1]['党员视角发布'].split('/')[0]
viewExcuteTimes = int((4 - int(viewScore)) / 2)
excuteTimes.update({'view': viewExcuteTimes})
        # remaining online quiz rounds
examScore = credInfo[1]['在线知识竞答'].split('/')[0]
examExcuteTimes = int((4 - int(examScore)) / 2)
excuteTimes.update({'exam': examExcuteTimes})
        # remaining study sessions
flag = int(credInfo[1]['在线阅读学习资料'].split('/')[1]) - int(credInfo[1]['在线阅读学习资料'].split('/')[0])
flag1 = int(credInfo[1]['学习资料写体会'].split('/')[1]) - int(credInfo[1]['学习资料写体会'].split('/')[0])
examExcuteTimes = 1 if flag != 0 or flag1 != 0 else 0
excuteTimes.update({'study': examExcuteTimes})
return excuteTimes
def getToken(self):
'''
        Obtain an API session token.
        Every subsequent request must include it.
:return:
'''
data = {
'appid': self.appid,
'username': self.username,
'password': self.password,
}
longinurl = 'https://mapi.dangjianwang.com/v3_1/login'
r = self.session.post(url=longinurl, data=data, verify=False)
rjson = r.json()
# print(type(rjson))
# print(rjson)
if rjson['code'] == '200':
return rjson['token']
else:
print('token 获得失败')
return None
def getRJson(self, url):
data={
'token': self.token,
'appid': self.appid
}
return self.session.post(url=url, data=data, verify=False).json()
def getUserInfo(self):
'''
        Fetch the full user-info payload (currently unused).
:return:
'''
infoUrl = 'https://mapi.dangjianwang.com/v3_1/User/UserInfo'
return self.getRJson(url=infoUrl)
def getCredItinfoToday(self):
'''
        Fetch the user's current score and level summary.
:return:
'''
creditInfourl = 'https://mapi.dangjianwang.com/v3_1/User/CreditInfo'
info = self.getRJson(url=creditInfourl)
fullScore = info['data']['full']
gainScore = info['data']['gain']
currentLevel = info['data']['level']
username = info['data']['name']
ret = {
'fullScore': fullScore,
'gainScore': gainScore,
'currentLevel': currentLevel,
'username': username,
}
return ret
def getCredItinfo(self):
'''
        Fetch the user's credit status for today.
        Used to decide whether the remaining workflow still needs to run.
        Sample data:
('35', [('连续登录', '3/3'), ('手机端登录', '2/2'), ('信息评论', '10/10'), ('党员视角发布', '4/4'), ('互助广场回答', '2/2'), ('学习资料写体会', '5/5'), ('在线阅读学习资料', '5/5'), ('在线知识竞答', '4/4')])
:return:(haved_credit, credit_detail)
'''
creditInfourl = 'https://mapi.dangjianwang.com/v3_1/User/CreditInfo'
haved_credit = 0
credit_detail = {}
info = self.getRJson(url=creditInfourl)
for k, v in info.items():
if k == 'data':
for k2, v2 in v.items():
if k2 == 'haved_credit':
haved_credit = v2
if k2 == 'credit_detail':
for i in v2:
credit_detail.update({i['title']: i['score']})
return (haved_credit, credit_detail)
def getPages(self, urls):
pages = []
for url in urls:
data = self.getRJson(url=url)
for k, v in data.items():
if k == 'data':
for i in v:
# pages.append({'pageId': i['id'], 'pageTitle': i['title']})
# pages.append(i['id'])
pages.append((i['title'], i['id']))
return pages
def getPagesII(self, urls):
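        # Variant of getPages for the study-material collection list: posts a fixed type_id (791)
        # and only the first page, and collects (name, id) tuples instead of (title, id).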
def getRJson(url):
data = {
'token': self.token,
'appid': self.appid,
'type_id': '791',
'page_index': '1',
}
return self.session.post(url=url, data=data, verify=False).json()
pages = []
for url in urls:
data = getRJson(url=url)
for k, v in data.items():
# print(k, v)
if k == 'data':
for i in v:
# pages.append({'pageId': i['id'], 'pageTitle': i['title']})
# pages.append(i['id'])
pages.append((i['name'], i['id']))
return pages
def doThumb(self, id):
'''
        Post a short canned comment ("thumb") on the page with the given id.
        Every action is recorded to the thumbs file.
:return:
'''
contents = [
'关注',
'关注!',
'关注!!']
data = {
'id': id,
'comment': random.choice(contents),
'token': self.token,
'appid': self.appid,
}
commitUrl = 'https://mapi.dangjianwang.com/v3_1/Activities/CommentAct'
rjson = self.session.post(url=commitUrl,
data=data,
verify=False).json()
print(rjson)
if rjson['code'] == '1003':
self.token = self.getToken()
elif rjson['code'] == '200':
result = rjson['msg']
if result == '操作成功':
self.thumbedPages.append(id)
# print(self.thumbPageList)
# print(len(self.thumbPageList), len(list(set(self.thumbPageList))))
for i in list(set(self.thumbPageList)):
if id == i[1]:
temp = {'title': i[0]}
self.thumbResults.update(temp)
log = '信息点赞:\n主题: {}\n提交:{}'.format(i[0], data['comment'])
detail = '{} 主题:{}\n回复:{}\n'.format(self.getCurrentTime(), i[0], data['comment'])
write2File(self, './results/', 'result.txt', log)
thumbInfo = {'title': i[0], 'reply': data['comment']}
self.thumbPages.remove(id)
self.writeThumb2File(id=id)
return (detail, thumbInfo)
elif rjson['code'] == '500' and rjson['msg'] == '评论过快,请求休息一会':
print('因评论过快,等待一段时间')
time.sleep(20)
else:
print('rjson', rjson)
# self.multiThumbed.append(id)
            self.thumbPages.remove(id)  # drop it even on failure so the same page is not picked again (original used thumbedPages, which would raise ValueError here)
self.writeThumb2File(id=id)
log = '点赞:{}'.format(rjson)
self.writeLog2File(log)
print(log)
time.sleep(10)
def doHelp(self, id, callback=None):
'''
        Answer the help-board question identified by id with a canned reply.
:param id:
:return:
'''
detail = ''
helpInfo = None
log = ''
content = [
'把党的政治建设摆在首位!',
'不忘初心,牢记使命!',
'发展史第一要务,人才是第一资源,创新是第一动力。',
'要把党的领导贯彻到依法治国全过程和各方面',
'毫不动摇坚持中国共产党领导',]
data = {
'id': id,
'content': random.choice(content),
'token': self.token,
'appid': self.appid,
}
print(data)
commitUrl = 'https://mapi.dangjianwang.com/v3_1/Help/PostComment'
rjson = self.session.post(url=commitUrl,
data=data,
verify=False).json()
if rjson['code'] == '200':
result = rjson['msg']
if result == '操作成功':
self.helpedPages.append(id)
self.helpPages.remove(id)
                # record the successful reply to the result file
for i in self.helpPageList:
if id == i[1]:
curTime = self.getCurrentTime()
# print('('*88)
# print(curTime)
self.helpResults.update({'title': id[0]})
log = '互助:\n主题: {}\n提交内容: {}'.format(i[0], rjson['comment'])
write2File(self, './results/', 'result.txt', log)
                        # # write to the database
detail = '{} 主题: {}\n提交内容: {}\n'.format(curTime, i[0], rjson['comment'].strip())
helpInfo = {'title': i[0], 'reply': rjson['comment']}
else:
pass
else:
pass
log = '帮助:{}'.format(rjson)
self.writeLog2File(log)
print(log)
return (detail, log, helpInfo)
def doView(self):
'''
        Publish a "party member viewpoint" post with a canned message.
:return:
'''
content = [
'全面的小康,覆盖的人口要全面,是惠及全体人民的小康。',
'不忘初心,牢记使命,坚持终身学习!']
data = {
'content': random.choice(content),
'token': self.token,
'appid': self.appid,
}
commitUrl = 'https://mapi.dangjianwang.com/v3_1/Viewpoint/Create'
rjson = self.session.post(url=commitUrl,
data=data,
verify=False).json()
if rjson['code'] == '200':
result = rjson['msg']
if result == '操作成功':
self.viewsResults.append(1)
# self.viewsResults.append(id)
else:
pass
log = '党员视角:{}'.format(rjson)
detail = '{} 党员视角:\n发布内容:{}\n'.format(self.getCurrentTime(), rjson['data']['content'])
publicContent = rjson['data']['content']
# print(detail)
# self.writeLog2File(log)
# print('党员视角'*12)
# print(id)
# print(log)
# print('党员视角' * 12)
return (detail, publicContent)
def doStudy(self, mid):
'''
        The first three POSTs replay the requests the app sends when a study page is opened.
        The GET scrapes the material text that is used to fill in the study notes.
        The final POST (readTime) reports the time spent reading when leaving the page; if it succeeds, the study session is counted.
:param mid:
:return:
'''
        interval = 60 * 5 + 5  # seconds of reading time to simulate: 5 minutes plus a small margin
def post1():
data = {
'mid': mid,
'token': self.token,
'appid': self.appid,
}
commitUrl = 'https://mapi.dangjianwang.com/v3_1//Study/CheckCollStatus'
rjson = self.session.post(url=commitUrl,
data=data,
verify=False).json()
# print(rjson)
log = '学习post1:{}'.format(rjson)
self.writeLog2File(log)
print(log)
def post2():
data = {
'token': self.token,
'appid': self.appid,
}
commitUrl = 'https://mapi.dangjianwang.com/v3_1/Login/CheckToken'
rjson = self.session.post(url=commitUrl,
data=data,
verify=False).json()
# print(rjson)
log = '学习post2:{}'.format(rjson)
self.writeLog2File(log)
print(log)
def post3():
data = {
'mid': mid,
'token': self.token,
'appid': self.appid,
}
commitUrl = 'https://mapi.dangjianwang.com/v3_1/Study/GetFeelingsNum'
rjson = self.session.post(url=commitUrl,
data=data,
verify=False).json()
# print(rjson)
log = '学习post3:{}'.format(rjson)
self.writeLog2File(log)
print(log)
def get1():
url = 'https://mapi.dangjianwang.com/v3_1/Study/MaterialDetail?token={}&mid={}'.format(self.token, mid)
rjson = self.session.get(url=url)
text = rjson.content
soup = BeautifulSoup(text, 'html.parser')
retContents = []
            for tag in soup.find_all('p'):
                p = tag.text.strip()
                # keep mid-length paragraphs as-is; truncate anything longer to 200 characters
                retContents.append(p if 100 < len(p) < 200 else p[:200])
            return random.choice(retContents) if retContents else ''
def recordFeeling(content=None):
if not content:
content = '伟大的时代造就伟大的人物。邓小平同志就是从中国人民和中华民族近代以来伟大斗争中产生的伟人,' \
'是我们大家衷心热爱的伟人。我们很多同志都曾经在他的领导和指导下工作过,他的崇高风范对我们来说是那样熟悉、那样亲切。' \
'邓小平同志崇高鲜明又独具魅力的革命风范,将激励我们在实现“两个一百年”奋斗目标、实现中华民族伟大复兴中国梦的征程上奋勇前进。'
data = {
'mid': mid,
'token': self.token,
'appid': self.appid,
'content': content
}
commitUrl = 'https://mapi.dangjianwang.com/v3_1/Study/RecordFeeling'
rjson = self.session.post(url=commitUrl,
data=data,
verify=False).json()
# print(rjson)
log = '学习recordFeeling:{}'.format(rjson)
self.writeLog2File(log)
print('in recordFeeling')
print(log)
if rjson['code'] == '200':
return {'content': content}
elif rjson['code'] == '1120':
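                # code 1120 appears to mean the reflection duplicates an earlier one;
                # append an extra sentence and retry (assumption based on observed behaviour)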
addtion = [
'我们必须坚定不移,任何时候任何情况下都不能动摇',
'人民有信心,国家才有未来,国家才有力量。',
'新时代,属于自强不息、勇于创造的奋斗者。',
'民主政治建设有序推进,依法治市迈出新步伐。',
'一切公职人员,都必须牢记始终为人民利益和幸福而努力工作。',
]
return recordFeeling(content= '{}\n{}'.format(content, random.choice(addtion)))
else:
return None
        # record the written study reflection (handled by recordFeeling above)
def readTime():
data = {
'mid': mid,
'token': self.token,
'appid': self.appid,
'time': interval,
}
commitUrl = 'https://mapi.dangjianwang.com/v3_1/Study/ReadTime'
rjson = self.session.post(url=commitUrl,
data=data,
verify=False).json()
# print(rjson)
log = '学习readTime:{}'.format(rjson)
# self.studyRsults.update({'学习readTime', rjson})
self.writeLog2File(log)
print(log)
post1()
time.sleep(1)
post2()
time.sleep(1)
post3()
time.sleep(1)
content = get1()
time.sleep(1)
# time.sleep(interval)
count = 0
print('开始学习请稍后')
for i in range(interval):
count += 1
# print(i + 1)
if count % 30 == 0:
print('已用时{}秒'.format(count))
time.sleep(1)
# time.sleep(5)
print('填写的学习体会', content)
        feeling = recordFeeling(content=content)
        if feeling:  # recordFeeling returns None on failure; only record successful submissions
            self.studyRsults.update(feeling)
time.sleep(1)
readTime()
time.sleep(1)
pass
def doExam(self):
        '''
        Answer a randomly drawn exam paper and hand it in.
        :return:
        '''
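        # Flow: 1) exam list  2) bank list (locate the 19th CPC congress bank)
        #       3) draw a random paper from bank 6  4) answer from the local QA store and hand the paper in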
ids = []
data = {
'page': '1',
'page_size': '20',
'token': self.token,
'appid': self.appid,
}
examlistUrl = 'https://mapi.dangjianwang.com/v3_1/quora/examlist'
rjson = self.session.post(url=examlistUrl,
data=data,
verify=False).json()
# print(rjson)
# for i in rjson['data']:
# print(i)
time.sleep(0.3)
#########################################################
print('*' * 99)
data = {
'page': '1',
'page_size': '20',
'token': self.token,
'appid': self.appid,
}
banklistUrl = 'https://mapi.dangjianwang.com/v3_1/exam/banklist'
rjson = self.session.post(url=banklistUrl,
data=data,
verify=False).json()
# print(rjson)
for i in rjson['data']:
tem = (i['bank_name'], i['id'])
self.examlist.append(tem)
if i['bank_name'] == '十九大报告100题(单选)':
# if i['bank_num'] == '65':
temp = {
'title': i['bank_name'],
'detail': i['detail'],
'id': i['id'],
}
self.examC19Info.append(temp)
# print(self.examC19Info)
# print(self.examlist)
time.sleep(0.3)
#########################################################
print('*' * 99)
data = {
'bank': '6',
'token': self.token,
'appid': self.appid,
}
commitUrl = 'https://mapi.dangjianwang.com/v3_1/exam/randexam'
rjson = self.session.post(url=commitUrl,
data=data,
verify=False).json()
# print(rjson)
aa = rjson['data']
paper = aa['id']
for i in aa['questions']:
temp = {'id': i['id'], 'content': i['content']}
ids.append(temp)
#########################################################
print('*' * 99)
time.sleep(0.5)
        # answer the questions and hand the paper in
        answers = []
        # resolve the answer for each question first
        for i in ids:
            # look the answer up in the local QA database (Django model Qa)
            correctAnswer = Qa.objects.filter(question__contains=i['content'])[0]
            answerText = correctAnswer.answerText
            answer = correctAnswer.answer
            # alternative: read the answer from a local answers file instead
            # answerText = getAnswer(i['content'])[2]
            # answer = getAnswer(i['content'])[1]
temp = {'index': i['id'], 'answer': answer}
qa = {'index': i['id'], 'answer': answer, 'answerText': answerText}
self.qaList.append(qa)
print(qa, i['content'])
answers.append(temp)
time.sleep(1)
hdata = {
'token': self.token,
'appid': self.appid,
'paper': paper,
'answers': json.dumps(answers),
# 'answers': [{'answer': 'A', 'index': '639'}, {'answer': 'A', 'index': '639'}],
}
# print('hdata:', hdata)
commitUrl = 'https://mapi.dangjianwang.com/v3_1/exam/handpaper'
rjson = self.session.post(url=commitUrl,
data=hdata,
verify=False).json()
print(rjson)
print(self.examlist)
print(self.examC19Info)
print(self.qaList)
def getAnswerInfo(self):
        '''
        Fetch the answering results and accuracy statistics.
        :return:
        '''
        data = {
            'token': self.token,
            'appid': self.appid,
            'page_size': '20',
            'page_index': '1',
        }
        # per the captured requests below, answering status/statistics come from the banklist endpoint
        commitUrl = 'https://mapi.dangjianwang.com/v3_1/exam/banklist'
rjson = self.session.post(url=commitUrl,
data=data,
verify=False).json()
print(rjson)
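        # the raw captured requests below document the endpoints and parameters used throughout this class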
'''
https://mapi.dangjianwang.com/v3_1/exam/randexam 答题地址 主id是交卷的paper 这里要获取到questions里的id 等于回答问题中的index
appid TJZHDJ01
bank 6
token 5jTY47PbPZ0KdUprwmfJVfH4cX23tyDcV25XrEYkWVvElH3YjJpIb1JCDwq_
https://mapi.dangjianwang.com/v3_1/exam/handpaper 交卷的连接
appid TJZHDJ01
answers [{"index":"635","answer":"D"},{"index":"640","answer":"C"},{"index":"641","answer":"B"},{"index":"665","answer":"B"},{"index":"670","answer":"B"},{"index":"673","answer":"B"},{"index":"677","answer":"C"},{"index":"682","answer":"B"},{"index":"684","answer":"C"},{"index":"690","answer":"A"}]
token 5jTY47PbPZ0KdUprwmfJVfH4cX23tyDcV25XrEYkWVvElH3YjJpIb1JCDwq_
paper 4565894
https://mapi.dangjianwang.com/v3_1/exam/banklist 获得答题情况的连接
appid TJZHDJ01
page_size 20
token 5jTY47PbPZxXeRxlkzScAPWidyvssy3TBD5Y9UYiCQnMmCfa2pRNb1JCDwq_
page_index 1
--------------------------------------------------
https://mapi.dangjianwang.com/v3_1/Study/MaterialCollList 学习的id列表
appid TJZHDJ01
page_size 20
type_id 791
token 5jTY47PbPZJbeh9ixjfOUvaoI3604SrSAz5Zokt3DAmfz3qIis4Yb1JCDwq_
page_index 1
下面是针对791id列表中的访问地址
https://mapi.dangjianwang.com/v3_1//Study/CheckCollStatus
post1:
appid TJZHDJ01
mid 9729
token 5jTY47PbPZoOKEUwlDCaAKWqICGwt3_OVzlVpk5yW1bMyS_M3J5Db1JCDwq_
post2:
https://mapi.dangjianwang.com/v3_1/Login/CheckToken
appid TJZHDJ01
token 5jTY47PbPZoOKEUwlDCaAKWqICGwt3_OVzlVpk5yW1bMyS_M3J5Db1JCDwq_
post3:
https://mapi.dangjianwang.com/v3_1/Study/GetFeelingsNum
appid TJZHDJ01
mid 9729
token 5jTY47PbPZoOKEUwlDCaAKWqICGwt3_OVzlVpk5yW1bMyS_M3J5Db1JCDwq_
get1 https://mapi.dangjianwang.com/v3_1/Study/MaterialDetail?token={}&mid={} 获得页面
post 发表体会
https://mapi.dangjianwang.com/v3_1/Study/RecordFeeling
appid TJZHDJ01
content 伟大的时代造就伟大的人物。邓小平同志就是从中国人民和中华民族近代以来伟大斗争中产生的伟人,是我们大家衷心热爱的伟人。我们很多同志都曾经在他的领导和指导下工作过,他的崇高风范对我们来说是那样熟悉、那样亲切。邓小平同志崇高鲜明又独具魅力的革命风范,将激励我们在实现“两个一百年”奋斗目标、实现中华民族伟大复兴中国梦的征程上奋勇前进。
mid 9729
token 5jTY47PbPckOdUlllmfOCaCvcy7ls3rSVmxRoE0gDg3EmyrYi5Ucb1JCDwq_
post 结束学习
https://mapi.dangjianwang.com/v3_1/Study/ReadTime
appid TJZHDJ01
time 362
mid 9729
token 5jTY47PbPckOdUlllmfOCaCvcy7ls3rSVmxRoE0gDg3EmyrYi5Ucb1JCDwq_
---------------------------------------
https://mapi.dangjianwang.com/v3_1/Help/List 这里获得帮助id
https://mapi.dangjianwang.com/v3_1/Help/PostComment 提交评论的地址
appid TJZHDJ01
content 不忘初心,牢记使命!
id 55984
token 5jTY47PbPcpZe0s1xDLKAqKoIimx6SnSVjcApB92DF3Nmy/djZ1Nb1JCDwq_
把党的政治建设摆在首位!
不忘初心,牢记使命!
-------------------------------
发布的内容
https://mapi.dangjianwang.com/v3_1/Viewpoint/Create
appid TJZHDJ01
content 不忘初心牢记使命
token 5jTY47PbPZ9deR5rkTXIB/b/fymw5HvbAj9R900gDArNnXqE1s9Kb1JCDwq_
不忘初心,牢记使命,坚持终身学习!
全面的小康,覆盖的人口要全面,是惠及全体人民的小康。
-----------------------------
点赞错误
{'msg': '重复评论过多,请您修改后重新提交。', 'code': '500'}
'''
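
# Minimal usage sketch: shows how the methods above could be chained in one run,
# assuming Runner is the class defined earlier in this file and that `random` is
# already imported at module level; the credentials below are placeholders.
if __name__ == '__main__':
    runner = Runner(username='<username>', password='<password>')  # placeholder credentials
    times = runner.getExcuteTimes()  # remaining actions per category for today
    for _ in range(times.get('thumb', 0)):
        if runner.thumbPages:
            runner.doThumb(random.choice(runner.thumbPages))
    for _ in range(times.get('help', 0)):
        if runner.helpPages:
            runner.doHelp(random.choice(runner.helpPages))
    for _ in range(times.get('view', 0)):
        runner.doView()
    if times.get('study', 0) and runner.studyPages:
        runner.doStudy(random.choice(runner.studyPages))
    for _ in range(times.get('exam', 0)):
        runner.doExam()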
|
normal
|
{
"blob_id": "55a26eb2625acb201677f5ff50fde809402c9b93",
"index": 2630,
"step-1": "<mask token>\n\n\nclass Runner:\n\n def __init__(self, appid='TJZHDJ01', username='', password=''):\n urllib3.disable_warnings()\n self.currentTime = datetime.datetime.now().strftime('%H:%M:%S')\n self.username = username\n self.password = password\n self.thumbedFilePath = './lib/'.format(username)\n self.logFilePath = './log/'.format(username)\n self.errFilePath = './err/'.format(username)\n self.thumbedFileList = []\n self.debug = True\n self.session = requests.session()\n self.appid = appid\n self.headers = {'User-Agent':\n 'Dalvik/2.1.0 (Linux; U; Android 6.0; HUAWEI MLA-AL10 Build/HUAWEIMLA-AL10)'\n , 'header_version': '80', 'system': 'android', 'Connection':\n 'Keep-Alive', 'Host': 'mapi.dangjianwang.com'}\n self.token = self.getToken()\n time.sleep(0.1)\n self.thumbPageList = self.getPages(urls=[\n 'https://mapi.dangjianwang.com/v3_1/Learn/List',\n 'https://mapi.dangjianwang.com/v3_1/Activities/List',\n 'https://mapi.dangjianwang.com/v3_1/Hotspots/Hotlist'])\n self.thumbPages = [i[1] for i in self.thumbPageList]\n time.sleep(0.1)\n self.helpPageList = self.getPages(urls=[\n 'https://mapi.dangjianwang.com/v3_1/Help/List'])\n self.helpPages = [i[1] for i in self.helpPageList]\n self.helpResults = {}\n time.sleep(0.1)\n self.studyPageList = self.getPagesII(urls=[\n 'https://mapi.dangjianwang.com/v3_1/Study/MaterialCollList'])\n self.studyPages = [i[1] for i in self.studyPageList]\n time.sleep(0.1)\n self.studyRsults = {}\n self.thumbedPages = []\n self.thumbResults = {}\n self.helpedPages = []\n self.multiThumbed = []\n self.viewsResults = []\n self.examC19Info = []\n self.examlist = []\n self.qaList = []\n <mask token>\n\n def writeErr2File(self, err):\n path = self.logFilePath\n fullPath = '{}{}err.txt'.format(path, self.username)\n if not os.path.exists(path):\n os.mkdir(path)\n with open(fullPath, 'a') as f:\n f.write('{}:{}\\n'.format(self.currentTime, err))\n print('err已经写入{}'.format(fullPath))\n\n def writeLog2File(self, log):\n path = self.logFilePath\n fullPath = '{}{}logs.txt'.format(path, self.username)\n if not os.path.exists(path):\n os.mkdir(path)\n with open(fullPath, 'a') as f:\n f.write('{}:{}\\n'.format(self.currentTime, log))\n print('log已经写入{}'.format(fullPath))\n <mask token>\n\n def getThumbFromFile(self):\n \"\"\"\n\n :return: 文件中id组成的列表\n \"\"\"\n path = self.thumbedFilePath\n inFileList = []\n fullPath = '{}{}thumbs.txt'.format(path, self.username)\n if not os.path.exists(fullPath):\n return fullPath\n with open(fullPath, 'r') as f:\n inFileList.extend(list(set(f.readlines()[0].split(','))))\n with open(fullPath, 'w') as f1:\n f1.write(','.join(sorted(inFileList)))\n return inFileList\n\n def getExcuteTimes(self):\n \"\"\"\n 返回点赞等自动执行的次数的字典\n :return:\n \"\"\"\n excuteTimes = {}\n credInfo = self.getCredItinfo()\n print(credInfo)\n currentScore = credInfo[0]\n thumbScore = credInfo[1]['信息评论'].split('/')[0]\n thumbExcuteTimes = 10 - int(thumbScore)\n excuteTimes.update({'thumb': thumbExcuteTimes})\n helpScore = credInfo[1]['互助广场回答'].split('/')[0]\n helpExctuteTimes = 2 - int(helpScore)\n excuteTimes.update({'help': helpExctuteTimes})\n viewScore = credInfo[1]['党员视角发布'].split('/')[0]\n viewExcuteTimes = int((4 - int(viewScore)) / 2)\n excuteTimes.update({'view': viewExcuteTimes})\n examScore = credInfo[1]['在线知识竞答'].split('/')[0]\n examExcuteTimes = int((4 - int(examScore)) / 2)\n excuteTimes.update({'exam': examExcuteTimes})\n flag = int(credInfo[1]['在线阅读学习资料'].split('/')[1]) - int(credInfo[1]\n ['在线阅读学习资料'].split('/')[0])\n flag1 = 
int(credInfo[1]['学习资料写体会'].split('/')[1]) - int(credInfo[1]\n ['学习资料写体会'].split('/')[0])\n examExcuteTimes = 1 if flag != 0 or flag1 != 0 else 0\n excuteTimes.update({'study': examExcuteTimes})\n return excuteTimes\n\n def getToken(self):\n \"\"\"\n 获得一个连接的token\n 每个连接都需要使用到\n :return:\n \"\"\"\n data = {'appid': self.appid, 'username': self.username, 'password':\n self.password}\n longinurl = 'https://mapi.dangjianwang.com/v3_1/login'\n r = self.session.post(url=longinurl, data=data, verify=False)\n rjson = r.json()\n if rjson['code'] == '200':\n return rjson['token']\n else:\n print('token 获得失败')\n return None\n\n def getRJson(self, url):\n data = {'token': self.token, 'appid': self.appid}\n return self.session.post(url=url, data=data, verify=False).json()\n\n def getUserInfo(self):\n \"\"\"\n 获得一大串用户的信息,暂时没用\n :return:\n \"\"\"\n infoUrl = 'https://mapi.dangjianwang.com/v3_1/User/UserInfo'\n return self.getRJson(url=infoUrl)\n\n def getCredItinfoToday(self):\n \"\"\"\n 获得人员当前的得分等级参数\n :return:\n \"\"\"\n creditInfourl = 'https://mapi.dangjianwang.com/v3_1/User/CreditInfo'\n info = self.getRJson(url=creditInfourl)\n fullScore = info['data']['full']\n gainScore = info['data']['gain']\n currentLevel = info['data']['level']\n username = info['data']['name']\n ret = {'fullScore': fullScore, 'gainScore': gainScore,\n 'currentLevel': currentLevel, 'username': username}\n return ret\n <mask token>\n\n def getPages(self, urls):\n pages = []\n for url in urls:\n data = self.getRJson(url=url)\n for k, v in data.items():\n if k == 'data':\n for i in v:\n pages.append((i['title'], i['id']))\n return pages\n\n def getPagesII(self, urls):\n\n def getRJson(url):\n data = {'token': self.token, 'appid': self.appid, 'type_id':\n '791', 'page_index': '1'}\n return self.session.post(url=url, data=data, verify=False).json()\n pages = []\n for url in urls:\n data = getRJson(url=url)\n for k, v in data.items():\n if k == 'data':\n for i in v:\n pages.append((i['name'], i['id']))\n return pages\n\n def doThumb(self, id):\n \"\"\"\n 点赞函数,操作与id对应的页面\n 每次记录对应的信息到文件\n :return:\n \"\"\"\n contents = ['关注', '关注!', '关注!!']\n data = {'id': id, 'comment': random.choice(contents), 'token': self\n .token, 'appid': self.appid}\n commitUrl = 'https://mapi.dangjianwang.com/v3_1/Activities/CommentAct'\n rjson = self.session.post(url=commitUrl, data=data, verify=False).json(\n )\n print(rjson)\n if rjson['code'] == '1003':\n self.token = self.getToken()\n elif rjson['code'] == '200':\n result = rjson['msg']\n if result == '操作成功':\n self.thumbedPages.append(id)\n for i in list(set(self.thumbPageList)):\n if id == i[1]:\n temp = {'title': i[0]}\n self.thumbResults.update(temp)\n log = '信息点赞:\\n主题: {}\\n提交:{}'.format(i[0], data[\n 'comment'])\n detail = '{} 主题:{}\\n回复:{}\\n'.format(self.\n getCurrentTime(), i[0], data['comment'])\n write2File(self, './results/', 'result.txt', log)\n thumbInfo = {'title': i[0], 'reply': data['comment']}\n self.thumbPages.remove(id)\n self.writeThumb2File(id=id)\n return detail, thumbInfo\n elif rjson['code'] == '500' and rjson['msg'] == '评论过快,请求休息一会':\n print('因评论过快,等待一段时间')\n time.sleep(20)\n else:\n print('rjson', rjson)\n self.thumbedPages.remove(id)\n self.writeThumb2File(id=id)\n log = '点赞:{}'.format(rjson)\n self.writeLog2File(log)\n print(log)\n time.sleep(10)\n\n def doHelp(self, id, callback=None):\n \"\"\"\n 互助功能\n :param id:\n :return:\n \"\"\"\n detail = ''\n helpInfo = None\n log = ''\n content = ['把党的政治建设摆在首位!', '不忘初心,牢记使命!', '发展史第一要务,人才是第一资源,创新是第一动力。',\n '要把党的领导贯彻到依法治国全过程和各方面', 
'毫不动摇坚持中国共产党领导']\n data = {'id': id, 'content': random.choice(content), 'token': self.\n token, 'appid': self.appid}\n print(data)\n commitUrl = 'https://mapi.dangjianwang.com/v3_1/Help/PostComment'\n rjson = self.session.post(url=commitUrl, data=data, verify=False).json(\n )\n if rjson['code'] == '200':\n result = rjson['msg']\n if result == '操作成功':\n self.helpedPages.append(id)\n self.helpPages.remove(id)\n for i in self.helpPageList:\n if id == i[1]:\n curTime = self.getCurrentTime()\n self.helpResults.update({'title': id[0]})\n log = '互助:\\n主题: {}\\n提交内容: {}'.format(i[0], rjson[\n 'comment'])\n write2File(self, './results/', 'result.txt', log)\n detail = '{} 主题: {}\\n提交内容: {}\\n'.format(curTime, i[\n 0], rjson['comment'].strip())\n helpInfo = {'title': i[0], 'reply': rjson['comment']}\n else:\n pass\n else:\n pass\n log = '帮助:{}'.format(rjson)\n self.writeLog2File(log)\n print(log)\n return detail, log, helpInfo\n\n def doView(self):\n \"\"\"\n 党员视角发布功能\n\n :return:\n \"\"\"\n content = ['全面的小康,覆盖的人口要全面,是惠及全体人民的小康。', '不忘初心,牢记使命,坚持终身学习!']\n data = {'content': random.choice(content), 'token': self.token,\n 'appid': self.appid}\n commitUrl = 'https://mapi.dangjianwang.com/v3_1/Viewpoint/Create'\n rjson = self.session.post(url=commitUrl, data=data, verify=False).json(\n )\n if rjson['code'] == '200':\n result = rjson['msg']\n if result == '操作成功':\n self.viewsResults.append(1)\n else:\n pass\n log = '党员视角:{}'.format(rjson)\n detail = '{} 党员视角:\\n发布内容:{}\\n'.format(self.getCurrentTime(), rjson[\n 'data']['content'])\n publicContent = rjson['data']['content']\n return detail, publicContent\n\n def doStudy(self, mid):\n \"\"\"\n 前三个post函数的响应的三个请求\n get用来获得填写的内容\n 最后一个post是学习完离开并检测时间的函数如果成功说明该次学习成功。\n :param mid:\n :return:\n \"\"\"\n interval = 60 * 5 + 5\n\n def post1():\n data = {'mid': mid, 'token': self.token, 'appid': self.appid}\n commitUrl = (\n 'https://mapi.dangjianwang.com/v3_1//Study/CheckCollStatus')\n rjson = self.session.post(url=commitUrl, data=data, verify=False\n ).json()\n log = '学习post1:{}'.format(rjson)\n self.writeLog2File(log)\n print(log)\n\n def post2():\n data = {'token': self.token, 'appid': self.appid}\n commitUrl = 'https://mapi.dangjianwang.com/v3_1/Login/CheckToken'\n rjson = self.session.post(url=commitUrl, data=data, verify=False\n ).json()\n log = '学习post2:{}'.format(rjson)\n self.writeLog2File(log)\n print(log)\n\n def post3():\n data = {'mid': mid, 'token': self.token, 'appid': self.appid}\n commitUrl = (\n 'https://mapi.dangjianwang.com/v3_1/Study/GetFeelingsNum')\n rjson = self.session.post(url=commitUrl, data=data, verify=False\n ).json()\n log = '学习post3:{}'.format(rjson)\n self.writeLog2File(log)\n print(log)\n\n def get1():\n url = (\n 'https://mapi.dangjianwang.com/v3_1/Study/MaterialDetail?token={}&mid={}'\n .format(self.token, mid))\n rjson = self.session.get(url=url)\n text = rjson.content\n soup = BeautifulSoup(text, 'html.parser')\n retContents = []\n for div in soup.find_all('p'):\n p = div.text.strip()\n retContents.append(p if 100 > len(p) < 200 else p[0:200])\n return random.choice(retContents)\n\n def recordFeeling(content=None):\n if not content:\n content = (\n '伟大的时代造就伟大的人物。邓小平同志就是从中国人民和中华民族近代以来伟大斗争中产生的伟人,是我们大家衷心热爱的伟人。我们很多同志都曾经在他的领导和指导下工作过,他的崇高风范对我们来说是那样熟悉、那样亲切。邓小平同志崇高鲜明又独具魅力的革命风范,将激励我们在实现“两个一百年”奋斗目标、实现中华民族伟大复兴中国梦的征程上奋勇前进。'\n )\n data = {'mid': mid, 'token': self.token, 'appid': self.appid,\n 'content': content}\n commitUrl = (\n 'https://mapi.dangjianwang.com/v3_1/Study/RecordFeeling')\n rjson = self.session.post(url=commitUrl, data=data, 
verify=False\n ).json()\n log = '学习recordFeeling:{}'.format(rjson)\n self.writeLog2File(log)\n print('in recordFeeling')\n print(log)\n if rjson['code'] == '200':\n return {'content': content}\n elif rjson['code'] == '1120':\n addtion = ['我们必须坚定不移,任何时候任何情况下都不能动摇',\n '人民有信心,国家才有未来,国家才有力量。', '新时代,属于自强不息、勇于创造的奋斗者。',\n '民主政治建设有序推进,依法治市迈出新步伐。', '一切公职人员,都必须牢记始终为人民利益和幸福而努力工作。']\n return recordFeeling(content='{}\\n{}'.format(content,\n random.choice(addtion)))\n else:\n return None\n\n def readTime():\n data = {'mid': mid, 'token': self.token, 'appid': self.appid,\n 'time': interval}\n commitUrl = 'https://mapi.dangjianwang.com/v3_1/Study/ReadTime'\n rjson = self.session.post(url=commitUrl, data=data, verify=False\n ).json()\n log = '学习readTime:{}'.format(rjson)\n self.writeLog2File(log)\n print(log)\n post1()\n time.sleep(1)\n post2()\n time.sleep(1)\n post3()\n time.sleep(1)\n content = get1()\n time.sleep(1)\n count = 0\n print('开始学习请稍后')\n for i in range(interval):\n count += 1\n if count % 30 == 0:\n print('已用时{}秒'.format(count))\n time.sleep(1)\n print('填写的学习体会', content)\n self.studyRsults.update(recordFeeling(content=content))\n time.sleep(1)\n readTime()\n time.sleep(1)\n pass\n\n def doExam(self):\n \"\"\"\n\n :param self:\n :return:\n \"\"\"\n ids = []\n data = {'page': '1', 'page_size': '20', 'token': self.token,\n 'appid': self.appid}\n examlistUrl = 'https://mapi.dangjianwang.com/v3_1/quora/examlist'\n rjson = self.session.post(url=examlistUrl, data=data, verify=False\n ).json()\n time.sleep(0.3)\n print('*' * 99)\n data = {'page': '1', 'page_size': '20', 'token': self.token,\n 'appid': self.appid}\n banklistUrl = 'https://mapi.dangjianwang.com/v3_1/exam/banklist'\n rjson = self.session.post(url=banklistUrl, data=data, verify=False\n ).json()\n for i in rjson['data']:\n tem = i['bank_name'], i['id']\n self.examlist.append(tem)\n if i['bank_name'] == '十九大报告100题(单选)':\n temp = {'title': i['bank_name'], 'detail': i['detail'],\n 'id': i['id']}\n self.examC19Info.append(temp)\n time.sleep(0.3)\n print('*' * 99)\n data = {'bank': '6', 'token': self.token, 'appid': self.appid}\n commitUrl = 'https://mapi.dangjianwang.com/v3_1/exam/randexam'\n rjson = self.session.post(url=commitUrl, data=data, verify=False).json(\n )\n aa = rjson['data']\n paper = aa['id']\n for i in aa['questions']:\n temp = {'id': i['id'], 'content': i['content']}\n ids.append(temp)\n print('*' * 99)\n time.sleep(0.5)\n answers = []\n for i in ids:\n correctAnswer = Qa.objects.filter(question__contains=i['content'])[\n 0]\n answerText = correctAnswer.answerText\n answer = correctAnswer.answer\n temp = {'index': i['id'], 'answer': answer}\n qa = {'index': i['id'], 'answer': answer, 'answerText': answerText}\n self.qaList.append(qa)\n print(qa, i['content'])\n answers.append(temp)\n time.sleep(1)\n hdata = {'token': self.token, 'appid': self.appid, 'paper': paper,\n 'answers': json.dumps(answers)}\n commitUrl = 'https://mapi.dangjianwang.com/v3_1/exam/handpaper'\n rjson = self.session.post(url=commitUrl, data=hdata, verify=False\n ).json()\n print(rjson)\n print(self.examlist)\n print(self.examC19Info)\n print(self.qaList)\n <mask token>\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass Runner:\n\n def __init__(self, appid='TJZHDJ01', username='', password=''):\n urllib3.disable_warnings()\n self.currentTime = datetime.datetime.now().strftime('%H:%M:%S')\n self.username = username\n self.password = password\n self.thumbedFilePath = './lib/'.format(username)\n self.logFilePath = './log/'.format(username)\n self.errFilePath = './err/'.format(username)\n self.thumbedFileList = []\n self.debug = True\n self.session = requests.session()\n self.appid = appid\n self.headers = {'User-Agent':\n 'Dalvik/2.1.0 (Linux; U; Android 6.0; HUAWEI MLA-AL10 Build/HUAWEIMLA-AL10)'\n , 'header_version': '80', 'system': 'android', 'Connection':\n 'Keep-Alive', 'Host': 'mapi.dangjianwang.com'}\n self.token = self.getToken()\n time.sleep(0.1)\n self.thumbPageList = self.getPages(urls=[\n 'https://mapi.dangjianwang.com/v3_1/Learn/List',\n 'https://mapi.dangjianwang.com/v3_1/Activities/List',\n 'https://mapi.dangjianwang.com/v3_1/Hotspots/Hotlist'])\n self.thumbPages = [i[1] for i in self.thumbPageList]\n time.sleep(0.1)\n self.helpPageList = self.getPages(urls=[\n 'https://mapi.dangjianwang.com/v3_1/Help/List'])\n self.helpPages = [i[1] for i in self.helpPageList]\n self.helpResults = {}\n time.sleep(0.1)\n self.studyPageList = self.getPagesII(urls=[\n 'https://mapi.dangjianwang.com/v3_1/Study/MaterialCollList'])\n self.studyPages = [i[1] for i in self.studyPageList]\n time.sleep(0.1)\n self.studyRsults = {}\n self.thumbedPages = []\n self.thumbResults = {}\n self.helpedPages = []\n self.multiThumbed = []\n self.viewsResults = []\n self.examC19Info = []\n self.examlist = []\n self.qaList = []\n <mask token>\n\n def writeErr2File(self, err):\n path = self.logFilePath\n fullPath = '{}{}err.txt'.format(path, self.username)\n if not os.path.exists(path):\n os.mkdir(path)\n with open(fullPath, 'a') as f:\n f.write('{}:{}\\n'.format(self.currentTime, err))\n print('err已经写入{}'.format(fullPath))\n\n def writeLog2File(self, log):\n path = self.logFilePath\n fullPath = '{}{}logs.txt'.format(path, self.username)\n if not os.path.exists(path):\n os.mkdir(path)\n with open(fullPath, 'a') as f:\n f.write('{}:{}\\n'.format(self.currentTime, log))\n print('log已经写入{}'.format(fullPath))\n\n def writeThumb2File(self, id):\n path = self.thumbedFilePath\n fullPath = '{}{}thumbs.txt'.format(path, self.username)\n if not os.path.exists(path):\n os.mkdir(path)\n with open(fullPath, 'a') as f:\n f.write(',{}'.format(id))\n print('点赞记录已经写入{}'.format(fullPath))\n\n def getThumbFromFile(self):\n \"\"\"\n\n :return: 文件中id组成的列表\n \"\"\"\n path = self.thumbedFilePath\n inFileList = []\n fullPath = '{}{}thumbs.txt'.format(path, self.username)\n if not os.path.exists(fullPath):\n return fullPath\n with open(fullPath, 'r') as f:\n inFileList.extend(list(set(f.readlines()[0].split(','))))\n with open(fullPath, 'w') as f1:\n f1.write(','.join(sorted(inFileList)))\n return inFileList\n\n def getExcuteTimes(self):\n \"\"\"\n 返回点赞等自动执行的次数的字典\n :return:\n \"\"\"\n excuteTimes = {}\n credInfo = self.getCredItinfo()\n print(credInfo)\n currentScore = credInfo[0]\n thumbScore = credInfo[1]['信息评论'].split('/')[0]\n thumbExcuteTimes = 10 - int(thumbScore)\n excuteTimes.update({'thumb': thumbExcuteTimes})\n helpScore = credInfo[1]['互助广场回答'].split('/')[0]\n helpExctuteTimes = 2 - int(helpScore)\n excuteTimes.update({'help': helpExctuteTimes})\n viewScore = credInfo[1]['党员视角发布'].split('/')[0]\n viewExcuteTimes = int((4 - int(viewScore)) / 2)\n excuteTimes.update({'view': viewExcuteTimes})\n examScore = 
credInfo[1]['在线知识竞答'].split('/')[0]\n examExcuteTimes = int((4 - int(examScore)) / 2)\n excuteTimes.update({'exam': examExcuteTimes})\n flag = int(credInfo[1]['在线阅读学习资料'].split('/')[1]) - int(credInfo[1]\n ['在线阅读学习资料'].split('/')[0])\n flag1 = int(credInfo[1]['学习资料写体会'].split('/')[1]) - int(credInfo[1]\n ['学习资料写体会'].split('/')[0])\n examExcuteTimes = 1 if flag != 0 or flag1 != 0 else 0\n excuteTimes.update({'study': examExcuteTimes})\n return excuteTimes\n\n def getToken(self):\n \"\"\"\n 获得一个连接的token\n 每个连接都需要使用到\n :return:\n \"\"\"\n data = {'appid': self.appid, 'username': self.username, 'password':\n self.password}\n longinurl = 'https://mapi.dangjianwang.com/v3_1/login'\n r = self.session.post(url=longinurl, data=data, verify=False)\n rjson = r.json()\n if rjson['code'] == '200':\n return rjson['token']\n else:\n print('token 获得失败')\n return None\n\n def getRJson(self, url):\n data = {'token': self.token, 'appid': self.appid}\n return self.session.post(url=url, data=data, verify=False).json()\n\n def getUserInfo(self):\n \"\"\"\n 获得一大串用户的信息,暂时没用\n :return:\n \"\"\"\n infoUrl = 'https://mapi.dangjianwang.com/v3_1/User/UserInfo'\n return self.getRJson(url=infoUrl)\n\n def getCredItinfoToday(self):\n \"\"\"\n 获得人员当前的得分等级参数\n :return:\n \"\"\"\n creditInfourl = 'https://mapi.dangjianwang.com/v3_1/User/CreditInfo'\n info = self.getRJson(url=creditInfourl)\n fullScore = info['data']['full']\n gainScore = info['data']['gain']\n currentLevel = info['data']['level']\n username = info['data']['name']\n ret = {'fullScore': fullScore, 'gainScore': gainScore,\n 'currentLevel': currentLevel, 'username': username}\n return ret\n\n def getCredItinfo(self):\n \"\"\"\n 获得用户的今日积分状态\n 可用来判断是否需要再继续流程\n 数据如下\n ('35', [('连续登录', '3/3'), ('手机端登录', '2/2'), ('信息评论', '10/10'), ('党员视角发布', '4/4'), ('互助广场回答', '2/2'), ('学习资料写体会', '5/5'), ('在线阅读学习资料', '5/5'), ('在线知识竞答', '4/4')])\n :return:(haved_credit, credit_detail)\n \"\"\"\n creditInfourl = 'https://mapi.dangjianwang.com/v3_1/User/CreditInfo'\n haved_credit = 0\n credit_detail = {}\n info = self.getRJson(url=creditInfourl)\n for k, v in info.items():\n if k == 'data':\n for k2, v2 in v.items():\n if k2 == 'haved_credit':\n haved_credit = v2\n if k2 == 'credit_detail':\n for i in v2:\n credit_detail.update({i['title']: i['score']})\n return haved_credit, credit_detail\n\n def getPages(self, urls):\n pages = []\n for url in urls:\n data = self.getRJson(url=url)\n for k, v in data.items():\n if k == 'data':\n for i in v:\n pages.append((i['title'], i['id']))\n return pages\n\n def getPagesII(self, urls):\n\n def getRJson(url):\n data = {'token': self.token, 'appid': self.appid, 'type_id':\n '791', 'page_index': '1'}\n return self.session.post(url=url, data=data, verify=False).json()\n pages = []\n for url in urls:\n data = getRJson(url=url)\n for k, v in data.items():\n if k == 'data':\n for i in v:\n pages.append((i['name'], i['id']))\n return pages\n\n def doThumb(self, id):\n \"\"\"\n 点赞函数,操作与id对应的页面\n 每次记录对应的信息到文件\n :return:\n \"\"\"\n contents = ['关注', '关注!', '关注!!']\n data = {'id': id, 'comment': random.choice(contents), 'token': self\n .token, 'appid': self.appid}\n commitUrl = 'https://mapi.dangjianwang.com/v3_1/Activities/CommentAct'\n rjson = self.session.post(url=commitUrl, data=data, verify=False).json(\n )\n print(rjson)\n if rjson['code'] == '1003':\n self.token = self.getToken()\n elif rjson['code'] == '200':\n result = rjson['msg']\n if result == '操作成功':\n self.thumbedPages.append(id)\n for i in list(set(self.thumbPageList)):\n if id == i[1]:\n temp 
= {'title': i[0]}\n self.thumbResults.update(temp)\n log = '信息点赞:\\n主题: {}\\n提交:{}'.format(i[0], data[\n 'comment'])\n detail = '{} 主题:{}\\n回复:{}\\n'.format(self.\n getCurrentTime(), i[0], data['comment'])\n write2File(self, './results/', 'result.txt', log)\n thumbInfo = {'title': i[0], 'reply': data['comment']}\n self.thumbPages.remove(id)\n self.writeThumb2File(id=id)\n return detail, thumbInfo\n elif rjson['code'] == '500' and rjson['msg'] == '评论过快,请求休息一会':\n print('因评论过快,等待一段时间')\n time.sleep(20)\n else:\n print('rjson', rjson)\n self.thumbedPages.remove(id)\n self.writeThumb2File(id=id)\n log = '点赞:{}'.format(rjson)\n self.writeLog2File(log)\n print(log)\n time.sleep(10)\n\n def doHelp(self, id, callback=None):\n \"\"\"\n 互助功能\n :param id:\n :return:\n \"\"\"\n detail = ''\n helpInfo = None\n log = ''\n content = ['把党的政治建设摆在首位!', '不忘初心,牢记使命!', '发展史第一要务,人才是第一资源,创新是第一动力。',\n '要把党的领导贯彻到依法治国全过程和各方面', '毫不动摇坚持中国共产党领导']\n data = {'id': id, 'content': random.choice(content), 'token': self.\n token, 'appid': self.appid}\n print(data)\n commitUrl = 'https://mapi.dangjianwang.com/v3_1/Help/PostComment'\n rjson = self.session.post(url=commitUrl, data=data, verify=False).json(\n )\n if rjson['code'] == '200':\n result = rjson['msg']\n if result == '操作成功':\n self.helpedPages.append(id)\n self.helpPages.remove(id)\n for i in self.helpPageList:\n if id == i[1]:\n curTime = self.getCurrentTime()\n self.helpResults.update({'title': id[0]})\n log = '互助:\\n主题: {}\\n提交内容: {}'.format(i[0], rjson[\n 'comment'])\n write2File(self, './results/', 'result.txt', log)\n detail = '{} 主题: {}\\n提交内容: {}\\n'.format(curTime, i[\n 0], rjson['comment'].strip())\n helpInfo = {'title': i[0], 'reply': rjson['comment']}\n else:\n pass\n else:\n pass\n log = '帮助:{}'.format(rjson)\n self.writeLog2File(log)\n print(log)\n return detail, log, helpInfo\n\n def doView(self):\n \"\"\"\n 党员视角发布功能\n\n :return:\n \"\"\"\n content = ['全面的小康,覆盖的人口要全面,是惠及全体人民的小康。', '不忘初心,牢记使命,坚持终身学习!']\n data = {'content': random.choice(content), 'token': self.token,\n 'appid': self.appid}\n commitUrl = 'https://mapi.dangjianwang.com/v3_1/Viewpoint/Create'\n rjson = self.session.post(url=commitUrl, data=data, verify=False).json(\n )\n if rjson['code'] == '200':\n result = rjson['msg']\n if result == '操作成功':\n self.viewsResults.append(1)\n else:\n pass\n log = '党员视角:{}'.format(rjson)\n detail = '{} 党员视角:\\n发布内容:{}\\n'.format(self.getCurrentTime(), rjson[\n 'data']['content'])\n publicContent = rjson['data']['content']\n return detail, publicContent\n\n def doStudy(self, mid):\n \"\"\"\n 前三个post函数的响应的三个请求\n get用来获得填写的内容\n 最后一个post是学习完离开并检测时间的函数如果成功说明该次学习成功。\n :param mid:\n :return:\n \"\"\"\n interval = 60 * 5 + 5\n\n def post1():\n data = {'mid': mid, 'token': self.token, 'appid': self.appid}\n commitUrl = (\n 'https://mapi.dangjianwang.com/v3_1//Study/CheckCollStatus')\n rjson = self.session.post(url=commitUrl, data=data, verify=False\n ).json()\n log = '学习post1:{}'.format(rjson)\n self.writeLog2File(log)\n print(log)\n\n def post2():\n data = {'token': self.token, 'appid': self.appid}\n commitUrl = 'https://mapi.dangjianwang.com/v3_1/Login/CheckToken'\n rjson = self.session.post(url=commitUrl, data=data, verify=False\n ).json()\n log = '学习post2:{}'.format(rjson)\n self.writeLog2File(log)\n print(log)\n\n def post3():\n data = {'mid': mid, 'token': self.token, 'appid': self.appid}\n commitUrl = (\n 'https://mapi.dangjianwang.com/v3_1/Study/GetFeelingsNum')\n rjson = self.session.post(url=commitUrl, data=data, verify=False\n ).json()\n log = 
'学习post3:{}'.format(rjson)\n self.writeLog2File(log)\n print(log)\n\n def get1():\n url = (\n 'https://mapi.dangjianwang.com/v3_1/Study/MaterialDetail?token={}&mid={}'\n .format(self.token, mid))\n rjson = self.session.get(url=url)\n text = rjson.content\n soup = BeautifulSoup(text, 'html.parser')\n retContents = []\n for div in soup.find_all('p'):\n p = div.text.strip()\n retContents.append(p if 100 > len(p) < 200 else p[0:200])\n return random.choice(retContents)\n\n def recordFeeling(content=None):\n if not content:\n content = (\n '伟大的时代造就伟大的人物。邓小平同志就是从中国人民和中华民族近代以来伟大斗争中产生的伟人,是我们大家衷心热爱的伟人。我们很多同志都曾经在他的领导和指导下工作过,他的崇高风范对我们来说是那样熟悉、那样亲切。邓小平同志崇高鲜明又独具魅力的革命风范,将激励我们在实现“两个一百年”奋斗目标、实现中华民族伟大复兴中国梦的征程上奋勇前进。'\n )\n data = {'mid': mid, 'token': self.token, 'appid': self.appid,\n 'content': content}\n commitUrl = (\n 'https://mapi.dangjianwang.com/v3_1/Study/RecordFeeling')\n rjson = self.session.post(url=commitUrl, data=data, verify=False\n ).json()\n log = '学习recordFeeling:{}'.format(rjson)\n self.writeLog2File(log)\n print('in recordFeeling')\n print(log)\n if rjson['code'] == '200':\n return {'content': content}\n elif rjson['code'] == '1120':\n addtion = ['我们必须坚定不移,任何时候任何情况下都不能动摇',\n '人民有信心,国家才有未来,国家才有力量。', '新时代,属于自强不息、勇于创造的奋斗者。',\n '民主政治建设有序推进,依法治市迈出新步伐。', '一切公职人员,都必须牢记始终为人民利益和幸福而努力工作。']\n return recordFeeling(content='{}\\n{}'.format(content,\n random.choice(addtion)))\n else:\n return None\n\n def readTime():\n data = {'mid': mid, 'token': self.token, 'appid': self.appid,\n 'time': interval}\n commitUrl = 'https://mapi.dangjianwang.com/v3_1/Study/ReadTime'\n rjson = self.session.post(url=commitUrl, data=data, verify=False\n ).json()\n log = '学习readTime:{}'.format(rjson)\n self.writeLog2File(log)\n print(log)\n post1()\n time.sleep(1)\n post2()\n time.sleep(1)\n post3()\n time.sleep(1)\n content = get1()\n time.sleep(1)\n count = 0\n print('开始学习请稍后')\n for i in range(interval):\n count += 1\n if count % 30 == 0:\n print('已用时{}秒'.format(count))\n time.sleep(1)\n print('填写的学习体会', content)\n self.studyRsults.update(recordFeeling(content=content))\n time.sleep(1)\n readTime()\n time.sleep(1)\n pass\n\n def doExam(self):\n \"\"\"\n\n :param self:\n :return:\n \"\"\"\n ids = []\n data = {'page': '1', 'page_size': '20', 'token': self.token,\n 'appid': self.appid}\n examlistUrl = 'https://mapi.dangjianwang.com/v3_1/quora/examlist'\n rjson = self.session.post(url=examlistUrl, data=data, verify=False\n ).json()\n time.sleep(0.3)\n print('*' * 99)\n data = {'page': '1', 'page_size': '20', 'token': self.token,\n 'appid': self.appid}\n banklistUrl = 'https://mapi.dangjianwang.com/v3_1/exam/banklist'\n rjson = self.session.post(url=banklistUrl, data=data, verify=False\n ).json()\n for i in rjson['data']:\n tem = i['bank_name'], i['id']\n self.examlist.append(tem)\n if i['bank_name'] == '十九大报告100题(单选)':\n temp = {'title': i['bank_name'], 'detail': i['detail'],\n 'id': i['id']}\n self.examC19Info.append(temp)\n time.sleep(0.3)\n print('*' * 99)\n data = {'bank': '6', 'token': self.token, 'appid': self.appid}\n commitUrl = 'https://mapi.dangjianwang.com/v3_1/exam/randexam'\n rjson = self.session.post(url=commitUrl, data=data, verify=False).json(\n )\n aa = rjson['data']\n paper = aa['id']\n for i in aa['questions']:\n temp = {'id': i['id'], 'content': i['content']}\n ids.append(temp)\n print('*' * 99)\n time.sleep(0.5)\n answers = []\n for i in ids:\n correctAnswer = Qa.objects.filter(question__contains=i['content'])[\n 0]\n answerText = correctAnswer.answerText\n answer = correctAnswer.answer\n temp = {'index': 
i['id'], 'answer': answer}\n qa = {'index': i['id'], 'answer': answer, 'answerText': answerText}\n self.qaList.append(qa)\n print(qa, i['content'])\n answers.append(temp)\n time.sleep(1)\n hdata = {'token': self.token, 'appid': self.appid, 'paper': paper,\n 'answers': json.dumps(answers)}\n commitUrl = 'https://mapi.dangjianwang.com/v3_1/exam/handpaper'\n rjson = self.session.post(url=commitUrl, data=hdata, verify=False\n ).json()\n print(rjson)\n print(self.examlist)\n print(self.examC19Info)\n print(self.qaList)\n <mask token>\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass Runner:\n\n def __init__(self, appid='TJZHDJ01', username='', password=''):\n urllib3.disable_warnings()\n self.currentTime = datetime.datetime.now().strftime('%H:%M:%S')\n self.username = username\n self.password = password\n self.thumbedFilePath = './lib/'.format(username)\n self.logFilePath = './log/'.format(username)\n self.errFilePath = './err/'.format(username)\n self.thumbedFileList = []\n self.debug = True\n self.session = requests.session()\n self.appid = appid\n self.headers = {'User-Agent':\n 'Dalvik/2.1.0 (Linux; U; Android 6.0; HUAWEI MLA-AL10 Build/HUAWEIMLA-AL10)'\n , 'header_version': '80', 'system': 'android', 'Connection':\n 'Keep-Alive', 'Host': 'mapi.dangjianwang.com'}\n self.token = self.getToken()\n time.sleep(0.1)\n self.thumbPageList = self.getPages(urls=[\n 'https://mapi.dangjianwang.com/v3_1/Learn/List',\n 'https://mapi.dangjianwang.com/v3_1/Activities/List',\n 'https://mapi.dangjianwang.com/v3_1/Hotspots/Hotlist'])\n self.thumbPages = [i[1] for i in self.thumbPageList]\n time.sleep(0.1)\n self.helpPageList = self.getPages(urls=[\n 'https://mapi.dangjianwang.com/v3_1/Help/List'])\n self.helpPages = [i[1] for i in self.helpPageList]\n self.helpResults = {}\n time.sleep(0.1)\n self.studyPageList = self.getPagesII(urls=[\n 'https://mapi.dangjianwang.com/v3_1/Study/MaterialCollList'])\n self.studyPages = [i[1] for i in self.studyPageList]\n time.sleep(0.1)\n self.studyRsults = {}\n self.thumbedPages = []\n self.thumbResults = {}\n self.helpedPages = []\n self.multiThumbed = []\n self.viewsResults = []\n self.examC19Info = []\n self.examlist = []\n self.qaList = []\n <mask token>\n\n def writeErr2File(self, err):\n path = self.logFilePath\n fullPath = '{}{}err.txt'.format(path, self.username)\n if not os.path.exists(path):\n os.mkdir(path)\n with open(fullPath, 'a') as f:\n f.write('{}:{}\\n'.format(self.currentTime, err))\n print('err已经写入{}'.format(fullPath))\n\n def writeLog2File(self, log):\n path = self.logFilePath\n fullPath = '{}{}logs.txt'.format(path, self.username)\n if not os.path.exists(path):\n os.mkdir(path)\n with open(fullPath, 'a') as f:\n f.write('{}:{}\\n'.format(self.currentTime, log))\n print('log已经写入{}'.format(fullPath))\n\n def writeThumb2File(self, id):\n path = self.thumbedFilePath\n fullPath = '{}{}thumbs.txt'.format(path, self.username)\n if not os.path.exists(path):\n os.mkdir(path)\n with open(fullPath, 'a') as f:\n f.write(',{}'.format(id))\n print('点赞记录已经写入{}'.format(fullPath))\n\n def getThumbFromFile(self):\n \"\"\"\n\n :return: 文件中id组成的列表\n \"\"\"\n path = self.thumbedFilePath\n inFileList = []\n fullPath = '{}{}thumbs.txt'.format(path, self.username)\n if not os.path.exists(fullPath):\n return fullPath\n with open(fullPath, 'r') as f:\n inFileList.extend(list(set(f.readlines()[0].split(','))))\n with open(fullPath, 'w') as f1:\n f1.write(','.join(sorted(inFileList)))\n return inFileList\n\n def getExcuteTimes(self):\n \"\"\"\n 返回点赞等自动执行的次数的字典\n :return:\n \"\"\"\n excuteTimes = {}\n credInfo = self.getCredItinfo()\n print(credInfo)\n currentScore = credInfo[0]\n thumbScore = credInfo[1]['信息评论'].split('/')[0]\n thumbExcuteTimes = 10 - int(thumbScore)\n excuteTimes.update({'thumb': thumbExcuteTimes})\n helpScore = credInfo[1]['互助广场回答'].split('/')[0]\n helpExctuteTimes = 2 - int(helpScore)\n excuteTimes.update({'help': helpExctuteTimes})\n viewScore = credInfo[1]['党员视角发布'].split('/')[0]\n viewExcuteTimes = int((4 - int(viewScore)) / 2)\n excuteTimes.update({'view': viewExcuteTimes})\n examScore = 
credInfo[1]['在线知识竞答'].split('/')[0]\n examExcuteTimes = int((4 - int(examScore)) / 2)\n excuteTimes.update({'exam': examExcuteTimes})\n flag = int(credInfo[1]['在线阅读学习资料'].split('/')[1]) - int(credInfo[1]\n ['在线阅读学习资料'].split('/')[0])\n flag1 = int(credInfo[1]['学习资料写体会'].split('/')[1]) - int(credInfo[1]\n ['学习资料写体会'].split('/')[0])\n examExcuteTimes = 1 if flag != 0 or flag1 != 0 else 0\n excuteTimes.update({'study': examExcuteTimes})\n return excuteTimes\n\n def getToken(self):\n \"\"\"\n 获得一个连接的token\n 每个连接都需要使用到\n :return:\n \"\"\"\n data = {'appid': self.appid, 'username': self.username, 'password':\n self.password}\n longinurl = 'https://mapi.dangjianwang.com/v3_1/login'\n r = self.session.post(url=longinurl, data=data, verify=False)\n rjson = r.json()\n if rjson['code'] == '200':\n return rjson['token']\n else:\n print('token 获得失败')\n return None\n\n def getRJson(self, url):\n data = {'token': self.token, 'appid': self.appid}\n return self.session.post(url=url, data=data, verify=False).json()\n\n def getUserInfo(self):\n \"\"\"\n 获得一大串用户的信息,暂时没用\n :return:\n \"\"\"\n infoUrl = 'https://mapi.dangjianwang.com/v3_1/User/UserInfo'\n return self.getRJson(url=infoUrl)\n\n def getCredItinfoToday(self):\n \"\"\"\n 获得人员当前的得分等级参数\n :return:\n \"\"\"\n creditInfourl = 'https://mapi.dangjianwang.com/v3_1/User/CreditInfo'\n info = self.getRJson(url=creditInfourl)\n fullScore = info['data']['full']\n gainScore = info['data']['gain']\n currentLevel = info['data']['level']\n username = info['data']['name']\n ret = {'fullScore': fullScore, 'gainScore': gainScore,\n 'currentLevel': currentLevel, 'username': username}\n return ret\n\n def getCredItinfo(self):\n \"\"\"\n 获得用户的今日积分状态\n 可用来判断是否需要再继续流程\n 数据如下\n ('35', [('连续登录', '3/3'), ('手机端登录', '2/2'), ('信息评论', '10/10'), ('党员视角发布', '4/4'), ('互助广场回答', '2/2'), ('学习资料写体会', '5/5'), ('在线阅读学习资料', '5/5'), ('在线知识竞答', '4/4')])\n :return:(haved_credit, credit_detail)\n \"\"\"\n creditInfourl = 'https://mapi.dangjianwang.com/v3_1/User/CreditInfo'\n haved_credit = 0\n credit_detail = {}\n info = self.getRJson(url=creditInfourl)\n for k, v in info.items():\n if k == 'data':\n for k2, v2 in v.items():\n if k2 == 'haved_credit':\n haved_credit = v2\n if k2 == 'credit_detail':\n for i in v2:\n credit_detail.update({i['title']: i['score']})\n return haved_credit, credit_detail\n\n def getPages(self, urls):\n pages = []\n for url in urls:\n data = self.getRJson(url=url)\n for k, v in data.items():\n if k == 'data':\n for i in v:\n pages.append((i['title'], i['id']))\n return pages\n\n def getPagesII(self, urls):\n\n def getRJson(url):\n data = {'token': self.token, 'appid': self.appid, 'type_id':\n '791', 'page_index': '1'}\n return self.session.post(url=url, data=data, verify=False).json()\n pages = []\n for url in urls:\n data = getRJson(url=url)\n for k, v in data.items():\n if k == 'data':\n for i in v:\n pages.append((i['name'], i['id']))\n return pages\n\n def doThumb(self, id):\n \"\"\"\n 点赞函数,操作与id对应的页面\n 每次记录对应的信息到文件\n :return:\n \"\"\"\n contents = ['关注', '关注!', '关注!!']\n data = {'id': id, 'comment': random.choice(contents), 'token': self\n .token, 'appid': self.appid}\n commitUrl = 'https://mapi.dangjianwang.com/v3_1/Activities/CommentAct'\n rjson = self.session.post(url=commitUrl, data=data, verify=False).json(\n )\n print(rjson)\n if rjson['code'] == '1003':\n self.token = self.getToken()\n elif rjson['code'] == '200':\n result = rjson['msg']\n if result == '操作成功':\n self.thumbedPages.append(id)\n for i in list(set(self.thumbPageList)):\n if id == i[1]:\n temp 
= {'title': i[0]}\n self.thumbResults.update(temp)\n log = '信息点赞:\\n主题: {}\\n提交:{}'.format(i[0], data[\n 'comment'])\n detail = '{} 主题:{}\\n回复:{}\\n'.format(self.\n getCurrentTime(), i[0], data['comment'])\n write2File(self, './results/', 'result.txt', log)\n thumbInfo = {'title': i[0], 'reply': data['comment']}\n self.thumbPages.remove(id)\n self.writeThumb2File(id=id)\n return detail, thumbInfo\n elif rjson['code'] == '500' and rjson['msg'] == '评论过快,请求休息一会':\n print('因评论过快,等待一段时间')\n time.sleep(20)\n else:\n print('rjson', rjson)\n self.thumbedPages.remove(id)\n self.writeThumb2File(id=id)\n log = '点赞:{}'.format(rjson)\n self.writeLog2File(log)\n print(log)\n time.sleep(10)\n\n def doHelp(self, id, callback=None):\n \"\"\"\n 互助功能\n :param id:\n :return:\n \"\"\"\n detail = ''\n helpInfo = None\n log = ''\n content = ['把党的政治建设摆在首位!', '不忘初心,牢记使命!', '发展史第一要务,人才是第一资源,创新是第一动力。',\n '要把党的领导贯彻到依法治国全过程和各方面', '毫不动摇坚持中国共产党领导']\n data = {'id': id, 'content': random.choice(content), 'token': self.\n token, 'appid': self.appid}\n print(data)\n commitUrl = 'https://mapi.dangjianwang.com/v3_1/Help/PostComment'\n rjson = self.session.post(url=commitUrl, data=data, verify=False).json(\n )\n if rjson['code'] == '200':\n result = rjson['msg']\n if result == '操作成功':\n self.helpedPages.append(id)\n self.helpPages.remove(id)\n for i in self.helpPageList:\n if id == i[1]:\n curTime = self.getCurrentTime()\n self.helpResults.update({'title': id[0]})\n log = '互助:\\n主题: {}\\n提交内容: {}'.format(i[0], rjson[\n 'comment'])\n write2File(self, './results/', 'result.txt', log)\n detail = '{} 主题: {}\\n提交内容: {}\\n'.format(curTime, i[\n 0], rjson['comment'].strip())\n helpInfo = {'title': i[0], 'reply': rjson['comment']}\n else:\n pass\n else:\n pass\n log = '帮助:{}'.format(rjson)\n self.writeLog2File(log)\n print(log)\n return detail, log, helpInfo\n\n def doView(self):\n \"\"\"\n 党员视角发布功能\n\n :return:\n \"\"\"\n content = ['全面的小康,覆盖的人口要全面,是惠及全体人民的小康。', '不忘初心,牢记使命,坚持终身学习!']\n data = {'content': random.choice(content), 'token': self.token,\n 'appid': self.appid}\n commitUrl = 'https://mapi.dangjianwang.com/v3_1/Viewpoint/Create'\n rjson = self.session.post(url=commitUrl, data=data, verify=False).json(\n )\n if rjson['code'] == '200':\n result = rjson['msg']\n if result == '操作成功':\n self.viewsResults.append(1)\n else:\n pass\n log = '党员视角:{}'.format(rjson)\n detail = '{} 党员视角:\\n发布内容:{}\\n'.format(self.getCurrentTime(), rjson[\n 'data']['content'])\n publicContent = rjson['data']['content']\n return detail, publicContent\n\n def doStudy(self, mid):\n \"\"\"\n 前三个post函数的响应的三个请求\n get用来获得填写的内容\n 最后一个post是学习完离开并检测时间的函数如果成功说明该次学习成功。\n :param mid:\n :return:\n \"\"\"\n interval = 60 * 5 + 5\n\n def post1():\n data = {'mid': mid, 'token': self.token, 'appid': self.appid}\n commitUrl = (\n 'https://mapi.dangjianwang.com/v3_1//Study/CheckCollStatus')\n rjson = self.session.post(url=commitUrl, data=data, verify=False\n ).json()\n log = '学习post1:{}'.format(rjson)\n self.writeLog2File(log)\n print(log)\n\n def post2():\n data = {'token': self.token, 'appid': self.appid}\n commitUrl = 'https://mapi.dangjianwang.com/v3_1/Login/CheckToken'\n rjson = self.session.post(url=commitUrl, data=data, verify=False\n ).json()\n log = '学习post2:{}'.format(rjson)\n self.writeLog2File(log)\n print(log)\n\n def post3():\n data = {'mid': mid, 'token': self.token, 'appid': self.appid}\n commitUrl = (\n 'https://mapi.dangjianwang.com/v3_1/Study/GetFeelingsNum')\n rjson = self.session.post(url=commitUrl, data=data, verify=False\n ).json()\n log = 
'学习post3:{}'.format(rjson)\n self.writeLog2File(log)\n print(log)\n\n def get1():\n url = (\n 'https://mapi.dangjianwang.com/v3_1/Study/MaterialDetail?token={}&mid={}'\n .format(self.token, mid))\n rjson = self.session.get(url=url)\n text = rjson.content\n soup = BeautifulSoup(text, 'html.parser')\n retContents = []\n for div in soup.find_all('p'):\n p = div.text.strip()\n retContents.append(p if 100 > len(p) < 200 else p[0:200])\n return random.choice(retContents)\n\n def recordFeeling(content=None):\n if not content:\n content = (\n '伟大的时代造就伟大的人物。邓小平同志就是从中国人民和中华民族近代以来伟大斗争中产生的伟人,是我们大家衷心热爱的伟人。我们很多同志都曾经在他的领导和指导下工作过,他的崇高风范对我们来说是那样熟悉、那样亲切。邓小平同志崇高鲜明又独具魅力的革命风范,将激励我们在实现“两个一百年”奋斗目标、实现中华民族伟大复兴中国梦的征程上奋勇前进。'\n )\n data = {'mid': mid, 'token': self.token, 'appid': self.appid,\n 'content': content}\n commitUrl = (\n 'https://mapi.dangjianwang.com/v3_1/Study/RecordFeeling')\n rjson = self.session.post(url=commitUrl, data=data, verify=False\n ).json()\n log = '学习recordFeeling:{}'.format(rjson)\n self.writeLog2File(log)\n print('in recordFeeling')\n print(log)\n if rjson['code'] == '200':\n return {'content': content}\n elif rjson['code'] == '1120':\n addtion = ['我们必须坚定不移,任何时候任何情况下都不能动摇',\n '人民有信心,国家才有未来,国家才有力量。', '新时代,属于自强不息、勇于创造的奋斗者。',\n '民主政治建设有序推进,依法治市迈出新步伐。', '一切公职人员,都必须牢记始终为人民利益和幸福而努力工作。']\n return recordFeeling(content='{}\\n{}'.format(content,\n random.choice(addtion)))\n else:\n return None\n\n def readTime():\n data = {'mid': mid, 'token': self.token, 'appid': self.appid,\n 'time': interval}\n commitUrl = 'https://mapi.dangjianwang.com/v3_1/Study/ReadTime'\n rjson = self.session.post(url=commitUrl, data=data, verify=False\n ).json()\n log = '学习readTime:{}'.format(rjson)\n self.writeLog2File(log)\n print(log)\n post1()\n time.sleep(1)\n post2()\n time.sleep(1)\n post3()\n time.sleep(1)\n content = get1()\n time.sleep(1)\n count = 0\n print('开始学习请稍后')\n for i in range(interval):\n count += 1\n if count % 30 == 0:\n print('已用时{}秒'.format(count))\n time.sleep(1)\n print('填写的学习体会', content)\n self.studyRsults.update(recordFeeling(content=content))\n time.sleep(1)\n readTime()\n time.sleep(1)\n pass\n\n def doExam(self):\n \"\"\"\n\n :param self:\n :return:\n \"\"\"\n ids = []\n data = {'page': '1', 'page_size': '20', 'token': self.token,\n 'appid': self.appid}\n examlistUrl = 'https://mapi.dangjianwang.com/v3_1/quora/examlist'\n rjson = self.session.post(url=examlistUrl, data=data, verify=False\n ).json()\n time.sleep(0.3)\n print('*' * 99)\n data = {'page': '1', 'page_size': '20', 'token': self.token,\n 'appid': self.appid}\n banklistUrl = 'https://mapi.dangjianwang.com/v3_1/exam/banklist'\n rjson = self.session.post(url=banklistUrl, data=data, verify=False\n ).json()\n for i in rjson['data']:\n tem = i['bank_name'], i['id']\n self.examlist.append(tem)\n if i['bank_name'] == '十九大报告100题(单选)':\n temp = {'title': i['bank_name'], 'detail': i['detail'],\n 'id': i['id']}\n self.examC19Info.append(temp)\n time.sleep(0.3)\n print('*' * 99)\n data = {'bank': '6', 'token': self.token, 'appid': self.appid}\n commitUrl = 'https://mapi.dangjianwang.com/v3_1/exam/randexam'\n rjson = self.session.post(url=commitUrl, data=data, verify=False).json(\n )\n aa = rjson['data']\n paper = aa['id']\n for i in aa['questions']:\n temp = {'id': i['id'], 'content': i['content']}\n ids.append(temp)\n print('*' * 99)\n time.sleep(0.5)\n answers = []\n for i in ids:\n correctAnswer = Qa.objects.filter(question__contains=i['content'])[\n 0]\n answerText = correctAnswer.answerText\n answer = correctAnswer.answer\n temp = {'index': 
i['id'], 'answer': answer}\n qa = {'index': i['id'], 'answer': answer, 'answerText': answerText}\n self.qaList.append(qa)\n print(qa, i['content'])\n answers.append(temp)\n time.sleep(1)\n hdata = {'token': self.token, 'appid': self.appid, 'paper': paper,\n 'answers': json.dumps(answers)}\n commitUrl = 'https://mapi.dangjianwang.com/v3_1/exam/handpaper'\n rjson = self.session.post(url=commitUrl, data=hdata, verify=False\n ).json()\n print(rjson)\n print(self.examlist)\n print(self.examC19Info)\n print(self.qaList)\n\n def getAnswerInfo(self):\n \"\"\"\n 获得答题的结果与正确率\n :return:\n \"\"\"\n data = {'token': self.token, 'appid': self.appid, 'page_size': '20',\n 'page_index': 'page_index'}\n commitUrl = 'https://mapi.dangjianwang.com/v3_1/exam/randexam'\n rjson = self.session.post(url=commitUrl, data=data, verify=False).json(\n )\n print(rjson)\n\n\n<mask token>\n",
"step-4": "<mask token>\n\n\nclass Runner:\n\n def __init__(self, appid='TJZHDJ01', username='', password=''):\n urllib3.disable_warnings()\n self.currentTime = datetime.datetime.now().strftime('%H:%M:%S')\n self.username = username\n self.password = password\n self.thumbedFilePath = './lib/'.format(username)\n self.logFilePath = './log/'.format(username)\n self.errFilePath = './err/'.format(username)\n self.thumbedFileList = []\n self.debug = True\n self.session = requests.session()\n self.appid = appid\n self.headers = {'User-Agent':\n 'Dalvik/2.1.0 (Linux; U; Android 6.0; HUAWEI MLA-AL10 Build/HUAWEIMLA-AL10)'\n , 'header_version': '80', 'system': 'android', 'Connection':\n 'Keep-Alive', 'Host': 'mapi.dangjianwang.com'}\n self.token = self.getToken()\n time.sleep(0.1)\n self.thumbPageList = self.getPages(urls=[\n 'https://mapi.dangjianwang.com/v3_1/Learn/List',\n 'https://mapi.dangjianwang.com/v3_1/Activities/List',\n 'https://mapi.dangjianwang.com/v3_1/Hotspots/Hotlist'])\n self.thumbPages = [i[1] for i in self.thumbPageList]\n time.sleep(0.1)\n self.helpPageList = self.getPages(urls=[\n 'https://mapi.dangjianwang.com/v3_1/Help/List'])\n self.helpPages = [i[1] for i in self.helpPageList]\n self.helpResults = {}\n time.sleep(0.1)\n self.studyPageList = self.getPagesII(urls=[\n 'https://mapi.dangjianwang.com/v3_1/Study/MaterialCollList'])\n self.studyPages = [i[1] for i in self.studyPageList]\n time.sleep(0.1)\n self.studyRsults = {}\n self.thumbedPages = []\n self.thumbResults = {}\n self.helpedPages = []\n self.multiThumbed = []\n self.viewsResults = []\n self.examC19Info = []\n self.examlist = []\n self.qaList = []\n\n def getCurrentTime(self):\n return datetime.datetime.now().strftime('%H:%M:%S')\n\n def writeErr2File(self, err):\n path = self.logFilePath\n fullPath = '{}{}err.txt'.format(path, self.username)\n if not os.path.exists(path):\n os.mkdir(path)\n with open(fullPath, 'a') as f:\n f.write('{}:{}\\n'.format(self.currentTime, err))\n print('err已经写入{}'.format(fullPath))\n\n def writeLog2File(self, log):\n path = self.logFilePath\n fullPath = '{}{}logs.txt'.format(path, self.username)\n if not os.path.exists(path):\n os.mkdir(path)\n with open(fullPath, 'a') as f:\n f.write('{}:{}\\n'.format(self.currentTime, log))\n print('log已经写入{}'.format(fullPath))\n\n def writeThumb2File(self, id):\n path = self.thumbedFilePath\n fullPath = '{}{}thumbs.txt'.format(path, self.username)\n if not os.path.exists(path):\n os.mkdir(path)\n with open(fullPath, 'a') as f:\n f.write(',{}'.format(id))\n print('点赞记录已经写入{}'.format(fullPath))\n\n def getThumbFromFile(self):\n \"\"\"\n\n :return: 文件中id组成的列表\n \"\"\"\n path = self.thumbedFilePath\n inFileList = []\n fullPath = '{}{}thumbs.txt'.format(path, self.username)\n if not os.path.exists(fullPath):\n return fullPath\n with open(fullPath, 'r') as f:\n inFileList.extend(list(set(f.readlines()[0].split(','))))\n with open(fullPath, 'w') as f1:\n f1.write(','.join(sorted(inFileList)))\n return inFileList\n\n def getExcuteTimes(self):\n \"\"\"\n 返回点赞等自动执行的次数的字典\n :return:\n \"\"\"\n excuteTimes = {}\n credInfo = self.getCredItinfo()\n print(credInfo)\n currentScore = credInfo[0]\n thumbScore = credInfo[1]['信息评论'].split('/')[0]\n thumbExcuteTimes = 10 - int(thumbScore)\n excuteTimes.update({'thumb': thumbExcuteTimes})\n helpScore = credInfo[1]['互助广场回答'].split('/')[0]\n helpExctuteTimes = 2 - int(helpScore)\n excuteTimes.update({'help': helpExctuteTimes})\n viewScore = credInfo[1]['党员视角发布'].split('/')[0]\n viewExcuteTimes = int((4 - int(viewScore)) / 
2)\n excuteTimes.update({'view': viewExcuteTimes})\n examScore = credInfo[1]['在线知识竞答'].split('/')[0]\n examExcuteTimes = int((4 - int(examScore)) / 2)\n excuteTimes.update({'exam': examExcuteTimes})\n flag = int(credInfo[1]['在线阅读学习资料'].split('/')[1]) - int(credInfo[1]\n ['在线阅读学习资料'].split('/')[0])\n flag1 = int(credInfo[1]['学习资料写体会'].split('/')[1]) - int(credInfo[1]\n ['学习资料写体会'].split('/')[0])\n examExcuteTimes = 1 if flag != 0 or flag1 != 0 else 0\n excuteTimes.update({'study': examExcuteTimes})\n return excuteTimes\n\n def getToken(self):\n \"\"\"\n 获得一个连接的token\n 每个连接都需要使用到\n :return:\n \"\"\"\n data = {'appid': self.appid, 'username': self.username, 'password':\n self.password}\n longinurl = 'https://mapi.dangjianwang.com/v3_1/login'\n r = self.session.post(url=longinurl, data=data, verify=False)\n rjson = r.json()\n if rjson['code'] == '200':\n return rjson['token']\n else:\n print('token 获得失败')\n return None\n\n def getRJson(self, url):\n data = {'token': self.token, 'appid': self.appid}\n return self.session.post(url=url, data=data, verify=False).json()\n\n def getUserInfo(self):\n \"\"\"\n 获得一大串用户的信息,暂时没用\n :return:\n \"\"\"\n infoUrl = 'https://mapi.dangjianwang.com/v3_1/User/UserInfo'\n return self.getRJson(url=infoUrl)\n\n def getCredItinfoToday(self):\n \"\"\"\n 获得人员当前的得分等级参数\n :return:\n \"\"\"\n creditInfourl = 'https://mapi.dangjianwang.com/v3_1/User/CreditInfo'\n info = self.getRJson(url=creditInfourl)\n fullScore = info['data']['full']\n gainScore = info['data']['gain']\n currentLevel = info['data']['level']\n username = info['data']['name']\n ret = {'fullScore': fullScore, 'gainScore': gainScore,\n 'currentLevel': currentLevel, 'username': username}\n return ret\n\n def getCredItinfo(self):\n \"\"\"\n 获得用户的今日积分状态\n 可用来判断是否需要再继续流程\n 数据如下\n ('35', [('连续登录', '3/3'), ('手机端登录', '2/2'), ('信息评论', '10/10'), ('党员视角发布', '4/4'), ('互助广场回答', '2/2'), ('学习资料写体会', '5/5'), ('在线阅读学习资料', '5/5'), ('在线知识竞答', '4/4')])\n :return:(haved_credit, credit_detail)\n \"\"\"\n creditInfourl = 'https://mapi.dangjianwang.com/v3_1/User/CreditInfo'\n haved_credit = 0\n credit_detail = {}\n info = self.getRJson(url=creditInfourl)\n for k, v in info.items():\n if k == 'data':\n for k2, v2 in v.items():\n if k2 == 'haved_credit':\n haved_credit = v2\n if k2 == 'credit_detail':\n for i in v2:\n credit_detail.update({i['title']: i['score']})\n return haved_credit, credit_detail\n\n def getPages(self, urls):\n pages = []\n for url in urls:\n data = self.getRJson(url=url)\n for k, v in data.items():\n if k == 'data':\n for i in v:\n pages.append((i['title'], i['id']))\n return pages\n\n def getPagesII(self, urls):\n\n def getRJson(url):\n data = {'token': self.token, 'appid': self.appid, 'type_id':\n '791', 'page_index': '1'}\n return self.session.post(url=url, data=data, verify=False).json()\n pages = []\n for url in urls:\n data = getRJson(url=url)\n for k, v in data.items():\n if k == 'data':\n for i in v:\n pages.append((i['name'], i['id']))\n return pages\n\n def doThumb(self, id):\n \"\"\"\n 点赞函数,操作与id对应的页面\n 每次记录对应的信息到文件\n :return:\n \"\"\"\n contents = ['关注', '关注!', '关注!!']\n data = {'id': id, 'comment': random.choice(contents), 'token': self\n .token, 'appid': self.appid}\n commitUrl = 'https://mapi.dangjianwang.com/v3_1/Activities/CommentAct'\n rjson = self.session.post(url=commitUrl, data=data, verify=False).json(\n )\n print(rjson)\n if rjson['code'] == '1003':\n self.token = self.getToken()\n elif rjson['code'] == '200':\n result = rjson['msg']\n if result == '操作成功':\n self.thumbedPages.append(id)\n 
for i in list(set(self.thumbPageList)):\n if id == i[1]:\n temp = {'title': i[0]}\n self.thumbResults.update(temp)\n log = '信息点赞:\\n主题: {}\\n提交:{}'.format(i[0], data[\n 'comment'])\n detail = '{} 主题:{}\\n回复:{}\\n'.format(self.\n getCurrentTime(), i[0], data['comment'])\n write2File(self, './results/', 'result.txt', log)\n thumbInfo = {'title': i[0], 'reply': data['comment']}\n self.thumbPages.remove(id)\n self.writeThumb2File(id=id)\n return detail, thumbInfo\n elif rjson['code'] == '500' and rjson['msg'] == '评论过快,请求休息一会':\n print('因评论过快,等待一段时间')\n time.sleep(20)\n else:\n print('rjson', rjson)\n self.thumbedPages.remove(id)\n self.writeThumb2File(id=id)\n log = '点赞:{}'.format(rjson)\n self.writeLog2File(log)\n print(log)\n time.sleep(10)\n\n def doHelp(self, id, callback=None):\n \"\"\"\n 互助功能\n :param id:\n :return:\n \"\"\"\n detail = ''\n helpInfo = None\n log = ''\n content = ['把党的政治建设摆在首位!', '不忘初心,牢记使命!', '发展史第一要务,人才是第一资源,创新是第一动力。',\n '要把党的领导贯彻到依法治国全过程和各方面', '毫不动摇坚持中国共产党领导']\n data = {'id': id, 'content': random.choice(content), 'token': self.\n token, 'appid': self.appid}\n print(data)\n commitUrl = 'https://mapi.dangjianwang.com/v3_1/Help/PostComment'\n rjson = self.session.post(url=commitUrl, data=data, verify=False).json(\n )\n if rjson['code'] == '200':\n result = rjson['msg']\n if result == '操作成功':\n self.helpedPages.append(id)\n self.helpPages.remove(id)\n for i in self.helpPageList:\n if id == i[1]:\n curTime = self.getCurrentTime()\n self.helpResults.update({'title': id[0]})\n log = '互助:\\n主题: {}\\n提交内容: {}'.format(i[0], rjson[\n 'comment'])\n write2File(self, './results/', 'result.txt', log)\n detail = '{} 主题: {}\\n提交内容: {}\\n'.format(curTime, i[\n 0], rjson['comment'].strip())\n helpInfo = {'title': i[0], 'reply': rjson['comment']}\n else:\n pass\n else:\n pass\n log = '帮助:{}'.format(rjson)\n self.writeLog2File(log)\n print(log)\n return detail, log, helpInfo\n\n def doView(self):\n \"\"\"\n 党员视角发布功能\n\n :return:\n \"\"\"\n content = ['全面的小康,覆盖的人口要全面,是惠及全体人民的小康。', '不忘初心,牢记使命,坚持终身学习!']\n data = {'content': random.choice(content), 'token': self.token,\n 'appid': self.appid}\n commitUrl = 'https://mapi.dangjianwang.com/v3_1/Viewpoint/Create'\n rjson = self.session.post(url=commitUrl, data=data, verify=False).json(\n )\n if rjson['code'] == '200':\n result = rjson['msg']\n if result == '操作成功':\n self.viewsResults.append(1)\n else:\n pass\n log = '党员视角:{}'.format(rjson)\n detail = '{} 党员视角:\\n发布内容:{}\\n'.format(self.getCurrentTime(), rjson[\n 'data']['content'])\n publicContent = rjson['data']['content']\n return detail, publicContent\n\n def doStudy(self, mid):\n \"\"\"\n 前三个post函数的响应的三个请求\n get用来获得填写的内容\n 最后一个post是学习完离开并检测时间的函数如果成功说明该次学习成功。\n :param mid:\n :return:\n \"\"\"\n interval = 60 * 5 + 5\n\n def post1():\n data = {'mid': mid, 'token': self.token, 'appid': self.appid}\n commitUrl = (\n 'https://mapi.dangjianwang.com/v3_1//Study/CheckCollStatus')\n rjson = self.session.post(url=commitUrl, data=data, verify=False\n ).json()\n log = '学习post1:{}'.format(rjson)\n self.writeLog2File(log)\n print(log)\n\n def post2():\n data = {'token': self.token, 'appid': self.appid}\n commitUrl = 'https://mapi.dangjianwang.com/v3_1/Login/CheckToken'\n rjson = self.session.post(url=commitUrl, data=data, verify=False\n ).json()\n log = '学习post2:{}'.format(rjson)\n self.writeLog2File(log)\n print(log)\n\n def post3():\n data = {'mid': mid, 'token': self.token, 'appid': self.appid}\n commitUrl = (\n 'https://mapi.dangjianwang.com/v3_1/Study/GetFeelingsNum')\n rjson = 
self.session.post(url=commitUrl, data=data, verify=False\n ).json()\n log = '学习post3:{}'.format(rjson)\n self.writeLog2File(log)\n print(log)\n\n def get1():\n url = (\n 'https://mapi.dangjianwang.com/v3_1/Study/MaterialDetail?token={}&mid={}'\n .format(self.token, mid))\n rjson = self.session.get(url=url)\n text = rjson.content\n soup = BeautifulSoup(text, 'html.parser')\n retContents = []\n for div in soup.find_all('p'):\n p = div.text.strip()\n retContents.append(p if 100 > len(p) < 200 else p[0:200])\n return random.choice(retContents)\n\n def recordFeeling(content=None):\n if not content:\n content = (\n '伟大的时代造就伟大的人物。邓小平同志就是从中国人民和中华民族近代以来伟大斗争中产生的伟人,是我们大家衷心热爱的伟人。我们很多同志都曾经在他的领导和指导下工作过,他的崇高风范对我们来说是那样熟悉、那样亲切。邓小平同志崇高鲜明又独具魅力的革命风范,将激励我们在实现“两个一百年”奋斗目标、实现中华民族伟大复兴中国梦的征程上奋勇前进。'\n )\n data = {'mid': mid, 'token': self.token, 'appid': self.appid,\n 'content': content}\n commitUrl = (\n 'https://mapi.dangjianwang.com/v3_1/Study/RecordFeeling')\n rjson = self.session.post(url=commitUrl, data=data, verify=False\n ).json()\n log = '学习recordFeeling:{}'.format(rjson)\n self.writeLog2File(log)\n print('in recordFeeling')\n print(log)\n if rjson['code'] == '200':\n return {'content': content}\n elif rjson['code'] == '1120':\n addtion = ['我们必须坚定不移,任何时候任何情况下都不能动摇',\n '人民有信心,国家才有未来,国家才有力量。', '新时代,属于自强不息、勇于创造的奋斗者。',\n '民主政治建设有序推进,依法治市迈出新步伐。', '一切公职人员,都必须牢记始终为人民利益和幸福而努力工作。']\n return recordFeeling(content='{}\\n{}'.format(content,\n random.choice(addtion)))\n else:\n return None\n\n def readTime():\n data = {'mid': mid, 'token': self.token, 'appid': self.appid,\n 'time': interval}\n commitUrl = 'https://mapi.dangjianwang.com/v3_1/Study/ReadTime'\n rjson = self.session.post(url=commitUrl, data=data, verify=False\n ).json()\n log = '学习readTime:{}'.format(rjson)\n self.writeLog2File(log)\n print(log)\n post1()\n time.sleep(1)\n post2()\n time.sleep(1)\n post3()\n time.sleep(1)\n content = get1()\n time.sleep(1)\n count = 0\n print('开始学习请稍后')\n for i in range(interval):\n count += 1\n if count % 30 == 0:\n print('已用时{}秒'.format(count))\n time.sleep(1)\n print('填写的学习体会', content)\n self.studyRsults.update(recordFeeling(content=content))\n time.sleep(1)\n readTime()\n time.sleep(1)\n pass\n\n def doExam(self):\n \"\"\"\n\n :param self:\n :return:\n \"\"\"\n ids = []\n data = {'page': '1', 'page_size': '20', 'token': self.token,\n 'appid': self.appid}\n examlistUrl = 'https://mapi.dangjianwang.com/v3_1/quora/examlist'\n rjson = self.session.post(url=examlistUrl, data=data, verify=False\n ).json()\n time.sleep(0.3)\n print('*' * 99)\n data = {'page': '1', 'page_size': '20', 'token': self.token,\n 'appid': self.appid}\n banklistUrl = 'https://mapi.dangjianwang.com/v3_1/exam/banklist'\n rjson = self.session.post(url=banklistUrl, data=data, verify=False\n ).json()\n for i in rjson['data']:\n tem = i['bank_name'], i['id']\n self.examlist.append(tem)\n if i['bank_name'] == '十九大报告100题(单选)':\n temp = {'title': i['bank_name'], 'detail': i['detail'],\n 'id': i['id']}\n self.examC19Info.append(temp)\n time.sleep(0.3)\n print('*' * 99)\n data = {'bank': '6', 'token': self.token, 'appid': self.appid}\n commitUrl = 'https://mapi.dangjianwang.com/v3_1/exam/randexam'\n rjson = self.session.post(url=commitUrl, data=data, verify=False).json(\n )\n aa = rjson['data']\n paper = aa['id']\n for i in aa['questions']:\n temp = {'id': i['id'], 'content': i['content']}\n ids.append(temp)\n print('*' * 99)\n time.sleep(0.5)\n answers = []\n for i in ids:\n correctAnswer = Qa.objects.filter(question__contains=i['content'])[\n 0]\n answerText = 
correctAnswer.answerText\n answer = correctAnswer.answer\n temp = {'index': i['id'], 'answer': answer}\n qa = {'index': i['id'], 'answer': answer, 'answerText': answerText}\n self.qaList.append(qa)\n print(qa, i['content'])\n answers.append(temp)\n time.sleep(1)\n hdata = {'token': self.token, 'appid': self.appid, 'paper': paper,\n 'answers': json.dumps(answers)}\n commitUrl = 'https://mapi.dangjianwang.com/v3_1/exam/handpaper'\n rjson = self.session.post(url=commitUrl, data=hdata, verify=False\n ).json()\n print(rjson)\n print(self.examlist)\n print(self.examC19Info)\n print(self.qaList)\n\n def getAnswerInfo(self):\n \"\"\"\n 获得答题的结果与正确率\n :return:\n \"\"\"\n data = {'token': self.token, 'appid': self.appid, 'page_size': '20',\n 'page_index': 'page_index'}\n commitUrl = 'https://mapi.dangjianwang.com/v3_1/exam/randexam'\n rjson = self.session.post(url=commitUrl, data=data, verify=False).json(\n )\n print(rjson)\n\n\n<mask token>\n",
"step-5": "import json\r\nimport os, django\r\n\r\n\r\nos.environ.setdefault(\"DJANGO_SETTINGS_MODULE\", \"dangjianyun.settings\")# project_name 项目名称\r\ndjango.setup()\r\nfrom dangjiansite.djfuncs import *\r\nimport os\r\nimport datetime\r\nimport requests\r\nimport time\r\nimport urllib3\r\nimport base64\r\nimport csv\r\nimport random\r\nfrom bs4 import BeautifulSoup\r\nfrom dangjiansite.models import *\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\nclass Runner():\r\n\r\n # def __init__(self, appid='TJZHDJ01', username='024549', password='Aa1234'):\r\n def __init__(self, appid='TJZHDJ01', username='', password=''):\r\n urllib3.disable_warnings()#屏蔽ssl告警\r\n self.currentTime = datetime.datetime.now().strftime(\"%H:%M:%S\")\r\n self.username = username\r\n self.password = password\r\n self.thumbedFilePath = './lib/'.format(username)\r\n self.logFilePath = './log/'.format(username)\r\n self.errFilePath = './err/'.format(username)\r\n # self.thumbedFileList = self.getThumbFromFile()\r\n self.thumbedFileList = []\r\n self.debug = True\r\n self.session = requests.session()\r\n self.appid = appid#应该是本设备安装app的id 等换个设备试一下就知道了\r\n self.headers ={\r\n 'User-Agent': 'Dalvik/2.1.0 (Linux; U; Android 6.0; HUAWEI MLA-AL10 Build/HUAWEIMLA-AL10)',\r\n 'header_version': '80',\r\n 'system': 'android',\r\n 'Connection': 'Keep-Alive',\r\n 'Host': 'mapi.dangjianwang.com',\r\n }\r\n self.token = self.getToken()\r\n time.sleep(0.1)\r\n self.thumbPageList = self.getPages(urls=[\r\n 'https://mapi.dangjianwang.com/v3_1/Learn/List',\r\n 'https://mapi.dangjianwang.com/v3_1/Activities/List',\r\n 'https://mapi.dangjianwang.com/v3_1/Hotspots/Hotlist'\r\n ])\r\n self.thumbPages = [i[1] for i in self.thumbPageList]\r\n time.sleep(0.1)\r\n self.helpPageList = self.getPages(urls=['https://mapi.dangjianwang.com/v3_1/Help/List', ])\r\n self.helpPages = [i[1] for i in self.helpPageList]\r\n self.helpResults = {}\r\n time.sleep(0.1)\r\n self.studyPageList = self.getPagesII(urls=['https://mapi.dangjianwang.com/v3_1/Study/MaterialCollList'])\r\n self.studyPages = [i[1] for i in self.studyPageList]\r\n time.sleep(0.1)\r\n self.studyRsults = {}\r\n self.thumbedPages = []\r\n self.thumbResults = {}\r\n self.helpedPages = []\r\n self.multiThumbed = []#考虑最后要写入文件之中\r\n self.viewsResults = []\r\n self.examC19Info = []\r\n self.examlist = []\r\n self.qaList = []\r\n\r\n def getCurrentTime(self):\r\n return datetime.datetime.now().strftime(\"%H:%M:%S\")\r\n\r\n\r\n def writeErr2File(self, err):\r\n path = self.logFilePath\r\n fullPath = '{}{}err.txt'.format(path, self.username)\r\n if not os.path.exists(path):\r\n os.mkdir(path)\r\n with open(fullPath, 'a') as f:\r\n f.write('{}:{}\\n'.format(self.currentTime, err))\r\n print('err已经写入{}'.format(fullPath))\r\n\r\n def writeLog2File(self, log):\r\n path = self.logFilePath\r\n fullPath = '{}{}logs.txt'.format(path, self.username)\r\n if not os.path.exists(path):\r\n os.mkdir(path)\r\n with open(fullPath, 'a') as f:\r\n f.write('{}:{}\\n'.format(self.currentTime, log))\r\n print('log已经写入{}'.format(fullPath))\r\n\r\n def writeThumb2File(self, id):\r\n path = self.thumbedFilePath\r\n fullPath = '{}{}thumbs.txt'.format(path, self.username)\r\n if not os.path.exists(path):\r\n os.mkdir(path)\r\n with open(fullPath, 'a') as f:\r\n f.write(',{}'.format(id))\r\n print('点赞记录已经写入{}'.format(fullPath))\r\n\r\n def getThumbFromFile(self):\r\n '''\r\n\r\n :return: 文件中id组成的列表\r\n '''\r\n path = self.thumbedFilePath\r\n inFileList = []\r\n fullPath = '{}{}thumbs.txt'.format(path, self.username)\r\n if not 
os.path.exists(fullPath):\r\n return fullPath\r\n with open(fullPath, 'r') as f:\r\n inFileList.extend(list(set(f.readlines()[0].split(','))))\r\n # print('getThumbFormFile', inFileList)\r\n with open(fullPath, 'w') as f1:\r\n f1.write(','.join(sorted(inFileList)))\r\n return inFileList\r\n\r\n def getExcuteTimes(self):\r\n '''\r\n 返回点赞等自动执行的次数的字典\r\n :return:\r\n '''\r\n excuteTimes = {}\r\n\r\n credInfo = self.getCredItinfo()\r\n print(credInfo)\r\n currentScore = credInfo[0]\r\n\r\n # 点赞次数\r\n thumbScore = credInfo[1]['信息评论'].split('/')[0]\r\n thumbExcuteTimes = 10 - int(thumbScore)\r\n excuteTimes.update({'thumb': thumbExcuteTimes})\r\n # 帮助次数\r\n helpScore = credInfo[1]['互助广场回答'].split('/')[0]\r\n helpExctuteTimes = 2 - int(helpScore)\r\n excuteTimes.update({'help': helpExctuteTimes})\r\n # 党员视角发布次数\r\n viewScore = credInfo[1]['党员视角发布'].split('/')[0]\r\n viewExcuteTimes = int((4 - int(viewScore)) / 2)\r\n excuteTimes.update({'view': viewExcuteTimes})\r\n # 在线知识竞答次数\r\n examScore = credInfo[1]['在线知识竞答'].split('/')[0]\r\n examExcuteTimes = int((4 - int(examScore)) / 2)\r\n excuteTimes.update({'exam': examExcuteTimes})\r\n # 学习次数\r\n flag = int(credInfo[1]['在线阅读学习资料'].split('/')[1]) - int(credInfo[1]['在线阅读学习资料'].split('/')[0])\r\n flag1 = int(credInfo[1]['学习资料写体会'].split('/')[1]) - int(credInfo[1]['学习资料写体会'].split('/')[0])\r\n examExcuteTimes = 1 if flag != 0 or flag1 != 0 else 0\r\n excuteTimes.update({'study': examExcuteTimes})\r\n\r\n return excuteTimes\r\n\r\n def getToken(self):\r\n '''\r\n 获得一个连接的token\r\n 每个连接都需要使用到\r\n :return:\r\n '''\r\n data = {\r\n 'appid': self.appid,\r\n 'username': self.username,\r\n 'password': self.password,\r\n }\r\n longinurl = 'https://mapi.dangjianwang.com/v3_1/login'\r\n\r\n r = self.session.post(url=longinurl, data=data, verify=False)\r\n rjson = r.json()\r\n # print(type(rjson))\r\n # print(rjson)\r\n\r\n if rjson['code'] == '200':\r\n return rjson['token']\r\n else:\r\n print('token 获得失败')\r\n return None\r\n\r\n def getRJson(self, url):\r\n data={\r\n 'token': self.token,\r\n 'appid': self.appid\r\n }\r\n\r\n return self.session.post(url=url, data=data, verify=False).json()\r\n\r\n def getUserInfo(self):\r\n '''\r\n 获得一大串用户的信息,暂时没用\r\n :return:\r\n '''\r\n infoUrl = 'https://mapi.dangjianwang.com/v3_1/User/UserInfo'\r\n return self.getRJson(url=infoUrl)\r\n\r\n def getCredItinfoToday(self):\r\n '''\r\n 获得人员当前的得分等级参数\r\n :return:\r\n '''\r\n creditInfourl = 'https://mapi.dangjianwang.com/v3_1/User/CreditInfo'\r\n info = self.getRJson(url=creditInfourl)\r\n fullScore = info['data']['full']\r\n gainScore = info['data']['gain']\r\n currentLevel = info['data']['level']\r\n username = info['data']['name']\r\n ret = {\r\n 'fullScore': fullScore,\r\n 'gainScore': gainScore,\r\n 'currentLevel': currentLevel,\r\n 'username': username,\r\n }\r\n return ret\r\n\r\n\r\n def getCredItinfo(self):\r\n '''\r\n 获得用户的今日积分状态\r\n 可用来判断是否需要再继续流程\r\n 数据如下\r\n ('35', [('连续登录', '3/3'), ('手机端登录', '2/2'), ('信息评论', '10/10'), ('党员视角发布', '4/4'), ('互助广场回答', '2/2'), ('学习资料写体会', '5/5'), ('在线阅读学习资料', '5/5'), ('在线知识竞答', '4/4')])\r\n :return:(haved_credit, credit_detail)\r\n '''\r\n creditInfourl = 'https://mapi.dangjianwang.com/v3_1/User/CreditInfo'\r\n haved_credit = 0\r\n credit_detail = {}\r\n\r\n info = self.getRJson(url=creditInfourl)\r\n for k, v in info.items():\r\n if k == 'data':\r\n for k2, v2 in v.items():\r\n if k2 == 'haved_credit':\r\n haved_credit = v2\r\n if k2 == 'credit_detail':\r\n for i in v2:\r\n credit_detail.update({i['title']: i['score']})\r\n\r\n return 
(haved_credit, credit_detail)\r\n\r\n def getPages(self, urls):\r\n pages = []\r\n for url in urls:\r\n data = self.getRJson(url=url)\r\n for k, v in data.items():\r\n if k == 'data':\r\n for i in v:\r\n # pages.append({'pageId': i['id'], 'pageTitle': i['title']})\r\n # pages.append(i['id'])\r\n pages.append((i['title'], i['id']))\r\n\r\n return pages\r\n\r\n def getPagesII(self, urls):\r\n def getRJson(url):\r\n data = {\r\n 'token': self.token,\r\n 'appid': self.appid,\r\n 'type_id': '791',\r\n 'page_index': '1',\r\n }\r\n\r\n return self.session.post(url=url, data=data, verify=False).json()\r\n pages = []\r\n for url in urls:\r\n data = getRJson(url=url)\r\n for k, v in data.items():\r\n # print(k, v)\r\n if k == 'data':\r\n for i in v:\r\n # pages.append({'pageId': i['id'], 'pageTitle': i['title']})\r\n # pages.append(i['id'])\r\n pages.append((i['name'], i['id']))\r\n\r\n return pages\r\n\r\n def doThumb(self, id):\r\n '''\r\n 点赞函数,操作与id对应的页面\r\n 每次记录对应的信息到文件\r\n :return:\r\n '''\r\n contents = [\r\n '关注',\r\n '关注!',\r\n '关注!!']\r\n data = {\r\n 'id': id,\r\n 'comment': random.choice(contents),\r\n 'token': self.token,\r\n 'appid': self.appid,\r\n }\r\n commitUrl = 'https://mapi.dangjianwang.com/v3_1/Activities/CommentAct'\r\n rjson = self.session.post(url=commitUrl,\r\n data=data,\r\n verify=False).json()\r\n print(rjson)\r\n if rjson['code'] == '1003':\r\n self.token = self.getToken()\r\n elif rjson['code'] == '200':\r\n result = rjson['msg']\r\n if result == '操作成功':\r\n self.thumbedPages.append(id)\r\n # print(self.thumbPageList)\r\n # print(len(self.thumbPageList), len(list(set(self.thumbPageList))))\r\n\r\n for i in list(set(self.thumbPageList)):\r\n if id == i[1]:\r\n temp = {'title': i[0]}\r\n self.thumbResults.update(temp)\r\n log = '信息点赞:\\n主题: {}\\n提交:{}'.format(i[0], data['comment'])\r\n detail = '{} 主题:{}\\n回复:{}\\n'.format(self.getCurrentTime(), i[0], data['comment'])\r\n write2File(self, './results/', 'result.txt', log)\r\n thumbInfo = {'title': i[0], 'reply': data['comment']}\r\n\r\n self.thumbPages.remove(id)\r\n self.writeThumb2File(id=id)\r\n\r\n return (detail, thumbInfo)\r\n elif rjson['code'] == '500' and rjson['msg'] == '评论过快,请求休息一会':\r\n print('因评论过快,等待一段时间')\r\n time.sleep(20)\r\n else:\r\n print('rjson', rjson)\r\n # self.multiThumbed.append(id)\r\n self.thumbedPages.remove(id)#不成功的时候也要去掉不然总会选到\r\n self.writeThumb2File(id=id)\r\n log = '点赞:{}'.format(rjson)\r\n self.writeLog2File(log)\r\n print(log)\r\n time.sleep(10)\r\n\r\n\r\n def doHelp(self, id, callback=None):\r\n '''\r\n 互助功能\r\n :param id:\r\n :return:\r\n '''\r\n detail = ''\r\n helpInfo = None\r\n log = ''\r\n content = [\r\n '把党的政治建设摆在首位!',\r\n '不忘初心,牢记使命!',\r\n '发展史第一要务,人才是第一资源,创新是第一动力。',\r\n '要把党的领导贯彻到依法治国全过程和各方面',\r\n '毫不动摇坚持中国共产党领导',]\r\n data = {\r\n 'id': id,\r\n 'content': random.choice(content),\r\n 'token': self.token,\r\n 'appid': self.appid,\r\n }\r\n print(data)\r\n commitUrl = 'https://mapi.dangjianwang.com/v3_1/Help/PostComment'\r\n rjson = self.session.post(url=commitUrl,\r\n data=data,\r\n verify=False).json()\r\n\r\n\r\n\r\n if rjson['code'] == '200':\r\n result = rjson['msg']\r\n if result == '操作成功':\r\n self.helpedPages.append(id)\r\n self.helpPages.remove(id)\r\n #记录成功的到result\r\n for i in self.helpPageList:\r\n if id == i[1]:\r\n curTime = self.getCurrentTime()\r\n # print('('*88)\r\n # print(curTime)\r\n self.helpResults.update({'title': id[0]})\r\n log = '互助:\\n主题: {}\\n提交内容: {}'.format(i[0], rjson['comment'])\r\n write2File(self, './results/', 'result.txt', log)\r\n # 
#写入数据库\r\n detail = '{} 主题: {}\\n提交内容: {}\\n'.format(curTime, i[0], rjson['comment'].strip())\r\n helpInfo = {'title': i[0], 'reply': rjson['comment']}\r\n else:\r\n pass\r\n else:\r\n pass\r\n\r\n log = '帮助:{}'.format(rjson)\r\n self.writeLog2File(log)\r\n print(log)\r\n return (detail, log, helpInfo)\r\n\r\n def doView(self):\r\n '''\r\n 党员视角发布功能\r\n\r\n :return:\r\n '''\r\n\r\n content = [\r\n '全面的小康,覆盖的人口要全面,是惠及全体人民的小康。',\r\n '不忘初心,牢记使命,坚持终身学习!']\r\n data = {\r\n 'content': random.choice(content),\r\n 'token': self.token,\r\n 'appid': self.appid,\r\n }\r\n commitUrl = 'https://mapi.dangjianwang.com/v3_1/Viewpoint/Create'\r\n rjson = self.session.post(url=commitUrl,\r\n data=data,\r\n verify=False).json()\r\n if rjson['code'] == '200':\r\n result = rjson['msg']\r\n if result == '操作成功':\r\n self.viewsResults.append(1)\r\n # self.viewsResults.append(id)\r\n else:\r\n pass\r\n\r\n log = '党员视角:{}'.format(rjson)\r\n detail = '{} 党员视角:\\n发布内容:{}\\n'.format(self.getCurrentTime(), rjson['data']['content'])\r\n publicContent = rjson['data']['content']\r\n # print(detail)\r\n # self.writeLog2File(log)\r\n # print('党员视角'*12)\r\n # print(id)\r\n # print(log)\r\n # print('党员视角' * 12)\r\n return (detail, publicContent)\r\n\r\n def doStudy(self, mid):\r\n '''\r\n 前三个post函数的响应的三个请求\r\n get用来获得填写的内容\r\n 最后一个post是学习完离开并检测时间的函数如果成功说明该次学习成功。\r\n :param mid:\r\n :return:\r\n '''\r\n interval = 60 * 5 + 5\r\n def post1():\r\n data = {\r\n 'mid': mid,\r\n 'token': self.token,\r\n 'appid': self.appid,\r\n }\r\n commitUrl = 'https://mapi.dangjianwang.com/v3_1//Study/CheckCollStatus'\r\n rjson = self.session.post(url=commitUrl,\r\n data=data,\r\n verify=False).json()\r\n # print(rjson)\r\n log = '学习post1:{}'.format(rjson)\r\n self.writeLog2File(log)\r\n print(log)\r\n def post2():\r\n data = {\r\n 'token': self.token,\r\n 'appid': self.appid,\r\n }\r\n commitUrl = 'https://mapi.dangjianwang.com/v3_1/Login/CheckToken'\r\n rjson = self.session.post(url=commitUrl,\r\n data=data,\r\n verify=False).json()\r\n # print(rjson)\r\n log = '学习post2:{}'.format(rjson)\r\n self.writeLog2File(log)\r\n print(log)\r\n def post3():\r\n data = {\r\n 'mid': mid,\r\n 'token': self.token,\r\n 'appid': self.appid,\r\n }\r\n commitUrl = 'https://mapi.dangjianwang.com/v3_1/Study/GetFeelingsNum'\r\n rjson = self.session.post(url=commitUrl,\r\n data=data,\r\n verify=False).json()\r\n # print(rjson)\r\n log = '学习post3:{}'.format(rjson)\r\n self.writeLog2File(log)\r\n print(log)\r\n\r\n def get1():\r\n url = 'https://mapi.dangjianwang.com/v3_1/Study/MaterialDetail?token={}&mid={}'.format(self.token, mid)\r\n rjson = self.session.get(url=url)\r\n text = rjson.content\r\n soup = BeautifulSoup(text, 'html.parser')\r\n retContents = []\r\n for div in soup.find_all('p'):\r\n p = div.text.strip()\r\n retContents.append(p if 100 > len(p) < 200 else p[0:200])\r\n return random.choice(retContents)\r\n\r\n def recordFeeling(content=None):\r\n if not content:\r\n content = '伟大的时代造就伟大的人物。邓小平同志就是从中国人民和中华民族近代以来伟大斗争中产生的伟人,' \\\r\n '是我们大家衷心热爱的伟人。我们很多同志都曾经在他的领导和指导下工作过,他的崇高风范对我们来说是那样熟悉、那样亲切。' \\\r\n '邓小平同志崇高鲜明又独具魅力的革命风范,将激励我们在实现“两个一百年”奋斗目标、实现中华民族伟大复兴中国梦的征程上奋勇前进。'\r\n data = {\r\n 'mid': mid,\r\n 'token': self.token,\r\n 'appid': self.appid,\r\n 'content': content\r\n }\r\n\r\n commitUrl = 'https://mapi.dangjianwang.com/v3_1/Study/RecordFeeling'\r\n rjson = self.session.post(url=commitUrl,\r\n data=data,\r\n verify=False).json()\r\n # print(rjson)\r\n log = '学习recordFeeling:{}'.format(rjson)\r\n self.writeLog2File(log)\r\n print('in recordFeeling')\r\n 
print(log)\r\n\r\n if rjson['code'] == '200':\r\n return {'content': content}\r\n elif rjson['code'] == '1120':\r\n addtion = [\r\n '我们必须坚定不移,任何时候任何情况下都不能动摇',\r\n '人民有信心,国家才有未来,国家才有力量。',\r\n '新时代,属于自强不息、勇于创造的奋斗者。',\r\n '民主政治建设有序推进,依法治市迈出新步伐。',\r\n '一切公职人员,都必须牢记始终为人民利益和幸福而努力工作。',\r\n\r\n ]\r\n return recordFeeling(content= '{}\\n{}'.format(content, random.choice(addtion)))\r\n else:\r\n return None\r\n #记录回复的心得\r\n\r\n\r\n def readTime():\r\n data = {\r\n 'mid': mid,\r\n 'token': self.token,\r\n 'appid': self.appid,\r\n 'time': interval,\r\n }\r\n commitUrl = 'https://mapi.dangjianwang.com/v3_1/Study/ReadTime'\r\n rjson = self.session.post(url=commitUrl,\r\n data=data,\r\n verify=False).json()\r\n # print(rjson)\r\n log = '学习readTime:{}'.format(rjson)\r\n # self.studyRsults.update({'学习readTime', rjson})\r\n self.writeLog2File(log)\r\n print(log)\r\n\r\n\r\n\r\n post1()\r\n time.sleep(1)\r\n post2()\r\n time.sleep(1)\r\n post3()\r\n time.sleep(1)\r\n content = get1()\r\n time.sleep(1)\r\n # time.sleep(interval)\r\n count = 0\r\n print('开始学习请稍后')\r\n for i in range(interval):\r\n count += 1\r\n # print(i + 1)\r\n if count % 30 == 0:\r\n print('已用时{}秒'.format(count))\r\n time.sleep(1)\r\n # time.sleep(5)\r\n print('填写的学习体会', content)\r\n self.studyRsults.update(recordFeeling(content=content))\r\n time.sleep(1)\r\n readTime()\r\n time.sleep(1)\r\n pass\r\n\r\n def doExam(self):\r\n '''\r\n\r\n :param self:\r\n :return:\r\n '''\r\n ids = []\r\n data = {\r\n 'page': '1',\r\n 'page_size': '20',\r\n 'token': self.token,\r\n 'appid': self.appid,\r\n }\r\n examlistUrl = 'https://mapi.dangjianwang.com/v3_1/quora/examlist'\r\n rjson = self.session.post(url=examlistUrl,\r\n data=data,\r\n verify=False).json()\r\n # print(rjson)\r\n # for i in rjson['data']:\r\n # print(i)\r\n time.sleep(0.3)\r\n #########################################################\r\n print('*' * 99)\r\n data = {\r\n 'page': '1',\r\n 'page_size': '20',\r\n 'token': self.token,\r\n 'appid': self.appid,\r\n }\r\n banklistUrl = 'https://mapi.dangjianwang.com/v3_1/exam/banklist'\r\n rjson = self.session.post(url=banklistUrl,\r\n data=data,\r\n verify=False).json()\r\n # print(rjson)\r\n for i in rjson['data']:\r\n tem = (i['bank_name'], i['id'])\r\n self.examlist.append(tem)\r\n if i['bank_name'] == '十九大报告100题(单选)':\r\n # if i['bank_num'] == '65':\r\n temp = {\r\n 'title': i['bank_name'],\r\n 'detail': i['detail'],\r\n 'id': i['id'],\r\n }\r\n self.examC19Info.append(temp)\r\n # print(self.examC19Info)\r\n # print(self.examlist)\r\n time.sleep(0.3)\r\n #########################################################\r\n print('*' * 99)\r\n data = {\r\n 'bank': '6',\r\n 'token': self.token,\r\n 'appid': self.appid,\r\n }\r\n commitUrl = 'https://mapi.dangjianwang.com/v3_1/exam/randexam'\r\n rjson = self.session.post(url=commitUrl,\r\n data=data,\r\n verify=False).json()\r\n # print(rjson)\r\n aa = rjson['data']\r\n paper = aa['id']\r\n for i in aa['questions']:\r\n temp = {'id': i['id'], 'content': i['content']}\r\n ids.append(temp)\r\n\r\n #########################################################\r\n print('*' * 99)\r\n time.sleep(0.5)\r\n # 以下答题交卷\r\n\r\n answers = []\r\n # 先得到答案\r\n\r\n\r\n for i in ids:\r\n # 丛书据库获得答案\r\n correctAnswer = Qa.objects.filter(question__contains=i['content'])[0]\r\n answerText = correctAnswer.answerText\r\n answer = correctAnswer.answer\r\n #从文键获得答案\r\n # answerText = getAnswer(i['content'])[2]\r\n # answer = getAnswer(i['content'])[1]\r\n temp = {'index': i['id'], 'answer': answer}\r\n qa = {'index': i['id'], 
'answer': answer, 'answerText': answerText}\r\n self.qaList.append(qa)\r\n print(qa, i['content'])\r\n answers.append(temp)\r\n time.sleep(1)\r\n hdata = {\r\n 'token': self.token,\r\n 'appid': self.appid,\r\n 'paper': paper,\r\n 'answers': json.dumps(answers),\r\n # 'answers': [{'answer': 'A', 'index': '639'}, {'answer': 'A', 'index': '639'}],\r\n }\r\n # print('hdata:', hdata)\r\n commitUrl = 'https://mapi.dangjianwang.com/v3_1/exam/handpaper'\r\n rjson = self.session.post(url=commitUrl,\r\n data=hdata,\r\n verify=False).json()\r\n print(rjson)\r\n print(self.examlist)\r\n print(self.examC19Info)\r\n print(self.qaList)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n def getAnswerInfo(self):\r\n '''\r\n 获得答题的结果与正确率\r\n :return:\r\n '''\r\n data = {\r\n 'token': self.token,\r\n 'appid': self.appid,\r\n 'page_size': '20',\r\n 'page_index': 'page_index',\r\n }\r\n commitUrl = 'https://mapi.dangjianwang.com/v3_1/exam/randexam'\r\n rjson = self.session.post(url=commitUrl,\r\n data=data,\r\n verify=False).json()\r\n print(rjson)\r\n\r\n\r\n'''\r\n\r\nhttps://mapi.dangjianwang.com/v3_1/exam/randexam 答题地址 主id是交卷的paper 这里要获取到questions里的id 等于回答问题中的index \r\nappid\tTJZHDJ01\r\nbank\t6\r\ntoken\t5jTY47PbPZ0KdUprwmfJVfH4cX23tyDcV25XrEYkWVvElH3YjJpIb1JCDwq_\r\n\r\nhttps://mapi.dangjianwang.com/v3_1/exam/handpaper 交卷的连接\r\nappid\tTJZHDJ01\r\nanswers\t[{\"index\":\"635\",\"answer\":\"D\"},{\"index\":\"640\",\"answer\":\"C\"},{\"index\":\"641\",\"answer\":\"B\"},{\"index\":\"665\",\"answer\":\"B\"},{\"index\":\"670\",\"answer\":\"B\"},{\"index\":\"673\",\"answer\":\"B\"},{\"index\":\"677\",\"answer\":\"C\"},{\"index\":\"682\",\"answer\":\"B\"},{\"index\":\"684\",\"answer\":\"C\"},{\"index\":\"690\",\"answer\":\"A\"}]\r\ntoken\t5jTY47PbPZ0KdUprwmfJVfH4cX23tyDcV25XrEYkWVvElH3YjJpIb1JCDwq_\r\npaper\t4565894\r\n\r\nhttps://mapi.dangjianwang.com/v3_1/exam/banklist 获得答题情况的连接\r\n\r\nappid\tTJZHDJ01\r\npage_size\t20\r\ntoken\t5jTY47PbPZxXeRxlkzScAPWidyvssy3TBD5Y9UYiCQnMmCfa2pRNb1JCDwq_\r\npage_index\t1\r\n\r\n\r\n\r\n\r\n--------------------------------------------------\r\nhttps://mapi.dangjianwang.com/v3_1/Study/MaterialCollList 学习的id列表\r\nappid\tTJZHDJ01\r\npage_size\t20\r\ntype_id\t791\r\ntoken\t5jTY47PbPZJbeh9ixjfOUvaoI3604SrSAz5Zokt3DAmfz3qIis4Yb1JCDwq_\r\npage_index\t1\r\n\r\n下面是针对791id列表中的访问地址\r\nhttps://mapi.dangjianwang.com/v3_1//Study/CheckCollStatus\r\n\r\npost1:\r\nappid\tTJZHDJ01\r\nmid\t9729\r\ntoken\t5jTY47PbPZoOKEUwlDCaAKWqICGwt3_OVzlVpk5yW1bMyS_M3J5Db1JCDwq_\r\npost2:\r\n\r\nhttps://mapi.dangjianwang.com/v3_1/Login/CheckToken\r\nappid\tTJZHDJ01\r\ntoken\t5jTY47PbPZoOKEUwlDCaAKWqICGwt3_OVzlVpk5yW1bMyS_M3J5Db1JCDwq_\r\n\r\npost3:\r\nhttps://mapi.dangjianwang.com/v3_1/Study/GetFeelingsNum\r\nappid\tTJZHDJ01\r\nmid\t9729\r\ntoken\t5jTY47PbPZoOKEUwlDCaAKWqICGwt3_OVzlVpk5yW1bMyS_M3J5Db1JCDwq_\r\n\r\nget1 https://mapi.dangjianwang.com/v3_1/Study/MaterialDetail?token={}&mid={} 获得页面\r\n\r\n\r\n\r\npost 发表体会\r\nhttps://mapi.dangjianwang.com/v3_1/Study/RecordFeeling\r\nappid\tTJZHDJ01\r\ncontent\t 伟大的时代造就伟大的人物。邓小平同志就是从中国人民和中华民族近代以来伟大斗争中产生的伟人,是我们大家衷心热爱的伟人。我们很多同志都曾经在他的领导和指导下工作过,他的崇高风范对我们来说是那样熟悉、那样亲切。邓小平同志崇高鲜明又独具魅力的革命风范,将激励我们在实现“两个一百年”奋斗目标、实现中华民族伟大复兴中国梦的征程上奋勇前进。\r\nmid\t9729\r\ntoken\t5jTY47PbPckOdUlllmfOCaCvcy7ls3rSVmxRoE0gDg3EmyrYi5Ucb1JCDwq_\r\n\r\npost 结束学习 
\r\nhttps://mapi.dangjianwang.com/v3_1/Study/ReadTime\r\nappid\tTJZHDJ01\r\ntime\t362\r\nmid\t9729\r\ntoken\t5jTY47PbPckOdUlllmfOCaCvcy7ls3rSVmxRoE0gDg3EmyrYi5Ucb1JCDwq_\r\n\r\n\r\n---------------------------------------\r\n\r\nhttps://mapi.dangjianwang.com/v3_1/Help/List 这里获得帮助id\r\nhttps://mapi.dangjianwang.com/v3_1/Help/PostComment 提交评论的地址\r\n\r\n\r\nappid\tTJZHDJ01\r\ncontent\t不忘初心,牢记使命!\r\nid\t55984\r\ntoken\t5jTY47PbPcpZe0s1xDLKAqKoIimx6SnSVjcApB92DF3Nmy/djZ1Nb1JCDwq_\r\n\r\n把党的政治建设摆在首位!\r\n不忘初心,牢记使命!\r\n\r\n-------------------------------\r\n\r\n发布的内容\r\nhttps://mapi.dangjianwang.com/v3_1/Viewpoint/Create\r\n\r\nappid\tTJZHDJ01\r\ncontent\t不忘初心牢记使命\r\ntoken\t5jTY47PbPZ9deR5rkTXIB/b/fymw5HvbAj9R900gDArNnXqE1s9Kb1JCDwq_\r\n\r\n\r\n不忘初心,牢记使命,坚持终身学习!\r\n全面的小康,覆盖的人口要全面,是惠及全体人民的小康。\r\n\r\n-----------------------------\r\n点赞错误\r\n{'msg': '重复评论过多,请您修改后重新提交。', 'code': '500'}\r\n'''",
"step-ids": [
17,
19,
20,
21,
24
]
}
|
[
17,
19,
20,
21,
24
] |
import urlparse
def parse_url(url):
"""
Parse a url into a ParseResult() object then evolve that ParseResult()
instance into an EasyUrl() object, finally return the EasyUrl() instance.
"""
url = urlparse.urlparse(url)
#print url.__class__
return EasyUrl.EvolveParseResult(url)
class EasyUrl(urlparse.ParseResult):
"""
Don't change the url at all, instead create a new EasyUrl() object.
Use the python builtin methods to make the ParseResult() object friendlier.
"""
def __init__(self, url):
        self = parse_url(url) # returns an EasyUrl object
self.initialize_attributes()
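        # Note: rebinding ``self`` here only rebinds the local name and does not
        # replace the object being constructed; per the docstrings below, instances
        # are expected to come from parse_url() / EvolveParseResult() rather than
        # from calling EasyUrl(url) directly.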
# EasyUrl Methods
def initialize_attributes(self):
"""
When creating an EasyUrl() instance through the
EvolveParseResult() method, the __init__() method is never
        called, therefore it makes sense to place our initialization code
        into a separate method that we can call from both __init__() and
EvolveParseResult().
"""
self.host = self.netloc
self.url = self.geturl()
self.set_scheme_if_non('https')
# The file extensions we are watching for. Either load the extensions
        # from a text file, or create a separate python file containing a list
        # of supported file extensions
self.listed_file_extensions = [
'.jpg', '.bmp', '.png',
'.mp3', '.mp4', '.flv', '.avi',
'.zip', '.7z', '.tar', '.tar.gz', '.tar.bz', '.rar',
'.exe', '.git', '.torrent',
]
# Type Boolean: True or False
# Urls contain some useful information. Depending on the framework the
# website is built on, a url can contain information about paths and files.
        # This is a glimpse of the site's computer system. Pretty useful!
self.is_file_extension = None # Does this path end as a file?
#self.file_extension = self.check_for_file_extension()
def set_scheme_if_non(self, scheme='http'):
print self.scheme
if not self.scheme:
self.scheme = scheme
self._set_url()
def _set_url(self):
""" Updates our self.url by seting it to self.geturl()."""
self.url = self.geturl()
# Required Methods for Third parties
# - requests
    # - the url passed when making a request must be a string (or have the find method)
def find(self, *args, **kwargs):
return self.url.find(*args, **kwargs)
# Builtin Methods: Overriding the python builtin methods
def __str__(self):
return self.url
def __repr__(self):
return self.url
# return '<EasyUrl: %s>' % self.url
def __unicode__(self):
return self.url
# Static Methods: Call from class definition, not using an instance.
# example:
    # Good: EasyUrl.EvolveParseResult(...)
    #
    # Bad : url = EasyUrl()
    #     : url = url.EvolveParseResult(...)
@staticmethod
def EvolveParseResult(parseresult):
""" url, response
Take a formally (through urlparse.urlparse) constructed
ParseResult() object and transform it into this EasyUrl() object.
"""
        parseresult.__class__ = EasyUrl # This switches the instance's class to EasyUrl()
easy_url = parseresult
easy_url.initialize_attributes()
return easy_url
class HistoryEntry(object):
""" Keeps a collapsed form of a scraper state."""
def __init__(self, url, response):
self.url = url
self.response = response
def load_to_scraper(self, scraper):
"""
Delegate the parameters from this HistoryEntry()
to a scraper that is passed in as an argument.
"""
scraper.url = self.url
scraper.response = self.response
scraper.load_soup()
return scraper
class HistoryManager(dict):
""" Stores and manages HistoryEntry's from a scraper. """
def __init__(self, *history_entries):
#        super(HistoryManager, self).__init__()
self.load_history_entries(*history_entries)
def load_history_entries(self, *entries):
"""
        Using the HistoryEntry objects passed through the method call,
        populate the dictionary. The key is the site's host name, the
        value is a list containing all HistoryEntry's for that site.
"""
        # Group each entry under its host, creating the list on first use
        # (a plain dict is used here, so a missing host raises KeyError).
        for entry in entries:
            try:
                self[entry.url.host] += [entry]
            except KeyError:
                self[entry.url.host] = [entry]
def save(self, scraper):
""" Save the current state of a scraper. """
entry = HistoryEntry(scraper.url, scraper.response)
self.load_history_entries(entry)
#url = 'http://stackoverflow.com/'
#easy_url1 = parse_url(url)
#print easy_url1
#print easy_url1.__class__
#print repr(easy_url1)
#print easy_url1.geturl()
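# A minimal, commented-out sketch (not part of the original module) showing the
# shape of the history store; _FakeUrl is a hypothetical stand-in for anything
# exposing a `host` attribute, such as an EasyUrl instance.
#class _FakeUrl(object):
#    host = 'stackoverflow.com'
#entry = HistoryEntry(_FakeUrl(), response=None)
#history = HistoryManager(entry)
#print history['stackoverflow.com']   # -> [entry]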
|
normal
|
{
"blob_id": "0d322bdaf1bfed2b76172cc4dfb1b9af52bdc641",
"index": 8264,
"step-1": "import urlparse\n\n\n\n\ndef parse_url(url):\n \"\"\" \n Parse a url into a ParseResult() object then evolve that ParseResult()\n instance into an EasyUrl() object, finally return the EasyUrl() instance.\n \"\"\"\n url = urlparse.urlparse(url)\n #print url.__class__\n return EasyUrl.EvolveParseResult(url)\n\n\n\n\nclass EasyUrl(urlparse.ParseResult):\n \"\"\" \n Don't change the url at all, instead create a new EasyUrl() object.\n Use the python builtin methods to make the ParseResult() object friendlier. \n \"\"\"\n \n def __init__(self, url): \n self = parse_url(url) # returns a EasyUrl object\n self.initialize_attributes()\n\n # EasyUrl Methods\n def initialize_attributes(self):\n \"\"\" \n When creating an EasyUrl() instance through the\n EvolveParseResult() method, the __init__() method is never\n called, therefore it makes since to place our initialize code\n into a seperate method that we can call from both __init__() and\n EvolveParseResult().\n \"\"\"\n self.host = self.netloc\n self.url = self.geturl()\n\n self.set_scheme_if_non('https')\n \n # The file extensions we are watching for. Either load the extensions\n # from a text file, or create a seperate python file contain a list\n # supported file extensions\n self.listed_file_extensions = [ \n '.jpg', '.bmp', '.png',\n '.mp3', '.mp4', '.flv', '.avi',\n '.zip', '.7z', '.tar', '.tar.gz', '.tar.bz', '.rar',\n '.exe', '.git', '.torrent',\n ] \n # Type Boolean: True or False\n # Urls contain some useful information. Depending on the framework the \n # website is built on, a url can contain information about paths and files.\n # This is a glimpse of the sites computer system. Pretty Useful!\n self.is_file_extension = None # Does this path end as a file?\n #self.file_extension = self.check_for_file_extension()\n \n \n \n \n def set_scheme_if_non(self, scheme='http'):\n print self.scheme\n if not self.scheme:\n self.scheme = scheme\n self._set_url()\n\n\n\n\n\n def _set_url(self):\n \"\"\" Updates our self.url by seting it to self.geturl().\"\"\" \n self.url = self.geturl() \n \n \n # Required Methods for Third parties\n # - requests\n # - the url passed when making request must be a string (or have the find method)\n def find(self, *args, **kwargs):\n return self.url.find(*args, **kwargs)\n\n\n # Builtin Methods: Overriding the python builtin methods\n def __str__(self):\n return self.url\n \n def __repr__(self):\n return self.url\n # return '<EasyUrl: %s>' % self.url\n \n def __unicode__(self):\n return self.url\n\n # Static Methods: Call from class definition, not using an instance.\n # example: \n # Good: EasyUrl.EvolveParseresult(...)\n #\n # Bad : url = EasyUrl()\n # : url = url.EvolveParseresult(...)\n @staticmethod\n def EvolveParseResult(parseresult):\n \"\"\" url, response\n Take a formally (through urlparse.urlparse) constructed\n ParseResult() object and transform it into this EasyUrl() object.\n \"\"\"\n parseresult.__class__ = EasyUrl # This turns the the class to EasyUrl()\n \n easy_url = parseresult\n easy_url.initialize_attributes()\n return easy_url\n \n \n\nclass HistoryEntry(object):\n \"\"\" Keeps a collapsed form of a scraper state.\"\"\"\n \n def __init__(self, url, response):\n self.url = url\n self.response = response\n \n def load_to_scraper(self, scraper):\n \"\"\" \n Delegate the parameters from this HistoryEntry()\n to a scraper that is passed in as an argument.\n \"\"\"\n scraper.url = self.url\n scraper.response = self.response\n scraper.load_soup()\n return scraper\n\n\nclass 
HistoryManager(dict):\n \"\"\" Stores and manages HistoryEntry's from a scraper. \"\"\"\n\n def __init__(self, *history_entries):\n# super(HistoryEntry, self).__init__()\n self.load_history_entries(*history_entries)\n\n \n def load_history_entries(self, *entries):\n \"\"\" \n Using HistoryEntries passed through the method call,\n populatet request...\n'stackoverflow.com' the dictionary. The key being the site name, the\n value is a list containing all HistoryEntry's for that site.\n \"\"\"\n # Simplified version:\n for entry in entries:\n try:\n self[entry.url.host] += [entry]\n except KeyError:\n self[entry.url.host] = [entry]\n \n \n temp_dict = {entry.url.host: [] for entry in entries} \n for entry in entries:\n temp_dict[entry.url.host] += [entry]\n\n # Update the dictionary\n # self.update(temp_dict) # Will override any lists with the same host name\n for host, entry in temp_dict.items():\n #try:\n self[host] += [entry]\n #except IndexError:\n #self[host] = [entry]\n \n \n \n def save(self, scraper):\n \"\"\" Save the current state of a scraper. \"\"\"\n entry = HistoryEntry(scraper.url, scraper.response)\n self.load_history_entries(entry)\n \n \n \n#url = 'http://stackoverflow.com/'\n\n#easy_url1 = parse_url(url)\n#print easy_url1\n#print easy_url1.__class__\n#print repr(easy_url1)\n#print easy_url1.geturl()\n \n \n \n \n \n \n \n \n ",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
"""
Copyright (C) 2019-2020 Zilliz. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import pytest
import requests
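# The @pytest.mark.run(order=N) marks below assume the pytest-ordering plugin is
# installed; without it pytest may not honour the intended execution order.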
original_table_name = "raw_data"
table_name = "nyctaxi"
csv_path = "/arctern/gui/server/arctern_server/data/0_5M_nyc_taxi_and_building.csv"
SCOPE = "nyc_taxi"
def _get_line_count(file):
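    # Counts every line in the file, including the CSV header row; callers
    # therefore compare row counts against this value minus one.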
with open(file, "r") as f:
return len(f.readlines())
class TestScope():
@pytest.mark.run(order=1)
def test_create_scope(self, host, port):
url = "http://" + host + ":" + port + "/scope"
r = requests.post(url=url)
print(r.text)
assert r.status_code == 200
global SCOPE # pylint: disable=global-statement
SCOPE = r.json()['scope']
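        # The scope id captured here is reused by every later test in this class,
        # which is why the run-order marks matter.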
@pytest.mark.run(order=2)
def test_load_file(self, host, port):
url = "http://" + host + ":" + port + "/loadfile"
payload = {
"scope": SCOPE,
"tables": [
{
"name": original_table_name,
"format": "csv",
"path": csv_path,
"options": {
"header": "True",
"delimiter": ","
},
"schema": [
{"VendorID": "string"},
{"tpep_pickup_datetime": "string"},
{"tpep_dropoff_datetime": "string"},
{"passenger_count": "long"},
{"trip_distance": "double"},
{"pickup_longitude": "double"},
{"pickup_latitude": "double"},
{"dropoff_longitude": "double"},
{"dropoff_latitude": "double"},
{"fare_amount": "double"},
{"tip_amount": "double"},
{"total_amount": "double"},
{"buildingid_pickup": "long"},
{"buildingid_dropoff": "long"},
{"buildingtext_pickup": "string"},
{"buildingtext_dropoff": "string"}
]
}
]
}
r = requests.post(url=url, json=payload)
print(r.text)
assert r.status_code == 200
    # TODO: necessary for /savefile? not convenient for cleaning up
@pytest.mark.run(order=3)
def test_table_schema(self, host, port):
url = "http://" + host + ":" + port + "/table/schema?table={}&scope={}".format(original_table_name, SCOPE)
r = requests.get(url=url)
print(r.text)
assert r.status_code == 200
assert len(r.json()['schema']) == 16
@pytest.mark.run(order=4)
def test_num_rows(self, host, port):
url = "http://" + host + ":" + port + "/query"
sql = "select count(*) as num_rows from {}".format(original_table_name)
payload = {
"scope": SCOPE,
"sql": sql,
"collect_result": "1"
}
r = requests.post(url=url, json=payload)
print(r.text)
assert r.status_code == 200
assert len(r.json()['result']) == 1
assert r.json()['result'][0]['num_rows'] == _get_line_count(csv_path) - 1
@pytest.mark.run(order=5)
def test_query(self, host, port):
url = "http://" + host + ":" + port + "/query"
limit = 1
sql = "select * from {} limit {}".format(original_table_name, limit)
payload = {
"scope": SCOPE,
"sql": sql,
"collect_result": "1"
}
r = requests.post(url=url, json=payload)
print(r.text)
assert r.status_code == 200
assert len(r.json()['result']) == limit
@pytest.mark.run(order=6)
def test_create_table(self, host, port):
url = "http://" + host + ":" + port + "/query"
payload = {
"scope": SCOPE,
"sql": "create table {} as (select VendorID, to_timestamp(tpep_pickup_datetime,'yyyy-MM-dd HH:mm:ss XXXXX') as tpep_pickup_datetime, to_timestamp(tpep_dropoff_datetime,'yyyy-MM-dd HH:mm:ss XXXXX') as tpep_dropoff_datetime, passenger_count, trip_distance, pickup_longitude, pickup_latitude, dropoff_longitude, dropoff_latitude, fare_amount, tip_amount, total_amount, buildingid_pickup, buildingid_dropoff, buildingtext_pickup, buildingtext_dropoff from {} where (pickup_longitude between -180 and 180) and (pickup_latitude between -90 and 90) and (dropoff_longitude between -180 and 180) and (dropoff_latitude between -90 and 90))".format(table_name, original_table_name),
"collect_result": "0"
}
r = requests.post(url=url, json=payload)
print(r.text)
assert r.status_code == 200
@pytest.mark.run(order=7)
def test_pointmap(self, host, port):
url = "http://" + host + ":" + port + "/pointmap"
payload = {
"scope": SCOPE,
"sql": "select ST_Point(pickup_longitude, pickup_latitude) as point from {} where ST_Within(ST_Point(pickup_longitude, pickup_latitude), ST_GeomFromText('POLYGON ((-73.998427 40.730309, -73.954348 40.730309, -73.954348 40.780816 ,-73.998427 40.780816, -73.998427 40.730309))'))".format(table_name),
"params": {
"width": 1024,
"height": 896,
"bounding_box": [-80.37976, 35.191296, -70.714099, 45.897445],
"coordinate_system": "EPSG:4326",
"point_color": "#2DEF4A",
"point_size": 3,
"opacity": 0.5
}
}
r = requests.post(url=url, json=payload)
assert r.status_code == 200
print(r.text)
# assert r.json()["result"] is not None
@pytest.mark.run(order=8)
def test_weighted_pointmap(self, host, port):
url = "http://" + host + ":" + port + "/weighted_pointmap"
payload = {
"scope": SCOPE,
"sql": "select ST_Point(pickup_longitude, pickup_latitude) as point, tip_amount as c, fare_amount as s from {} where ST_Within(ST_Point(pickup_longitude, pickup_latitude), ST_GeomFromText('POLYGON ((-73.998427 40.730309, -73.954348 40.730309, -73.954348 40.780816 ,-73.998427 40.780816, -73.998427 40.730309))'))".format(table_name),
"params": {
"width": 1024,
"height": 896,
"bounding_box": [-80.37976, 35.191296, -70.714099, 45.897445],
"color_gradient": ["#0000FF", "#FF0000"],
"color_bound": [0, 2],
"size_bound": [0, 10],
"opacity": 1.0,
"coordinate_system": "EPSG:4326"
}
}
r = requests.post(url=url, json=payload)
assert r.status_code == 200
print(r.text)
# assert r.json()["result"] is not None
@pytest.mark.run(order=9)
def test_heatmap(self, host, port):
url = "http://" + host + ":" + port + "/heatmap"
payload = {
"scope": SCOPE,
"sql": "select ST_Point(pickup_longitude, pickup_latitude) as point, passenger_count as w from {} where ST_Within(ST_Point(pickup_longitude, pickup_latitude), ST_GeomFromText('POLYGON ((-73.998427 40.730309, -73.954348 40.730309, -73.954348 40.780816 ,-73.998427 40.780816, -73.998427 40.730309))'))".format(table_name),
"params": {
"width": 1024,
"height": 896,
"bounding_box": [-80.37976, 35.191296, -70.714099, 45.897445],
"coordinate_system": "EPSG:4326",
"map_zoom_level": 10,
"aggregation_type": "sum"
}
}
r = requests.post(url=url, json=payload)
assert r.status_code == 200
print(r.text)
# assert r.json()["result"] is not None
@pytest.mark.run(order=10)
def test_choroplethmap(self, host, port):
url = "http://" + host + ":" + port + "/choroplethmap"
payload = {
"scope": SCOPE,
"sql": "select ST_GeomFromText(buildingtext_dropoff) as wkt, passenger_count as w from {} where (buildingtext_dropoff!='')".format(table_name),
"params": {
"width": 1024,
"height": 896,
"bounding_box": [-80.37976, 35.191296, -70.714099, 45.897445],
"coordinate_system": "EPSG:4326",
"color_gradient": ["#0000FF", "#FF0000"],
"color_bound": [2.5, 5],
"opacity": 1,
"aggregation_type": "sum"
}
}
r = requests.post(url=url, json=payload)
assert r.status_code == 200
print(r.text)
# assert r.json()["result"] is not None
@pytest.mark.run(order=11)
def test_icon_viz(self, host, port):
url = "http://" + host + ":" + port + "/icon_viz"
import os
dir_path = os.path.dirname(os.path.realpath(__file__))
png_path = dir_path + "/taxi.png"
payload = {
"scope": SCOPE,
"sql": "select ST_Point(pickup_longitude, pickup_latitude) as point from {} where ST_Within(ST_Point(pickup_longitude, pickup_latitude), ST_GeomFromText('POLYGON ((-73.998427 40.730309, -73.954348 40.730309, -73.954348 40.780816 ,-73.998427 40.780816, -73.998427 40.730309))'))".format(table_name),
"params": {
'width': 1024,
'height': 896,
'bounding_box': [-75.37976, 40.191296, -71.714099, 41.897445],
'coordinate_system': 'EPSG:4326',
'icon_path': png_path
}
}
r = requests.post(url=url, json=payload)
assert r.status_code == 200
print(r.text)
# assert r.json()["result"] is not None
@pytest.mark.run(order=12)
def test_fishnetmap(self, host, port):
url = "http://" + host + ":" + port + "/fishnetmap"
payload = {
"scope": SCOPE,
"sql": "select ST_Point(pickup_longitude, pickup_latitude) as point, passenger_count as w from {} where ST_Within(ST_Point(pickup_longitude, pickup_latitude), ST_GeomFromText('POLYGON ((-73.998427 40.730309, -73.954348 40.730309, -73.954348 40.780816 ,-73.998427 40.780816, -73.998427 40.730309))'))".format(table_name),
"params": {
"width": 1024,
"height": 896,
"bounding_box": [-80.37976, 35.191296, -70.714099, 45.897445],
"color_gradient": ["#0000FF", "#FF0000"],
"cell_size": 4,
"cell_spacing": 1,
"opacity": 1.0,
"coordinate_system": "EPSG:4326",
"aggregation_type": "sum"
}
}
r = requests.post(url=url, json=payload)
assert r.status_code == 200
print(r.text)
# assert r.json()["result"] is not None
@pytest.mark.run(order=13)
def test_drop_table(self, host, port):
url = "http://" + host + ":" + port + '/query'
sql1 = "drop table if exists {}".format(table_name)
sql2 = "drop table if exists {}".format(original_table_name)
payload1 = {
"scope": SCOPE,
"sql": sql1,
"collect_result": "0"
}
payload2 = {
"scope": SCOPE,
"sql": sql2,
"collect_result": "0"
}
r = requests.post(url=url, json=payload1)
print(r.text)
assert r.status_code == 200
r = requests.post(url=url, json=payload2)
print(r.text)
assert r.status_code == 200
@pytest.mark.run(order=14)
def test_command(self, host, port):
url = "http://" + host + ":" + port + '/command'
command = """
from __future__ import print_function
import sys
from random import random
from operator import add
partitions = 2
n = 100000 * partitions
def f(_):
x = random() * 2 - 1
y = random() * 2 - 1
return 1 if x ** 2 + y ** 2 <= 1 else 0
count = spark.sparkContext.parallelize(range(1, n + 1), partitions).map(f).reduce(add)
print("Pi is roughly %f" % (4.0 * count / n))
"""
payload = {
"scope": SCOPE,
"command": command
}
r = requests.post(url=url, json=payload)
print(r.text)
assert r.status_code == 200
@pytest.mark.run(order=15)
def test_remove_scope(self, host, port):
scope = SCOPE
url = "http://" + host + ":" + port + "/scope/" + scope
r = requests.delete(url=url)
print(r.text)
assert r.status_code == 200
|
normal
|
{
"blob_id": "65a9f732fc8c7b9c63f6ef0d7b2172bb4138a895",
"index": 2761,
"step-1": "<mask token>\n\n\nclass TestScope:\n\n @pytest.mark.run(order=1)\n def test_create_scope(self, host, port):\n url = 'http://' + host + ':' + port + '/scope'\n r = requests.post(url=url)\n print(r.text)\n assert r.status_code == 200\n global SCOPE\n SCOPE = r.json()['scope']\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n @pytest.mark.run(order=7)\n def test_pointmap(self, host, port):\n url = 'http://' + host + ':' + port + '/pointmap'\n payload = {'scope': SCOPE, 'sql':\n \"select ST_Point(pickup_longitude, pickup_latitude) as point from {} where ST_Within(ST_Point(pickup_longitude, pickup_latitude), ST_GeomFromText('POLYGON ((-73.998427 40.730309, -73.954348 40.730309, -73.954348 40.780816 ,-73.998427 40.780816, -73.998427 40.730309))'))\"\n .format(table_name), 'params': {'width': 1024, 'height': 896,\n 'bounding_box': [-80.37976, 35.191296, -70.714099, 45.897445],\n 'coordinate_system': 'EPSG:4326', 'point_color': '#2DEF4A',\n 'point_size': 3, 'opacity': 0.5}}\n r = requests.post(url=url, json=payload)\n assert r.status_code == 200\n print(r.text)\n\n @pytest.mark.run(order=8)\n def test_weighted_pointmap(self, host, port):\n url = 'http://' + host + ':' + port + '/weighted_pointmap'\n payload = {'scope': SCOPE, 'sql':\n \"select ST_Point(pickup_longitude, pickup_latitude) as point, tip_amount as c, fare_amount as s from {} where ST_Within(ST_Point(pickup_longitude, pickup_latitude), ST_GeomFromText('POLYGON ((-73.998427 40.730309, -73.954348 40.730309, -73.954348 40.780816 ,-73.998427 40.780816, -73.998427 40.730309))'))\"\n .format(table_name), 'params': {'width': 1024, 'height': 896,\n 'bounding_box': [-80.37976, 35.191296, -70.714099, 45.897445],\n 'color_gradient': ['#0000FF', '#FF0000'], 'color_bound': [0, 2],\n 'size_bound': [0, 10], 'opacity': 1.0, 'coordinate_system':\n 'EPSG:4326'}}\n r = requests.post(url=url, json=payload)\n assert r.status_code == 200\n print(r.text)\n <mask token>\n\n @pytest.mark.run(order=10)\n def test_choroplethmap(self, host, port):\n url = 'http://' + host + ':' + port + '/choroplethmap'\n payload = {'scope': SCOPE, 'sql':\n \"select ST_GeomFromText(buildingtext_dropoff) as wkt, passenger_count as w from {} where (buildingtext_dropoff!='')\"\n .format(table_name), 'params': {'width': 1024, 'height': 896,\n 'bounding_box': [-80.37976, 35.191296, -70.714099, 45.897445],\n 'coordinate_system': 'EPSG:4326', 'color_gradient': ['#0000FF',\n '#FF0000'], 'color_bound': [2.5, 5], 'opacity': 1,\n 'aggregation_type': 'sum'}}\n r = requests.post(url=url, json=payload)\n assert r.status_code == 200\n print(r.text)\n\n @pytest.mark.run(order=11)\n def test_icon_viz(self, host, port):\n url = 'http://' + host + ':' + port + '/icon_viz'\n import os\n dir_path = os.path.dirname(os.path.realpath(__file__))\n png_path = dir_path + '/taxi.png'\n payload = {'scope': SCOPE, 'sql':\n \"select ST_Point(pickup_longitude, pickup_latitude) as point from {} where ST_Within(ST_Point(pickup_longitude, pickup_latitude), ST_GeomFromText('POLYGON ((-73.998427 40.730309, -73.954348 40.730309, -73.954348 40.780816 ,-73.998427 40.780816, -73.998427 40.730309))'))\"\n .format(table_name), 'params': {'width': 1024, 'height': 896,\n 'bounding_box': [-75.37976, 40.191296, -71.714099, 41.897445],\n 'coordinate_system': 'EPSG:4326', 'icon_path': png_path}}\n r = requests.post(url=url, json=payload)\n assert r.status_code == 200\n print(r.text)\n\n @pytest.mark.run(order=12)\n def test_fishnetmap(self, host, port):\n url = 'http://' + host + ':' + 
port + '/fishnetmap'\n payload = {'scope': SCOPE, 'sql':\n \"select ST_Point(pickup_longitude, pickup_latitude) as point, passenger_count as w from {} where ST_Within(ST_Point(pickup_longitude, pickup_latitude), ST_GeomFromText('POLYGON ((-73.998427 40.730309, -73.954348 40.730309, -73.954348 40.780816 ,-73.998427 40.780816, -73.998427 40.730309))'))\"\n .format(table_name), 'params': {'width': 1024, 'height': 896,\n 'bounding_box': [-80.37976, 35.191296, -70.714099, 45.897445],\n 'color_gradient': ['#0000FF', '#FF0000'], 'cell_size': 4,\n 'cell_spacing': 1, 'opacity': 1.0, 'coordinate_system':\n 'EPSG:4326', 'aggregation_type': 'sum'}}\n r = requests.post(url=url, json=payload)\n assert r.status_code == 200\n print(r.text)\n\n @pytest.mark.run(order=13)\n def test_drop_table(self, host, port):\n url = 'http://' + host + ':' + port + '/query'\n sql1 = 'drop table if exists {}'.format(table_name)\n sql2 = 'drop table if exists {}'.format(original_table_name)\n payload1 = {'scope': SCOPE, 'sql': sql1, 'collect_result': '0'}\n payload2 = {'scope': SCOPE, 'sql': sql2, 'collect_result': '0'}\n r = requests.post(url=url, json=payload1)\n print(r.text)\n assert r.status_code == 200\n r = requests.post(url=url, json=payload2)\n print(r.text)\n assert r.status_code == 200\n\n @pytest.mark.run(order=14)\n def test_command(self, host, port):\n url = 'http://' + host + ':' + port + '/command'\n command = \"\"\"\nfrom __future__ import print_function\n\nimport sys\nfrom random import random\nfrom operator import add\n\npartitions = 2\nn = 100000 * partitions\n\ndef f(_):\n x = random() * 2 - 1\n y = random() * 2 - 1\n return 1 if x ** 2 + y ** 2 <= 1 else 0\n\ncount = spark.sparkContext.parallelize(range(1, n + 1), partitions).map(f).reduce(add)\nprint(\"Pi is roughly %f\" % (4.0 * count / n))\n \"\"\"\n payload = {'scope': SCOPE, 'command': command}\n r = requests.post(url=url, json=payload)\n print(r.text)\n assert r.status_code == 200\n\n @pytest.mark.run(order=15)\n def test_remove_scope(self, host, port):\n scope = SCOPE\n url = 'http://' + host + ':' + port + '/scope/' + scope\n r = requests.delete(url=url)\n print(r.text)\n assert r.status_code == 200\n",
"step-2": "<mask token>\n\n\nclass TestScope:\n\n @pytest.mark.run(order=1)\n def test_create_scope(self, host, port):\n url = 'http://' + host + ':' + port + '/scope'\n r = requests.post(url=url)\n print(r.text)\n assert r.status_code == 200\n global SCOPE\n SCOPE = r.json()['scope']\n\n @pytest.mark.run(order=2)\n def test_load_file(self, host, port):\n url = 'http://' + host + ':' + port + '/loadfile'\n payload = {'scope': SCOPE, 'tables': [{'name': original_table_name,\n 'format': 'csv', 'path': csv_path, 'options': {'header': 'True',\n 'delimiter': ','}, 'schema': [{'VendorID': 'string'}, {\n 'tpep_pickup_datetime': 'string'}, {'tpep_dropoff_datetime':\n 'string'}, {'passenger_count': 'long'}, {'trip_distance':\n 'double'}, {'pickup_longitude': 'double'}, {'pickup_latitude':\n 'double'}, {'dropoff_longitude': 'double'}, {'dropoff_latitude':\n 'double'}, {'fare_amount': 'double'}, {'tip_amount': 'double'},\n {'total_amount': 'double'}, {'buildingid_pickup': 'long'}, {\n 'buildingid_dropoff': 'long'}, {'buildingtext_pickup': 'string'\n }, {'buildingtext_dropoff': 'string'}]}]}\n r = requests.post(url=url, json=payload)\n print(r.text)\n assert r.status_code == 200\n\n @pytest.mark.run(order=3)\n def test_table_schema(self, host, port):\n url = ('http://' + host + ':' + port +\n '/table/schema?table={}&scope={}'.format(original_table_name,\n SCOPE))\n r = requests.get(url=url)\n print(r.text)\n assert r.status_code == 200\n assert len(r.json()['schema']) == 16\n <mask token>\n\n @pytest.mark.run(order=5)\n def test_query(self, host, port):\n url = 'http://' + host + ':' + port + '/query'\n limit = 1\n sql = 'select * from {} limit {}'.format(original_table_name, limit)\n payload = {'scope': SCOPE, 'sql': sql, 'collect_result': '1'}\n r = requests.post(url=url, json=payload)\n print(r.text)\n assert r.status_code == 200\n assert len(r.json()['result']) == limit\n <mask token>\n\n @pytest.mark.run(order=7)\n def test_pointmap(self, host, port):\n url = 'http://' + host + ':' + port + '/pointmap'\n payload = {'scope': SCOPE, 'sql':\n \"select ST_Point(pickup_longitude, pickup_latitude) as point from {} where ST_Within(ST_Point(pickup_longitude, pickup_latitude), ST_GeomFromText('POLYGON ((-73.998427 40.730309, -73.954348 40.730309, -73.954348 40.780816 ,-73.998427 40.780816, -73.998427 40.730309))'))\"\n .format(table_name), 'params': {'width': 1024, 'height': 896,\n 'bounding_box': [-80.37976, 35.191296, -70.714099, 45.897445],\n 'coordinate_system': 'EPSG:4326', 'point_color': '#2DEF4A',\n 'point_size': 3, 'opacity': 0.5}}\n r = requests.post(url=url, json=payload)\n assert r.status_code == 200\n print(r.text)\n\n @pytest.mark.run(order=8)\n def test_weighted_pointmap(self, host, port):\n url = 'http://' + host + ':' + port + '/weighted_pointmap'\n payload = {'scope': SCOPE, 'sql':\n \"select ST_Point(pickup_longitude, pickup_latitude) as point, tip_amount as c, fare_amount as s from {} where ST_Within(ST_Point(pickup_longitude, pickup_latitude), ST_GeomFromText('POLYGON ((-73.998427 40.730309, -73.954348 40.730309, -73.954348 40.780816 ,-73.998427 40.780816, -73.998427 40.730309))'))\"\n .format(table_name), 'params': {'width': 1024, 'height': 896,\n 'bounding_box': [-80.37976, 35.191296, -70.714099, 45.897445],\n 'color_gradient': ['#0000FF', '#FF0000'], 'color_bound': [0, 2],\n 'size_bound': [0, 10], 'opacity': 1.0, 'coordinate_system':\n 'EPSG:4326'}}\n r = requests.post(url=url, json=payload)\n assert r.status_code == 200\n print(r.text)\n\n @pytest.mark.run(order=9)\n def 
test_heatmap(self, host, port):\n url = 'http://' + host + ':' + port + '/heatmap'\n payload = {'scope': SCOPE, 'sql':\n \"select ST_Point(pickup_longitude, pickup_latitude) as point, passenger_count as w from {} where ST_Within(ST_Point(pickup_longitude, pickup_latitude), ST_GeomFromText('POLYGON ((-73.998427 40.730309, -73.954348 40.730309, -73.954348 40.780816 ,-73.998427 40.780816, -73.998427 40.730309))'))\"\n .format(table_name), 'params': {'width': 1024, 'height': 896,\n 'bounding_box': [-80.37976, 35.191296, -70.714099, 45.897445],\n 'coordinate_system': 'EPSG:4326', 'map_zoom_level': 10,\n 'aggregation_type': 'sum'}}\n r = requests.post(url=url, json=payload)\n assert r.status_code == 200\n print(r.text)\n\n @pytest.mark.run(order=10)\n def test_choroplethmap(self, host, port):\n url = 'http://' + host + ':' + port + '/choroplethmap'\n payload = {'scope': SCOPE, 'sql':\n \"select ST_GeomFromText(buildingtext_dropoff) as wkt, passenger_count as w from {} where (buildingtext_dropoff!='')\"\n .format(table_name), 'params': {'width': 1024, 'height': 896,\n 'bounding_box': [-80.37976, 35.191296, -70.714099, 45.897445],\n 'coordinate_system': 'EPSG:4326', 'color_gradient': ['#0000FF',\n '#FF0000'], 'color_bound': [2.5, 5], 'opacity': 1,\n 'aggregation_type': 'sum'}}\n r = requests.post(url=url, json=payload)\n assert r.status_code == 200\n print(r.text)\n\n @pytest.mark.run(order=11)\n def test_icon_viz(self, host, port):\n url = 'http://' + host + ':' + port + '/icon_viz'\n import os\n dir_path = os.path.dirname(os.path.realpath(__file__))\n png_path = dir_path + '/taxi.png'\n payload = {'scope': SCOPE, 'sql':\n \"select ST_Point(pickup_longitude, pickup_latitude) as point from {} where ST_Within(ST_Point(pickup_longitude, pickup_latitude), ST_GeomFromText('POLYGON ((-73.998427 40.730309, -73.954348 40.730309, -73.954348 40.780816 ,-73.998427 40.780816, -73.998427 40.730309))'))\"\n .format(table_name), 'params': {'width': 1024, 'height': 896,\n 'bounding_box': [-75.37976, 40.191296, -71.714099, 41.897445],\n 'coordinate_system': 'EPSG:4326', 'icon_path': png_path}}\n r = requests.post(url=url, json=payload)\n assert r.status_code == 200\n print(r.text)\n\n @pytest.mark.run(order=12)\n def test_fishnetmap(self, host, port):\n url = 'http://' + host + ':' + port + '/fishnetmap'\n payload = {'scope': SCOPE, 'sql':\n \"select ST_Point(pickup_longitude, pickup_latitude) as point, passenger_count as w from {} where ST_Within(ST_Point(pickup_longitude, pickup_latitude), ST_GeomFromText('POLYGON ((-73.998427 40.730309, -73.954348 40.730309, -73.954348 40.780816 ,-73.998427 40.780816, -73.998427 40.730309))'))\"\n .format(table_name), 'params': {'width': 1024, 'height': 896,\n 'bounding_box': [-80.37976, 35.191296, -70.714099, 45.897445],\n 'color_gradient': ['#0000FF', '#FF0000'], 'cell_size': 4,\n 'cell_spacing': 1, 'opacity': 1.0, 'coordinate_system':\n 'EPSG:4326', 'aggregation_type': 'sum'}}\n r = requests.post(url=url, json=payload)\n assert r.status_code == 200\n print(r.text)\n\n @pytest.mark.run(order=13)\n def test_drop_table(self, host, port):\n url = 'http://' + host + ':' + port + '/query'\n sql1 = 'drop table if exists {}'.format(table_name)\n sql2 = 'drop table if exists {}'.format(original_table_name)\n payload1 = {'scope': SCOPE, 'sql': sql1, 'collect_result': '0'}\n payload2 = {'scope': SCOPE, 'sql': sql2, 'collect_result': '0'}\n r = requests.post(url=url, json=payload1)\n print(r.text)\n assert r.status_code == 200\n r = requests.post(url=url, json=payload2)\n 
print(r.text)\n assert r.status_code == 200\n\n @pytest.mark.run(order=14)\n def test_command(self, host, port):\n url = 'http://' + host + ':' + port + '/command'\n command = \"\"\"\nfrom __future__ import print_function\n\nimport sys\nfrom random import random\nfrom operator import add\n\npartitions = 2\nn = 100000 * partitions\n\ndef f(_):\n x = random() * 2 - 1\n y = random() * 2 - 1\n return 1 if x ** 2 + y ** 2 <= 1 else 0\n\ncount = spark.sparkContext.parallelize(range(1, n + 1), partitions).map(f).reduce(add)\nprint(\"Pi is roughly %f\" % (4.0 * count / n))\n \"\"\"\n payload = {'scope': SCOPE, 'command': command}\n r = requests.post(url=url, json=payload)\n print(r.text)\n assert r.status_code == 200\n\n @pytest.mark.run(order=15)\n def test_remove_scope(self, host, port):\n scope = SCOPE\n url = 'http://' + host + ':' + port + '/scope/' + scope\n r = requests.delete(url=url)\n print(r.text)\n assert r.status_code == 200\n",
"step-3": "<mask token>\n\n\ndef _get_line_count(file):\n with open(file, 'r') as f:\n return len(f.readlines())\n\n\nclass TestScope:\n\n @pytest.mark.run(order=1)\n def test_create_scope(self, host, port):\n url = 'http://' + host + ':' + port + '/scope'\n r = requests.post(url=url)\n print(r.text)\n assert r.status_code == 200\n global SCOPE\n SCOPE = r.json()['scope']\n\n @pytest.mark.run(order=2)\n def test_load_file(self, host, port):\n url = 'http://' + host + ':' + port + '/loadfile'\n payload = {'scope': SCOPE, 'tables': [{'name': original_table_name,\n 'format': 'csv', 'path': csv_path, 'options': {'header': 'True',\n 'delimiter': ','}, 'schema': [{'VendorID': 'string'}, {\n 'tpep_pickup_datetime': 'string'}, {'tpep_dropoff_datetime':\n 'string'}, {'passenger_count': 'long'}, {'trip_distance':\n 'double'}, {'pickup_longitude': 'double'}, {'pickup_latitude':\n 'double'}, {'dropoff_longitude': 'double'}, {'dropoff_latitude':\n 'double'}, {'fare_amount': 'double'}, {'tip_amount': 'double'},\n {'total_amount': 'double'}, {'buildingid_pickup': 'long'}, {\n 'buildingid_dropoff': 'long'}, {'buildingtext_pickup': 'string'\n }, {'buildingtext_dropoff': 'string'}]}]}\n r = requests.post(url=url, json=payload)\n print(r.text)\n assert r.status_code == 200\n\n @pytest.mark.run(order=3)\n def test_table_schema(self, host, port):\n url = ('http://' + host + ':' + port +\n '/table/schema?table={}&scope={}'.format(original_table_name,\n SCOPE))\n r = requests.get(url=url)\n print(r.text)\n assert r.status_code == 200\n assert len(r.json()['schema']) == 16\n\n @pytest.mark.run(order=4)\n def test_num_rows(self, host, port):\n url = 'http://' + host + ':' + port + '/query'\n sql = 'select count(*) as num_rows from {}'.format(original_table_name)\n payload = {'scope': SCOPE, 'sql': sql, 'collect_result': '1'}\n r = requests.post(url=url, json=payload)\n print(r.text)\n assert r.status_code == 200\n assert len(r.json()['result']) == 1\n assert r.json()['result'][0]['num_rows'] == _get_line_count(csv_path\n ) - 1\n\n @pytest.mark.run(order=5)\n def test_query(self, host, port):\n url = 'http://' + host + ':' + port + '/query'\n limit = 1\n sql = 'select * from {} limit {}'.format(original_table_name, limit)\n payload = {'scope': SCOPE, 'sql': sql, 'collect_result': '1'}\n r = requests.post(url=url, json=payload)\n print(r.text)\n assert r.status_code == 200\n assert len(r.json()['result']) == limit\n\n @pytest.mark.run(order=6)\n def test_create_table(self, host, port):\n url = 'http://' + host + ':' + port + '/query'\n payload = {'scope': SCOPE, 'sql':\n \"create table {} as (select VendorID, to_timestamp(tpep_pickup_datetime,'yyyy-MM-dd HH:mm:ss XXXXX') as tpep_pickup_datetime, to_timestamp(tpep_dropoff_datetime,'yyyy-MM-dd HH:mm:ss XXXXX') as tpep_dropoff_datetime, passenger_count, trip_distance, pickup_longitude, pickup_latitude, dropoff_longitude, dropoff_latitude, fare_amount, tip_amount, total_amount, buildingid_pickup, buildingid_dropoff, buildingtext_pickup, buildingtext_dropoff from {} where (pickup_longitude between -180 and 180) and (pickup_latitude between -90 and 90) and (dropoff_longitude between -180 and 180) and (dropoff_latitude between -90 and 90))\"\n .format(table_name, original_table_name), 'collect_result': '0'}\n r = requests.post(url=url, json=payload)\n print(r.text)\n assert r.status_code == 200\n\n @pytest.mark.run(order=7)\n def test_pointmap(self, host, port):\n url = 'http://' + host + ':' + port + '/pointmap'\n payload = {'scope': SCOPE, 'sql':\n \"select 
ST_Point(pickup_longitude, pickup_latitude) as point from {} where ST_Within(ST_Point(pickup_longitude, pickup_latitude), ST_GeomFromText('POLYGON ((-73.998427 40.730309, -73.954348 40.730309, -73.954348 40.780816 ,-73.998427 40.780816, -73.998427 40.730309))'))\"\n .format(table_name), 'params': {'width': 1024, 'height': 896,\n 'bounding_box': [-80.37976, 35.191296, -70.714099, 45.897445],\n 'coordinate_system': 'EPSG:4326', 'point_color': '#2DEF4A',\n 'point_size': 3, 'opacity': 0.5}}\n r = requests.post(url=url, json=payload)\n assert r.status_code == 200\n print(r.text)\n\n @pytest.mark.run(order=8)\n def test_weighted_pointmap(self, host, port):\n url = 'http://' + host + ':' + port + '/weighted_pointmap'\n payload = {'scope': SCOPE, 'sql':\n \"select ST_Point(pickup_longitude, pickup_latitude) as point, tip_amount as c, fare_amount as s from {} where ST_Within(ST_Point(pickup_longitude, pickup_latitude), ST_GeomFromText('POLYGON ((-73.998427 40.730309, -73.954348 40.730309, -73.954348 40.780816 ,-73.998427 40.780816, -73.998427 40.730309))'))\"\n .format(table_name), 'params': {'width': 1024, 'height': 896,\n 'bounding_box': [-80.37976, 35.191296, -70.714099, 45.897445],\n 'color_gradient': ['#0000FF', '#FF0000'], 'color_bound': [0, 2],\n 'size_bound': [0, 10], 'opacity': 1.0, 'coordinate_system':\n 'EPSG:4326'}}\n r = requests.post(url=url, json=payload)\n assert r.status_code == 200\n print(r.text)\n\n @pytest.mark.run(order=9)\n def test_heatmap(self, host, port):\n url = 'http://' + host + ':' + port + '/heatmap'\n payload = {'scope': SCOPE, 'sql':\n \"select ST_Point(pickup_longitude, pickup_latitude) as point, passenger_count as w from {} where ST_Within(ST_Point(pickup_longitude, pickup_latitude), ST_GeomFromText('POLYGON ((-73.998427 40.730309, -73.954348 40.730309, -73.954348 40.780816 ,-73.998427 40.780816, -73.998427 40.730309))'))\"\n .format(table_name), 'params': {'width': 1024, 'height': 896,\n 'bounding_box': [-80.37976, 35.191296, -70.714099, 45.897445],\n 'coordinate_system': 'EPSG:4326', 'map_zoom_level': 10,\n 'aggregation_type': 'sum'}}\n r = requests.post(url=url, json=payload)\n assert r.status_code == 200\n print(r.text)\n\n @pytest.mark.run(order=10)\n def test_choroplethmap(self, host, port):\n url = 'http://' + host + ':' + port + '/choroplethmap'\n payload = {'scope': SCOPE, 'sql':\n \"select ST_GeomFromText(buildingtext_dropoff) as wkt, passenger_count as w from {} where (buildingtext_dropoff!='')\"\n .format(table_name), 'params': {'width': 1024, 'height': 896,\n 'bounding_box': [-80.37976, 35.191296, -70.714099, 45.897445],\n 'coordinate_system': 'EPSG:4326', 'color_gradient': ['#0000FF',\n '#FF0000'], 'color_bound': [2.5, 5], 'opacity': 1,\n 'aggregation_type': 'sum'}}\n r = requests.post(url=url, json=payload)\n assert r.status_code == 200\n print(r.text)\n\n @pytest.mark.run(order=11)\n def test_icon_viz(self, host, port):\n url = 'http://' + host + ':' + port + '/icon_viz'\n import os\n dir_path = os.path.dirname(os.path.realpath(__file__))\n png_path = dir_path + '/taxi.png'\n payload = {'scope': SCOPE, 'sql':\n \"select ST_Point(pickup_longitude, pickup_latitude) as point from {} where ST_Within(ST_Point(pickup_longitude, pickup_latitude), ST_GeomFromText('POLYGON ((-73.998427 40.730309, -73.954348 40.730309, -73.954348 40.780816 ,-73.998427 40.780816, -73.998427 40.730309))'))\"\n .format(table_name), 'params': {'width': 1024, 'height': 896,\n 'bounding_box': [-75.37976, 40.191296, -71.714099, 41.897445],\n 'coordinate_system': 'EPSG:4326', 
'icon_path': png_path}}\n r = requests.post(url=url, json=payload)\n assert r.status_code == 200\n print(r.text)\n\n @pytest.mark.run(order=12)\n def test_fishnetmap(self, host, port):\n url = 'http://' + host + ':' + port + '/fishnetmap'\n payload = {'scope': SCOPE, 'sql':\n \"select ST_Point(pickup_longitude, pickup_latitude) as point, passenger_count as w from {} where ST_Within(ST_Point(pickup_longitude, pickup_latitude), ST_GeomFromText('POLYGON ((-73.998427 40.730309, -73.954348 40.730309, -73.954348 40.780816 ,-73.998427 40.780816, -73.998427 40.730309))'))\"\n .format(table_name), 'params': {'width': 1024, 'height': 896,\n 'bounding_box': [-80.37976, 35.191296, -70.714099, 45.897445],\n 'color_gradient': ['#0000FF', '#FF0000'], 'cell_size': 4,\n 'cell_spacing': 1, 'opacity': 1.0, 'coordinate_system':\n 'EPSG:4326', 'aggregation_type': 'sum'}}\n r = requests.post(url=url, json=payload)\n assert r.status_code == 200\n print(r.text)\n\n @pytest.mark.run(order=13)\n def test_drop_table(self, host, port):\n url = 'http://' + host + ':' + port + '/query'\n sql1 = 'drop table if exists {}'.format(table_name)\n sql2 = 'drop table if exists {}'.format(original_table_name)\n payload1 = {'scope': SCOPE, 'sql': sql1, 'collect_result': '0'}\n payload2 = {'scope': SCOPE, 'sql': sql2, 'collect_result': '0'}\n r = requests.post(url=url, json=payload1)\n print(r.text)\n assert r.status_code == 200\n r = requests.post(url=url, json=payload2)\n print(r.text)\n assert r.status_code == 200\n\n @pytest.mark.run(order=14)\n def test_command(self, host, port):\n url = 'http://' + host + ':' + port + '/command'\n command = \"\"\"\nfrom __future__ import print_function\n\nimport sys\nfrom random import random\nfrom operator import add\n\npartitions = 2\nn = 100000 * partitions\n\ndef f(_):\n x = random() * 2 - 1\n y = random() * 2 - 1\n return 1 if x ** 2 + y ** 2 <= 1 else 0\n\ncount = spark.sparkContext.parallelize(range(1, n + 1), partitions).map(f).reduce(add)\nprint(\"Pi is roughly %f\" % (4.0 * count / n))\n \"\"\"\n payload = {'scope': SCOPE, 'command': command}\n r = requests.post(url=url, json=payload)\n print(r.text)\n assert r.status_code == 200\n\n @pytest.mark.run(order=15)\n def test_remove_scope(self, host, port):\n scope = SCOPE\n url = 'http://' + host + ':' + port + '/scope/' + scope\n r = requests.delete(url=url)\n print(r.text)\n assert r.status_code == 200\n",
"step-4": "<mask token>\noriginal_table_name = 'raw_data'\ntable_name = 'nyctaxi'\ncsv_path = (\n '/arctern/gui/server/arctern_server/data/0_5M_nyc_taxi_and_building.csv')\nSCOPE = 'nyc_taxi'\n\n\ndef _get_line_count(file):\n with open(file, 'r') as f:\n return len(f.readlines())\n\n\nclass TestScope:\n\n @pytest.mark.run(order=1)\n def test_create_scope(self, host, port):\n url = 'http://' + host + ':' + port + '/scope'\n r = requests.post(url=url)\n print(r.text)\n assert r.status_code == 200\n global SCOPE\n SCOPE = r.json()['scope']\n\n @pytest.mark.run(order=2)\n def test_load_file(self, host, port):\n url = 'http://' + host + ':' + port + '/loadfile'\n payload = {'scope': SCOPE, 'tables': [{'name': original_table_name,\n 'format': 'csv', 'path': csv_path, 'options': {'header': 'True',\n 'delimiter': ','}, 'schema': [{'VendorID': 'string'}, {\n 'tpep_pickup_datetime': 'string'}, {'tpep_dropoff_datetime':\n 'string'}, {'passenger_count': 'long'}, {'trip_distance':\n 'double'}, {'pickup_longitude': 'double'}, {'pickup_latitude':\n 'double'}, {'dropoff_longitude': 'double'}, {'dropoff_latitude':\n 'double'}, {'fare_amount': 'double'}, {'tip_amount': 'double'},\n {'total_amount': 'double'}, {'buildingid_pickup': 'long'}, {\n 'buildingid_dropoff': 'long'}, {'buildingtext_pickup': 'string'\n }, {'buildingtext_dropoff': 'string'}]}]}\n r = requests.post(url=url, json=payload)\n print(r.text)\n assert r.status_code == 200\n\n @pytest.mark.run(order=3)\n def test_table_schema(self, host, port):\n url = ('http://' + host + ':' + port +\n '/table/schema?table={}&scope={}'.format(original_table_name,\n SCOPE))\n r = requests.get(url=url)\n print(r.text)\n assert r.status_code == 200\n assert len(r.json()['schema']) == 16\n\n @pytest.mark.run(order=4)\n def test_num_rows(self, host, port):\n url = 'http://' + host + ':' + port + '/query'\n sql = 'select count(*) as num_rows from {}'.format(original_table_name)\n payload = {'scope': SCOPE, 'sql': sql, 'collect_result': '1'}\n r = requests.post(url=url, json=payload)\n print(r.text)\n assert r.status_code == 200\n assert len(r.json()['result']) == 1\n assert r.json()['result'][0]['num_rows'] == _get_line_count(csv_path\n ) - 1\n\n @pytest.mark.run(order=5)\n def test_query(self, host, port):\n url = 'http://' + host + ':' + port + '/query'\n limit = 1\n sql = 'select * from {} limit {}'.format(original_table_name, limit)\n payload = {'scope': SCOPE, 'sql': sql, 'collect_result': '1'}\n r = requests.post(url=url, json=payload)\n print(r.text)\n assert r.status_code == 200\n assert len(r.json()['result']) == limit\n\n @pytest.mark.run(order=6)\n def test_create_table(self, host, port):\n url = 'http://' + host + ':' + port + '/query'\n payload = {'scope': SCOPE, 'sql':\n \"create table {} as (select VendorID, to_timestamp(tpep_pickup_datetime,'yyyy-MM-dd HH:mm:ss XXXXX') as tpep_pickup_datetime, to_timestamp(tpep_dropoff_datetime,'yyyy-MM-dd HH:mm:ss XXXXX') as tpep_dropoff_datetime, passenger_count, trip_distance, pickup_longitude, pickup_latitude, dropoff_longitude, dropoff_latitude, fare_amount, tip_amount, total_amount, buildingid_pickup, buildingid_dropoff, buildingtext_pickup, buildingtext_dropoff from {} where (pickup_longitude between -180 and 180) and (pickup_latitude between -90 and 90) and (dropoff_longitude between -180 and 180) and (dropoff_latitude between -90 and 90))\"\n .format(table_name, original_table_name), 'collect_result': '0'}\n r = requests.post(url=url, json=payload)\n print(r.text)\n assert r.status_code == 200\n\n 
@pytest.mark.run(order=7)\n def test_pointmap(self, host, port):\n url = 'http://' + host + ':' + port + '/pointmap'\n payload = {'scope': SCOPE, 'sql':\n \"select ST_Point(pickup_longitude, pickup_latitude) as point from {} where ST_Within(ST_Point(pickup_longitude, pickup_latitude), ST_GeomFromText('POLYGON ((-73.998427 40.730309, -73.954348 40.730309, -73.954348 40.780816 ,-73.998427 40.780816, -73.998427 40.730309))'))\"\n .format(table_name), 'params': {'width': 1024, 'height': 896,\n 'bounding_box': [-80.37976, 35.191296, -70.714099, 45.897445],\n 'coordinate_system': 'EPSG:4326', 'point_color': '#2DEF4A',\n 'point_size': 3, 'opacity': 0.5}}\n r = requests.post(url=url, json=payload)\n assert r.status_code == 200\n print(r.text)\n\n @pytest.mark.run(order=8)\n def test_weighted_pointmap(self, host, port):\n url = 'http://' + host + ':' + port + '/weighted_pointmap'\n payload = {'scope': SCOPE, 'sql':\n \"select ST_Point(pickup_longitude, pickup_latitude) as point, tip_amount as c, fare_amount as s from {} where ST_Within(ST_Point(pickup_longitude, pickup_latitude), ST_GeomFromText('POLYGON ((-73.998427 40.730309, -73.954348 40.730309, -73.954348 40.780816 ,-73.998427 40.780816, -73.998427 40.730309))'))\"\n .format(table_name), 'params': {'width': 1024, 'height': 896,\n 'bounding_box': [-80.37976, 35.191296, -70.714099, 45.897445],\n 'color_gradient': ['#0000FF', '#FF0000'], 'color_bound': [0, 2],\n 'size_bound': [0, 10], 'opacity': 1.0, 'coordinate_system':\n 'EPSG:4326'}}\n r = requests.post(url=url, json=payload)\n assert r.status_code == 200\n print(r.text)\n\n @pytest.mark.run(order=9)\n def test_heatmap(self, host, port):\n url = 'http://' + host + ':' + port + '/heatmap'\n payload = {'scope': SCOPE, 'sql':\n \"select ST_Point(pickup_longitude, pickup_latitude) as point, passenger_count as w from {} where ST_Within(ST_Point(pickup_longitude, pickup_latitude), ST_GeomFromText('POLYGON ((-73.998427 40.730309, -73.954348 40.730309, -73.954348 40.780816 ,-73.998427 40.780816, -73.998427 40.730309))'))\"\n .format(table_name), 'params': {'width': 1024, 'height': 896,\n 'bounding_box': [-80.37976, 35.191296, -70.714099, 45.897445],\n 'coordinate_system': 'EPSG:4326', 'map_zoom_level': 10,\n 'aggregation_type': 'sum'}}\n r = requests.post(url=url, json=payload)\n assert r.status_code == 200\n print(r.text)\n\n @pytest.mark.run(order=10)\n def test_choroplethmap(self, host, port):\n url = 'http://' + host + ':' + port + '/choroplethmap'\n payload = {'scope': SCOPE, 'sql':\n \"select ST_GeomFromText(buildingtext_dropoff) as wkt, passenger_count as w from {} where (buildingtext_dropoff!='')\"\n .format(table_name), 'params': {'width': 1024, 'height': 896,\n 'bounding_box': [-80.37976, 35.191296, -70.714099, 45.897445],\n 'coordinate_system': 'EPSG:4326', 'color_gradient': ['#0000FF',\n '#FF0000'], 'color_bound': [2.5, 5], 'opacity': 1,\n 'aggregation_type': 'sum'}}\n r = requests.post(url=url, json=payload)\n assert r.status_code == 200\n print(r.text)\n\n @pytest.mark.run(order=11)\n def test_icon_viz(self, host, port):\n url = 'http://' + host + ':' + port + '/icon_viz'\n import os\n dir_path = os.path.dirname(os.path.realpath(__file__))\n png_path = dir_path + '/taxi.png'\n payload = {'scope': SCOPE, 'sql':\n \"select ST_Point(pickup_longitude, pickup_latitude) as point from {} where ST_Within(ST_Point(pickup_longitude, pickup_latitude), ST_GeomFromText('POLYGON ((-73.998427 40.730309, -73.954348 40.730309, -73.954348 40.780816 ,-73.998427 40.780816, -73.998427 40.730309))'))\"\n 
.format(table_name), 'params': {'width': 1024, 'height': 896,\n 'bounding_box': [-75.37976, 40.191296, -71.714099, 41.897445],\n 'coordinate_system': 'EPSG:4326', 'icon_path': png_path}}\n r = requests.post(url=url, json=payload)\n assert r.status_code == 200\n print(r.text)\n\n @pytest.mark.run(order=12)\n def test_fishnetmap(self, host, port):\n url = 'http://' + host + ':' + port + '/fishnetmap'\n payload = {'scope': SCOPE, 'sql':\n \"select ST_Point(pickup_longitude, pickup_latitude) as point, passenger_count as w from {} where ST_Within(ST_Point(pickup_longitude, pickup_latitude), ST_GeomFromText('POLYGON ((-73.998427 40.730309, -73.954348 40.730309, -73.954348 40.780816 ,-73.998427 40.780816, -73.998427 40.730309))'))\"\n .format(table_name), 'params': {'width': 1024, 'height': 896,\n 'bounding_box': [-80.37976, 35.191296, -70.714099, 45.897445],\n 'color_gradient': ['#0000FF', '#FF0000'], 'cell_size': 4,\n 'cell_spacing': 1, 'opacity': 1.0, 'coordinate_system':\n 'EPSG:4326', 'aggregation_type': 'sum'}}\n r = requests.post(url=url, json=payload)\n assert r.status_code == 200\n print(r.text)\n\n @pytest.mark.run(order=13)\n def test_drop_table(self, host, port):\n url = 'http://' + host + ':' + port + '/query'\n sql1 = 'drop table if exists {}'.format(table_name)\n sql2 = 'drop table if exists {}'.format(original_table_name)\n payload1 = {'scope': SCOPE, 'sql': sql1, 'collect_result': '0'}\n payload2 = {'scope': SCOPE, 'sql': sql2, 'collect_result': '0'}\n r = requests.post(url=url, json=payload1)\n print(r.text)\n assert r.status_code == 200\n r = requests.post(url=url, json=payload2)\n print(r.text)\n assert r.status_code == 200\n\n @pytest.mark.run(order=14)\n def test_command(self, host, port):\n url = 'http://' + host + ':' + port + '/command'\n command = \"\"\"\nfrom __future__ import print_function\n\nimport sys\nfrom random import random\nfrom operator import add\n\npartitions = 2\nn = 100000 * partitions\n\ndef f(_):\n x = random() * 2 - 1\n y = random() * 2 - 1\n return 1 if x ** 2 + y ** 2 <= 1 else 0\n\ncount = spark.sparkContext.parallelize(range(1, n + 1), partitions).map(f).reduce(add)\nprint(\"Pi is roughly %f\" % (4.0 * count / n))\n \"\"\"\n payload = {'scope': SCOPE, 'command': command}\n r = requests.post(url=url, json=payload)\n print(r.text)\n assert r.status_code == 200\n\n @pytest.mark.run(order=15)\n def test_remove_scope(self, host, port):\n scope = SCOPE\n url = 'http://' + host + ':' + port + '/scope/' + scope\n r = requests.delete(url=url)\n print(r.text)\n assert r.status_code == 200\n",
"step-5": "\"\"\"\nCopyright (C) 2019-2020 Zilliz. All rights reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n\"\"\"\n\nimport pytest\nimport requests\n\noriginal_table_name = \"raw_data\"\ntable_name = \"nyctaxi\"\ncsv_path = \"/arctern/gui/server/arctern_server/data/0_5M_nyc_taxi_and_building.csv\"\nSCOPE = \"nyc_taxi\"\n\ndef _get_line_count(file):\n with open(file, \"r\") as f:\n return len(f.readlines())\n\nclass TestScope():\n @pytest.mark.run(order=1)\n def test_create_scope(self, host, port):\n url = \"http://\" + host + \":\" + port + \"/scope\"\n r = requests.post(url=url)\n print(r.text)\n assert r.status_code == 200\n global SCOPE # pylint: disable=global-statement\n SCOPE = r.json()['scope']\n\n @pytest.mark.run(order=2)\n def test_load_file(self, host, port):\n url = \"http://\" + host + \":\" + port + \"/loadfile\"\n payload = {\n \"scope\": SCOPE,\n \"tables\": [\n {\n \"name\": original_table_name,\n \"format\": \"csv\",\n \"path\": csv_path,\n \"options\": {\n \"header\": \"True\",\n \"delimiter\": \",\"\n },\n \"schema\": [\n {\"VendorID\": \"string\"},\n {\"tpep_pickup_datetime\": \"string\"},\n {\"tpep_dropoff_datetime\": \"string\"},\n {\"passenger_count\": \"long\"},\n {\"trip_distance\": \"double\"},\n {\"pickup_longitude\": \"double\"},\n {\"pickup_latitude\": \"double\"},\n {\"dropoff_longitude\": \"double\"},\n {\"dropoff_latitude\": \"double\"},\n {\"fare_amount\": \"double\"},\n {\"tip_amount\": \"double\"},\n {\"total_amount\": \"double\"},\n {\"buildingid_pickup\": \"long\"},\n {\"buildingid_dropoff\": \"long\"},\n {\"buildingtext_pickup\": \"string\"},\n {\"buildingtext_dropoff\": \"string\"}\n ]\n }\n ]\n }\n r = requests.post(url=url, json=payload)\n print(r.text)\n assert r.status_code == 200\n\n # TODO: neccessary for /savefile? 
not convenient for cleaning up\n\n @pytest.mark.run(order=3)\n def test_table_schema(self, host, port):\n url = \"http://\" + host + \":\" + port + \"/table/schema?table={}&scope={}\".format(original_table_name, SCOPE)\n r = requests.get(url=url)\n print(r.text)\n assert r.status_code == 200\n assert len(r.json()['schema']) == 16\n\n @pytest.mark.run(order=4)\n def test_num_rows(self, host, port):\n url = \"http://\" + host + \":\" + port + \"/query\"\n sql = \"select count(*) as num_rows from {}\".format(original_table_name)\n payload = {\n \"scope\": SCOPE,\n \"sql\": sql,\n \"collect_result\": \"1\"\n }\n r = requests.post(url=url, json=payload)\n print(r.text)\n assert r.status_code == 200\n assert len(r.json()['result']) == 1\n assert r.json()['result'][0]['num_rows'] == _get_line_count(csv_path) - 1\n\n @pytest.mark.run(order=5)\n def test_query(self, host, port):\n url = \"http://\" + host + \":\" + port + \"/query\"\n limit = 1\n sql = \"select * from {} limit {}\".format(original_table_name, limit)\n payload = {\n \"scope\": SCOPE,\n \"sql\": sql,\n \"collect_result\": \"1\"\n }\n r = requests.post(url=url, json=payload)\n print(r.text)\n assert r.status_code == 200\n assert len(r.json()['result']) == limit\n\n @pytest.mark.run(order=6)\n def test_create_table(self, host, port):\n url = \"http://\" + host + \":\" + port + \"/query\"\n payload = {\n \"scope\": SCOPE,\n \"sql\": \"create table {} as (select VendorID, to_timestamp(tpep_pickup_datetime,'yyyy-MM-dd HH:mm:ss XXXXX') as tpep_pickup_datetime, to_timestamp(tpep_dropoff_datetime,'yyyy-MM-dd HH:mm:ss XXXXX') as tpep_dropoff_datetime, passenger_count, trip_distance, pickup_longitude, pickup_latitude, dropoff_longitude, dropoff_latitude, fare_amount, tip_amount, total_amount, buildingid_pickup, buildingid_dropoff, buildingtext_pickup, buildingtext_dropoff from {} where (pickup_longitude between -180 and 180) and (pickup_latitude between -90 and 90) and (dropoff_longitude between -180 and 180) and (dropoff_latitude between -90 and 90))\".format(table_name, original_table_name),\n \"collect_result\": \"0\"\n }\n r = requests.post(url=url, json=payload)\n print(r.text)\n assert r.status_code == 200\n\n @pytest.mark.run(order=7)\n def test_pointmap(self, host, port):\n url = \"http://\" + host + \":\" + port + \"/pointmap\"\n payload = {\n \"scope\": SCOPE,\n \"sql\": \"select ST_Point(pickup_longitude, pickup_latitude) as point from {} where ST_Within(ST_Point(pickup_longitude, pickup_latitude), ST_GeomFromText('POLYGON ((-73.998427 40.730309, -73.954348 40.730309, -73.954348 40.780816 ,-73.998427 40.780816, -73.998427 40.730309))'))\".format(table_name),\n \"params\": {\n \"width\": 1024,\n \"height\": 896,\n \"bounding_box\": [-80.37976, 35.191296, -70.714099, 45.897445],\n \"coordinate_system\": \"EPSG:4326\",\n \"point_color\": \"#2DEF4A\",\n \"point_size\": 3,\n \"opacity\": 0.5\n }\n }\n r = requests.post(url=url, json=payload)\n assert r.status_code == 200\n print(r.text)\n # assert r.json()[\"result\"] is not None\n\n @pytest.mark.run(order=8)\n def test_weighted_pointmap(self, host, port):\n url = \"http://\" + host + \":\" + port + \"/weighted_pointmap\"\n payload = {\n \"scope\": SCOPE,\n \"sql\": \"select ST_Point(pickup_longitude, pickup_latitude) as point, tip_amount as c, fare_amount as s from {} where ST_Within(ST_Point(pickup_longitude, pickup_latitude), ST_GeomFromText('POLYGON ((-73.998427 40.730309, -73.954348 40.730309, -73.954348 40.780816 ,-73.998427 40.780816, -73.998427 
40.730309))'))\".format(table_name),\n \"params\": {\n \"width\": 1024,\n \"height\": 896,\n \"bounding_box\": [-80.37976, 35.191296, -70.714099, 45.897445],\n \"color_gradient\": [\"#0000FF\", \"#FF0000\"],\n \"color_bound\": [0, 2],\n \"size_bound\": [0, 10],\n \"opacity\": 1.0,\n \"coordinate_system\": \"EPSG:4326\"\n }\n }\n r = requests.post(url=url, json=payload)\n assert r.status_code == 200\n print(r.text)\n # assert r.json()[\"result\"] is not None\n\n @pytest.mark.run(order=9)\n def test_heatmap(self, host, port):\n url = \"http://\" + host + \":\" + port + \"/heatmap\"\n payload = {\n \"scope\": SCOPE,\n \"sql\": \"select ST_Point(pickup_longitude, pickup_latitude) as point, passenger_count as w from {} where ST_Within(ST_Point(pickup_longitude, pickup_latitude), ST_GeomFromText('POLYGON ((-73.998427 40.730309, -73.954348 40.730309, -73.954348 40.780816 ,-73.998427 40.780816, -73.998427 40.730309))'))\".format(table_name),\n \"params\": {\n \"width\": 1024,\n \"height\": 896,\n \"bounding_box\": [-80.37976, 35.191296, -70.714099, 45.897445],\n \"coordinate_system\": \"EPSG:4326\",\n \"map_zoom_level\": 10,\n \"aggregation_type\": \"sum\"\n }\n }\n r = requests.post(url=url, json=payload)\n assert r.status_code == 200\n print(r.text)\n # assert r.json()[\"result\"] is not None\n\n @pytest.mark.run(order=10)\n def test_choroplethmap(self, host, port):\n url = \"http://\" + host + \":\" + port + \"/choroplethmap\"\n payload = {\n \"scope\": SCOPE,\n \"sql\": \"select ST_GeomFromText(buildingtext_dropoff) as wkt, passenger_count as w from {} where (buildingtext_dropoff!='')\".format(table_name),\n \"params\": {\n \"width\": 1024,\n \"height\": 896,\n \"bounding_box\": [-80.37976, 35.191296, -70.714099, 45.897445],\n \"coordinate_system\": \"EPSG:4326\",\n \"color_gradient\": [\"#0000FF\", \"#FF0000\"],\n \"color_bound\": [2.5, 5],\n \"opacity\": 1,\n \"aggregation_type\": \"sum\"\n }\n }\n r = requests.post(url=url, json=payload)\n assert r.status_code == 200\n print(r.text)\n # assert r.json()[\"result\"] is not None\n\n @pytest.mark.run(order=11)\n def test_icon_viz(self, host, port):\n url = \"http://\" + host + \":\" + port + \"/icon_viz\"\n import os\n dir_path = os.path.dirname(os.path.realpath(__file__))\n png_path = dir_path + \"/taxi.png\"\n payload = {\n \"scope\": SCOPE,\n \"sql\": \"select ST_Point(pickup_longitude, pickup_latitude) as point from {} where ST_Within(ST_Point(pickup_longitude, pickup_latitude), ST_GeomFromText('POLYGON ((-73.998427 40.730309, -73.954348 40.730309, -73.954348 40.780816 ,-73.998427 40.780816, -73.998427 40.730309))'))\".format(table_name),\n \"params\": {\n 'width': 1024,\n 'height': 896,\n 'bounding_box': [-75.37976, 40.191296, -71.714099, 41.897445],\n 'coordinate_system': 'EPSG:4326',\n 'icon_path': png_path\n }\n }\n r = requests.post(url=url, json=payload)\n assert r.status_code == 200\n print(r.text)\n # assert r.json()[\"result\"] is not None\n\n @pytest.mark.run(order=12)\n def test_fishnetmap(self, host, port):\n url = \"http://\" + host + \":\" + port + \"/fishnetmap\"\n payload = {\n \"scope\": SCOPE,\n \"sql\": \"select ST_Point(pickup_longitude, pickup_latitude) as point, passenger_count as w from {} where ST_Within(ST_Point(pickup_longitude, pickup_latitude), ST_GeomFromText('POLYGON ((-73.998427 40.730309, -73.954348 40.730309, -73.954348 40.780816 ,-73.998427 40.780816, -73.998427 40.730309))'))\".format(table_name),\n \"params\": {\n \"width\": 1024,\n \"height\": 896,\n \"bounding_box\": [-80.37976, 35.191296, -70.714099, 
45.897445],\n \"color_gradient\": [\"#0000FF\", \"#FF0000\"],\n \"cell_size\": 4,\n \"cell_spacing\": 1,\n \"opacity\": 1.0,\n \"coordinate_system\": \"EPSG:4326\",\n \"aggregation_type\": \"sum\"\n }\n }\n r = requests.post(url=url, json=payload)\n assert r.status_code == 200\n print(r.text)\n # assert r.json()[\"result\"] is not None\n\n @pytest.mark.run(order=13)\n def test_drop_table(self, host, port):\n url = \"http://\" + host + \":\" + port + '/query'\n sql1 = \"drop table if exists {}\".format(table_name)\n sql2 = \"drop table if exists {}\".format(original_table_name)\n payload1 = {\n \"scope\": SCOPE,\n \"sql\": sql1,\n \"collect_result\": \"0\"\n }\n payload2 = {\n \"scope\": SCOPE,\n \"sql\": sql2,\n \"collect_result\": \"0\"\n }\n r = requests.post(url=url, json=payload1)\n print(r.text)\n assert r.status_code == 200\n r = requests.post(url=url, json=payload2)\n print(r.text)\n assert r.status_code == 200\n\n @pytest.mark.run(order=14)\n def test_command(self, host, port):\n url = \"http://\" + host + \":\" + port + '/command'\n command = \"\"\"\nfrom __future__ import print_function\n\nimport sys\nfrom random import random\nfrom operator import add\n\npartitions = 2\nn = 100000 * partitions\n\ndef f(_):\n x = random() * 2 - 1\n y = random() * 2 - 1\n return 1 if x ** 2 + y ** 2 <= 1 else 0\n\ncount = spark.sparkContext.parallelize(range(1, n + 1), partitions).map(f).reduce(add)\nprint(\"Pi is roughly %f\" % (4.0 * count / n))\n \"\"\"\n payload = {\n \"scope\": SCOPE,\n \"command\": command\n }\n r = requests.post(url=url, json=payload)\n print(r.text)\n assert r.status_code == 200\n\n @pytest.mark.run(order=15)\n def test_remove_scope(self, host, port):\n scope = SCOPE\n url = \"http://\" + host + \":\" + port + \"/scope/\" + scope\n r = requests.delete(url=url)\n print(r.text)\n assert r.status_code == 200\n",
"step-ids": [
10,
14,
17,
18,
20
]
}
|
[
10,
14,
17,
18,
20
] |
from flask_wtf import FlaskForm
from wtforms import StringField, SubmitField
from wtforms.validators import DataRequired, Length
from flask_ckeditor import CKEditorField
class BoldifyEncryptForm(FlaskForm):
boldMessage = StringField('Bolded Message: ', validators=[DataRequired()])
submit = SubmitField('Submit')
|
normal
|
{
"blob_id": "77b43d7d9cd6b912bcee471c564b47d7a7cdd552",
"index": 6227,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass BoldifyEncryptForm(FlaskForm):\n <mask token>\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass BoldifyEncryptForm(FlaskForm):\n boldMessage = StringField('Bolded Message: ', validators=[DataRequired()])\n submit = SubmitField('Submit')\n",
"step-4": "from flask_wtf import FlaskForm\nfrom wtforms import StringField, SubmitField\nfrom wtforms.validators import DataRequired, Length\nfrom flask_ckeditor import CKEditorField\n\n\nclass BoldifyEncryptForm(FlaskForm):\n boldMessage = StringField('Bolded Message: ', validators=[DataRequired()])\n submit = SubmitField('Submit')\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
__author__ = 'susperius'
"""
Abstract class used to implement your own fuzzers
"""
class Fuzzer:
NAME = []
CONFIG_PARAMS = []
@classmethod
def from_list(cls, params):
raise NotImplementedError("ABSTRACT METHOD")
@property
def prng_state(self):
raise NotImplementedError("ABSTRACT METHOD")
def fuzz(self):
raise NotImplementedError("ABSTRACT METHOD")
def set_state(self, state):
raise NotImplementedError("ABSTRACT METHOD")
def set_seed(self, seed):
raise NotImplementedError("ABSTRACT METHOD")
def create_testcases(self, count, directory):
raise NotImplementedError("ABSTRACT METHOD")
@property
def file_type(self):
raise NotImplementedError("ABSTRACT METHOD")
|
normal
|
{
"blob_id": "aa2a268143856d8f33b1aaf24f4e28ffd95cab01",
"index": 4658,
"step-1": "<mask token>\n\n\nclass Fuzzer:\n <mask token>\n <mask token>\n\n @classmethod\n def from_list(cls, params):\n raise NotImplementedError('ABSTRACT METHOD')\n\n @property\n def prng_state(self):\n raise NotImplementedError('ABSTRACT METHOD')\n\n def fuzz(self):\n raise NotImplementedError('ABSTRACT METHOD')\n\n def set_state(self, state):\n raise NotImplementedError('ABSTRACT METHOD')\n\n def set_seed(self, seed):\n raise NotImplementedError('ABSTRACT METHOD')\n <mask token>\n\n @property\n def file_type(self):\n raise NotImplementedError('ABSTRACT METHOD')\n",
"step-2": "<mask token>\n\n\nclass Fuzzer:\n <mask token>\n <mask token>\n\n @classmethod\n def from_list(cls, params):\n raise NotImplementedError('ABSTRACT METHOD')\n\n @property\n def prng_state(self):\n raise NotImplementedError('ABSTRACT METHOD')\n\n def fuzz(self):\n raise NotImplementedError('ABSTRACT METHOD')\n\n def set_state(self, state):\n raise NotImplementedError('ABSTRACT METHOD')\n\n def set_seed(self, seed):\n raise NotImplementedError('ABSTRACT METHOD')\n\n def create_testcases(self, count, directory):\n raise NotImplementedError('ABSTRACT METHOD')\n\n @property\n def file_type(self):\n raise NotImplementedError('ABSTRACT METHOD')\n",
"step-3": "<mask token>\n\n\nclass Fuzzer:\n NAME = []\n CONFIG_PARAMS = []\n\n @classmethod\n def from_list(cls, params):\n raise NotImplementedError('ABSTRACT METHOD')\n\n @property\n def prng_state(self):\n raise NotImplementedError('ABSTRACT METHOD')\n\n def fuzz(self):\n raise NotImplementedError('ABSTRACT METHOD')\n\n def set_state(self, state):\n raise NotImplementedError('ABSTRACT METHOD')\n\n def set_seed(self, seed):\n raise NotImplementedError('ABSTRACT METHOD')\n\n def create_testcases(self, count, directory):\n raise NotImplementedError('ABSTRACT METHOD')\n\n @property\n def file_type(self):\n raise NotImplementedError('ABSTRACT METHOD')\n",
"step-4": "__author__ = 'susperius'\n<mask token>\n\n\nclass Fuzzer:\n NAME = []\n CONFIG_PARAMS = []\n\n @classmethod\n def from_list(cls, params):\n raise NotImplementedError('ABSTRACT METHOD')\n\n @property\n def prng_state(self):\n raise NotImplementedError('ABSTRACT METHOD')\n\n def fuzz(self):\n raise NotImplementedError('ABSTRACT METHOD')\n\n def set_state(self, state):\n raise NotImplementedError('ABSTRACT METHOD')\n\n def set_seed(self, seed):\n raise NotImplementedError('ABSTRACT METHOD')\n\n def create_testcases(self, count, directory):\n raise NotImplementedError('ABSTRACT METHOD')\n\n @property\n def file_type(self):\n raise NotImplementedError('ABSTRACT METHOD')\n",
"step-5": "__author__ = 'susperius'\n\n\"\"\"\nAbstract class used to implement own fuzzers\n\"\"\"\n\nclass Fuzzer:\n NAME = []\n CONFIG_PARAMS = []\n\n @classmethod\n def from_list(cls, params):\n raise NotImplementedError(\"ABSTRACT METHOD\")\n\n @property\n def prng_state(self):\n raise NotImplementedError(\"ABSTRACT METHOD\")\n\n def fuzz(self):\n raise NotImplementedError(\"ABSTRACT METHOD\")\n\n def set_state(self, state):\n raise NotImplementedError(\"ABSTRACT METHOD\")\n\n def set_seed(self, seed):\n raise NotImplementedError(\"ABSTRACT METHOD\")\n\n def create_testcases(self, count, directory):\n raise NotImplementedError(\"ABSTRACT METHOD\")\n\n @property\n def file_type(self):\n raise NotImplementedError(\"ABSTRACT METHOD\")",
"step-ids": [
7,
8,
9,
10,
11
]
}
|
[
7,
8,
9,
10,
11
] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import numpy as np
class ZoomPanHandler:
"""
Matplotlib callback class to handle pan and zoom events.
"""
def __init__(self, axes, scale_factor=2, mouse_button=2):
"""
Default constructor for the ZoomPanHandler class.
Parameters
        axes: matplotlib.axes.Axes
The axes to attach this handler to.
scale_factor: number
The scale factor to apply when zooming.
mouse_button: number or string
The mouse button used to activate the pan action. Default value is
2, meaning the middle mouse button.
"""
self._axes = axes
self._scale_factor = scale_factor
self._mouse_button = mouse_button
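        # Last pan-press position in data coordinates (None while idle) and
        # the axis limits currently applied by this handler.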
self._press_coords = None
self._curr_xlim = self.axes.get_xlim()
self._curr_ylim = self.axes.get_ylim()
# Mouse action callback IDs
self._cb_mouse_wheel_id = None
self._cb_mouse_button_id = None
self._cb_mouse_release_id = None
self._cb_mouse_motion_id = None
self._connect_cb()
def __del__(self):
self._disconnect_cb()
self._axes = None
@property
def axes(self):
return self._axes
@property
def scale_factor(self):
return self._scale_factor
@property
def mouse_button(self):
return self._mouse_button
def apply_transforms(self):
"""
        Applies the zoom and pan transforms to the axes. Useful after resetting
the plot.
"""
self.axes.set_xlim(self._curr_xlim)
self.axes.set_ylim(self._curr_ylim)
def set_base_transforms(self):
"""
Queries the current axis limits and stores them.
"""
self._curr_xlim = self.axes.get_xlim()
self._curr_ylim = self.axes.get_ylim()
# Private methods
def _cb_mouse_wheel(self, event):
if event.inaxes:
curr_xlim = self.axes.get_xlim()
curr_ylim = self.axes.get_ylim()
xdata = event.xdata
ydata = event.ydata
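            # Distances from the cursor to each current limit; scaling these
            # (instead of the limits themselves) keeps the point under the
            # cursor fixed while zooming.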
xmin = xdata - curr_xlim[0]
ymin = ydata - curr_ylim[0]
xmax = curr_xlim[1] - xdata
ymax = curr_ylim[1] - ydata
xlim = ylim = []
if event.button == 'up': # zoom-in
xlim = [xdata - xmin / self.scale_factor,
xdata + xmax / self.scale_factor]
ylim = [ydata - ymin / self.scale_factor,
ydata + ymax / self.scale_factor]
elif event.button == 'down': # zoom-out
xlim = [xdata - xmin * self.scale_factor,
xdata + xmax * self.scale_factor]
ylim = [ydata - ymin * self.scale_factor,
ydata + ymax * self.scale_factor]
self._curr_xlim = xlim
self._curr_ylim = ylim
self.axes.set_xlim(xlim)
self.axes.set_ylim(ylim)
self.axes.figure.canvas.draw()
def _cb_mouse_button(self, event):
if not event.inaxes or event.button != self.mouse_button:
return
self._press_coords = (event.xdata, event.ydata)
def _cb_mouse_release(self, event):
self._press_coords = None
self.axes.figure.canvas.draw()
def _cb_mouse_motion(self, event):
if not event.inaxes or not self._press_coords:
return
        # get_xlim()/get_ylim() may return plain tuples; convert to arrays so
        # the in-place shift below works element-wise.
        xlim = np.asarray(self.axes.get_xlim())
        ylim = np.asarray(self.axes.get_ylim())
        xlim -= (event.xdata - self._press_coords[0])
        ylim -= (event.ydata - self._press_coords[1])
self._curr_xlim = xlim
self._curr_ylim = ylim
self.axes.set_xlim(xlim)
self.axes.set_ylim(ylim)
self.axes.figure.canvas.draw()
def _connect_cb(self):
fig = self.axes.figure
self._cb_mouse_wheel_id = fig.canvas.mpl_connect(
'scroll_event', self._cb_mouse_wheel)
self._cb_mouse_button_id = fig.canvas.mpl_connect(
'button_press_event', self._cb_mouse_button)
self._cb_mouse_release_id = fig.canvas.mpl_connect(
'button_release_event', self._cb_mouse_release)
self._cb_mouse_motion_id = fig.canvas.mpl_connect(
'motion_notify_event', self._cb_mouse_motion)
def _disconnect_cb(self):
fig = self.axes.figure
if self._cb_mouse_wheel_id:
fig.canvas.mpl_disconnect(self._cb_mouse_wheel_id)
self._cb_mouse_wheel_id = None
if self._cb_mouse_button_id:
fig.canvas.mpl_disconnect(self._cb_mouse_button_id)
self._cb_mouse_button_id = None
if self._cb_mouse_release_id:
fig.canvas.mpl_disconnect(self._cb_mouse_release_id)
self._cb_mouse_release_id = None
if self._cb_mouse_motion_id:
fig.canvas.mpl_disconnect(self._cb_mouse_motion_id)
self._cb_mouse_motion_id = None
def main():
import matplotlib.pyplot as plt
fig = plt.figure()
axes = fig.add_subplot(111)
axes.scatter(x=np.arange(0, 10, 0.5), y=np.arange(
0, 20, 1), color='r', marker='o')
    hand = ZoomPanHandler(axes, scale_factor=1.5)  # keep a reference so the callbacks stay connected
plt.show()
if __name__ == '__main__':
main()
|
normal
|
{
"blob_id": "6afcb8f17f7436f0ae9fa3a8c2a195245a9801f1",
"index": 6533,
"step-1": "<mask token>\n\n\nclass ZoomPanHandler:\n <mask token>\n\n def __init__(self, axes, scale_factor=2, mouse_button=2):\n \"\"\"\n Default constructor for the ZoomPanHandler class.\n\n Parameters\n axes: matplotlib.backend_bases.Axes\n The axes to attach this handler to.\n scale_factor: number\n The scale factor to apply when zooming.\n mouse_button: number or string\n The mouse button used to activate the pan action. Default value is\n 2, meaning the middle mouse button.\n \"\"\"\n self._axes = axes\n self._scale_factor = scale_factor\n self._mouse_button = mouse_button\n self._press_coords = None\n self._curr_xlim = self.axes.get_xlim()\n self._curr_ylim = self.axes.get_ylim()\n self._cb_mouse_wheel_id = None\n self._cb_mouse_button_id = None\n self._cb_mouse_release_id = None\n self._cb_mouse_motion_id = None\n self._connect_cb()\n\n def __del__(self):\n self._disconnect_cb()\n self._axes = None\n\n @property\n def axes(self):\n return self._axes\n\n @property\n def scale_factor(self):\n return self._scale_factor\n\n @property\n def mouse_button(self):\n return self._mouse_button\n\n def apply_transforms(self):\n \"\"\"\n Applies the zoom and pan transforms to the axes. Useful after reseting\n the plot.\n \"\"\"\n self.axes.set_xlim(self._curr_xlim)\n self.axes.set_ylim(self._curr_ylim)\n\n def set_base_transforms(self):\n \"\"\"\n Queries the current axis limits and stores them.\n \"\"\"\n self._curr_xlim = self.axes.get_xlim()\n self._curr_ylim = self.axes.get_ylim()\n\n def _cb_mouse_wheel(self, event):\n if event.inaxes:\n curr_xlim = self.axes.get_xlim()\n curr_ylim = self.axes.get_ylim()\n xdata = event.xdata\n ydata = event.ydata\n xmin = xdata - curr_xlim[0]\n ymin = ydata - curr_ylim[0]\n xmax = curr_xlim[1] - xdata\n ymax = curr_ylim[1] - ydata\n xlim = ylim = []\n if event.button == 'up':\n xlim = [xdata - xmin / self.scale_factor, xdata + xmax /\n self.scale_factor]\n ylim = [ydata - ymin / self.scale_factor, ydata + ymax /\n self.scale_factor]\n elif event.button == 'down':\n xlim = [xdata - xmin * self.scale_factor, xdata + xmax *\n self.scale_factor]\n ylim = [ydata - ymin * self.scale_factor, ydata + ymax *\n self.scale_factor]\n self._curr_xlim = xlim\n self._curr_ylim = ylim\n self.axes.set_xlim(xlim)\n self.axes.set_ylim(ylim)\n self.axes.figure.canvas.draw()\n\n def _cb_mouse_button(self, event):\n if not event.inaxes or event.button != self.mouse_button:\n return\n self._press_coords = event.xdata, event.ydata\n\n def _cb_mouse_release(self, event):\n self._press_coords = None\n self.axes.figure.canvas.draw()\n\n def _cb_mouse_motion(self, event):\n if not event.inaxes or not self._press_coords:\n return\n xlim = self.axes.get_xlim()\n ylim = self.axes.get_ylim()\n xlim -= event.xdata - self._press_coords[0]\n ylim -= event.ydata - self._press_coords[1]\n self._curr_xlim = xlim\n self._curr_ylim = ylim\n self.axes.set_xlim(xlim)\n self.axes.set_ylim(ylim)\n self.axes.figure.canvas.draw()\n <mask token>\n\n def _disconnect_cb(self):\n fig = self.axes.figure\n if self._cb_mouse_wheel_id:\n fig.canvas.mpl_disconnect(self._cb_mouse_wheel_id)\n self._cb_mouse_wheel_id = None\n if self._cb_mouse_button_id:\n fig.canvas.mpl_disconnect(self._cb_mouse_button_id)\n self._cb_mouse_button_id = None\n if self._cb_mouse_release_id:\n fig.canvas.mpl_disconnect(self._cb_mouse_release_id)\n self._cb_mouse_release_id = None\n if self._cb_mouse_motion_id:\n fig.canvas.mpl_disconnect(self._cb_mouse_motion_id)\n self._cb_mouse_motion_id = None\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass ZoomPanHandler:\n <mask token>\n\n def __init__(self, axes, scale_factor=2, mouse_button=2):\n \"\"\"\n Default constructor for the ZoomPanHandler class.\n\n Parameters\n axes: matplotlib.backend_bases.Axes\n The axes to attach this handler to.\n scale_factor: number\n The scale factor to apply when zooming.\n mouse_button: number or string\n The mouse button used to activate the pan action. Default value is\n 2, meaning the middle mouse button.\n \"\"\"\n self._axes = axes\n self._scale_factor = scale_factor\n self._mouse_button = mouse_button\n self._press_coords = None\n self._curr_xlim = self.axes.get_xlim()\n self._curr_ylim = self.axes.get_ylim()\n self._cb_mouse_wheel_id = None\n self._cb_mouse_button_id = None\n self._cb_mouse_release_id = None\n self._cb_mouse_motion_id = None\n self._connect_cb()\n\n def __del__(self):\n self._disconnect_cb()\n self._axes = None\n\n @property\n def axes(self):\n return self._axes\n\n @property\n def scale_factor(self):\n return self._scale_factor\n\n @property\n def mouse_button(self):\n return self._mouse_button\n\n def apply_transforms(self):\n \"\"\"\n Applies the zoom and pan transforms to the axes. Useful after reseting\n the plot.\n \"\"\"\n self.axes.set_xlim(self._curr_xlim)\n self.axes.set_ylim(self._curr_ylim)\n\n def set_base_transforms(self):\n \"\"\"\n Queries the current axis limits and stores them.\n \"\"\"\n self._curr_xlim = self.axes.get_xlim()\n self._curr_ylim = self.axes.get_ylim()\n\n def _cb_mouse_wheel(self, event):\n if event.inaxes:\n curr_xlim = self.axes.get_xlim()\n curr_ylim = self.axes.get_ylim()\n xdata = event.xdata\n ydata = event.ydata\n xmin = xdata - curr_xlim[0]\n ymin = ydata - curr_ylim[0]\n xmax = curr_xlim[1] - xdata\n ymax = curr_ylim[1] - ydata\n xlim = ylim = []\n if event.button == 'up':\n xlim = [xdata - xmin / self.scale_factor, xdata + xmax /\n self.scale_factor]\n ylim = [ydata - ymin / self.scale_factor, ydata + ymax /\n self.scale_factor]\n elif event.button == 'down':\n xlim = [xdata - xmin * self.scale_factor, xdata + xmax *\n self.scale_factor]\n ylim = [ydata - ymin * self.scale_factor, ydata + ymax *\n self.scale_factor]\n self._curr_xlim = xlim\n self._curr_ylim = ylim\n self.axes.set_xlim(xlim)\n self.axes.set_ylim(ylim)\n self.axes.figure.canvas.draw()\n\n def _cb_mouse_button(self, event):\n if not event.inaxes or event.button != self.mouse_button:\n return\n self._press_coords = event.xdata, event.ydata\n\n def _cb_mouse_release(self, event):\n self._press_coords = None\n self.axes.figure.canvas.draw()\n\n def _cb_mouse_motion(self, event):\n if not event.inaxes or not self._press_coords:\n return\n xlim = self.axes.get_xlim()\n ylim = self.axes.get_ylim()\n xlim -= event.xdata - self._press_coords[0]\n ylim -= event.ydata - self._press_coords[1]\n self._curr_xlim = xlim\n self._curr_ylim = ylim\n self.axes.set_xlim(xlim)\n self.axes.set_ylim(ylim)\n self.axes.figure.canvas.draw()\n\n def _connect_cb(self):\n fig = self.axes.figure\n self._cb_mouse_wheel_id = fig.canvas.mpl_connect('scroll_event',\n self._cb_mouse_wheel)\n self._cb_mouse_button_id = fig.canvas.mpl_connect('button_press_event',\n self._cb_mouse_button)\n self._cb_mouse_release_id = fig.canvas.mpl_connect(\n 'button_release_event', self._cb_mouse_release)\n self._cb_mouse_motion_id = fig.canvas.mpl_connect('motion_notify_event'\n , self._cb_mouse_motion)\n\n def _disconnect_cb(self):\n fig = self.axes.figure\n if self._cb_mouse_wheel_id:\n 
fig.canvas.mpl_disconnect(self._cb_mouse_wheel_id)\n self._cb_mouse_wheel_id = None\n if self._cb_mouse_button_id:\n fig.canvas.mpl_disconnect(self._cb_mouse_button_id)\n self._cb_mouse_button_id = None\n if self._cb_mouse_release_id:\n fig.canvas.mpl_disconnect(self._cb_mouse_release_id)\n self._cb_mouse_release_id = None\n if self._cb_mouse_motion_id:\n fig.canvas.mpl_disconnect(self._cb_mouse_motion_id)\n self._cb_mouse_motion_id = None\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass ZoomPanHandler:\n \"\"\"\n Matplotlib callback class to handle pan and zoom events.\n \"\"\"\n\n def __init__(self, axes, scale_factor=2, mouse_button=2):\n \"\"\"\n Default constructor for the ZoomPanHandler class.\n\n Parameters\n axes: matplotlib.backend_bases.Axes\n The axes to attach this handler to.\n scale_factor: number\n The scale factor to apply when zooming.\n mouse_button: number or string\n The mouse button used to activate the pan action. Default value is\n 2, meaning the middle mouse button.\n \"\"\"\n self._axes = axes\n self._scale_factor = scale_factor\n self._mouse_button = mouse_button\n self._press_coords = None\n self._curr_xlim = self.axes.get_xlim()\n self._curr_ylim = self.axes.get_ylim()\n self._cb_mouse_wheel_id = None\n self._cb_mouse_button_id = None\n self._cb_mouse_release_id = None\n self._cb_mouse_motion_id = None\n self._connect_cb()\n\n def __del__(self):\n self._disconnect_cb()\n self._axes = None\n\n @property\n def axes(self):\n return self._axes\n\n @property\n def scale_factor(self):\n return self._scale_factor\n\n @property\n def mouse_button(self):\n return self._mouse_button\n\n def apply_transforms(self):\n \"\"\"\n Applies the zoom and pan transforms to the axes. Useful after reseting\n the plot.\n \"\"\"\n self.axes.set_xlim(self._curr_xlim)\n self.axes.set_ylim(self._curr_ylim)\n\n def set_base_transforms(self):\n \"\"\"\n Queries the current axis limits and stores them.\n \"\"\"\n self._curr_xlim = self.axes.get_xlim()\n self._curr_ylim = self.axes.get_ylim()\n\n def _cb_mouse_wheel(self, event):\n if event.inaxes:\n curr_xlim = self.axes.get_xlim()\n curr_ylim = self.axes.get_ylim()\n xdata = event.xdata\n ydata = event.ydata\n xmin = xdata - curr_xlim[0]\n ymin = ydata - curr_ylim[0]\n xmax = curr_xlim[1] - xdata\n ymax = curr_ylim[1] - ydata\n xlim = ylim = []\n if event.button == 'up':\n xlim = [xdata - xmin / self.scale_factor, xdata + xmax /\n self.scale_factor]\n ylim = [ydata - ymin / self.scale_factor, ydata + ymax /\n self.scale_factor]\n elif event.button == 'down':\n xlim = [xdata - xmin * self.scale_factor, xdata + xmax *\n self.scale_factor]\n ylim = [ydata - ymin * self.scale_factor, ydata + ymax *\n self.scale_factor]\n self._curr_xlim = xlim\n self._curr_ylim = ylim\n self.axes.set_xlim(xlim)\n self.axes.set_ylim(ylim)\n self.axes.figure.canvas.draw()\n\n def _cb_mouse_button(self, event):\n if not event.inaxes or event.button != self.mouse_button:\n return\n self._press_coords = event.xdata, event.ydata\n\n def _cb_mouse_release(self, event):\n self._press_coords = None\n self.axes.figure.canvas.draw()\n\n def _cb_mouse_motion(self, event):\n if not event.inaxes or not self._press_coords:\n return\n xlim = self.axes.get_xlim()\n ylim = self.axes.get_ylim()\n xlim -= event.xdata - self._press_coords[0]\n ylim -= event.ydata - self._press_coords[1]\n self._curr_xlim = xlim\n self._curr_ylim = ylim\n self.axes.set_xlim(xlim)\n self.axes.set_ylim(ylim)\n self.axes.figure.canvas.draw()\n\n def _connect_cb(self):\n fig = self.axes.figure\n self._cb_mouse_wheel_id = fig.canvas.mpl_connect('scroll_event',\n self._cb_mouse_wheel)\n self._cb_mouse_button_id = fig.canvas.mpl_connect('button_press_event',\n self._cb_mouse_button)\n self._cb_mouse_release_id = fig.canvas.mpl_connect(\n 'button_release_event', self._cb_mouse_release)\n self._cb_mouse_motion_id = fig.canvas.mpl_connect('motion_notify_event'\n , self._cb_mouse_motion)\n\n def _disconnect_cb(self):\n fig = self.axes.figure\n if 
self._cb_mouse_wheel_id:\n fig.canvas.mpl_disconnect(self._cb_mouse_wheel_id)\n self._cb_mouse_wheel_id = None\n if self._cb_mouse_button_id:\n fig.canvas.mpl_disconnect(self._cb_mouse_button_id)\n self._cb_mouse_button_id = None\n if self._cb_mouse_release_id:\n fig.canvas.mpl_disconnect(self._cb_mouse_release_id)\n self._cb_mouse_release_id = None\n if self._cb_mouse_motion_id:\n fig.canvas.mpl_disconnect(self._cb_mouse_motion_id)\n self._cb_mouse_motion_id = None\n\n\n<mask token>\n",
"step-4": "<mask token>\n\n\nclass ZoomPanHandler:\n \"\"\"\n Matplotlib callback class to handle pan and zoom events.\n \"\"\"\n\n def __init__(self, axes, scale_factor=2, mouse_button=2):\n \"\"\"\n Default constructor for the ZoomPanHandler class.\n\n Parameters\n axes: matplotlib.backend_bases.Axes\n The axes to attach this handler to.\n scale_factor: number\n The scale factor to apply when zooming.\n mouse_button: number or string\n The mouse button used to activate the pan action. Default value is\n 2, meaning the middle mouse button.\n \"\"\"\n self._axes = axes\n self._scale_factor = scale_factor\n self._mouse_button = mouse_button\n self._press_coords = None\n self._curr_xlim = self.axes.get_xlim()\n self._curr_ylim = self.axes.get_ylim()\n self._cb_mouse_wheel_id = None\n self._cb_mouse_button_id = None\n self._cb_mouse_release_id = None\n self._cb_mouse_motion_id = None\n self._connect_cb()\n\n def __del__(self):\n self._disconnect_cb()\n self._axes = None\n\n @property\n def axes(self):\n return self._axes\n\n @property\n def scale_factor(self):\n return self._scale_factor\n\n @property\n def mouse_button(self):\n return self._mouse_button\n\n def apply_transforms(self):\n \"\"\"\n Applies the zoom and pan transforms to the axes. Useful after reseting\n the plot.\n \"\"\"\n self.axes.set_xlim(self._curr_xlim)\n self.axes.set_ylim(self._curr_ylim)\n\n def set_base_transforms(self):\n \"\"\"\n Queries the current axis limits and stores them.\n \"\"\"\n self._curr_xlim = self.axes.get_xlim()\n self._curr_ylim = self.axes.get_ylim()\n\n def _cb_mouse_wheel(self, event):\n if event.inaxes:\n curr_xlim = self.axes.get_xlim()\n curr_ylim = self.axes.get_ylim()\n xdata = event.xdata\n ydata = event.ydata\n xmin = xdata - curr_xlim[0]\n ymin = ydata - curr_ylim[0]\n xmax = curr_xlim[1] - xdata\n ymax = curr_ylim[1] - ydata\n xlim = ylim = []\n if event.button == 'up':\n xlim = [xdata - xmin / self.scale_factor, xdata + xmax /\n self.scale_factor]\n ylim = [ydata - ymin / self.scale_factor, ydata + ymax /\n self.scale_factor]\n elif event.button == 'down':\n xlim = [xdata - xmin * self.scale_factor, xdata + xmax *\n self.scale_factor]\n ylim = [ydata - ymin * self.scale_factor, ydata + ymax *\n self.scale_factor]\n self._curr_xlim = xlim\n self._curr_ylim = ylim\n self.axes.set_xlim(xlim)\n self.axes.set_ylim(ylim)\n self.axes.figure.canvas.draw()\n\n def _cb_mouse_button(self, event):\n if not event.inaxes or event.button != self.mouse_button:\n return\n self._press_coords = event.xdata, event.ydata\n\n def _cb_mouse_release(self, event):\n self._press_coords = None\n self.axes.figure.canvas.draw()\n\n def _cb_mouse_motion(self, event):\n if not event.inaxes or not self._press_coords:\n return\n xlim = self.axes.get_xlim()\n ylim = self.axes.get_ylim()\n xlim -= event.xdata - self._press_coords[0]\n ylim -= event.ydata - self._press_coords[1]\n self._curr_xlim = xlim\n self._curr_ylim = ylim\n self.axes.set_xlim(xlim)\n self.axes.set_ylim(ylim)\n self.axes.figure.canvas.draw()\n\n def _connect_cb(self):\n fig = self.axes.figure\n self._cb_mouse_wheel_id = fig.canvas.mpl_connect('scroll_event',\n self._cb_mouse_wheel)\n self._cb_mouse_button_id = fig.canvas.mpl_connect('button_press_event',\n self._cb_mouse_button)\n self._cb_mouse_release_id = fig.canvas.mpl_connect(\n 'button_release_event', self._cb_mouse_release)\n self._cb_mouse_motion_id = fig.canvas.mpl_connect('motion_notify_event'\n , self._cb_mouse_motion)\n\n def _disconnect_cb(self):\n fig = self.axes.figure\n if 
self._cb_mouse_wheel_id:\n fig.canvas.mpl_disconnect(self._cb_mouse_wheel_id)\n self._cb_mouse_wheel_id = None\n if self._cb_mouse_button_id:\n fig.canvas.mpl_disconnect(self._cb_mouse_button_id)\n self._cb_mouse_button_id = None\n if self._cb_mouse_release_id:\n fig.canvas.mpl_disconnect(self._cb_mouse_release_id)\n self._cb_mouse_release_id = None\n if self._cb_mouse_motion_id:\n fig.canvas.mpl_disconnect(self._cb_mouse_motion_id)\n self._cb_mouse_motion_id = None\n\n\ndef main():\n import matplotlib.pyplot as plt\n fig = plt.figure()\n axes = fig.add_subplot(111)\n axes.scatter(x=np.arange(0, 10, 0.5), y=np.arange(0, 20, 1), color='r',\n marker='o')\n hand = ZoomPanHandler(axes, scale_factor=1.5)\n plt.show()\n\n\n<mask token>\n",
"step-5": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\nimport numpy as np\n\n\nclass ZoomPanHandler:\n \"\"\"\n Matplotlib callback class to handle pan and zoom events.\n \"\"\"\n\n def __init__(self, axes, scale_factor=2, mouse_button=2):\n \"\"\"\n Default constructor for the ZoomPanHandler class.\n\n Parameters\n axes: matplotlib.backend_bases.Axes\n The axes to attach this handler to.\n scale_factor: number\n The scale factor to apply when zooming.\n mouse_button: number or string\n The mouse button used to activate the pan action. Default value is\n 2, meaning the middle mouse button.\n \"\"\"\n self._axes = axes\n self._scale_factor = scale_factor\n self._mouse_button = mouse_button\n\n self._press_coords = None\n self._curr_xlim = self.axes.get_xlim()\n self._curr_ylim = self.axes.get_ylim()\n\n # Mouse action callback IDs\n self._cb_mouse_wheel_id = None\n self._cb_mouse_button_id = None\n self._cb_mouse_release_id = None\n self._cb_mouse_motion_id = None\n\n self._connect_cb()\n\n def __del__(self):\n self._disconnect_cb()\n self._axes = None\n\n @property\n def axes(self):\n return self._axes\n\n @property\n def scale_factor(self):\n return self._scale_factor\n\n @property\n def mouse_button(self):\n return self._mouse_button\n\n def apply_transforms(self):\n \"\"\"\n Applies the zoom and pan transforms to the axes. Useful after reseting\n the plot.\n \"\"\"\n self.axes.set_xlim(self._curr_xlim)\n self.axes.set_ylim(self._curr_ylim)\n\n def set_base_transforms(self):\n \"\"\"\n Queries the current axis limits and stores them.\n \"\"\"\n self._curr_xlim = self.axes.get_xlim()\n self._curr_ylim = self.axes.get_ylim()\n\n # Private methods\n def _cb_mouse_wheel(self, event):\n if event.inaxes:\n curr_xlim = self.axes.get_xlim()\n curr_ylim = self.axes.get_ylim()\n\n xdata = event.xdata\n ydata = event.ydata\n\n xmin = xdata - curr_xlim[0]\n ymin = ydata - curr_ylim[0]\n\n xmax = curr_xlim[1] - xdata\n ymax = curr_ylim[1] - ydata\n\n xlim = ylim = []\n\n if event.button == 'up': # zoom-in\n xlim = [xdata - xmin / self.scale_factor,\n xdata + xmax / self.scale_factor]\n ylim = [ydata - ymin / self.scale_factor,\n ydata + ymax / self.scale_factor]\n elif event.button == 'down': # zoom-out\n xlim = [xdata - xmin * self.scale_factor,\n xdata + xmax * self.scale_factor]\n ylim = [ydata - ymin * self.scale_factor,\n ydata + ymax * self.scale_factor]\n\n self._curr_xlim = xlim\n self._curr_ylim = ylim\n\n self.axes.set_xlim(xlim)\n self.axes.set_ylim(ylim)\n\n self.axes.figure.canvas.draw()\n\n def _cb_mouse_button(self, event):\n if not event.inaxes or event.button != self.mouse_button:\n return\n self._press_coords = (event.xdata, event.ydata)\n\n def _cb_mouse_release(self, event):\n self._press_coords = None\n self.axes.figure.canvas.draw()\n\n def _cb_mouse_motion(self, event):\n if not event.inaxes or not self._press_coords:\n return\n xlim = self.axes.get_xlim()\n ylim = self.axes.get_ylim()\n xlim -= (event.xdata - self._press_coords[0])\n ylim -= (event.ydata - self._press_coords[1])\n self._curr_xlim = xlim\n self._curr_ylim = ylim\n self.axes.set_xlim(xlim)\n self.axes.set_ylim(ylim)\n self.axes.figure.canvas.draw()\n\n def _connect_cb(self):\n fig = self.axes.figure\n self._cb_mouse_wheel_id = fig.canvas.mpl_connect(\n 'scroll_event', self._cb_mouse_wheel)\n self._cb_mouse_button_id = fig.canvas.mpl_connect(\n 'button_press_event', self._cb_mouse_button)\n self._cb_mouse_release_id = fig.canvas.mpl_connect(\n 'button_release_event', self._cb_mouse_release)\n 
self._cb_mouse_motion_id = fig.canvas.mpl_connect(\n 'motion_notify_event', self._cb_mouse_motion)\n\n def _disconnect_cb(self):\n fig = self.axes.figure\n if self._cb_mouse_wheel_id:\n fig.canvas.mpl_disconnect(self._cb_mouse_wheel_id)\n self._cb_mouse_wheel_id = None\n if self._cb_mouse_button_id:\n fig.canvas.mpl_disconnect(self._cb_mouse_button_id)\n self._cb_mouse_button_id = None\n if self._cb_mouse_release_id:\n fig.canvas.mpl_disconnect(self._cb_mouse_release_id)\n self._cb_mouse_release_id = None\n if self._cb_mouse_motion_id:\n fig.canvas.mpl_disconnect(self._cb_mouse_motion_id)\n self._cb_mouse_motion_id = None\n\n\ndef main():\n import matplotlib.pyplot as plt\n fig = plt.figure()\n axes = fig.add_subplot(111)\n axes.scatter(x=np.arange(0, 10, 0.5), y=np.arange(\n 0, 20, 1), color='r', marker='o')\n hand = ZoomPanHandler(axes, scale_factor=1.5)\n plt.show()\n\n\nif __name__ == '__main__':\n main()\n",
"step-ids": [
13,
14,
15,
16,
19
]
}
|
[
13,
14,
15,
16,
19
] |
#!/usr/bin/python
# -*- coding: utf-8 -*-
import sqlite3 as lite
con = lite.connect('./logs.db')
with con:
cur = con.cursor()
cur.execute("DROP TABLE IF EXISTS log")
cur.execute('''CREATE TABLE log (msg_id text, u_id text, username text, first_name text, last_name text, msg text, ch_id text, day text)''')
|
normal
|
{
"blob_id": "1c31649ac75214a6d26bcb6d6822579be91e5074",
"index": 2748,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nwith con:\n cur = con.cursor()\n cur.execute('DROP TABLE IF EXISTS log')\n cur.execute(\n 'CREATE TABLE log (msg_id text, u_id text, username text, first_name text, last_name text, msg text, ch_id text, day text)'\n )\n",
"step-3": "<mask token>\ncon = lite.connect('./logs.db')\nwith con:\n cur = con.cursor()\n cur.execute('DROP TABLE IF EXISTS log')\n cur.execute(\n 'CREATE TABLE log (msg_id text, u_id text, username text, first_name text, last_name text, msg text, ch_id text, day text)'\n )\n",
"step-4": "import sqlite3 as lite\ncon = lite.connect('./logs.db')\nwith con:\n cur = con.cursor()\n cur.execute('DROP TABLE IF EXISTS log')\n cur.execute(\n 'CREATE TABLE log (msg_id text, u_id text, username text, first_name text, last_name text, msg text, ch_id text, day text)'\n )\n",
"step-5": "#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\nimport sqlite3 as lite\n\ncon = lite.connect('./logs.db')\n\nwith con: \n cur = con.cursor() \n cur.execute(\"DROP TABLE IF EXISTS log\")\n cur.execute('''CREATE TABLE log (msg_id text, u_id text, username text, first_name text, last_name text, msg text, ch_id text, day text)''')",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
import re
s=input('enter the string:')
def end_num(s):
text = re.compile(r".*[0-9]$")
if text.match(s):
return 'Yes!Number is present at the end of string'
else:
return 'No!Number is not present at the end of string'
print(end_num(s))
|
normal
|
{
"blob_id": "94334f91b1556c05dce0ed6f23c074bb8875f185",
"index": 2505,
"step-1": "<mask token>\n\n\ndef end_num(s):\n text = re.compile('.*[0-9]$')\n if text.match(s):\n return 'Yes!Number is present at the end of string'\n else:\n return 'No!Number is not present at the end of string'\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef end_num(s):\n text = re.compile('.*[0-9]$')\n if text.match(s):\n return 'Yes!Number is present at the end of string'\n else:\n return 'No!Number is not present at the end of string'\n\n\nprint(end_num(s))\n",
"step-3": "<mask token>\ns = input('enter the string:')\n\n\ndef end_num(s):\n text = re.compile('.*[0-9]$')\n if text.match(s):\n return 'Yes!Number is present at the end of string'\n else:\n return 'No!Number is not present at the end of string'\n\n\nprint(end_num(s))\n",
"step-4": "import re\ns = input('enter the string:')\n\n\ndef end_num(s):\n text = re.compile('.*[0-9]$')\n if text.match(s):\n return 'Yes!Number is present at the end of string'\n else:\n return 'No!Number is not present at the end of string'\n\n\nprint(end_num(s))\n",
"step-5": "import re\r\ns=input('enter the string:')\r\ndef end_num(s):\r\n text = re.compile(r\".*[0-9]$\")\r\n if text.match(s):\r\n return 'Yes!Number is present at the end of string'\r\n else:\r\n return 'No!Number is not present at the end of string'\r\n\r\nprint(end_num(s))\r\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
from django.urls import path
from . import views as user_views
from produtos import views as prod_views
from django.contrib.auth import views as auth_views
app_name = 'user'
urlpatterns = [
path('detalhes/', user_views.painel, name="painel"),
path('produto/ajax/delete_prod/', prod_views.deleteProd, name="deleteProd"),
path('produto/', user_views.painelProdutos, name="painel_produtos"),
path('<int:id_produto>', prod_views.detalheProduto, name="detalhe_prod"),
path('ajax/delete_prod/', prod_views.deleteProd, name='deleteProd'),
]
|
normal
|
{
"blob_id": "a7f2791e359b848a217beadc77fc983d971ef8b0",
"index": 8436,
"step-1": "<mask token>\n",
"step-2": "<mask token>\napp_name = 'user'\nurlpatterns = [path('detalhes/', user_views.painel, name='painel'), path(\n 'produto/ajax/delete_prod/', prod_views.deleteProd, name='deleteProd'),\n path('produto/', user_views.painelProdutos, name='painel_produtos'),\n path('<int:id_produto>', prod_views.detalheProduto, name='detalhe_prod'\n ), path('ajax/delete_prod/', prod_views.deleteProd, name='deleteProd')]\n",
"step-3": "from django.urls import path\nfrom . import views as user_views\nfrom produtos import views as prod_views\nfrom django.contrib.auth import views as auth_views\napp_name = 'user'\nurlpatterns = [path('detalhes/', user_views.painel, name='painel'), path(\n 'produto/ajax/delete_prod/', prod_views.deleteProd, name='deleteProd'),\n path('produto/', user_views.painelProdutos, name='painel_produtos'),\n path('<int:id_produto>', prod_views.detalheProduto, name='detalhe_prod'\n ), path('ajax/delete_prod/', prod_views.deleteProd, name='deleteProd')]\n",
"step-4": "from django.urls import path\nfrom . import views as user_views\nfrom produtos import views as prod_views\nfrom django.contrib.auth import views as auth_views\n\napp_name = 'user'\n\nurlpatterns = [\n path('detalhes/', user_views.painel, name=\"painel\"),\n path('produto/ajax/delete_prod/', prod_views.deleteProd, name=\"deleteProd\"),\n path('produto/', user_views.painelProdutos, name=\"painel_produtos\"),\n path('<int:id_produto>', prod_views.detalheProduto, name=\"detalhe_prod\"),\n path('ajax/delete_prod/', prod_views.deleteProd, name='deleteProd'),\n]",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
from pulp import *
from collections import namedtuple
import networkx as nx
import itertools
from mcfpox.controller.lib import Flow, Hop
def get_host_from_ip(G, ip):
return next((i for i in G.nodes() if G.node[i].get('ip') == str(ip)), None)
# https://docs.python.org/2/library/itertools.html#recipes
def pairwise(iterable):
"s -> (s0,s1), (s1,s2), (s2, s3), ..."
a, b = itertools.tee(iterable)
next(b, None)
return itertools.izip(a, b)
def widest_path(G, src, dst):
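    """Greedy widest-path search from src to dst over the edge 'capacity'
    attribute: the set S repeatedly absorbs the frontier node reachable with
    the largest bottleneck capacity, recording the path taken to each node,
    and the recorded path to dst is returned."""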
S = set([src])
T = set([n for n in G.nodes() if n != src])
print S, T
N = G.nodes()
B = {}
for n in N:
b = {}
for k in N:
if k == n:
continue
try:
b[k] = G.edge[n][k]['capacity']
except KeyError:
b[k] = 0
B[n] = b
P = {n:[] for n in N}
while True:
k = None
highest = 0
neighbors = set([])
for n in S:
for m in G[n]:
if m in S:
continue
B[src][m] = G.edge[n][m]['capacity']
if B[src][m] > highest:
k = m
highest = B[src][m]
P[k] = P[n] + [k]
S.add(k)
T.remove(k)
if not T:
break
for n in T:
old = B[src][n]
new = min(B[src][k], B[k][n])
B[src][n] = max(old, new)
if new > old:
P[n] = P[k] + [n]
return P[dst]
def objective(graph, flows):
""" Return a list of paths through the graph for each flow.
Args:
graph:
A nx.Graph, annotated with network information including
IP addresses for hosts and port numbers for each link.
flows:
A list of mcfpox.controller.lib.Flow objects representing
5-tuples of flows to route through the network
Returns:
A dict mapping each flow in flows to a valid path through the graph.
The path is expressed as a list of mcfpox.controller.lib.Hop objects.
If no valid path can be found, the value for that entry is None.
"""
G = graph.copy()
rules = {}
flows.sort(key=lambda a: a[1], reverse=True)
for flow,demand in flows:
src = get_host_from_ip(G, flow.nw_src)
dst = get_host_from_ip(G, flow.nw_dst)
if not (src and dst):
continue
if not (src in G.nodes() and dst in G.nodes()):
continue
path = widest_path(G, src, dst)
hops = []
for a,b in pairwise(path):
hops.append(Hop(dpid=int(a[1:]), port=G.edge[a][b]['port']))
G.edge[a][b]['capacity'] -= demand
G.edge[b][a]['capacity'] -= demand
rules[flow] = hops
return rules
|
normal
|
{
"blob_id": "65bcb4a2fbc05ee19c8a94811d369562ec5e72ff",
"index": 9261,
"step-1": "from pulp import *\nfrom collections import namedtuple\nimport networkx as nx\nimport itertools\nfrom mcfpox.controller.lib import Flow, Hop\n\n\ndef get_host_from_ip(G, ip):\n return next((i for i in G.nodes() if G.node[i].get('ip') == str(ip)), None)\n\n\n# https://docs.python.org/2/library/itertools.html#recipes\ndef pairwise(iterable):\n \"s -> (s0,s1), (s1,s2), (s2, s3), ...\"\n a, b = itertools.tee(iterable)\n next(b, None)\n return itertools.izip(a, b)\n\n\ndef widest_path(G, src, dst):\n S = set([src])\n T = set([n for n in G.nodes() if n != src])\n print S, T\n\n N = G.nodes()\n B = {}\n for n in N:\n b = {}\n for k in N:\n if k == n:\n continue\n try:\n b[k] = G.edge[n][k]['capacity']\n except KeyError:\n b[k] = 0\n B[n] = b\n P = {n:[] for n in N}\n\n while True:\n k = None\n highest = 0\n neighbors = set([])\n\n for n in S:\n for m in G[n]:\n if m in S:\n continue\n B[src][m] = G.edge[n][m]['capacity']\n if B[src][m] > highest:\n k = m\n highest = B[src][m]\n P[k] = P[n] + [k]\n\n S.add(k)\n T.remove(k)\n if not T:\n break\n\n for n in T:\n old = B[src][n]\n new = min(B[src][k], B[k][n])\n B[src][n] = max(old, new)\n if new > old:\n P[n] = P[k] + [n]\n\n return P[dst]\n\n\ndef objective(graph, flows):\n \"\"\" Return a list of paths through the graph for each flow.\n\n Args:\n graph: \n A nx.Graph, annotated with network information including\n IP addresses for hosts and port numbers for each link.\n flows: \n A list of mcfpox.controller.lib.Flow objects representing\n 5-tuples of flows to route through the network\n\n Returns:\n A dict mapping each flow in flows to a valid path through the graph.\n The path is expressed as a list of mcfpox.controller.lib.Hop objects.\n If no valid path can be found, the value for that entry is None.\n \"\"\"\n\n G = graph.copy()\n rules = {}\n flows.sort(key=lambda a: a[1], reverse=True)\n\n for flow,demand in flows:\n src = get_host_from_ip(G, flow.nw_src)\n dst = get_host_from_ip(G, flow.nw_dst)\n\n if not (src and dst):\n continue\n if not (src in G.nodes() and dst in G.nodes()):\n continue\n\n path = widest_path(G, src, dst)\n\n hops = []\n for a,b in pairwise(path):\n hops.append(Hop(dpid=int(a[1:]), port=G.edge[a][b]['port']))\n G.edge[a][b]['capacity'] -= demand\n G.edge[b][a]['capacity'] -= demand\n\n rules[flow] = hops\n\n return rules\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
from django.db import models
class Article(models.Model):
created = models.DateTimeField(auto_now_add=True)
published = models.DateTimeField(null=True, blank=True)
title = models.CharField(max_length=100)
slug = models.SlugField(max_length=100)
content = models.TextField()
|
normal
|
{
"blob_id": "28233cb4a56ee805e66f34e6abd49137503d5f7b",
"index": 1405,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass Article(models.Model):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass Article(models.Model):\n created = models.DateTimeField(auto_now_add=True)\n published = models.DateTimeField(null=True, blank=True)\n title = models.CharField(max_length=100)\n slug = models.SlugField(max_length=100)\n content = models.TextField()\n",
"step-4": "from django.db import models\n\n\nclass Article(models.Model):\n created = models.DateTimeField(auto_now_add=True)\n published = models.DateTimeField(null=True, blank=True)\n title = models.CharField(max_length=100)\n slug = models.SlugField(max_length=100)\n content = models.TextField()\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
#!/usr/bin/env python
import rospy
import numpy as np
import time
import RPi.GPIO as GPIO
from ccn_raspicar_ros.msg import RaspiCarWheel
from ccn_raspicar_ros.msg import RaspiCarWheelControl
from ccn_raspicar_ros.srv import RaspiCarMotorControl
class MotorControl(object):
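    """Differential-drive motor controller for the Raspberry Pi car: the four
    GPIO pins drive PWM channels for the right and left wheels (one pin per
    direction), with a configurable duty-cycle level and left/right balance."""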
def __init__(self, control_pin=[16, 18, 11, 13], t=0.1, dc_level=80, balance=1.0, pwm_freq=500):
self.control_pin = control_pin
self.t = t
self.balance = balance
self.l_level = dc_level * 2 / (balance + 1)
self.r_level = self.l_level * balance
GPIO.setmode(GPIO.BOARD)
[GPIO.setup(control_pin[pin], GPIO.OUT, initial=GPIO.LOW) for pin in range(4)]
self.pwm_r1 = GPIO.PWM(control_pin[0], pwm_freq)
self.pwm_r2 = GPIO.PWM(control_pin[1], pwm_freq)
self.pwm_l1 = GPIO.PWM(control_pin[2], pwm_freq)
self.pwm_l2 = GPIO.PWM(control_pin[3], pwm_freq)
self.pwm_r1.start(0)
self.pwm_r2.start(0)
self.pwm_l1.start(0)
self.pwm_l2.start(0)
def stop(self):
self.pwm_r1.ChangeDutyCycle(0)
self.pwm_r2.ChangeDutyCycle(0)
self.pwm_l1.ChangeDutyCycle(0)
self.pwm_l2.ChangeDutyCycle(0)
def forward(self, speed=1.0, t=None):
self.pwm_r1.ChangeDutyCycle(self.r_level*speed)
self.pwm_r2.ChangeDutyCycle(0)
self.pwm_l1.ChangeDutyCycle(self.l_level*speed)
self.pwm_l2.ChangeDutyCycle(0)
if t is None:
time.sleep(self.t)
else:
time.sleep(t)
self.stop()
def backward(self, speed=0.8, t=None):
self.pwm_r1.ChangeDutyCycle(0)
self.pwm_r2.ChangeDutyCycle(self.r_level*speed)
self.pwm_l1.ChangeDutyCycle(0)
self.pwm_l2.ChangeDutyCycle(self.l_level*speed)
if t is None:
time.sleep(self.t)
else:
time.sleep(t)
self.stop()
def turn_left(self, speed=0.6, t=None):
self.pwm_r1.ChangeDutyCycle(self.r_level*speed)
self.pwm_r2.ChangeDutyCycle(0)
self.pwm_l1.ChangeDutyCycle(0)
self.pwm_l2.ChangeDutyCycle(0)
if t is None:
time.sleep(self.t)
else:
time.sleep(t)
self.stop()
def turn_right(self, speed=0.6, t=None):
self.pwm_r1.ChangeDutyCycle(0)
self.pwm_r2.ChangeDutyCycle(0)
self.pwm_l1.ChangeDutyCycle(self.l_level*speed)
self.pwm_l2.ChangeDutyCycle(0)
if t is None:
time.sleep(self.t)
else:
time.sleep(t)
self.stop()
def arbitrary_speed(self, speed=[1.0, 1.0], t=None):
if 0 < speed[0]:
self.pwm_r1.ChangeDutyCycle(self.r_level * speed[0])
self.pwm_r2.ChangeDutyCycle(0)
elif speed[0] < 0:
self.pwm_r1.ChangeDutyCycle(0)
self.pwm_r2.ChangeDutyCycle(self.r_level * speed[0])
if 0 < speed[1]:
self.pwm_l1.ChangeDutyCycle(self.l_level * speed[1])
self.pwm_l2.ChangeDutyCycle(0)
elif speed[1] < 0:
self.pwm_l1.ChangeDutyCycle(0)
self.pwm_l2.ChangeDutyCycle(self.l_level * speed[1])
if t is None:
return
else:
time.sleep(t)
self.stop()
def cleanup(self):
self.stop()
self.pwm_r1.stop()
self.pwm_r2.stop()
self.pwm_l1.stop()
self.pwm_l2.stop()
GPIO.cleanup()
g_obstacle_detected = False
g_proximity = np.zeros([3])
g_wheel_count = np.zeros([2])
def turn_right_controlled(angle):
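    # Rotate right until the first wheel count has advanced by angle / 4.45
    # counts, stepping back left briefly on overshoot; loops idle while the
    # obstacle flag is set.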
wheel_last = g_wheel_count
count = angle / 4.45
while not rospy.is_shutdown():
if not g_obstacle_detected:
if g_wheel_count[0] - wheel_last[0] < count:
motor.turn_right(speed=0.9, t=0.05)
elif g_wheel_count[0] - wheel_last[0] > count:
motor.turn_left(speed=0.8, t=0.03)
break
else:
break
time.sleep(0.05)
else:
time.sleep(0.1)
def turn_left_controlled(angle):
wheel_last = g_wheel_count
count = angle / 4.45
while not rospy.is_shutdown():
if not g_obstacle_detected:
if g_wheel_count[1] - wheel_last[1] < count:
motor.turn_left(speed=0.9, t=0.05)
elif g_wheel_count[1] - wheel_last[1] > count:
motor.turn_right(speed=0.8, t=0.03)
break
else:
break
time.sleep(0.05)
else:
time.sleep(0.1)
def forward_controlled(distance):
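    # Drive forward until the mean of both wheel counts has advanced by
    # distance / 0.0113 counts, nudging left or right to keep the two counts
    # matched; loops idle while the obstacle flag is set.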
wheel_last = g_wheel_count
count = distance / 0.0113
while not rospy.is_shutdown():
if not g_obstacle_detected:
diff_of_both = g_wheel_count - wheel_last
if np.sum(diff_of_both)/2.0 < count:
motor.forward(speed=1.0, t=0.05)
else:
break
diff_between = diff_of_both[0] - diff_of_both[1]
if diff_between > 0:
motor.turn_left(speed=0.7, t=0.03 + diff_between * 0.005)
elif diff_between < 0:
motor.turn_right(speed=0.7, t=0.03 - diff_between * 0.005)
time.sleep(0.05)
else:
time.sleep(0.1)
def callback_RaspiCarWheel(data):
global g_wheel_count
g_wheel_count = np.array(data.wheel_count)
def handle_RaspiCarMotorControl_request(request):
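    # Service handler: parses plain-text commands ('test', 'fwd:<value>',
    # 'right:<value>', 'left:<value>'), runs the matching controlled motion
    # and returns an (acknowledgement, status) pair.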
print(request)
command = request.command
if command.startswith('test'):
return 'ack test', 'ok.'
elif command.startswith('fwd'):
try:
value = float(command.split(':')[1])
        except IndexError:  # command arrived without a ':<value>' suffix
value = 0.1
except ValueError:
value = 0
forward_controlled(value)
return 'ack fwd:%f' % value, 'ok.'
elif command.startswith('right'):
try:
value = float(command.split(':')[1])
        except IndexError:
value = 10
except ValueError:
value = 0
turn_right_controlled(value)
return 'ack right:%f' % value, 'ok.'
elif command.startswith('left'):
try:
value = float(command.split(':')[1])
        except IndexError:
value = 10
except ValueError:
value = 0
turn_left_controlled(value)
return 'ack left:%f' % value, 'ok.'
# elif data.startswith('obstacle'):
# global obstacle_detection_routine_stopper
# try:
# value = float(data.split(':')[1])
# except KeyError:
# if obstacle_detection_routine_stopper is None:
# value = 1
# else:
# value = 0
# except ValueError:
# value = 0
#
# if value > 0.0 and obstacle_detection_routine_stopper is None:
# obstacle_detection_routine_stopper = launch_obstacle_detection_routine()
# elif value == 0.0 and obstacle_detection_routine_stopper is not None:
# obstacle_detection_routine_stopper.set()
# obstacle_detection_routine_stopper = None
#
# connection.sendall(b'ack')
# rospy.loginfo('[tcp_server] sending ack to the client.')
else:
return 'error', 'ok.'
if __name__ == '__main__':
motor = MotorControl(dc_level=70, t=0.3)
rospy.loginfo('[motor_control] up and running...')
try:
rospy.init_node('RaspiCarMotorControl_node', anonymous=False)
rospy.Subscriber('RaspiCarWheel', RaspiCarWheel, callback_RaspiCarWheel)
s = rospy.Service('RaspiCarMotorControl', RaspiCarMotorControl, handle_RaspiCarMotorControl_request)
rospy.spin()
except rospy.ROSInterruptException as e:
rospy.loginfo(e)
finally:
motor.cleanup()
|
normal
|
{
"blob_id": "2985360c1e2d03c619ea2994c609fdf8c033bebd",
"index": 9177,
"step-1": "<mask token>\n\n\nclass MotorControl(object):\n <mask token>\n <mask token>\n\n def forward(self, speed=1.0, t=None):\n self.pwm_r1.ChangeDutyCycle(self.r_level * speed)\n self.pwm_r2.ChangeDutyCycle(0)\n self.pwm_l1.ChangeDutyCycle(self.l_level * speed)\n self.pwm_l2.ChangeDutyCycle(0)\n if t is None:\n time.sleep(self.t)\n else:\n time.sleep(t)\n self.stop()\n <mask token>\n\n def turn_left(self, speed=0.6, t=None):\n self.pwm_r1.ChangeDutyCycle(self.r_level * speed)\n self.pwm_r2.ChangeDutyCycle(0)\n self.pwm_l1.ChangeDutyCycle(0)\n self.pwm_l2.ChangeDutyCycle(0)\n if t is None:\n time.sleep(self.t)\n else:\n time.sleep(t)\n self.stop()\n\n def turn_right(self, speed=0.6, t=None):\n self.pwm_r1.ChangeDutyCycle(0)\n self.pwm_r2.ChangeDutyCycle(0)\n self.pwm_l1.ChangeDutyCycle(self.l_level * speed)\n self.pwm_l2.ChangeDutyCycle(0)\n if t is None:\n time.sleep(self.t)\n else:\n time.sleep(t)\n self.stop()\n <mask token>\n\n def cleanup(self):\n self.stop()\n self.pwm_r1.stop()\n self.pwm_r2.stop()\n self.pwm_l1.stop()\n self.pwm_l2.stop()\n GPIO.cleanup()\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass MotorControl(object):\n\n def __init__(self, control_pin=[16, 18, 11, 13], t=0.1, dc_level=80,\n balance=1.0, pwm_freq=500):\n self.control_pin = control_pin\n self.t = t\n self.balance = balance\n self.l_level = dc_level * 2 / (balance + 1)\n self.r_level = self.l_level * balance\n GPIO.setmode(GPIO.BOARD)\n [GPIO.setup(control_pin[pin], GPIO.OUT, initial=GPIO.LOW) for pin in\n range(4)]\n self.pwm_r1 = GPIO.PWM(control_pin[0], pwm_freq)\n self.pwm_r2 = GPIO.PWM(control_pin[1], pwm_freq)\n self.pwm_l1 = GPIO.PWM(control_pin[2], pwm_freq)\n self.pwm_l2 = GPIO.PWM(control_pin[3], pwm_freq)\n self.pwm_r1.start(0)\n self.pwm_r2.start(0)\n self.pwm_l1.start(0)\n self.pwm_l2.start(0)\n\n def stop(self):\n self.pwm_r1.ChangeDutyCycle(0)\n self.pwm_r2.ChangeDutyCycle(0)\n self.pwm_l1.ChangeDutyCycle(0)\n self.pwm_l2.ChangeDutyCycle(0)\n\n def forward(self, speed=1.0, t=None):\n self.pwm_r1.ChangeDutyCycle(self.r_level * speed)\n self.pwm_r2.ChangeDutyCycle(0)\n self.pwm_l1.ChangeDutyCycle(self.l_level * speed)\n self.pwm_l2.ChangeDutyCycle(0)\n if t is None:\n time.sleep(self.t)\n else:\n time.sleep(t)\n self.stop()\n\n def backward(self, speed=0.8, t=None):\n self.pwm_r1.ChangeDutyCycle(0)\n self.pwm_r2.ChangeDutyCycle(self.r_level * speed)\n self.pwm_l1.ChangeDutyCycle(0)\n self.pwm_l2.ChangeDutyCycle(self.l_level * speed)\n if t is None:\n time.sleep(self.t)\n else:\n time.sleep(t)\n self.stop()\n\n def turn_left(self, speed=0.6, t=None):\n self.pwm_r1.ChangeDutyCycle(self.r_level * speed)\n self.pwm_r2.ChangeDutyCycle(0)\n self.pwm_l1.ChangeDutyCycle(0)\n self.pwm_l2.ChangeDutyCycle(0)\n if t is None:\n time.sleep(self.t)\n else:\n time.sleep(t)\n self.stop()\n\n def turn_right(self, speed=0.6, t=None):\n self.pwm_r1.ChangeDutyCycle(0)\n self.pwm_r2.ChangeDutyCycle(0)\n self.pwm_l1.ChangeDutyCycle(self.l_level * speed)\n self.pwm_l2.ChangeDutyCycle(0)\n if t is None:\n time.sleep(self.t)\n else:\n time.sleep(t)\n self.stop()\n\n def arbitrary_speed(self, speed=[1.0, 1.0], t=None):\n if 0 < speed[0]:\n self.pwm_r1.ChangeDutyCycle(self.r_level * speed[0])\n self.pwm_r2.ChangeDutyCycle(0)\n elif speed[0] < 0:\n self.pwm_r1.ChangeDutyCycle(0)\n self.pwm_r2.ChangeDutyCycle(self.r_level * speed[0])\n if 0 < speed[1]:\n self.pwm_l1.ChangeDutyCycle(self.l_level * speed[1])\n self.pwm_l2.ChangeDutyCycle(0)\n elif speed[1] < 0:\n self.pwm_l1.ChangeDutyCycle(0)\n self.pwm_l2.ChangeDutyCycle(self.l_level * speed[1])\n if t is None:\n return\n else:\n time.sleep(t)\n self.stop()\n\n def cleanup(self):\n self.stop()\n self.pwm_r1.stop()\n self.pwm_r2.stop()\n self.pwm_l1.stop()\n self.pwm_l2.stop()\n GPIO.cleanup()\n\n\n<mask token>\n\n\ndef turn_right_controlled(angle):\n wheel_last = g_wheel_count\n count = angle / 4.45\n while not rospy.is_shutdown():\n if not g_obstacle_detected:\n if g_wheel_count[0] - wheel_last[0] < count:\n motor.turn_right(speed=0.9, t=0.05)\n elif g_wheel_count[0] - wheel_last[0] > count:\n motor.turn_left(speed=0.8, t=0.03)\n break\n else:\n break\n time.sleep(0.05)\n else:\n time.sleep(0.1)\n\n\n<mask token>\n\n\ndef callback_RaspiCarWheel(data):\n global g_wheel_count\n g_wheel_count = np.array(data.wheel_count)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass MotorControl(object):\n\n def __init__(self, control_pin=[16, 18, 11, 13], t=0.1, dc_level=80,\n balance=1.0, pwm_freq=500):\n self.control_pin = control_pin\n self.t = t\n self.balance = balance\n self.l_level = dc_level * 2 / (balance + 1)\n self.r_level = self.l_level * balance\n GPIO.setmode(GPIO.BOARD)\n [GPIO.setup(control_pin[pin], GPIO.OUT, initial=GPIO.LOW) for pin in\n range(4)]\n self.pwm_r1 = GPIO.PWM(control_pin[0], pwm_freq)\n self.pwm_r2 = GPIO.PWM(control_pin[1], pwm_freq)\n self.pwm_l1 = GPIO.PWM(control_pin[2], pwm_freq)\n self.pwm_l2 = GPIO.PWM(control_pin[3], pwm_freq)\n self.pwm_r1.start(0)\n self.pwm_r2.start(0)\n self.pwm_l1.start(0)\n self.pwm_l2.start(0)\n\n def stop(self):\n self.pwm_r1.ChangeDutyCycle(0)\n self.pwm_r2.ChangeDutyCycle(0)\n self.pwm_l1.ChangeDutyCycle(0)\n self.pwm_l2.ChangeDutyCycle(0)\n\n def forward(self, speed=1.0, t=None):\n self.pwm_r1.ChangeDutyCycle(self.r_level * speed)\n self.pwm_r2.ChangeDutyCycle(0)\n self.pwm_l1.ChangeDutyCycle(self.l_level * speed)\n self.pwm_l2.ChangeDutyCycle(0)\n if t is None:\n time.sleep(self.t)\n else:\n time.sleep(t)\n self.stop()\n\n def backward(self, speed=0.8, t=None):\n self.pwm_r1.ChangeDutyCycle(0)\n self.pwm_r2.ChangeDutyCycle(self.r_level * speed)\n self.pwm_l1.ChangeDutyCycle(0)\n self.pwm_l2.ChangeDutyCycle(self.l_level * speed)\n if t is None:\n time.sleep(self.t)\n else:\n time.sleep(t)\n self.stop()\n\n def turn_left(self, speed=0.6, t=None):\n self.pwm_r1.ChangeDutyCycle(self.r_level * speed)\n self.pwm_r2.ChangeDutyCycle(0)\n self.pwm_l1.ChangeDutyCycle(0)\n self.pwm_l2.ChangeDutyCycle(0)\n if t is None:\n time.sleep(self.t)\n else:\n time.sleep(t)\n self.stop()\n\n def turn_right(self, speed=0.6, t=None):\n self.pwm_r1.ChangeDutyCycle(0)\n self.pwm_r2.ChangeDutyCycle(0)\n self.pwm_l1.ChangeDutyCycle(self.l_level * speed)\n self.pwm_l2.ChangeDutyCycle(0)\n if t is None:\n time.sleep(self.t)\n else:\n time.sleep(t)\n self.stop()\n\n def arbitrary_speed(self, speed=[1.0, 1.0], t=None):\n if 0 < speed[0]:\n self.pwm_r1.ChangeDutyCycle(self.r_level * speed[0])\n self.pwm_r2.ChangeDutyCycle(0)\n elif speed[0] < 0:\n self.pwm_r1.ChangeDutyCycle(0)\n self.pwm_r2.ChangeDutyCycle(self.r_level * speed[0])\n if 0 < speed[1]:\n self.pwm_l1.ChangeDutyCycle(self.l_level * speed[1])\n self.pwm_l2.ChangeDutyCycle(0)\n elif speed[1] < 0:\n self.pwm_l1.ChangeDutyCycle(0)\n self.pwm_l2.ChangeDutyCycle(self.l_level * speed[1])\n if t is None:\n return\n else:\n time.sleep(t)\n self.stop()\n\n def cleanup(self):\n self.stop()\n self.pwm_r1.stop()\n self.pwm_r2.stop()\n self.pwm_l1.stop()\n self.pwm_l2.stop()\n GPIO.cleanup()\n\n\n<mask token>\n\n\ndef turn_right_controlled(angle):\n wheel_last = g_wheel_count\n count = angle / 4.45\n while not rospy.is_shutdown():\n if not g_obstacle_detected:\n if g_wheel_count[0] - wheel_last[0] < count:\n motor.turn_right(speed=0.9, t=0.05)\n elif g_wheel_count[0] - wheel_last[0] > count:\n motor.turn_left(speed=0.8, t=0.03)\n break\n else:\n break\n time.sleep(0.05)\n else:\n time.sleep(0.1)\n\n\n<mask token>\n\n\ndef forward_controlled(distance):\n wheel_last = g_wheel_count\n count = distance / 0.0113\n while not rospy.is_shutdown():\n if not g_obstacle_detected:\n diff_of_both = g_wheel_count - wheel_last\n if np.sum(diff_of_both) / 2.0 < count:\n motor.forward(speed=1.0, t=0.05)\n else:\n break\n diff_between = diff_of_both[0] - diff_of_both[1]\n if diff_between > 0:\n motor.turn_left(speed=0.7, t=0.03 + diff_between * 0.005)\n elif 
diff_between < 0:\n motor.turn_right(speed=0.7, t=0.03 - diff_between * 0.005)\n time.sleep(0.05)\n else:\n time.sleep(0.1)\n\n\ndef callback_RaspiCarWheel(data):\n global g_wheel_count\n g_wheel_count = np.array(data.wheel_count)\n\n\ndef handle_RaspiCarMotorControl_request(request):\n print(request)\n command = request.command\n if command.startswith('test'):\n return 'ack test', 'ok.'\n elif command.startswith('fwd'):\n try:\n value = float(command.split(':')[1])\n except KeyError:\n value = 0.1\n except ValueError:\n value = 0\n forward_controlled(value)\n return 'ack fwd:%f' % value, 'ok.'\n elif command.startswith('right'):\n try:\n value = float(command.split(':')[1])\n except KeyError:\n value = 10\n except ValueError:\n value = 0\n turn_right_controlled(value)\n return 'ack right:%f' % value, 'ok.'\n elif command.startswith('left'):\n try:\n value = float(command.split(':')[1])\n except KeyError:\n value = 10\n except ValueError:\n value = 0\n turn_left_controlled(value)\n return 'ack left:%f' % value, 'ok.'\n else:\n return 'error', 'ok.'\n\n\n<mask token>\n",
"step-4": "<mask token>\n\n\nclass MotorControl(object):\n\n def __init__(self, control_pin=[16, 18, 11, 13], t=0.1, dc_level=80,\n balance=1.0, pwm_freq=500):\n self.control_pin = control_pin\n self.t = t\n self.balance = balance\n self.l_level = dc_level * 2 / (balance + 1)\n self.r_level = self.l_level * balance\n GPIO.setmode(GPIO.BOARD)\n [GPIO.setup(control_pin[pin], GPIO.OUT, initial=GPIO.LOW) for pin in\n range(4)]\n self.pwm_r1 = GPIO.PWM(control_pin[0], pwm_freq)\n self.pwm_r2 = GPIO.PWM(control_pin[1], pwm_freq)\n self.pwm_l1 = GPIO.PWM(control_pin[2], pwm_freq)\n self.pwm_l2 = GPIO.PWM(control_pin[3], pwm_freq)\n self.pwm_r1.start(0)\n self.pwm_r2.start(0)\n self.pwm_l1.start(0)\n self.pwm_l2.start(0)\n\n def stop(self):\n self.pwm_r1.ChangeDutyCycle(0)\n self.pwm_r2.ChangeDutyCycle(0)\n self.pwm_l1.ChangeDutyCycle(0)\n self.pwm_l2.ChangeDutyCycle(0)\n\n def forward(self, speed=1.0, t=None):\n self.pwm_r1.ChangeDutyCycle(self.r_level * speed)\n self.pwm_r2.ChangeDutyCycle(0)\n self.pwm_l1.ChangeDutyCycle(self.l_level * speed)\n self.pwm_l2.ChangeDutyCycle(0)\n if t is None:\n time.sleep(self.t)\n else:\n time.sleep(t)\n self.stop()\n\n def backward(self, speed=0.8, t=None):\n self.pwm_r1.ChangeDutyCycle(0)\n self.pwm_r2.ChangeDutyCycle(self.r_level * speed)\n self.pwm_l1.ChangeDutyCycle(0)\n self.pwm_l2.ChangeDutyCycle(self.l_level * speed)\n if t is None:\n time.sleep(self.t)\n else:\n time.sleep(t)\n self.stop()\n\n def turn_left(self, speed=0.6, t=None):\n self.pwm_r1.ChangeDutyCycle(self.r_level * speed)\n self.pwm_r2.ChangeDutyCycle(0)\n self.pwm_l1.ChangeDutyCycle(0)\n self.pwm_l2.ChangeDutyCycle(0)\n if t is None:\n time.sleep(self.t)\n else:\n time.sleep(t)\n self.stop()\n\n def turn_right(self, speed=0.6, t=None):\n self.pwm_r1.ChangeDutyCycle(0)\n self.pwm_r2.ChangeDutyCycle(0)\n self.pwm_l1.ChangeDutyCycle(self.l_level * speed)\n self.pwm_l2.ChangeDutyCycle(0)\n if t is None:\n time.sleep(self.t)\n else:\n time.sleep(t)\n self.stop()\n\n def arbitrary_speed(self, speed=[1.0, 1.0], t=None):\n if 0 < speed[0]:\n self.pwm_r1.ChangeDutyCycle(self.r_level * speed[0])\n self.pwm_r2.ChangeDutyCycle(0)\n elif speed[0] < 0:\n self.pwm_r1.ChangeDutyCycle(0)\n self.pwm_r2.ChangeDutyCycle(self.r_level * speed[0])\n if 0 < speed[1]:\n self.pwm_l1.ChangeDutyCycle(self.l_level * speed[1])\n self.pwm_l2.ChangeDutyCycle(0)\n elif speed[1] < 0:\n self.pwm_l1.ChangeDutyCycle(0)\n self.pwm_l2.ChangeDutyCycle(self.l_level * speed[1])\n if t is None:\n return\n else:\n time.sleep(t)\n self.stop()\n\n def cleanup(self):\n self.stop()\n self.pwm_r1.stop()\n self.pwm_r2.stop()\n self.pwm_l1.stop()\n self.pwm_l2.stop()\n GPIO.cleanup()\n\n\n<mask token>\n\n\ndef turn_right_controlled(angle):\n wheel_last = g_wheel_count\n count = angle / 4.45\n while not rospy.is_shutdown():\n if not g_obstacle_detected:\n if g_wheel_count[0] - wheel_last[0] < count:\n motor.turn_right(speed=0.9, t=0.05)\n elif g_wheel_count[0] - wheel_last[0] > count:\n motor.turn_left(speed=0.8, t=0.03)\n break\n else:\n break\n time.sleep(0.05)\n else:\n time.sleep(0.1)\n\n\ndef turn_left_controlled(angle):\n wheel_last = g_wheel_count\n count = angle / 4.45\n while not rospy.is_shutdown():\n if not g_obstacle_detected:\n if g_wheel_count[1] - wheel_last[1] < count:\n motor.turn_left(speed=0.9, t=0.05)\n elif g_wheel_count[1] - wheel_last[1] > count:\n motor.turn_right(speed=0.8, t=0.03)\n break\n else:\n break\n time.sleep(0.05)\n else:\n time.sleep(0.1)\n\n\ndef forward_controlled(distance):\n wheel_last = 
g_wheel_count\n count = distance / 0.0113\n while not rospy.is_shutdown():\n if not g_obstacle_detected:\n diff_of_both = g_wheel_count - wheel_last\n if np.sum(diff_of_both) / 2.0 < count:\n motor.forward(speed=1.0, t=0.05)\n else:\n break\n diff_between = diff_of_both[0] - diff_of_both[1]\n if diff_between > 0:\n motor.turn_left(speed=0.7, t=0.03 + diff_between * 0.005)\n elif diff_between < 0:\n motor.turn_right(speed=0.7, t=0.03 - diff_between * 0.005)\n time.sleep(0.05)\n else:\n time.sleep(0.1)\n\n\ndef callback_RaspiCarWheel(data):\n global g_wheel_count\n g_wheel_count = np.array(data.wheel_count)\n\n\ndef handle_RaspiCarMotorControl_request(request):\n print(request)\n command = request.command\n if command.startswith('test'):\n return 'ack test', 'ok.'\n elif command.startswith('fwd'):\n try:\n value = float(command.split(':')[1])\n except KeyError:\n value = 0.1\n except ValueError:\n value = 0\n forward_controlled(value)\n return 'ack fwd:%f' % value, 'ok.'\n elif command.startswith('right'):\n try:\n value = float(command.split(':')[1])\n except KeyError:\n value = 10\n except ValueError:\n value = 0\n turn_right_controlled(value)\n return 'ack right:%f' % value, 'ok.'\n elif command.startswith('left'):\n try:\n value = float(command.split(':')[1])\n except KeyError:\n value = 10\n except ValueError:\n value = 0\n turn_left_controlled(value)\n return 'ack left:%f' % value, 'ok.'\n else:\n return 'error', 'ok.'\n\n\nif __name__ == '__main__':\n motor = MotorControl(dc_level=70, t=0.3)\n rospy.loginfo('[motor_control] up and running...')\n try:\n rospy.init_node('RaspiCarMotorControl_node', anonymous=False)\n rospy.Subscriber('RaspiCarWheel', RaspiCarWheel, callback_RaspiCarWheel\n )\n s = rospy.Service('RaspiCarMotorControl', RaspiCarMotorControl,\n handle_RaspiCarMotorControl_request)\n rospy.spin()\n except rospy.ROSInterruptException as e:\n rospy.loginfo(e)\n finally:\n motor.cleanup()\n",
"step-5": "#!/usr/bin/env python\n\nimport rospy\nimport numpy as np\nimport time\nimport RPi.GPIO as GPIO\n\nfrom ccn_raspicar_ros.msg import RaspiCarWheel\nfrom ccn_raspicar_ros.msg import RaspiCarWheelControl\nfrom ccn_raspicar_ros.srv import RaspiCarMotorControl\n\n\nclass MotorControl(object):\n def __init__(self, control_pin=[16, 18, 11, 13], t=0.1, dc_level=80, balance=1.0, pwm_freq=500):\n self.control_pin = control_pin\n self.t = t\n self.balance = balance\n self.l_level = dc_level * 2 / (balance + 1)\n self.r_level = self.l_level * balance\n\n GPIO.setmode(GPIO.BOARD)\n [GPIO.setup(control_pin[pin], GPIO.OUT, initial=GPIO.LOW) for pin in range(4)]\n\n self.pwm_r1 = GPIO.PWM(control_pin[0], pwm_freq)\n self.pwm_r2 = GPIO.PWM(control_pin[1], pwm_freq)\n self.pwm_l1 = GPIO.PWM(control_pin[2], pwm_freq)\n self.pwm_l2 = GPIO.PWM(control_pin[3], pwm_freq)\n self.pwm_r1.start(0)\n self.pwm_r2.start(0)\n self.pwm_l1.start(0)\n self.pwm_l2.start(0)\n\n def stop(self):\n self.pwm_r1.ChangeDutyCycle(0)\n self.pwm_r2.ChangeDutyCycle(0)\n self.pwm_l1.ChangeDutyCycle(0)\n self.pwm_l2.ChangeDutyCycle(0)\n\n def forward(self, speed=1.0, t=None):\n self.pwm_r1.ChangeDutyCycle(self.r_level*speed)\n self.pwm_r2.ChangeDutyCycle(0)\n self.pwm_l1.ChangeDutyCycle(self.l_level*speed)\n self.pwm_l2.ChangeDutyCycle(0)\n if t is None:\n time.sleep(self.t)\n else:\n time.sleep(t)\n self.stop()\n\n def backward(self, speed=0.8, t=None):\n self.pwm_r1.ChangeDutyCycle(0)\n self.pwm_r2.ChangeDutyCycle(self.r_level*speed)\n self.pwm_l1.ChangeDutyCycle(0)\n self.pwm_l2.ChangeDutyCycle(self.l_level*speed)\n if t is None:\n time.sleep(self.t)\n else:\n time.sleep(t)\n self.stop()\n\n def turn_left(self, speed=0.6, t=None):\n self.pwm_r1.ChangeDutyCycle(self.r_level*speed)\n self.pwm_r2.ChangeDutyCycle(0)\n self.pwm_l1.ChangeDutyCycle(0)\n self.pwm_l2.ChangeDutyCycle(0)\n if t is None:\n time.sleep(self.t)\n else:\n time.sleep(t)\n self.stop()\n\n def turn_right(self, speed=0.6, t=None):\n self.pwm_r1.ChangeDutyCycle(0)\n self.pwm_r2.ChangeDutyCycle(0)\n self.pwm_l1.ChangeDutyCycle(self.l_level*speed)\n self.pwm_l2.ChangeDutyCycle(0)\n if t is None:\n time.sleep(self.t)\n else:\n time.sleep(t)\n self.stop()\n\n def arbitrary_speed(self, speed=[1.0, 1.0], t=None):\n if 0 < speed[0]:\n self.pwm_r1.ChangeDutyCycle(self.r_level * speed[0])\n self.pwm_r2.ChangeDutyCycle(0)\n elif speed[0] < 0:\n self.pwm_r1.ChangeDutyCycle(0)\n self.pwm_r2.ChangeDutyCycle(self.r_level * speed[0])\n if 0 < speed[1]:\n self.pwm_l1.ChangeDutyCycle(self.l_level * speed[1])\n self.pwm_l2.ChangeDutyCycle(0)\n elif speed[1] < 0:\n self.pwm_l1.ChangeDutyCycle(0)\n self.pwm_l2.ChangeDutyCycle(self.l_level * speed[1])\n if t is None:\n return\n else:\n time.sleep(t)\n self.stop()\n\n def cleanup(self):\n self.stop()\n self.pwm_r1.stop()\n self.pwm_r2.stop()\n self.pwm_l1.stop()\n self.pwm_l2.stop()\n GPIO.cleanup()\n\n\ng_obstacle_detected = False\ng_proximity = np.zeros([3])\ng_wheel_count = np.zeros([2])\n\n\ndef turn_right_controlled(angle):\n wheel_last = g_wheel_count\n count = angle / 4.45\n while not rospy.is_shutdown():\n if not g_obstacle_detected:\n\n if g_wheel_count[0] - wheel_last[0] < count:\n motor.turn_right(speed=0.9, t=0.05)\n elif g_wheel_count[0] - wheel_last[0] > count:\n motor.turn_left(speed=0.8, t=0.03)\n break\n else:\n break\n\n time.sleep(0.05)\n\n else:\n time.sleep(0.1)\n\n\ndef turn_left_controlled(angle):\n wheel_last = g_wheel_count\n count = angle / 4.45\n while not rospy.is_shutdown():\n if not 
g_obstacle_detected:\n \n if g_wheel_count[1] - wheel_last[1] < count:\n motor.turn_left(speed=0.9, t=0.05)\n elif g_wheel_count[1] - wheel_last[1] > count:\n motor.turn_right(speed=0.8, t=0.03)\n break\n else:\n break\n\n time.sleep(0.05)\n \n else:\n time.sleep(0.1)\n\n\ndef forward_controlled(distance):\n wheel_last = g_wheel_count\n count = distance / 0.0113\n while not rospy.is_shutdown(): \n if not g_obstacle_detected:\n \n diff_of_both = g_wheel_count - wheel_last\n\n if np.sum(diff_of_both)/2.0 < count:\n motor.forward(speed=1.0, t=0.05) \n else:\n break\n\n diff_between = diff_of_both[0] - diff_of_both[1]\n\n if diff_between > 0:\n motor.turn_left(speed=0.7, t=0.03 + diff_between * 0.005)\n elif diff_between < 0:\n motor.turn_right(speed=0.7, t=0.03 - diff_between * 0.005)\n \n time.sleep(0.05)\n \n else:\n time.sleep(0.1)\n\n\ndef callback_RaspiCarWheel(data):\n global g_wheel_count\n g_wheel_count = np.array(data.wheel_count)\n\n\ndef handle_RaspiCarMotorControl_request(request):\n\n print(request)\n\n command = request.command\n if command.startswith('test'):\n return 'ack test', 'ok.'\n\n elif command.startswith('fwd'):\n try:\n value = float(command.split(':')[1])\n except KeyError:\n value = 0.1\n except ValueError:\n value = 0\n forward_controlled(value)\n return 'ack fwd:%f' % value, 'ok.'\n\n elif command.startswith('right'):\n try:\n value = float(command.split(':')[1])\n except KeyError:\n value = 10\n except ValueError:\n value = 0\n turn_right_controlled(value)\n return 'ack right:%f' % value, 'ok.'\n elif command.startswith('left'):\n try:\n value = float(command.split(':')[1])\n except KeyError:\n value = 10\n except ValueError:\n value = 0\n turn_left_controlled(value)\n return 'ack left:%f' % value, 'ok.'\n # elif data.startswith('obstacle'):\n # global obstacle_detection_routine_stopper\n # try:\n # value = float(data.split(':')[1])\n # except KeyError:\n # if obstacle_detection_routine_stopper is None:\n # value = 1\n # else:\n # value = 0\n # except ValueError:\n # value = 0\n #\n # if value > 0.0 and obstacle_detection_routine_stopper is None:\n # obstacle_detection_routine_stopper = launch_obstacle_detection_routine()\n # elif value == 0.0 and obstacle_detection_routine_stopper is not None:\n # obstacle_detection_routine_stopper.set()\n # obstacle_detection_routine_stopper = None\n #\n # connection.sendall(b'ack')\n # rospy.loginfo('[tcp_server] sending ack to the client.')\n else:\n return 'error', 'ok.'\n\n\nif __name__ == '__main__':\n motor = MotorControl(dc_level=70, t=0.3)\n rospy.loginfo('[motor_control] up and running...')\n\n try:\n rospy.init_node('RaspiCarMotorControl_node', anonymous=False)\n rospy.Subscriber('RaspiCarWheel', RaspiCarWheel, callback_RaspiCarWheel)\n s = rospy.Service('RaspiCarMotorControl', RaspiCarMotorControl, handle_RaspiCarMotorControl_request)\n rospy.spin()\n\n except rospy.ROSInterruptException as e:\n rospy.loginfo(e)\n\n finally:\n motor.cleanup()\n\n",
"step-ids": [
5,
11,
13,
15,
18
]
}
|
[
5,
11,
13,
15,
18
] |
import torch
import numpy as np
from torch.autograd import Variable
from util import helpers
from util.metrics import ECELoss, ece_score
import sklearn.metrics as skm
import os
import pandas as pd
import pickle
def eval(path_in, path_out, net, testloader, oodloader, use_cuda=True, save_dir=None):
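    # Evaluate on the in-distribution test set and an OOD set: per-sample
    # max-softmax confidences are written to path_in / path_out, hidden
    # embeddings and per-sample results are optionally dumped to save_dir,
    # and accuracy, ECE and the AUROC of confidence vs. correctness are printed.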
f1 = open(path_in, 'w')
f2 = open(path_out, 'w')
ece_criterion = ECELoss().cuda()
net.eval()
net.training = False
correct = 0
total = 0
logits_list = []
labels_list = []
confidence_list = []
correct_list = []
predicted_list = []
sne_embeddings = []
print('| Classification confidence for ID is saved at: {}'.format(path_in))
with torch.no_grad():
for batch_idx, (inputs, targets) in enumerate(testloader):
if use_cuda:
inputs, targets = inputs.cuda(), targets.cuda()
inputs, targets = Variable(inputs), Variable(targets)
outputs, hidden = net(inputs)
# this is the OOD magic
nnOutputs = helpers.softmax(outputs)
for k in range(len(inputs)):
f1.write("{}\n".format(np.max(nnOutputs[k])))
confidence_list.append(np.max(nnOutputs[k]))
sne_embeddings.append(hidden.data.cpu()[k].numpy())
_, predicted = torch.max(outputs.data, 1)
total += targets.size(0)
correct += predicted.eq(targets.data).cpu().sum()
correct_list.extend(predicted.eq(targets.data).cpu().tolist())
predicted_list.extend(predicted.cpu().tolist())
logits_list.append(outputs.data)
labels_list.append(targets.data)
logits = torch.cat(logits_list).cuda()
labels = torch.cat(labels_list).cuda()
ece = ece_criterion(logits, labels)
if save_dir:
with open(os.path.join(save_dir, 'mcp_sne.pkl'), 'wb') as f:
pickle.dump(sne_embeddings, f)
with open(os.path.join(save_dir, 'mcp_targets.txt'), 'w') as f:
for item in labels_list:
f.write('{}\n'.format(item.cpu().numpy()[0]))
with open(os.path.join(save_dir, 'mcp_pred.txt'), 'w') as f:
for item in predicted_list:
f.write('{}\n'.format(item))
with open(os.path.join(save_dir, 'mcp_correct.txt'), 'w') as f:
for item in correct_list:
f.write('{}\n'.format(item))
with open(os.path.join(save_dir, 'mcp_confidence.txt'), 'w') as f:
for item in confidence_list:
f.write('{}\n'.format(item))
acc = 100.*correct/total
acc_list = (sum(correct_list)/len(correct_list))
# calculate AUROC for classifcation accuracy
fpr, tpr, _ = skm.roc_curve(y_true = correct_list, y_score = confidence_list, pos_label = 1) #positive class is 1; negative class is 0
auroc_classification = skm.auc(fpr, tpr)
print("| Test Result\tAcc@1: %.2f%%" %(acc))
print(f'| ECE: {ece.item()}')
# print(f'| ECE v2: {ece_score(logits.cpu(), labels.cpu())}')
print(f'| Acc list: {acc_list}')
print(f'| AUROC classification: {auroc_classification}')
sne_embeddings_ood = []
print('| Classification confidence for OOD is saved at: {}'.format(path_out))
with torch.no_grad():
for batch_idx, (inputs, targets) in enumerate(oodloader):
if use_cuda:
inputs, targets = inputs.cuda(), targets.cuda()
inputs, targets = Variable(inputs), Variable(targets)
outputs, hidden = net(inputs)
# this is the OOD magic
nnOutputs = helpers.softmax(outputs)
for k in range(len(inputs)):
f2.write("{}\n".format(np.max(nnOutputs[k])))
sne_embeddings_ood.append(hidden.data.cpu()[k].numpy())
if save_dir:
with open(os.path.join(save_dir, 'mcp_sne_ood.pkl'), 'wb') as f:
pickle.dump(sne_embeddings_ood, f)
def eval_cifar10(path_in, path_out, net, testloader, oodloader, use_cuda=True, save_dir=None):
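    # CIFAR-10 variant of eval() above: the same evaluation loop, writing its
    # side outputs to files suffixed with '_cifar10'.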
f1 = open(path_in, 'w')
f2 = open(path_out, 'w')
ece_criterion = ECELoss().cuda()
net.eval()
net.training = False
correct = 0
total = 0
logits_list = []
labels_list = []
confidence_list = []
correct_list = []
predicted_list = []
sne_embeddings = []
print('| Classification confidence for ID is saved at: {}'.format(path_in))
with torch.no_grad():
for batch_idx, (inputs, targets) in enumerate(testloader):
if use_cuda:
inputs, targets = inputs.cuda(), targets.cuda()
inputs, targets = Variable(inputs), Variable(targets)
outputs, hidden = net(inputs)
# this is the OOD magic
nnOutputs = helpers.softmax(outputs)
for k in range(len(inputs)):
f1.write("{}\n".format(np.max(nnOutputs[k])))
confidence_list.append(np.max(nnOutputs[k]))
sne_embeddings.append(hidden.data.cpu()[k].numpy())
_, predicted = torch.max(outputs.data, 1)
total += targets.size(0)
correct += predicted.eq(targets.data).cpu().sum()
correct_list.extend(predicted.eq(targets.data).cpu().tolist())
predicted_list.extend(predicted.cpu().tolist())
logits_list.append(outputs.data)
labels_list.append(targets.data)
logits = torch.cat(logits_list).cuda()
labels = torch.cat(labels_list).cuda()
labels_list = torch.cat(labels_list).cpu().tolist()
ece = ece_criterion(logits, labels)
if save_dir:
with open(os.path.join(save_dir, 'mcp_sne_cifar10.pkl'), 'wb') as f:
pickle.dump(sne_embeddings, f)
with open(os.path.join(save_dir, 'mcp_targets_cifar10.txt'), 'w') as f:
for item in labels_list:
f.write('{}\n'.format(item))
with open(os.path.join(save_dir, 'mcp_pred_cifar10.txt'), 'w') as f:
for item in predicted_list:
f.write('{}\n'.format(item))
with open(os.path.join(save_dir, 'mcp_correct_cifar10.txt'), 'w') as f:
for item in correct_list:
f.write('{}\n'.format(item))
with open(os.path.join(save_dir, 'mcp_confidence_cifar10.txt'), 'w') as f:
for item in confidence_list:
f.write('{}\n'.format(item))
acc = 100.*correct/total
acc_list = (sum(correct_list)/len(correct_list))
# calculate AUROC for classifcation accuracy
fpr, tpr, _ = skm.roc_curve(y_true = correct_list, y_score = confidence_list, pos_label = 1) #positive class is 1; negative class is 0
auroc_classification = skm.auc(fpr, tpr)
print("| Test Result\tAcc@1: %.2f%%" %(acc))
print(f'| ECE: {ece.item()}')
# print(f'| ECE v2: {ece_score(logits.cpu(), labels.cpu())}')
print(f'| Acc list: {acc_list}')
print(f'| AUROC classification: {auroc_classification}')
sne_embeddings_ood = []
print('| Classification confidence for OOD is saved at: {}'.format(path_out))
with torch.no_grad():
for batch_idx, (inputs, targets) in enumerate(oodloader):
if use_cuda:
inputs, targets = inputs.cuda(), targets.cuda()
inputs, targets = Variable(inputs), Variable(targets)
outputs, hidden = net(inputs)
# this is the OOD magic
nnOutputs = helpers.softmax(outputs)
for k in range(len(inputs)):
f2.write("{}\n".format(np.max(nnOutputs[k])))
sne_embeddings_ood.append(hidden.data.cpu()[k].numpy())
if save_dir:
with open(os.path.join(save_dir, 'mcp_sne_ood_cifar10.pkl'), 'wb') as f:
pickle.dump(sne_embeddings_ood, f)
def train():
pass
|
normal
|
{
"blob_id": "edd2b7b453d7fa33e6cca3b5dbc895f034a9e22a",
"index": 2746,
"step-1": "<mask token>\n\n\ndef eval_cifar10(path_in, path_out, net, testloader, oodloader, use_cuda=\n True, save_dir=None):\n f1 = open(path_in, 'w')\n f2 = open(path_out, 'w')\n ece_criterion = ECELoss().cuda()\n net.eval()\n net.training = False\n correct = 0\n total = 0\n logits_list = []\n labels_list = []\n confidence_list = []\n correct_list = []\n predicted_list = []\n sne_embeddings = []\n print('| Classification confidence for ID is saved at: {}'.format(path_in))\n with torch.no_grad():\n for batch_idx, (inputs, targets) in enumerate(testloader):\n if use_cuda:\n inputs, targets = inputs.cuda(), targets.cuda()\n inputs, targets = Variable(inputs), Variable(targets)\n outputs, hidden = net(inputs)\n nnOutputs = helpers.softmax(outputs)\n for k in range(len(inputs)):\n f1.write('{}\\n'.format(np.max(nnOutputs[k])))\n confidence_list.append(np.max(nnOutputs[k]))\n sne_embeddings.append(hidden.data.cpu()[k].numpy())\n _, predicted = torch.max(outputs.data, 1)\n total += targets.size(0)\n correct += predicted.eq(targets.data).cpu().sum()\n correct_list.extend(predicted.eq(targets.data).cpu().tolist())\n predicted_list.extend(predicted.cpu().tolist())\n logits_list.append(outputs.data)\n labels_list.append(targets.data)\n logits = torch.cat(logits_list).cuda()\n labels = torch.cat(labels_list).cuda()\n labels_list = torch.cat(labels_list).cpu().tolist()\n ece = ece_criterion(logits, labels)\n if save_dir:\n with open(os.path.join(save_dir, 'mcp_sne_cifar10.pkl'), 'wb') as f:\n pickle.dump(sne_embeddings, f)\n with open(os.path.join(save_dir, 'mcp_targets_cifar10.txt'), 'w') as f:\n for item in labels_list:\n f.write('{}\\n'.format(item))\n with open(os.path.join(save_dir, 'mcp_pred_cifar10.txt'), 'w') as f:\n for item in predicted_list:\n f.write('{}\\n'.format(item))\n with open(os.path.join(save_dir, 'mcp_correct_cifar10.txt'), 'w') as f:\n for item in correct_list:\n f.write('{}\\n'.format(item))\n with open(os.path.join(save_dir, 'mcp_confidence_cifar10.txt'), 'w'\n ) as f:\n for item in confidence_list:\n f.write('{}\\n'.format(item))\n acc = 100.0 * correct / total\n acc_list = sum(correct_list) / len(correct_list)\n fpr, tpr, _ = skm.roc_curve(y_true=correct_list, y_score=\n confidence_list, pos_label=1)\n auroc_classification = skm.auc(fpr, tpr)\n print('| Test Result\\tAcc@1: %.2f%%' % acc)\n print(f'| ECE: {ece.item()}')\n print(f'| Acc list: {acc_list}')\n print(f'| AUROC classification: {auroc_classification}')\n sne_embeddings_ood = []\n print('| Classification confidence for OOD is saved at: {}'.format(\n path_out))\n with torch.no_grad():\n for batch_idx, (inputs, targets) in enumerate(oodloader):\n if use_cuda:\n inputs, targets = inputs.cuda(), targets.cuda()\n inputs, targets = Variable(inputs), Variable(targets)\n outputs, hidden = net(inputs)\n nnOutputs = helpers.softmax(outputs)\n for k in range(len(inputs)):\n f2.write('{}\\n'.format(np.max(nnOutputs[k])))\n sne_embeddings_ood.append(hidden.data.cpu()[k].numpy())\n if save_dir:\n with open(os.path.join(save_dir, 'mcp_sne_ood_cifar10.pkl'), 'wb'\n ) as f:\n pickle.dump(sne_embeddings_ood, f)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef eval_cifar10(path_in, path_out, net, testloader, oodloader, use_cuda=\n True, save_dir=None):\n f1 = open(path_in, 'w')\n f2 = open(path_out, 'w')\n ece_criterion = ECELoss().cuda()\n net.eval()\n net.training = False\n correct = 0\n total = 0\n logits_list = []\n labels_list = []\n confidence_list = []\n correct_list = []\n predicted_list = []\n sne_embeddings = []\n print('| Classification confidence for ID is saved at: {}'.format(path_in))\n with torch.no_grad():\n for batch_idx, (inputs, targets) in enumerate(testloader):\n if use_cuda:\n inputs, targets = inputs.cuda(), targets.cuda()\n inputs, targets = Variable(inputs), Variable(targets)\n outputs, hidden = net(inputs)\n nnOutputs = helpers.softmax(outputs)\n for k in range(len(inputs)):\n f1.write('{}\\n'.format(np.max(nnOutputs[k])))\n confidence_list.append(np.max(nnOutputs[k]))\n sne_embeddings.append(hidden.data.cpu()[k].numpy())\n _, predicted = torch.max(outputs.data, 1)\n total += targets.size(0)\n correct += predicted.eq(targets.data).cpu().sum()\n correct_list.extend(predicted.eq(targets.data).cpu().tolist())\n predicted_list.extend(predicted.cpu().tolist())\n logits_list.append(outputs.data)\n labels_list.append(targets.data)\n logits = torch.cat(logits_list).cuda()\n labels = torch.cat(labels_list).cuda()\n labels_list = torch.cat(labels_list).cpu().tolist()\n ece = ece_criterion(logits, labels)\n if save_dir:\n with open(os.path.join(save_dir, 'mcp_sne_cifar10.pkl'), 'wb') as f:\n pickle.dump(sne_embeddings, f)\n with open(os.path.join(save_dir, 'mcp_targets_cifar10.txt'), 'w') as f:\n for item in labels_list:\n f.write('{}\\n'.format(item))\n with open(os.path.join(save_dir, 'mcp_pred_cifar10.txt'), 'w') as f:\n for item in predicted_list:\n f.write('{}\\n'.format(item))\n with open(os.path.join(save_dir, 'mcp_correct_cifar10.txt'), 'w') as f:\n for item in correct_list:\n f.write('{}\\n'.format(item))\n with open(os.path.join(save_dir, 'mcp_confidence_cifar10.txt'), 'w'\n ) as f:\n for item in confidence_list:\n f.write('{}\\n'.format(item))\n acc = 100.0 * correct / total\n acc_list = sum(correct_list) / len(correct_list)\n fpr, tpr, _ = skm.roc_curve(y_true=correct_list, y_score=\n confidence_list, pos_label=1)\n auroc_classification = skm.auc(fpr, tpr)\n print('| Test Result\\tAcc@1: %.2f%%' % acc)\n print(f'| ECE: {ece.item()}')\n print(f'| Acc list: {acc_list}')\n print(f'| AUROC classification: {auroc_classification}')\n sne_embeddings_ood = []\n print('| Classification confidence for OOD is saved at: {}'.format(\n path_out))\n with torch.no_grad():\n for batch_idx, (inputs, targets) in enumerate(oodloader):\n if use_cuda:\n inputs, targets = inputs.cuda(), targets.cuda()\n inputs, targets = Variable(inputs), Variable(targets)\n outputs, hidden = net(inputs)\n nnOutputs = helpers.softmax(outputs)\n for k in range(len(inputs)):\n f2.write('{}\\n'.format(np.max(nnOutputs[k])))\n sne_embeddings_ood.append(hidden.data.cpu()[k].numpy())\n if save_dir:\n with open(os.path.join(save_dir, 'mcp_sne_ood_cifar10.pkl'), 'wb'\n ) as f:\n pickle.dump(sne_embeddings_ood, f)\n\n\ndef train():\n pass\n",
"step-3": "<mask token>\n\n\ndef eval(path_in, path_out, net, testloader, oodloader, use_cuda=True,\n save_dir=None):\n f1 = open(path_in, 'w')\n f2 = open(path_out, 'w')\n ece_criterion = ECELoss().cuda()\n net.eval()\n net.training = False\n correct = 0\n total = 0\n logits_list = []\n labels_list = []\n confidence_list = []\n correct_list = []\n predicted_list = []\n sne_embeddings = []\n print('| Classification confidence for ID is saved at: {}'.format(path_in))\n with torch.no_grad():\n for batch_idx, (inputs, targets) in enumerate(testloader):\n if use_cuda:\n inputs, targets = inputs.cuda(), targets.cuda()\n inputs, targets = Variable(inputs), Variable(targets)\n outputs, hidden = net(inputs)\n nnOutputs = helpers.softmax(outputs)\n for k in range(len(inputs)):\n f1.write('{}\\n'.format(np.max(nnOutputs[k])))\n confidence_list.append(np.max(nnOutputs[k]))\n sne_embeddings.append(hidden.data.cpu()[k].numpy())\n _, predicted = torch.max(outputs.data, 1)\n total += targets.size(0)\n correct += predicted.eq(targets.data).cpu().sum()\n correct_list.extend(predicted.eq(targets.data).cpu().tolist())\n predicted_list.extend(predicted.cpu().tolist())\n logits_list.append(outputs.data)\n labels_list.append(targets.data)\n logits = torch.cat(logits_list).cuda()\n labels = torch.cat(labels_list).cuda()\n ece = ece_criterion(logits, labels)\n if save_dir:\n with open(os.path.join(save_dir, 'mcp_sne.pkl'), 'wb') as f:\n pickle.dump(sne_embeddings, f)\n with open(os.path.join(save_dir, 'mcp_targets.txt'), 'w') as f:\n for item in labels_list:\n f.write('{}\\n'.format(item.cpu().numpy()[0]))\n with open(os.path.join(save_dir, 'mcp_pred.txt'), 'w') as f:\n for item in predicted_list:\n f.write('{}\\n'.format(item))\n with open(os.path.join(save_dir, 'mcp_correct.txt'), 'w') as f:\n for item in correct_list:\n f.write('{}\\n'.format(item))\n with open(os.path.join(save_dir, 'mcp_confidence.txt'), 'w') as f:\n for item in confidence_list:\n f.write('{}\\n'.format(item))\n acc = 100.0 * correct / total\n acc_list = sum(correct_list) / len(correct_list)\n fpr, tpr, _ = skm.roc_curve(y_true=correct_list, y_score=\n confidence_list, pos_label=1)\n auroc_classification = skm.auc(fpr, tpr)\n print('| Test Result\\tAcc@1: %.2f%%' % acc)\n print(f'| ECE: {ece.item()}')\n print(f'| Acc list: {acc_list}')\n print(f'| AUROC classification: {auroc_classification}')\n sne_embeddings_ood = []\n print('| Classification confidence for OOD is saved at: {}'.format(\n path_out))\n with torch.no_grad():\n for batch_idx, (inputs, targets) in enumerate(oodloader):\n if use_cuda:\n inputs, targets = inputs.cuda(), targets.cuda()\n inputs, targets = Variable(inputs), Variable(targets)\n outputs, hidden = net(inputs)\n nnOutputs = helpers.softmax(outputs)\n for k in range(len(inputs)):\n f2.write('{}\\n'.format(np.max(nnOutputs[k])))\n sne_embeddings_ood.append(hidden.data.cpu()[k].numpy())\n if save_dir:\n with open(os.path.join(save_dir, 'mcp_sne_ood.pkl'), 'wb') as f:\n pickle.dump(sne_embeddings_ood, f)\n\n\ndef eval_cifar10(path_in, path_out, net, testloader, oodloader, use_cuda=\n True, save_dir=None):\n f1 = open(path_in, 'w')\n f2 = open(path_out, 'w')\n ece_criterion = ECELoss().cuda()\n net.eval()\n net.training = False\n correct = 0\n total = 0\n logits_list = []\n labels_list = []\n confidence_list = []\n correct_list = []\n predicted_list = []\n sne_embeddings = []\n print('| Classification confidence for ID is saved at: {}'.format(path_in))\n with torch.no_grad():\n for batch_idx, (inputs, targets) in 
enumerate(testloader):\n if use_cuda:\n inputs, targets = inputs.cuda(), targets.cuda()\n inputs, targets = Variable(inputs), Variable(targets)\n outputs, hidden = net(inputs)\n nnOutputs = helpers.softmax(outputs)\n for k in range(len(inputs)):\n f1.write('{}\\n'.format(np.max(nnOutputs[k])))\n confidence_list.append(np.max(nnOutputs[k]))\n sne_embeddings.append(hidden.data.cpu()[k].numpy())\n _, predicted = torch.max(outputs.data, 1)\n total += targets.size(0)\n correct += predicted.eq(targets.data).cpu().sum()\n correct_list.extend(predicted.eq(targets.data).cpu().tolist())\n predicted_list.extend(predicted.cpu().tolist())\n logits_list.append(outputs.data)\n labels_list.append(targets.data)\n logits = torch.cat(logits_list).cuda()\n labels = torch.cat(labels_list).cuda()\n labels_list = torch.cat(labels_list).cpu().tolist()\n ece = ece_criterion(logits, labels)\n if save_dir:\n with open(os.path.join(save_dir, 'mcp_sne_cifar10.pkl'), 'wb') as f:\n pickle.dump(sne_embeddings, f)\n with open(os.path.join(save_dir, 'mcp_targets_cifar10.txt'), 'w') as f:\n for item in labels_list:\n f.write('{}\\n'.format(item))\n with open(os.path.join(save_dir, 'mcp_pred_cifar10.txt'), 'w') as f:\n for item in predicted_list:\n f.write('{}\\n'.format(item))\n with open(os.path.join(save_dir, 'mcp_correct_cifar10.txt'), 'w') as f:\n for item in correct_list:\n f.write('{}\\n'.format(item))\n with open(os.path.join(save_dir, 'mcp_confidence_cifar10.txt'), 'w'\n ) as f:\n for item in confidence_list:\n f.write('{}\\n'.format(item))\n acc = 100.0 * correct / total\n acc_list = sum(correct_list) / len(correct_list)\n fpr, tpr, _ = skm.roc_curve(y_true=correct_list, y_score=\n confidence_list, pos_label=1)\n auroc_classification = skm.auc(fpr, tpr)\n print('| Test Result\\tAcc@1: %.2f%%' % acc)\n print(f'| ECE: {ece.item()}')\n print(f'| Acc list: {acc_list}')\n print(f'| AUROC classification: {auroc_classification}')\n sne_embeddings_ood = []\n print('| Classification confidence for OOD is saved at: {}'.format(\n path_out))\n with torch.no_grad():\n for batch_idx, (inputs, targets) in enumerate(oodloader):\n if use_cuda:\n inputs, targets = inputs.cuda(), targets.cuda()\n inputs, targets = Variable(inputs), Variable(targets)\n outputs, hidden = net(inputs)\n nnOutputs = helpers.softmax(outputs)\n for k in range(len(inputs)):\n f2.write('{}\\n'.format(np.max(nnOutputs[k])))\n sne_embeddings_ood.append(hidden.data.cpu()[k].numpy())\n if save_dir:\n with open(os.path.join(save_dir, 'mcp_sne_ood_cifar10.pkl'), 'wb'\n ) as f:\n pickle.dump(sne_embeddings_ood, f)\n\n\ndef train():\n pass\n",
"step-4": "import torch\nimport numpy as np\nfrom torch.autograd import Variable\nfrom util import helpers\nfrom util.metrics import ECELoss, ece_score\nimport sklearn.metrics as skm\nimport os\nimport pandas as pd\nimport pickle\n\n\ndef eval(path_in, path_out, net, testloader, oodloader, use_cuda=True,\n save_dir=None):\n f1 = open(path_in, 'w')\n f2 = open(path_out, 'w')\n ece_criterion = ECELoss().cuda()\n net.eval()\n net.training = False\n correct = 0\n total = 0\n logits_list = []\n labels_list = []\n confidence_list = []\n correct_list = []\n predicted_list = []\n sne_embeddings = []\n print('| Classification confidence for ID is saved at: {}'.format(path_in))\n with torch.no_grad():\n for batch_idx, (inputs, targets) in enumerate(testloader):\n if use_cuda:\n inputs, targets = inputs.cuda(), targets.cuda()\n inputs, targets = Variable(inputs), Variable(targets)\n outputs, hidden = net(inputs)\n nnOutputs = helpers.softmax(outputs)\n for k in range(len(inputs)):\n f1.write('{}\\n'.format(np.max(nnOutputs[k])))\n confidence_list.append(np.max(nnOutputs[k]))\n sne_embeddings.append(hidden.data.cpu()[k].numpy())\n _, predicted = torch.max(outputs.data, 1)\n total += targets.size(0)\n correct += predicted.eq(targets.data).cpu().sum()\n correct_list.extend(predicted.eq(targets.data).cpu().tolist())\n predicted_list.extend(predicted.cpu().tolist())\n logits_list.append(outputs.data)\n labels_list.append(targets.data)\n logits = torch.cat(logits_list).cuda()\n labels = torch.cat(labels_list).cuda()\n ece = ece_criterion(logits, labels)\n if save_dir:\n with open(os.path.join(save_dir, 'mcp_sne.pkl'), 'wb') as f:\n pickle.dump(sne_embeddings, f)\n with open(os.path.join(save_dir, 'mcp_targets.txt'), 'w') as f:\n for item in labels_list:\n f.write('{}\\n'.format(item.cpu().numpy()[0]))\n with open(os.path.join(save_dir, 'mcp_pred.txt'), 'w') as f:\n for item in predicted_list:\n f.write('{}\\n'.format(item))\n with open(os.path.join(save_dir, 'mcp_correct.txt'), 'w') as f:\n for item in correct_list:\n f.write('{}\\n'.format(item))\n with open(os.path.join(save_dir, 'mcp_confidence.txt'), 'w') as f:\n for item in confidence_list:\n f.write('{}\\n'.format(item))\n acc = 100.0 * correct / total\n acc_list = sum(correct_list) / len(correct_list)\n fpr, tpr, _ = skm.roc_curve(y_true=correct_list, y_score=\n confidence_list, pos_label=1)\n auroc_classification = skm.auc(fpr, tpr)\n print('| Test Result\\tAcc@1: %.2f%%' % acc)\n print(f'| ECE: {ece.item()}')\n print(f'| Acc list: {acc_list}')\n print(f'| AUROC classification: {auroc_classification}')\n sne_embeddings_ood = []\n print('| Classification confidence for OOD is saved at: {}'.format(\n path_out))\n with torch.no_grad():\n for batch_idx, (inputs, targets) in enumerate(oodloader):\n if use_cuda:\n inputs, targets = inputs.cuda(), targets.cuda()\n inputs, targets = Variable(inputs), Variable(targets)\n outputs, hidden = net(inputs)\n nnOutputs = helpers.softmax(outputs)\n for k in range(len(inputs)):\n f2.write('{}\\n'.format(np.max(nnOutputs[k])))\n sne_embeddings_ood.append(hidden.data.cpu()[k].numpy())\n if save_dir:\n with open(os.path.join(save_dir, 'mcp_sne_ood.pkl'), 'wb') as f:\n pickle.dump(sne_embeddings_ood, f)\n\n\ndef eval_cifar10(path_in, path_out, net, testloader, oodloader, use_cuda=\n True, save_dir=None):\n f1 = open(path_in, 'w')\n f2 = open(path_out, 'w')\n ece_criterion = ECELoss().cuda()\n net.eval()\n net.training = False\n correct = 0\n total = 0\n logits_list = []\n labels_list = []\n confidence_list = []\n 
correct_list = []\n predicted_list = []\n sne_embeddings = []\n print('| Classification confidence for ID is saved at: {}'.format(path_in))\n with torch.no_grad():\n for batch_idx, (inputs, targets) in enumerate(testloader):\n if use_cuda:\n inputs, targets = inputs.cuda(), targets.cuda()\n inputs, targets = Variable(inputs), Variable(targets)\n outputs, hidden = net(inputs)\n nnOutputs = helpers.softmax(outputs)\n for k in range(len(inputs)):\n f1.write('{}\\n'.format(np.max(nnOutputs[k])))\n confidence_list.append(np.max(nnOutputs[k]))\n sne_embeddings.append(hidden.data.cpu()[k].numpy())\n _, predicted = torch.max(outputs.data, 1)\n total += targets.size(0)\n correct += predicted.eq(targets.data).cpu().sum()\n correct_list.extend(predicted.eq(targets.data).cpu().tolist())\n predicted_list.extend(predicted.cpu().tolist())\n logits_list.append(outputs.data)\n labels_list.append(targets.data)\n logits = torch.cat(logits_list).cuda()\n labels = torch.cat(labels_list).cuda()\n labels_list = torch.cat(labels_list).cpu().tolist()\n ece = ece_criterion(logits, labels)\n if save_dir:\n with open(os.path.join(save_dir, 'mcp_sne_cifar10.pkl'), 'wb') as f:\n pickle.dump(sne_embeddings, f)\n with open(os.path.join(save_dir, 'mcp_targets_cifar10.txt'), 'w') as f:\n for item in labels_list:\n f.write('{}\\n'.format(item))\n with open(os.path.join(save_dir, 'mcp_pred_cifar10.txt'), 'w') as f:\n for item in predicted_list:\n f.write('{}\\n'.format(item))\n with open(os.path.join(save_dir, 'mcp_correct_cifar10.txt'), 'w') as f:\n for item in correct_list:\n f.write('{}\\n'.format(item))\n with open(os.path.join(save_dir, 'mcp_confidence_cifar10.txt'), 'w'\n ) as f:\n for item in confidence_list:\n f.write('{}\\n'.format(item))\n acc = 100.0 * correct / total\n acc_list = sum(correct_list) / len(correct_list)\n fpr, tpr, _ = skm.roc_curve(y_true=correct_list, y_score=\n confidence_list, pos_label=1)\n auroc_classification = skm.auc(fpr, tpr)\n print('| Test Result\\tAcc@1: %.2f%%' % acc)\n print(f'| ECE: {ece.item()}')\n print(f'| Acc list: {acc_list}')\n print(f'| AUROC classification: {auroc_classification}')\n sne_embeddings_ood = []\n print('| Classification confidence for OOD is saved at: {}'.format(\n path_out))\n with torch.no_grad():\n for batch_idx, (inputs, targets) in enumerate(oodloader):\n if use_cuda:\n inputs, targets = inputs.cuda(), targets.cuda()\n inputs, targets = Variable(inputs), Variable(targets)\n outputs, hidden = net(inputs)\n nnOutputs = helpers.softmax(outputs)\n for k in range(len(inputs)):\n f2.write('{}\\n'.format(np.max(nnOutputs[k])))\n sne_embeddings_ood.append(hidden.data.cpu()[k].numpy())\n if save_dir:\n with open(os.path.join(save_dir, 'mcp_sne_ood_cifar10.pkl'), 'wb'\n ) as f:\n pickle.dump(sne_embeddings_ood, f)\n\n\ndef train():\n pass\n",
"step-5": "import torch\nimport numpy as np \nfrom torch.autograd import Variable\n\nfrom util import helpers\nfrom util.metrics import ECELoss, ece_score\nimport sklearn.metrics as skm\nimport os\nimport pandas as pd\nimport pickle\n\ndef eval(path_in, path_out, net, testloader, oodloader, use_cuda=True, save_dir=None):\n f1 = open(path_in, 'w')\n f2 = open(path_out, 'w')\n ece_criterion = ECELoss().cuda()\n net.eval()\n net.training = False\n correct = 0\n total = 0\n logits_list = []\n labels_list = []\n confidence_list = []\n correct_list = []\n predicted_list = []\n sne_embeddings = []\n print('| Classification confidence for ID is saved at: {}'.format(path_in))\n with torch.no_grad():\n for batch_idx, (inputs, targets) in enumerate(testloader):\n if use_cuda:\n inputs, targets = inputs.cuda(), targets.cuda()\n inputs, targets = Variable(inputs), Variable(targets)\n outputs, hidden = net(inputs)\n # this is the OOD magic\n nnOutputs = helpers.softmax(outputs)\n for k in range(len(inputs)):\n f1.write(\"{}\\n\".format(np.max(nnOutputs[k])))\n confidence_list.append(np.max(nnOutputs[k]))\n sne_embeddings.append(hidden.data.cpu()[k].numpy())\n _, predicted = torch.max(outputs.data, 1)\n total += targets.size(0)\n correct += predicted.eq(targets.data).cpu().sum()\n\n correct_list.extend(predicted.eq(targets.data).cpu().tolist())\n predicted_list.extend(predicted.cpu().tolist())\n logits_list.append(outputs.data)\n labels_list.append(targets.data)\n\n \n logits = torch.cat(logits_list).cuda()\n labels = torch.cat(labels_list).cuda()\n ece = ece_criterion(logits, labels)\n if save_dir:\n with open(os.path.join(save_dir, 'mcp_sne.pkl'), 'wb') as f:\n pickle.dump(sne_embeddings, f)\n with open(os.path.join(save_dir, 'mcp_targets.txt'), 'w') as f:\n for item in labels_list:\n f.write('{}\\n'.format(item.cpu().numpy()[0]))\n with open(os.path.join(save_dir, 'mcp_pred.txt'), 'w') as f:\n for item in predicted_list:\n f.write('{}\\n'.format(item))\n with open(os.path.join(save_dir, 'mcp_correct.txt'), 'w') as f:\n for item in correct_list:\n f.write('{}\\n'.format(item))\n with open(os.path.join(save_dir, 'mcp_confidence.txt'), 'w') as f:\n for item in confidence_list:\n f.write('{}\\n'.format(item))\n acc = 100.*correct/total\n acc_list = (sum(correct_list)/len(correct_list))\n\n # calculate AUROC for classifcation accuracy\n fpr, tpr, _ = skm.roc_curve(y_true = correct_list, y_score = confidence_list, pos_label = 1) #positive class is 1; negative class is 0\n auroc_classification = skm.auc(fpr, tpr)\n \n print(\"| Test Result\\tAcc@1: %.2f%%\" %(acc))\n print(f'| ECE: {ece.item()}')\n # print(f'| ECE v2: {ece_score(logits.cpu(), labels.cpu())}')\n print(f'| Acc list: {acc_list}')\n print(f'| AUROC classification: {auroc_classification}')\n\n sne_embeddings_ood = []\n\n print('| Classification confidence for OOD is saved at: {}'.format(path_out))\n with torch.no_grad():\n for batch_idx, (inputs, targets) in enumerate(oodloader):\n if use_cuda:\n inputs, targets = inputs.cuda(), targets.cuda()\n inputs, targets = Variable(inputs), Variable(targets)\n outputs, hidden = net(inputs)\n # this is the OOD magic\n nnOutputs = helpers.softmax(outputs)\n for k in range(len(inputs)):\n f2.write(\"{}\\n\".format(np.max(nnOutputs[k])))\n sne_embeddings_ood.append(hidden.data.cpu()[k].numpy())\n if save_dir:\n with open(os.path.join(save_dir, 'mcp_sne_ood.pkl'), 'wb') as f:\n pickle.dump(sne_embeddings_ood, f)\n\ndef eval_cifar10(path_in, path_out, net, testloader, oodloader, use_cuda=True, save_dir=None):\n 
f1 = open(path_in, 'w')\n f2 = open(path_out, 'w')\n ece_criterion = ECELoss().cuda()\n net.eval()\n net.training = False\n correct = 0\n total = 0\n logits_list = []\n labels_list = []\n confidence_list = []\n correct_list = []\n predicted_list = []\n sne_embeddings = []\n print('| Classification confidence for ID is saved at: {}'.format(path_in))\n with torch.no_grad():\n for batch_idx, (inputs, targets) in enumerate(testloader):\n if use_cuda:\n inputs, targets = inputs.cuda(), targets.cuda()\n inputs, targets = Variable(inputs), Variable(targets)\n outputs, hidden = net(inputs)\n # this is the OOD magic\n nnOutputs = helpers.softmax(outputs)\n for k in range(len(inputs)):\n f1.write(\"{}\\n\".format(np.max(nnOutputs[k])))\n confidence_list.append(np.max(nnOutputs[k]))\n sne_embeddings.append(hidden.data.cpu()[k].numpy())\n _, predicted = torch.max(outputs.data, 1)\n total += targets.size(0)\n correct += predicted.eq(targets.data).cpu().sum()\n\n correct_list.extend(predicted.eq(targets.data).cpu().tolist())\n predicted_list.extend(predicted.cpu().tolist())\n logits_list.append(outputs.data)\n labels_list.append(targets.data)\n \n logits = torch.cat(logits_list).cuda()\n labels = torch.cat(labels_list).cuda()\n labels_list = torch.cat(labels_list).cpu().tolist()\n ece = ece_criterion(logits, labels)\n if save_dir:\n with open(os.path.join(save_dir, 'mcp_sne_cifar10.pkl'), 'wb') as f:\n pickle.dump(sne_embeddings, f)\n with open(os.path.join(save_dir, 'mcp_targets_cifar10.txt'), 'w') as f:\n for item in labels_list:\n f.write('{}\\n'.format(item))\n with open(os.path.join(save_dir, 'mcp_pred_cifar10.txt'), 'w') as f:\n for item in predicted_list:\n f.write('{}\\n'.format(item))\n with open(os.path.join(save_dir, 'mcp_correct_cifar10.txt'), 'w') as f:\n for item in correct_list:\n f.write('{}\\n'.format(item))\n with open(os.path.join(save_dir, 'mcp_confidence_cifar10.txt'), 'w') as f:\n for item in confidence_list:\n f.write('{}\\n'.format(item))\n acc = 100.*correct/total\n acc_list = (sum(correct_list)/len(correct_list))\n\n # calculate AUROC for classifcation accuracy\n fpr, tpr, _ = skm.roc_curve(y_true = correct_list, y_score = confidence_list, pos_label = 1) #positive class is 1; negative class is 0\n auroc_classification = skm.auc(fpr, tpr)\n \n print(\"| Test Result\\tAcc@1: %.2f%%\" %(acc))\n print(f'| ECE: {ece.item()}')\n # print(f'| ECE v2: {ece_score(logits.cpu(), labels.cpu())}')\n print(f'| Acc list: {acc_list}')\n print(f'| AUROC classification: {auroc_classification}')\n sne_embeddings_ood = []\n print('| Classification confidence for OOD is saved at: {}'.format(path_out))\n with torch.no_grad():\n for batch_idx, (inputs, targets) in enumerate(oodloader):\n if use_cuda:\n inputs, targets = inputs.cuda(), targets.cuda()\n inputs, targets = Variable(inputs), Variable(targets)\n outputs, hidden = net(inputs)\n # this is the OOD magic\n nnOutputs = helpers.softmax(outputs)\n for k in range(len(inputs)):\n f2.write(\"{}\\n\".format(np.max(nnOutputs[k])))\n sne_embeddings_ood.append(hidden.data.cpu()[k].numpy())\n\n\n if save_dir:\n with open(os.path.join(save_dir, 'mcp_sne_ood_cifar10.pkl'), 'wb') as f:\n pickle.dump(sne_embeddings_ood, f)\ndef train():\n pass\n\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
from selenium import webdriver
from time import sleep
from bs4 import BeautifulSoup
"""
With Selenium we need a web driver for our browser.
If you use Google Chrome, you can download ChromeDriver from here:
http://chromedriver.chromium.org/downloads
On Linux (my OS) I extracted the downloaded zip file and placed
the executable in "/home/UserName/bin"
I did this in order not to write the ChromeDriver path every time
"""
# If you did not place the exe file in user/bin or user/local/bin,
# then you have to specify the driver path while creating the driver object.
# The driver object is the browser, which you can programmatically control.
driver = webdriver.Chrome('/Users/UserName/Downloads/chromedriver')
# open some page using get method
driver.get('https://www.facebook.com')
# driver.page_source
# Opens facebook's source html file
soup = BeautifulSoup(driver.page_source,'lxml')
print(soup.prettify())
# close webdriver object
driver.close()
|
normal
|
{
"blob_id": "03b2b722832eb46f3f81618f70fd0475f1f08c94",
"index": 2997,
"step-1": "<mask token>\n",
"step-2": "<mask token>\ndriver.get('https://www.facebook.com')\n<mask token>\nprint(soup.prettify())\ndriver.close()\n",
"step-3": "<mask token>\ndriver = webdriver.Chrome('/Users/UserName/Downloads/chromedriver')\ndriver.get('https://www.facebook.com')\nsoup = BeautifulSoup(driver.page_source, 'lxml')\nprint(soup.prettify())\ndriver.close()\n",
"step-4": "from selenium import webdriver\nfrom time import sleep\nfrom bs4 import BeautifulSoup\n<mask token>\ndriver = webdriver.Chrome('/Users/UserName/Downloads/chromedriver')\ndriver.get('https://www.facebook.com')\nsoup = BeautifulSoup(driver.page_source, 'lxml')\nprint(soup.prettify())\ndriver.close()\n",
"step-5": "\n\n\nfrom selenium import webdriver\nfrom time import sleep\nfrom bs4 import BeautifulSoup\n\n\n\n\n\"\"\"\n\nWith selenium we need web driver for our browser.\nIf you use google chrome, you can download chrome driver from here:\n \nhttp://chromedriver.chromium.org/downloads\n\n\nIn linux (my OS) I extracted downloaded zip file and placed\nexe file in \"/home/UserName/bin\"\n\n\nI did this in order not to write chrome driver path everytime\n\n\n\"\"\"\n\n# IF you did not locate exe file in user/bin or user/local/bin\n# then you have to specify the driver path while creating driver object\n# driver object is browser which you can programatically control\ndriver = webdriver.Chrome('/Users/UserName/Downloads/chromedriver')\n\n\n\n# open some page using get method\ndriver.get('https://www.facebook.com')\n\n\n# driver.page_source\n\n# Opens facebook's source html file\nsoup = BeautifulSoup(driver.page_source,'lxml')\n\nprint(soup.prettify())\n\n\n\n# close webdriver object\ndriver.close()\n\n\n\n\n\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
# -*- coding: utf-8 -*-
serviceType = "server"
serviceDesc = _({"en": "Icecream Daemon",
"tr": "Icecream Servisi"})
from comar.service import *
@synchronized
def start():
startService(command="/opt/icecream/sbin/iceccd",
args="-d -m 5 > /dev/null",
pidfile="/var/run/iceccd.pid",
donotify=True)
@synchronized
def stop():
stopService(pidfile="/var/run/iceccd.pid",
donotify=True)
def status():
return isServiceRunning("/var/run/iceccd.pid")
|
normal
|
{
"blob_id": "e3603d90bd5aa5de40baa27b62acf6f71eff9f6c",
"index": 6827,
"step-1": "<mask token>\n\n\n@synchronized\ndef start():\n startService(command='/opt/icecream/sbin/iceccd', args=\n '-d -m 5 > /dev/null', pidfile='/var/run/iceccd.pid', donotify=True)\n\n\n<mask token>\n\n\ndef status():\n return isServiceRunning('/var/run/iceccd.pid')\n",
"step-2": "<mask token>\n\n\n@synchronized\ndef start():\n startService(command='/opt/icecream/sbin/iceccd', args=\n '-d -m 5 > /dev/null', pidfile='/var/run/iceccd.pid', donotify=True)\n\n\n@synchronized\ndef stop():\n stopService(pidfile='/var/run/iceccd.pid', donotify=True)\n\n\ndef status():\n return isServiceRunning('/var/run/iceccd.pid')\n",
"step-3": "serviceType = 'server'\nserviceDesc = _({'en': 'Icecream Daemon', 'tr': 'Icecream Servisi'})\n<mask token>\n\n\n@synchronized\ndef start():\n startService(command='/opt/icecream/sbin/iceccd', args=\n '-d -m 5 > /dev/null', pidfile='/var/run/iceccd.pid', donotify=True)\n\n\n@synchronized\ndef stop():\n stopService(pidfile='/var/run/iceccd.pid', donotify=True)\n\n\ndef status():\n return isServiceRunning('/var/run/iceccd.pid')\n",
"step-4": "serviceType = 'server'\nserviceDesc = _({'en': 'Icecream Daemon', 'tr': 'Icecream Servisi'})\nfrom comar.service import *\n\n\n@synchronized\ndef start():\n startService(command='/opt/icecream/sbin/iceccd', args=\n '-d -m 5 > /dev/null', pidfile='/var/run/iceccd.pid', donotify=True)\n\n\n@synchronized\ndef stop():\n stopService(pidfile='/var/run/iceccd.pid', donotify=True)\n\n\ndef status():\n return isServiceRunning('/var/run/iceccd.pid')\n",
"step-5": "# -*- coding: utf-8 -*-\nserviceType = \"server\"\nserviceDesc = _({\"en\": \"Icecream Daemon\",\n \"tr\": \"Icecream Servisi\"})\n\nfrom comar.service import *\n\n@synchronized\ndef start():\n startService(command=\"/opt/icecream/sbin/iceccd\",\n args=\"-d -m 5 > /dev/null\",\n pidfile=\"/var/run/iceccd.pid\",\n donotify=True)\n\n@synchronized\ndef stop():\n stopService(pidfile=\"/var/run/iceccd.pid\",\n donotify=True)\n\ndef status():\n return isServiceRunning(\"/var/run/iceccd.pid\")\n\n",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
from django.db import models
from helpers.models import BaseAbstractModel
from Auth.models import Profile
# from Jobs.models import UserJob
from django.db.models.signals import post_save
from django.dispatch import receiver
# Create your models here.
class Notification(BaseAbstractModel):
title = models.CharField(max_length=200)
body = models.TextField()
recipients = models.ManyToManyField(to=Profile,
related_name='notifications',
related_query_name='notification')
time_stamp = models.DateTimeField(auto_now_add=True)
read = models.BooleanField(default=False)
# @receiver(post_save, sender=UserJob)
# def job_handler(sender, instance, **kwargs):
# if instance.is_active:
# profile_list = instance.author.profile.all()
# subscribed_users = profile_list.filter(
# Q(user__notification_subscription__in_app_notifications=True) | Q(
# user__notification_subscription__email_notifications=True))
# email_subscribed_users = profile_list.filter(
# user__notification_subscription__email_notifications=True)
# if(subscribed_users.count() >= 1):
# notification = Notification.objects.create(
# title="New Job on Twous",
# body=re.sub(' +', ' ', "{} has published another job \
# titled {}".format(
# instance.author.first_name.capitalize(),
# instance.title)))
# notification.recipients.add(*subscribed_users)
# if(email_subscribed_users.count() >= 1):
# send_emails_to_recipients(notification, email_subscribed_users)
# notification.save()
|
normal
|
{
"blob_id": "1066f86d3a35e892ca2a7054dfc89fe79f1d32c8",
"index": 7496,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass Notification(BaseAbstractModel):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass Notification(BaseAbstractModel):\n title = models.CharField(max_length=200)\n body = models.TextField()\n recipients = models.ManyToManyField(to=Profile, related_name=\n 'notifications', related_query_name='notification')\n time_stamp = models.DateTimeField(auto_now_add=True)\n read = models.BooleanField(default=False)\n",
"step-4": "from django.db import models\nfrom helpers.models import BaseAbstractModel\nfrom Auth.models import Profile\nfrom django.db.models.signals import post_save\nfrom django.dispatch import receiver\n\n\nclass Notification(BaseAbstractModel):\n title = models.CharField(max_length=200)\n body = models.TextField()\n recipients = models.ManyToManyField(to=Profile, related_name=\n 'notifications', related_query_name='notification')\n time_stamp = models.DateTimeField(auto_now_add=True)\n read = models.BooleanField(default=False)\n",
"step-5": "from django.db import models\nfrom helpers.models import BaseAbstractModel\nfrom Auth.models import Profile\n# from Jobs.models import UserJob\nfrom django.db.models.signals import post_save\nfrom django.dispatch import receiver\n# Create your models here.\nclass Notification(BaseAbstractModel):\n title = models.CharField(max_length=200)\n body = models.TextField()\n recipients = models.ManyToManyField(to=Profile,\n related_name='notifications',\n related_query_name='notification')\n time_stamp = models.DateTimeField(auto_now_add=True)\n read = models.BooleanField(default=False)\n\n# @receiver(post_save, sender=UserJob)\n# def job_handler(sender, instance, **kwargs):\n# if instance.is_active:\n# profile_list = instance.author.profile.all()\n# subscribed_users = profile_list.filter(\n# Q(user__notification_subscription__in_app_notifications=True) | Q(\n# user__notification_subscription__email_notifications=True))\n\n# email_subscribed_users = profile_list.filter(\n# user__notification_subscription__email_notifications=True)\n# if(subscribed_users.count() >= 1):\n\n# notification = Notification.objects.create(\n# title=\"New Job on Twous\",\n# body=re.sub(' +', ' ', \"{} has published another job \\\n# titled {}\".format(\n# instance.author.first_name.capitalize(),\n# instance.title)))\n# notification.recipients.add(*subscribed_users)\n\n# if(email_subscribed_users.count() >= 1):\n# send_emails_to_recipients(notification, email_subscribed_users)\n\n# notification.save()\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
def ispalindrome(s):
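    """Recursively checks whether the string s reads the same forwards and backwards."""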
if len(s) <= 1:
return True
elif s[0] != s[-1]:
return False
else:
return ispalindrome(s[1:-1])
|
normal
|
{
"blob_id": "c20a414f7f96a96f6e458fc27e5d2c7ac7ab05cf",
"index": 8574,
"step-1": "<mask token>\n",
"step-2": "def ispalindrome(s):\n if len(s) <= 1:\n return True\n elif s[0] != s[-1]:\n return False\n else:\n return ispalindrome(s[1:-1])\n",
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0,
1
]
}
|
[
0,
1
] |
# Generated by Django 2.2.6 on 2019-11-13 13:11
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('interface', '0010_auto_20191104_2107'),
]
operations = [
migrations.AlterField(
model_name='submission',
name='review_score',
field=models.DecimalField(decimal_places=2, editable=False, max_digits=5, null=True),
),
migrations.AlterField(
model_name='submission',
name='total_score',
field=models.DecimalField(decimal_places=2, editable=False, max_digits=5, null=True),
),
]
|
normal
|
{
"blob_id": "3b42e218acf1c93fab3a0893efa8bf32a274eb23",
"index": 448,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass Migration(migrations.Migration):\n <mask token>\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass Migration(migrations.Migration):\n dependencies = [('interface', '0010_auto_20191104_2107')]\n operations = [migrations.AlterField(model_name='submission', name=\n 'review_score', field=models.DecimalField(decimal_places=2,\n editable=False, max_digits=5, null=True)), migrations.AlterField(\n model_name='submission', name='total_score', field=models.\n DecimalField(decimal_places=2, editable=False, max_digits=5, null=\n True))]\n",
"step-4": "from django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n dependencies = [('interface', '0010_auto_20191104_2107')]\n operations = [migrations.AlterField(model_name='submission', name=\n 'review_score', field=models.DecimalField(decimal_places=2,\n editable=False, max_digits=5, null=True)), migrations.AlterField(\n model_name='submission', name='total_score', field=models.\n DecimalField(decimal_places=2, editable=False, max_digits=5, null=\n True))]\n",
"step-5": "# Generated by Django 2.2.6 on 2019-11-13 13:11\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('interface', '0010_auto_20191104_2107'),\n ]\n\n operations = [\n migrations.AlterField(\n model_name='submission',\n name='review_score',\n field=models.DecimalField(decimal_places=2, editable=False, max_digits=5, null=True),\n ),\n migrations.AlterField(\n model_name='submission',\n name='total_score',\n field=models.DecimalField(decimal_places=2, editable=False, max_digits=5, null=True),\n ),\n ]\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
from django import forms
from django.forms import ModelForm
from django.contrib.auth.models import User
from .models import Attendance, Holidays
# I think the update forms are not required here; they might be required in the profiles app.
# For this app, the update-attendance option can be made available to staff and faculty members.
# An update-outpass form is required for the student to update the outpass if he/she wants to
# accommodate the query so as to get the permission without any trouble.
#class UserUpdateForm(ModelForm):
# class Meta:
# model = User
# fields = ('first_name', 'last_name', 'email', 'username')
#only available for student
#for staff and faculty
class HolidaysCreateForm(ModelForm):
class Meta:
model = Holidays
fields = ('date_of_holiday','reason')
#for staff and faculty. Only available to selected faculty, but all hostel staff
class HolidaysUpdateForm(ModelForm):
class Meta:
model = Holidays
fields = ('date_of_holiday','reason')
#for hostel staff or faculty
class AttendanceUpdateForm(ModelForm):
class Meta:
model = Attendance
fields = ('enrollment_id', 'date', 'present', 'absent', 'outpass')
|
normal
|
{
"blob_id": "d48f02d8d5469b966f109e8652f25352bc9b3b80",
"index": 7252,
"step-1": "<mask token>\n\n\nclass AttendanceUpdateForm(ModelForm):\n\n\n class Meta:\n model = Attendance\n fields = 'enrollment_id', 'date', 'present', 'absent', 'outpass'\n",
"step-2": "<mask token>\n\n\nclass HolidaysUpdateForm(ModelForm):\n\n\n class Meta:\n model = Holidays\n fields = 'date_of_holiday', 'reason'\n\n\nclass AttendanceUpdateForm(ModelForm):\n\n\n class Meta:\n model = Attendance\n fields = 'enrollment_id', 'date', 'present', 'absent', 'outpass'\n",
"step-3": "<mask token>\n\n\nclass HolidaysCreateForm(ModelForm):\n\n\n class Meta:\n model = Holidays\n fields = 'date_of_holiday', 'reason'\n\n\nclass HolidaysUpdateForm(ModelForm):\n\n\n class Meta:\n model = Holidays\n fields = 'date_of_holiday', 'reason'\n\n\nclass AttendanceUpdateForm(ModelForm):\n\n\n class Meta:\n model = Attendance\n fields = 'enrollment_id', 'date', 'present', 'absent', 'outpass'\n",
"step-4": "from django import forms\nfrom django.forms import ModelForm\nfrom django.contrib.auth.models import User\nfrom .models import Attendance, Holidays\n\n\nclass HolidaysCreateForm(ModelForm):\n\n\n class Meta:\n model = Holidays\n fields = 'date_of_holiday', 'reason'\n\n\nclass HolidaysUpdateForm(ModelForm):\n\n\n class Meta:\n model = Holidays\n fields = 'date_of_holiday', 'reason'\n\n\nclass AttendanceUpdateForm(ModelForm):\n\n\n class Meta:\n model = Attendance\n fields = 'enrollment_id', 'date', 'present', 'absent', 'outpass'\n",
"step-5": "from django import forms\nfrom django.forms import ModelForm\n\nfrom django.contrib.auth.models import User\nfrom .models import Attendance, Holidays\n\n#I think the update forms are not required here. They might be required in the profiles app. For this app, update attendance option can be available to the staff and faculty members. Update outpass form is required for the student to update the outpass if he/she wants to accomodate the query so as to get the permission without any trouble\n\n#class UserUpdateForm(ModelForm):\n# class Meta:\n# model = User\n# fields = ('first_name', 'last_name', 'email', 'username')\n\n\n#only available for student\n\n#for staff and faculty\nclass HolidaysCreateForm(ModelForm):\n class Meta:\n model = Holidays\n fields = ('date_of_holiday','reason')\n\n#for staff and faculty. Only available to selected faculty, but all hostel staff\nclass HolidaysUpdateForm(ModelForm):\n class Meta:\n model = Holidays\n fields = ('date_of_holiday','reason')\n\n\n#for hostel staff or faculty\nclass AttendanceUpdateForm(ModelForm):\n class Meta:\n model = Attendance\n fields = ('enrollment_id', 'date', 'present', 'absent', 'outpass')\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
import unittest
import BasicVmLifecycleTestBase
class testVmIsAccessibleViaSsh(BasicVmLifecycleTestBase.
VmIsAccessibleViaSshTestBase):
vmName = 'cernvm'
timeout = 20 * 60
sshTimeout = 5 * 60
def suite():
return unittest.TestLoader().loadTestsFromTestCase(testVmIsAccessibleViaSsh
)
|
normal
|
{
"blob_id": "79e4e37fc17462508abf259e3a7861bd76797280",
"index": 9182,
"step-1": "<mask token>\n\n\nclass testVmIsAccessibleViaSsh(BasicVmLifecycleTestBase.\n VmIsAccessibleViaSshTestBase):\n <mask token>\n <mask token>\n <mask token>\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass testVmIsAccessibleViaSsh(BasicVmLifecycleTestBase.\n VmIsAccessibleViaSshTestBase):\n vmName = 'cernvm'\n timeout = 20 * 60\n sshTimeout = 5 * 60\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass testVmIsAccessibleViaSsh(BasicVmLifecycleTestBase.\n VmIsAccessibleViaSshTestBase):\n vmName = 'cernvm'\n timeout = 20 * 60\n sshTimeout = 5 * 60\n\n\ndef suite():\n return unittest.TestLoader().loadTestsFromTestCase(testVmIsAccessibleViaSsh\n )\n",
"step-4": "import unittest\nimport BasicVmLifecycleTestBase\n\n\nclass testVmIsAccessibleViaSsh(BasicVmLifecycleTestBase.\n VmIsAccessibleViaSshTestBase):\n vmName = 'cernvm'\n timeout = 20 * 60\n sshTimeout = 5 * 60\n\n\ndef suite():\n return unittest.TestLoader().loadTestsFromTestCase(testVmIsAccessibleViaSsh\n )\n",
"step-5": null,
"step-ids": [
1,
2,
3,
4
]
}
|
[
1,
2,
3,
4
] |
#!/usr/bin/python3
"""
    Request the API and write to a JSON file
    all TODO tasks for every user.
"""
import json
import requests
import sys
if __name__ == "__main__":
req = "https://jsonplaceholder.typicode.com/todos"
response = requests.get(req).json()
d = {}
req_user = "https://jsonplaceholder.typicode.com/users"
users = requests.get(req_user).json()
for user in users:
reso_todos = "https://jsonplaceholder.typicode.com/users/{}/todos"\
.format(user['id'])
rq = requests.get(reso_todos).json()
list_tasks = []
for content in rq:
d_task = {}
d_task['task'] = content['title']
d_task['completed'] = content['completed']
d_task['username'] = user['username']
list_tasks.append(d_task)
d[user['id']] = list_tasks
with open('todo_all_employees.json', 'w') as f:
json.dump(d, f)
|
normal
|
{
"blob_id": "53de53614b3c503a4232c00e8f2fd5a0f4cb6615",
"index": 1624,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nif __name__ == '__main__':\n req = 'https://jsonplaceholder.typicode.com/todos'\n response = requests.get(req).json()\n d = {}\n req_user = 'https://jsonplaceholder.typicode.com/users'\n users = requests.get(req_user).json()\n for user in users:\n reso_todos = ('https://jsonplaceholder.typicode.com/users/{}/todos'\n .format(user['id']))\n rq = requests.get(reso_todos).json()\n list_tasks = []\n for content in rq:\n d_task = {}\n d_task['task'] = content['title']\n d_task['completed'] = content['completed']\n d_task['username'] = user['username']\n list_tasks.append(d_task)\n d[user['id']] = list_tasks\n with open('todo_all_employees.json', 'w') as f:\n json.dump(d, f)\n",
"step-3": "<mask token>\nimport json\nimport requests\nimport sys\nif __name__ == '__main__':\n req = 'https://jsonplaceholder.typicode.com/todos'\n response = requests.get(req).json()\n d = {}\n req_user = 'https://jsonplaceholder.typicode.com/users'\n users = requests.get(req_user).json()\n for user in users:\n reso_todos = ('https://jsonplaceholder.typicode.com/users/{}/todos'\n .format(user['id']))\n rq = requests.get(reso_todos).json()\n list_tasks = []\n for content in rq:\n d_task = {}\n d_task['task'] = content['title']\n d_task['completed'] = content['completed']\n d_task['username'] = user['username']\n list_tasks.append(d_task)\n d[user['id']] = list_tasks\n with open('todo_all_employees.json', 'w') as f:\n json.dump(d, f)\n",
"step-4": "#!/usr/bin/python3\n\"\"\"\n request api and write in JSON file\n all tasks todo for every users\n\"\"\"\nimport json\nimport requests\nimport sys\n\n\nif __name__ == \"__main__\":\n req = \"https://jsonplaceholder.typicode.com/todos\"\n response = requests.get(req).json()\n d = {}\n req_user = \"https://jsonplaceholder.typicode.com/users\"\n users = requests.get(req_user).json()\n for user in users:\n reso_todos = \"https://jsonplaceholder.typicode.com/users/{}/todos\"\\\n .format(user['id'])\n rq = requests.get(reso_todos).json()\n list_tasks = []\n for content in rq:\n d_task = {}\n d_task['task'] = content['title']\n d_task['completed'] = content['completed']\n d_task['username'] = user['username']\n list_tasks.append(d_task)\n d[user['id']] = list_tasks\n with open('todo_all_employees.json', 'w') as f:\n json.dump(d, f)\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
"""
Create a list of words and with it, create a new dictionary
in which the key is the word and the value is the same word
reversed.
"""
word_list = ['Tree','Apple','Snake','flowers']
word_dict = {word:word[::-1] for word in word_list}
print(word_dict)
#Output: {'Tree': 'eerT', 'Apple': 'elppA', 'Snake': 'ekanS', 'flowers': 'srewolf'}
"""
Let's try this one again:
Using the range function, create a sequence of numbers
from 1 to 100, and use a comprehension to return only
those that are multiples of 2.
"""
use_range = range(1,101)
multiple_list = [i for i in use_range if i%2==0]
print(multiple_list)
"""
[[1, 2, 3, 4], [5, 6, 7, 8]]
Use the list above and create nested comprehensions so that
the final value is a new list like the following
[[2, 4, 6, 8], [10, 12, 14, 16]] The number multiplied by 2
"""
list_above = [[1, 2, 3, 4], [5, 6, 7, 8]]
final_list = [[bottom*2 for bottom in top] for top in list_above]
print(final_list)
|
normal
|
{
"blob_id": "5ac489a2d30155bb92767184ad546247817e28ea",
"index": 1478,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nprint(word_dict)\n<mask token>\nprint(multiple_list)\n<mask token>\nprint(final_list)\n",
"step-3": "<mask token>\nword_list = ['Tree', 'Apple', 'Snake', 'flowers']\nword_dict = {word: word[::-1] for word in word_list}\nprint(word_dict)\n<mask token>\nuse_range = range(1, 101)\nmultiple_list = [i for i in use_range if i % 2 == 0]\nprint(multiple_list)\n<mask token>\nlist_above = [[1, 2, 3, 4], [5, 6, 7, 8]]\nfinal_list = [[(bottom * 2) for bottom in top] for top in list_above]\nprint(final_list)\n",
"step-4": "\"\"\"\nCreate a list of words and with it, create a new dictionary\nin which the key is the word and the value is the same word\nreversed.\n\"\"\"\n\nword_list = ['Tree','Apple','Snake','flowers']\nword_dict = {word:word[::-1] for word in word_list}\nprint(word_dict)\n#Output: {'Tree': 'eerT', 'Apple': 'elppA', 'Snake': 'ekanS', 'flowers': 'srewolf'}\n\n\"\"\"\nLet's try this one again:\nUsing the range function, create a sequence of numbers\nfrom 1 to 100, and using the comprehension to return only\nthose that are multiplies of 2.\n\"\"\"\nuse_range = range(1,101)\nmultiple_list = [i for i in use_range if i%2==0]\nprint(multiple_list)\n\n\n\"\"\"\n[[1, 2, 3, 4], [5, 6, 7, 8]]\nUse the list above and create nested comprehensions so that\nthe final value is a new list like the following\n[[2, 4, 6, 8], [10, 12, 14, 16]] The number multiplied by 2\n\"\"\"\nlist_above = [[1, 2, 3, 4], [5, 6, 7, 8]]\n\nfinal_list = [[bottom*2 for bottom in top] for top in list_above]\nprint(final_list)",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
from point import Point
from velocity import Velocity
import arcade
import config
PADDLE_WIDTH = 15
PADDLE_HEIGHT = 30
class Paddle:
def __init__(self):
self.center = Point(390, 50)
self.velocity = Velocity(0, 5)
def draw(self):
self.drawing = arcade.draw_rectangle_filled(self.center.x, self.center.y, config.PADDLE_WIDTH, config.PADDLE_HEIGHT, arcade.color.ELECTRIC_LIME)
def move_up(self):
if self.center.y < config.SCREEN_HEIGHT - (config.PADDLE_HEIGHT / 2):
self.center.y = self.center.y + self.velocity.dy
def move_down(self):
if self.center.y > 0 + (config.PADDLE_HEIGHT / 2):
self.center.y = self.center.y - self.velocity.dy
|
normal
|
{
"blob_id": "cb3c1adb9d91aecee5b21774d61dfe9400a330fa",
"index": 619,
"step-1": "<mask token>\n\n\nclass Paddle:\n\n def __init__(self):\n self.center = Point(390, 50)\n self.velocity = Velocity(0, 5)\n <mask token>\n\n def move_up(self):\n if self.center.y < config.SCREEN_HEIGHT - config.PADDLE_HEIGHT / 2:\n self.center.y = self.center.y + self.velocity.dy\n\n def move_down(self):\n if self.center.y > 0 + config.PADDLE_HEIGHT / 2:\n self.center.y = self.center.y - self.velocity.dy\n",
"step-2": "<mask token>\n\n\nclass Paddle:\n\n def __init__(self):\n self.center = Point(390, 50)\n self.velocity = Velocity(0, 5)\n\n def draw(self):\n self.drawing = arcade.draw_rectangle_filled(self.center.x, self.\n center.y, config.PADDLE_WIDTH, config.PADDLE_HEIGHT, arcade.\n color.ELECTRIC_LIME)\n\n def move_up(self):\n if self.center.y < config.SCREEN_HEIGHT - config.PADDLE_HEIGHT / 2:\n self.center.y = self.center.y + self.velocity.dy\n\n def move_down(self):\n if self.center.y > 0 + config.PADDLE_HEIGHT / 2:\n self.center.y = self.center.y - self.velocity.dy\n",
"step-3": "<mask token>\nPADDLE_WIDTH = 15\nPADDLE_HEIGHT = 30\n\n\nclass Paddle:\n\n def __init__(self):\n self.center = Point(390, 50)\n self.velocity = Velocity(0, 5)\n\n def draw(self):\n self.drawing = arcade.draw_rectangle_filled(self.center.x, self.\n center.y, config.PADDLE_WIDTH, config.PADDLE_HEIGHT, arcade.\n color.ELECTRIC_LIME)\n\n def move_up(self):\n if self.center.y < config.SCREEN_HEIGHT - config.PADDLE_HEIGHT / 2:\n self.center.y = self.center.y + self.velocity.dy\n\n def move_down(self):\n if self.center.y > 0 + config.PADDLE_HEIGHT / 2:\n self.center.y = self.center.y - self.velocity.dy\n",
"step-4": "from point import Point\nfrom velocity import Velocity\nimport arcade\nimport config\nPADDLE_WIDTH = 15\nPADDLE_HEIGHT = 30\n\n\nclass Paddle:\n\n def __init__(self):\n self.center = Point(390, 50)\n self.velocity = Velocity(0, 5)\n\n def draw(self):\n self.drawing = arcade.draw_rectangle_filled(self.center.x, self.\n center.y, config.PADDLE_WIDTH, config.PADDLE_HEIGHT, arcade.\n color.ELECTRIC_LIME)\n\n def move_up(self):\n if self.center.y < config.SCREEN_HEIGHT - config.PADDLE_HEIGHT / 2:\n self.center.y = self.center.y + self.velocity.dy\n\n def move_down(self):\n if self.center.y > 0 + config.PADDLE_HEIGHT / 2:\n self.center.y = self.center.y - self.velocity.dy\n",
"step-5": "from point import Point\nfrom velocity import Velocity\nimport arcade\nimport config\n\nPADDLE_WIDTH = 15\nPADDLE_HEIGHT = 30\n\nclass Paddle:\n\n def __init__(self):\n self.center = Point(390, 50)\n self.velocity = Velocity(0, 5)\n\n def draw(self):\n self.drawing = arcade.draw_rectangle_filled(self.center.x, self.center.y, config.PADDLE_WIDTH, config.PADDLE_HEIGHT, arcade.color.ELECTRIC_LIME)\n\n def move_up(self):\n if self.center.y < config.SCREEN_HEIGHT - (config.PADDLE_HEIGHT / 2):\n self.center.y = self.center.y + self.velocity.dy\n\n def move_down(self):\n if self.center.y > 0 + (config.PADDLE_HEIGHT / 2):\n self.center.y = self.center.y - self.velocity.dy\n",
"step-ids": [
4,
5,
6,
7,
8
]
}
|
[
4,
5,
6,
7,
8
] |
from django.conf.urls import url
from price_App import views
from rest_framework.urlpatterns import format_suffix_patterns
urlpatterns = [
url(r'^api/price/(?P<pk>[0-9]+)$', views.product_price),
url(r'^api/price_history/(?P<pk>[0-9]+)$', views.product_history),]
urlpatterns = format_suffix_patterns(urlpatterns)
|
normal
|
{
"blob_id": "9816a8265bcdb8c099f599efbe1cfe1a554e71f5",
"index": 8396,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nurlpatterns = [url('^api/price/(?P<pk>[0-9]+)$', views.product_price), url(\n '^api/price_history/(?P<pk>[0-9]+)$', views.product_history)]\nurlpatterns = format_suffix_patterns(urlpatterns)\n",
"step-3": "from django.conf.urls import url\nfrom price_App import views\nfrom rest_framework.urlpatterns import format_suffix_patterns\nurlpatterns = [url('^api/price/(?P<pk>[0-9]+)$', views.product_price), url(\n '^api/price_history/(?P<pk>[0-9]+)$', views.product_history)]\nurlpatterns = format_suffix_patterns(urlpatterns)\n",
"step-4": "from django.conf.urls import url \nfrom price_App import views\nfrom rest_framework.urlpatterns import format_suffix_patterns\n\nurlpatterns = [ \n \turl(r'^api/price/(?P<pk>[0-9]+)$', views.product_price),\n url(r'^api/price_history/(?P<pk>[0-9]+)$', views.product_history),] \n\nurlpatterns = format_suffix_patterns(urlpatterns)\n\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
import os
import math
import shutil
from evoplotter import utils
from evoplotter.dims import *
from evoplotter import printer
import numpy as np
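# If nonzero, log files that finished with an error status or are incomplete will be reported while loading results.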
CHECK_CORRECTNESS_OF_FILES = 1
STATUS_FILE_NAME = "results/status.txt"
OPT_SOLUTIONS_FILE_NAME = "opt_solutions.txt"
class TableGenerator:
"""Generates table from data."""
def __init__(self, f_cell, dim_rows, dim_cols, headerRowNames, title="", color_scheme=None,
table_postprocessor=None, vertical_border=1, table_variants=None,
default_color_thresholds=None, layered_headline=True,
only_nonempty_rows=True, **kwargs):
self.f_cell = f_cell
self.dim_rows = dim_rows
self.dim_cols = dim_cols
self.title = title
self.color_scheme = color_scheme
self.table_postprocessor = table_postprocessor
self.vertical_border = vertical_border
self.headerRowNames = headerRowNames
# create a table for each variant and put them next to each other
self.table_variants = table_variants if table_variants is not None else [lambda p: True]
self.default_color_thresholds = default_color_thresholds
self.layered_headline = layered_headline
self.only_nonempty_rows = only_nonempty_rows
self.init_kwargs = kwargs.copy()
def apply(self, props, new_color_thresholds=None):
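        """Renders one LaTeX table per variant (each variant is a predicate filtering props) and returns the concatenated text."""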
text = ""
for variant in self.table_variants: # each variant is some predicate on data
props_variant = [p for p in props if variant(p)]
if self.only_nonempty_rows:
dim_rows_variant = Dim([c for c in self.dim_rows.configs if len(c.filter_props(props_variant)) > 0])
else:
dim_rows_variant = self.dim_rows
txt = printer.latex_table(props_variant, dim_rows_variant, self.dim_cols, self.f_cell,
layered_headline=self.layered_headline, vertical_border=self.vertical_border,
headerRowNames=self.headerRowNames, **self.init_kwargs)
txt = self.table_postprocessor(txt)
ct = new_color_thresholds if new_color_thresholds is not None else self.default_color_thresholds
if self.color_scheme is not None and ct is not None:
cv0, cv1, cv2 = ct
txt = printer.table_color_map(txt, cv0, cv1, cv2, "colorLow", "colorMedium", "colorHigh")
text += r"\noindent"
text += txt
return text
class Experiment:
def __init__(self):
self.tables = []
self.listings = []
def delete_logs(props, pred, verbose=True, simulate=False):
for p in props:
if "evoplotter.file" in p and pred(p):
path = p["evoplotter.file"]
if not simulate:
os.remove(path)
if verbose:
print("File removed: {0}".format(path))
def print_props_filenames(props):
for p in props:
if "thisFileName" in p:
print(p["thisFileName"])
else:
print("'thisFileName' not specified! Printing content instead: " + str(p))
def create_errors_listing(error_props, filename):
f = open("results/listings/{0}".format(filename), "w")
print("Creating log of errors ({0})...".format(filename))
for i, p in enumerate(error_props):
if i > 0:
f.write("\n" + ("-" * 50) + "\n")
for k in sorted(p.keys()):
v = p[k]
f.write("{0} = {1}\n".format(k, v))
f.close()
def create_errors_solver_listing(error_props, filename, pred=None):
if pred is None:
pred = lambda x: True
f = open("results/listings/{0}".format(filename), "w")
print("Creating log of errors ({0})...".format(filename))
for i, p in enumerate(error_props):
if not pred(p): # ignore properties with certain features, e.g., types of errors
continue
if i > 0:
f.write("\n" + ("-" * 50) + "\n\n")
# read the whole original file, because multiline error messages are not preserved in dicts
with open(p["evoplotter.file"], 'r') as content_file:
content = content_file.read()
f.write(content)
f.close()
def load_correct_props(folders):
props_cdgpError = utils.load_properties_dirs(folders, exts=[".cdgp.error"], add_file_path=True)
exts = [".cdgp"]
props0 = utils.load_properties_dirs(folders, exts=exts, add_file_path=True)
def is_correct(p):
return "result.best.verificationDecision" in p
# Filtering props so only correct ones are left
props = [p for p in props0 if is_correct(p)]
# print("Filtered (props):")
# for p in props:
# if "resistance_par3_c1_10" in p["benchmark"] and p["method"] == "CDGP":
# print(p["evoplotter.file"])
# print("Filtered (props_cdgpError):")
# for p in props_cdgpError:
# if "resistance_par3_c1_10" in p["benchmark"] and p["method"] == "CDGP":
# print(p["evoplotter.file"])
# Clear log file
# print("[del] props")
# fun = lambda p: p["method"] == "CDGP" and p["partialConstraintsInFitness"] == "true"
# delete_logs(props, fun, simulate=True)
# print("[del] props_cdgpError")
# delete_logs(props_cdgpError, fun, simulate=True)
create_errors_solver_listing(props_cdgpError, "errors_solver.txt")
# Printing names of files which finished with error status or are incomplete.
if CHECK_CORRECTNESS_OF_FILES:
props_errors = [p for p in props0 if not is_correct(p)]
create_errors_listing(props_errors, "errors_run.txt")
if len(props_errors) > 0:
print("Files with error status:")
print_props_filenames(props_errors)
print("Loaded: {0} correct property files, {1} incorrect; All log files: {2}".format(len(props), len(props_errors), len(props) + len
(props_errors)))
print("Runs that ended with '.cdgp.error': {0}".format(len(props_cdgpError)))
print_props_filenames(props_cdgpError)
return props
def produce_status_matrix(dim, props):
"""Generates a status data in the form of a python list. It can be
later used to retry missing runs.
:param dim: (Dimension) dimensions on which data are to be divided.
:param props: (dict[str,str]) properties files.
:return: (str) Python code of a list containing specified data.
"""
text = "["
for config in dim:
numRuns = len(config.filter_props(props))
text += "({0}, {1}), ".format(config.stored_values, numRuns)
return text + "]"
def save_listings(props, dim_rows, dim_cols):
"""Saves listings of various useful info to separate text files."""
assert isinstance(dim_rows, Dim)
assert isinstance(dim_cols, Dim)
utils.ensure_dir("results/listings/errors/")
# Saving optimal verified solutions
for dr in dim_rows:
bench = dr.get_caption()
bench = bench[:bench.rfind(".")] if "." in bench else bench
f = open("results/listings/verified_{0}.txt".format(bench), "w")
f_errors = open("results/listings/errors/verified_{0}.txt".format(bench), "w")
props_bench = dr.filter_props(props)
for dc in dim_cols:
f.write("{0}\n".format(dc.get_caption()))
f_errors.write("{0}\n".format(dc.get_caption())) # TODO: finish
props_final = [p for p in dc.filter_props(props_bench) if is_verified_solution(p)]
for p in props_final:
fname = p["thisFileName"].replace("/home/ibladek/workspace/GECCO19/gecco19/", "")
best = p["result.best"]
fit = float(p["result.best.mse"])
if fit >= 1e-15:
f.write("{0}\t\t\t(FILE: {1}) (MSE: {2})\n".format(best, fname, fit))
else:
f.write("{0}\t\t\t(FILE: {1})\n".format(best, fname))
f.write("\n\n")
f.close()
f_errors.close()
def normalized_total_time(p, max_time=3600000):
"""If time was longer than max_time, then return max_time, otherwise return time. Time is counted in miliseconds."""
if "cdgp.wasTimeout" in p and p["cdgp.wasTimeout"] == "true":
v = 3600000
else:
v = int(float(p["result.totalTimeSystem"]))
return max_time if v > max_time else v
def is_verified_solution(p):
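    """A run counts as a verified solution when its best solution is marked optimal and the verification query returned 'unsat'."""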
k = "result.best.verificationDecision"
return p["result.best.isOptimal"] == "true" and p[k] == "unsat"
def is_approximated_solution(p):
"""Checks if the MSE was below the threshold."""
tr = float(p["optThreshold"])
# TODO: finish
k = "result.best.verificationDecision"
return p["result.best.isOptimal"] == "true" and p[k] == "unsat"
def get_num_optimal(props):
props2 = [p for p in props if is_verified_solution(p)]
return len(props2)
def get_num_optimalOnlyMse(props):
# "cdgp.optThreshold" in p and
for p in props:
if "optThreshold" not in p:
print(str(p))
# Sometimes it is 'optThreshold', and sometimes 'cdgp.optThreshold'...
# props2 = [p for p in props if float(p["result.best.mse"]) <= float(p["optThreshold"])]
num = 0
for p in props:
if "optThreshold" in p:
tr = p["optThreshold"]
elif "optThreshold" in p:
tr = p["cdgp.optThreshold"]
else:
raise Exception("No optThreshold in log file")
if float(p["result.best.mse"]) <= tr:
num += 1
return num
def get_num_allPropertiesMet(props):
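    """Counts runs whose best solution satisfied all properties, i.e. the verification decision was 'unsat'."""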
props2 = [p for p in props if p["result.best.verificationDecision"] == "unsat"]
return len(props2)
def get_num_computed(filtered):
return len(filtered)
def fun_successRate_full(filtered):
if len(filtered) == 0:
return "-"
num_opt = get_num_optimal(filtered)
return "{0}/{1}".format(str(num_opt), str(len(filtered)))
def get_successRate(filtered):
num_opt = get_num_optimal(filtered)
return float(num_opt) / float(len(filtered))
def fun_successRateMseOnly(filtered):
if len(filtered) == 0:
return "-"
n = get_num_optimalOnlyMse(filtered)
if n == 0:
return "-"
else:
sr = n / float(len(filtered))
return "{0}".format("%0.2f" % round(sr, 2))
def fun_average_mse(filtered):
res = 0.0
num = 0
# Sometimes there was "inf" in the results. We will ignore those elements.
for p in filtered:
x = float(p["result.best.mse"])
if not "n" in str(x):
res += x
num += 1
else:
print("Nan encountered")
if num == 0:
return "-"
else:
return res / num
def fun_average_mse_sd(filtered):
"""Returns average together with standard deviation."""
res = 0.0
num = 0
# Sometimes there was "inf" in the results. We will ignore those elements.
for p in filtered:
x = float(p["result.best.mse"])
if not "n" in str(x):
res += x
num += 1
else:
print("Nan encountered")
    if num == 0:
        return "-"
    avg = res / num
    sd = 0.0
    for p in filtered:
        x = float(p["result.best.mse"])
        if not "n" in str(x):
            sd += (x - avg) ** 2.0
    sd = math.sqrt(sd / num)
    return r"${0} \pm{1}$".format(avg, sd)
def fun_successRate(filtered):
if len(filtered) == 0:
return "-"
sr = get_successRate(filtered)
return "{0}".format("%0.2f" % round(sr, 2))
def fun_allPropertiesMet(filtered):
if len(filtered) == 0:
return "-"
num_opt = get_num_allPropertiesMet(filtered)
sr = float(num_opt) / float(len(filtered))
return "{0}".format("%0.2f" % round(sr, 2))
def get_stats_size(props):
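    """Average size of the best-of-run solutions, rounded to an integer ('-' if there is no data)."""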
vals = [float(p["result.best.size"]) for p in props]
if len(vals) == 0:
return "-"#-1.0, -1.0
else:
return str(int(round(np.mean(vals)))) #, np.std(vals)
def get_stats_sizeOnlySuccessful(props):
vals = [float(p["result.best.size"]) for p in props if is_verified_solution(p)]
if len(vals) == 0:
return "-"#-1.0, -1.0
else:
return str(int(round(np.mean(vals)))) #, np.std(vals)
def get_stats_maxSolverTime(props):
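    """Maximum solver-call time over all runs, taken from the last (assumed largest) entry of 'solver.allTimesCountMap'."""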
if len(props) == 0 or "solver.allTimesCountMap" not in props[0]:
return "-"
times = []
for p in props:
timesMap = p["solver.allTimesCountMap"]
parts = timesMap.split(", ")[-1].split(",")
times.append(float(parts[0].replace("(", "")))
return "%0.3f" % max(times)
def get_stats_avgSolverTime(props):
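    """Average solver-call time weighted by call counts from 'solver.allTimesCountMap'; computed only for CDGP runs."""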
if len(props) == 0 or "solver.allTimesCountMap" not in props[0] or props[0]["method"] != "CDGP":
return "-"
sum = 0.0
sumWeights = 0.0
for p in props:
timesMap = p["solver.allTimesCountMap"]
pairs = timesMap.split(", ")
if len(pairs) == 0:
continue
for x in pairs:
time = float(x.split(",")[0].replace("(", ""))
weight = float(x.split(",")[1].replace(")", ""))
sum += time * weight
sumWeights += weight
if sumWeights == 0.0:
return "%0.3f" % 0.0
else:
return "%0.3f" % (sum / sumWeights)
def get_avgSolverTotalCalls(props):
if len(props) == 0 or "solver.totalCalls" not in props[0]:
return "-"
vals = [float(p["solver.totalCalls"]) / 1000.0 for p in props]
return "%0.1f" % round(np.mean(vals), 1) # "%d"
def get_numSolverCallsOverXs(props):
if len(props) == 0 or "solver.allTimesCountMap" not in props[0]:
return "-"
TRESHOLD = 0.5
sum = 0
for p in props:
timesMap = p["solver.allTimesCountMap"]
pairs = timesMap.split(", ")
if len(pairs) == 0:
continue
for x in pairs:
time = float(x.split(",")[0].replace("(", ""))
if time > TRESHOLD:
# print("Name of file: " + p["thisFileName"])
weight = int(x.split(",")[1].replace(")", ""))
sum += weight
return sum
def get_avg_totalTests(props):
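    """Average number of tests ('tests.total') per run, rounded to an integer."""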
vals = [float(p["tests.total"]) for p in props]
if len(vals) == 0:
return "-" # -1.0, -1.0
else:
x = np.mean(vals)
if x < 1e-5:
x = 0.0
return str(int(round(x))) #"%0.1f" % x
def get_avg_mse(props):
vals = []
for p in props:
vals.append(float(p["result.best.mse"]))
if len(vals) == 0:
return "-" # -1.0, -1.0
else:
return "%0.5f" % np.mean(vals) # , np.std(vals)
def get_avg_runtime_helper(vals):
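    """Formats the mean of the given runtime values: whole numbers for means >= 10, one decimal otherwise; 'n/a' for an empty list."""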
if len(vals) == 0:
return "n/a" # -1.0, -1.0
else:
x = np.mean(vals)
if x >= 10.0:
return "%d" % x
else:
return "%0.1f" % x # , np.std(vals)
def get_avg_runtimeOnlySuccessful(props):
if len(props) == 0:
return "-"
else:
vals = [float(normalized_total_time(p, max_time=1800000)) / 1000.0 for p in props if is_verified_solution(p)]
return get_avg_runtime_helper(vals)
def get_avg_runtime(props):
if len(props) == 0:
return "-"
else:
vals = [float(normalized_total_time(p, max_time=1800000)) / 1000.0 for p in props]
return get_avg_runtime_helper(vals)
def get_avg_generation(props):
if len(props) == 0:
return "-"
if len(props) > 0 and "result.totalGenerations" not in props[0]:
return "-"
vals = [float(p["result.totalGenerations"]) for p in props]
if len(vals) == 0:
return "-"
else:
return str(int(round(np.mean(vals)))) #"%0.1f" % np.mean(vals) # , np.std(vals)
def get_avg_generationSuccessful(props):
if len(props) == 0:
return "-"
else:
vals = [float(p["result.best.generation"]) for p in props if is_verified_solution(p)]
if len(vals) == 0:
return "n/a" # -1.0, -1.0
else:
return str(int(round(np.mean(vals)))) # "%0.1f" % np.mean(vals) # , np.std(vals)
def get_avg_evaluated(props):
if len(props) == 0:
return "-"
vals = []
for p in props:
if p["evolutionMode"] == "steadyState":
vals.append(float(p["result.totalGenerations"]))
else:
vals.append(float(p["result.totalGenerations"]) * float(p["populationSize"]))
return str(int(round(np.mean(vals)))) #"%0.1f" % np.mean(vals) # , np.std(vals)
def get_avg_evaluatedSuccessful(props):
if len(props) == 0:
return "-"
vals = []
for p in props:
if is_verified_solution(p):
if p["evolutionMode"] == "steadyState":
vals.append(float(p["result.totalGenerations"]))
else:
vals.append(float(p["result.totalGenerations"]) * float(p["populationSize"]))
if len(vals) == 0:
return "n/a" # -1.0, -1.0
else:
return str(int(round(np.mean(vals)))) # "%0.1f" % np.mean(vals) # , np.std(vals)
def get_avg_runtimePerProgram(props):
if len(props) == 0:
return "-" # -1.0, -1.0
sAvgGen = get_avg_generation(props)
if sAvgGen == "-" or sAvgGen is None:
return "-"
avgGen = float(sAvgGen) # avg number of generations in all runs
avgRuntime = float(get_avg_runtime(props)) # avg runtime of all runs
populationSize = float(props[0]["populationSize"])
if props[0]["evolutionMode"] == "steadyState":
        approxNumPrograms = populationSize + avgGen # in steady state there are many generations, but each of them creates only one new program
else:
approxNumPrograms = populationSize * avgGen
approxTimePerProgram = avgRuntime / approxNumPrograms
return "%0.3f" % approxTimePerProgram
def get_sum_solverRestarts(props):
if len(props) == 0:
return "-"
vals = [int(p["solver.totalRestarts"]) for p in props if "solver.totalRestarts" in p]
if len(vals) != len(props):
print("WARNING: solver.totalRestarts was not present in all files.")
if len(vals) == 0:
return "0"
else:
return str(np.sum(vals))
def print_solved_in_time(props, upper_time):
    if len(props) == 0:
        return
    # totalTimeSystem is in milliseconds
    solved = 0
    solvedRuns = 0
    num = 0
    for p in props:
        if p["result.best.isOptimal"] == "false":
            continue
        num += 1
        if int(normalized_total_time(p, max_time=1800000)) <= upper_time:
            solved += 1
    for p in props:
        if int(normalized_total_time(p, max_time=1800000)) <= upper_time:
            solvedRuns += 1
    print("\nRuns which ended under {0} s: {1} / {2} ({3:.1f} %)".format(upper_time / 1000.0, solvedRuns, len(props), 100.0 * solvedRuns / len(props)))
    if num > 0:
        print("Optimal solutions found under {0} s: {1} / {2} ({3:.1f} %)\n".format(upper_time / 1000.0, solved, num, 100.0 * solved / num))
|
normal
|
{
"blob_id": "b3cb94a44f64091714650efb81c4cad27b211cef",
"index": 8804,
"step-1": "<mask token>\n\n\nclass TableGenerator:\n \"\"\"Generates table from data.\"\"\"\n\n def __init__(self, f_cell, dim_rows, dim_cols, headerRowNames, title='',\n color_scheme=None, table_postprocessor=None, vertical_border=1,\n table_variants=None, default_color_thresholds=None,\n layered_headline=True, only_nonempty_rows=True, **kwargs):\n self.f_cell = f_cell\n self.dim_rows = dim_rows\n self.dim_cols = dim_cols\n self.title = title\n self.color_scheme = color_scheme\n self.table_postprocessor = table_postprocessor\n self.vertical_border = vertical_border\n self.headerRowNames = headerRowNames\n self.table_variants = (table_variants if table_variants is not None\n else [lambda p: True])\n self.default_color_thresholds = default_color_thresholds\n self.layered_headline = layered_headline\n self.only_nonempty_rows = only_nonempty_rows\n self.init_kwargs = kwargs.copy()\n\n def apply(self, props, new_color_thresholds=None):\n text = ''\n for variant in self.table_variants:\n props_variant = [p for p in props if variant(p)]\n if self.only_nonempty_rows:\n dim_rows_variant = Dim([c for c in self.dim_rows.configs if\n len(c.filter_props(props_variant)) > 0])\n else:\n dim_rows_variant = self.dim_rows\n txt = printer.latex_table(props_variant, dim_rows_variant, self\n .dim_cols, self.f_cell, layered_headline=self.\n layered_headline, vertical_border=self.vertical_border,\n headerRowNames=self.headerRowNames, **self.init_kwargs)\n txt = self.table_postprocessor(txt)\n ct = (new_color_thresholds if new_color_thresholds is not None else\n self.default_color_thresholds)\n if self.color_scheme is not None and ct is not None:\n cv0, cv1, cv2 = ct\n txt = printer.table_color_map(txt, cv0, cv1, cv2,\n 'colorLow', 'colorMedium', 'colorHigh')\n text += '\\\\noindent'\n text += txt\n return text\n\n\nclass Experiment:\n\n def __init__(self):\n self.tables = []\n self.listings = []\n\n\n<mask token>\n\n\ndef print_props_filenames(props):\n for p in props:\n if 'thisFileName' in p:\n print(p['thisFileName'])\n else:\n print(\n \"'thisFileName' not specified! Printing content instead: \" +\n str(p))\n\n\n<mask token>\n\n\ndef create_errors_solver_listing(error_props, filename, pred=None):\n if pred is None:\n pred = lambda x: True\n f = open('results/listings/{0}'.format(filename), 'w')\n print('Creating log of errors ({0})...'.format(filename))\n for i, p in enumerate(error_props):\n if not pred(p):\n continue\n if i > 0:\n f.write('\\n' + '-' * 50 + '\\n\\n')\n with open(p['evoplotter.file'], 'r') as content_file:\n content = content_file.read()\n f.write(content)\n f.close()\n\n\n<mask token>\n\n\ndef produce_status_matrix(dim, props):\n \"\"\"Generates a status data in the form of a python list. 
It can be\n later used to retry missing runs.\n\n :param dim: (Dimension) dimensions on which data are to be divided.\n :param props: (dict[str,str]) properties files.\n :return: (str) Python code of a list containing specified data.\n \"\"\"\n text = '['\n for config in dim:\n numRuns = len(config.filter_props(props))\n text += '({0}, {1}), '.format(config.stored_values, numRuns)\n return text + ']'\n\n\n<mask token>\n\n\ndef get_num_optimal(props):\n props2 = [p for p in props if is_verified_solution(p)]\n return len(props2)\n\n\n<mask token>\n\n\ndef fun_successRate_full(filtered):\n if len(filtered) == 0:\n return '-'\n num_opt = get_num_optimal(filtered)\n return '{0}/{1}'.format(str(num_opt), str(len(filtered)))\n\n\ndef get_successRate(filtered):\n num_opt = get_num_optimal(filtered)\n return float(num_opt) / float(len(filtered))\n\n\n<mask token>\n\n\ndef fun_average_mse_sd(filtered):\n \"\"\"Returns average together with standard deviation.\"\"\"\n res = 0.0\n num = 0\n for p in filtered:\n x = float(p['result.best.mse'])\n if not 'n' in str(x):\n res += x\n num += 1\n else:\n print('Nan encountered')\n avg = res / num\n sd = 0.0\n for p in filtered:\n x = float(p['result.best.mse'])\n if not 'n' in str(x):\n sd += (x - avg) ** 2.0\n sd = math.sqrt(sd / num)\n if num == 0:\n return '-'\n else:\n return '${0} \\\\pm{1}$'.format(avg, sd)\n\n\n<mask token>\n\n\ndef get_stats_maxSolverTime(props):\n if len(props) == 0 or 'solver.allTimesCountMap' not in props[0]:\n return '-'\n times = []\n for p in props:\n timesMap = p['solver.allTimesCountMap']\n parts = timesMap.split(', ')[-1].split(',')\n times.append(float(parts[0].replace('(', '')))\n return '%0.3f' % max(times)\n\n\n<mask token>\n\n\ndef get_avgSolverTotalCalls(props):\n if len(props) == 0 or 'solver.totalCalls' not in props[0]:\n return '-'\n vals = [(float(p['solver.totalCalls']) / 1000.0) for p in props]\n return '%0.1f' % round(np.mean(vals), 1)\n\n\n<mask token>\n\n\ndef get_avg_runtime_helper(vals):\n if len(vals) == 0:\n return 'n/a'\n else:\n x = np.mean(vals)\n if x >= 10.0:\n return '%d' % x\n else:\n return '%0.1f' % x\n\n\ndef get_avg_runtimeOnlySuccessful(props):\n if len(props) == 0:\n return '-'\n else:\n vals = [(float(normalized_total_time(p, max_time=1800000)) / 1000.0\n ) for p in props if is_verified_solution(p)]\n return get_avg_runtime_helper(vals)\n\n\n<mask token>\n\n\ndef get_avg_generation(props):\n if len(props) == 0:\n return '-'\n if len(props) > 0 and 'result.totalGenerations' not in props[0]:\n return '-'\n vals = [float(p['result.totalGenerations']) for p in props]\n if len(vals) == 0:\n return '-'\n else:\n return str(int(round(np.mean(vals))))\n\n\n<mask token>\n\n\ndef get_avg_evaluated(props):\n if len(props) == 0:\n return '-'\n vals = []\n for p in props:\n if p['evolutionMode'] == 'steadyState':\n vals.append(float(p['result.totalGenerations']))\n else:\n vals.append(float(p['result.totalGenerations']) * float(p[\n 'populationSize']))\n return str(int(round(np.mean(vals))))\n\n\ndef get_avg_evaluatedSuccessful(props):\n if len(props) == 0:\n return '-'\n vals = []\n for p in props:\n if is_verified_solution(p):\n if p['evolutionMode'] == 'steadyState':\n vals.append(float(p['result.totalGenerations']))\n else:\n vals.append(float(p['result.totalGenerations']) * float(p[\n 'populationSize']))\n if len(vals) == 0:\n return 'n/a'\n else:\n return str(int(round(np.mean(vals))))\n\n\n<mask token>\n\n\ndef get_sum_solverRestarts(props):\n if len(props) == 0:\n return '-'\n vals = 
[int(p['solver.totalRestarts']) for p in props if \n 'solver.totalRestarts' in p]\n if len(vals) != len(props):\n print('WARNING: solver.totalRestarts was not present in all files.')\n if len(vals) == 0:\n return '0'\n else:\n return str(np.sum(vals))\n\n\ndef print_solved_in_time(props, upper_time):\n if len(props) == 0:\n return\n solved = 0\n solvedRuns = 0\n num = 0\n for p in props:\n if p['result.best.isOptimal'] == 'false':\n continue\n num += 1\n if int(normalized_total_time(p, max_time=1800000)) <= upper_time:\n solved += 1\n for p in props:\n if int(normalized_total_time(p, max_time=1800000)) <= upper_time:\n solvedRuns += 1\n print('\\nRuns which ended under {0} s: {1} / {2} ({3} %)'.format(\n upper_time / 1000.0, solvedRuns, len(props), solvedRuns / len(props)))\n print('Optimal solutions found under {0} s: {1} / {2} ({3} %)\\n'.\n format(upper_time / 1000.0, solved, num, solved / num))\n",
"step-2": "<mask token>\n\n\nclass TableGenerator:\n \"\"\"Generates table from data.\"\"\"\n\n def __init__(self, f_cell, dim_rows, dim_cols, headerRowNames, title='',\n color_scheme=None, table_postprocessor=None, vertical_border=1,\n table_variants=None, default_color_thresholds=None,\n layered_headline=True, only_nonempty_rows=True, **kwargs):\n self.f_cell = f_cell\n self.dim_rows = dim_rows\n self.dim_cols = dim_cols\n self.title = title\n self.color_scheme = color_scheme\n self.table_postprocessor = table_postprocessor\n self.vertical_border = vertical_border\n self.headerRowNames = headerRowNames\n self.table_variants = (table_variants if table_variants is not None\n else [lambda p: True])\n self.default_color_thresholds = default_color_thresholds\n self.layered_headline = layered_headline\n self.only_nonempty_rows = only_nonempty_rows\n self.init_kwargs = kwargs.copy()\n\n def apply(self, props, new_color_thresholds=None):\n text = ''\n for variant in self.table_variants:\n props_variant = [p for p in props if variant(p)]\n if self.only_nonempty_rows:\n dim_rows_variant = Dim([c for c in self.dim_rows.configs if\n len(c.filter_props(props_variant)) > 0])\n else:\n dim_rows_variant = self.dim_rows\n txt = printer.latex_table(props_variant, dim_rows_variant, self\n .dim_cols, self.f_cell, layered_headline=self.\n layered_headline, vertical_border=self.vertical_border,\n headerRowNames=self.headerRowNames, **self.init_kwargs)\n txt = self.table_postprocessor(txt)\n ct = (new_color_thresholds if new_color_thresholds is not None else\n self.default_color_thresholds)\n if self.color_scheme is not None and ct is not None:\n cv0, cv1, cv2 = ct\n txt = printer.table_color_map(txt, cv0, cv1, cv2,\n 'colorLow', 'colorMedium', 'colorHigh')\n text += '\\\\noindent'\n text += txt\n return text\n\n\nclass Experiment:\n\n def __init__(self):\n self.tables = []\n self.listings = []\n\n\n<mask token>\n\n\ndef print_props_filenames(props):\n for p in props:\n if 'thisFileName' in p:\n print(p['thisFileName'])\n else:\n print(\n \"'thisFileName' not specified! Printing content instead: \" +\n str(p))\n\n\n<mask token>\n\n\ndef create_errors_solver_listing(error_props, filename, pred=None):\n if pred is None:\n pred = lambda x: True\n f = open('results/listings/{0}'.format(filename), 'w')\n print('Creating log of errors ({0})...'.format(filename))\n for i, p in enumerate(error_props):\n if not pred(p):\n continue\n if i > 0:\n f.write('\\n' + '-' * 50 + '\\n\\n')\n with open(p['evoplotter.file'], 'r') as content_file:\n content = content_file.read()\n f.write(content)\n f.close()\n\n\n<mask token>\n\n\ndef produce_status_matrix(dim, props):\n \"\"\"Generates a status data in the form of a python list. It can be\n later used to retry missing runs.\n\n :param dim: (Dimension) dimensions on which data are to be divided.\n :param props: (dict[str,str]) properties files.\n :return: (str) Python code of a list containing specified data.\n \"\"\"\n text = '['\n for config in dim:\n numRuns = len(config.filter_props(props))\n text += '({0}, {1}), '.format(config.stored_values, numRuns)\n return text + ']'\n\n\n<mask token>\n\n\ndef normalized_total_time(p, max_time=3600000):\n \"\"\"If time was longer than max_time, then return max_time, otherwise return time. 
Time is counted in miliseconds.\"\"\"\n if 'cdgp.wasTimeout' in p and p['cdgp.wasTimeout'] == 'true':\n v = 3600000\n else:\n v = int(float(p['result.totalTimeSystem']))\n return max_time if v > max_time else v\n\n\n<mask token>\n\n\ndef get_num_optimal(props):\n props2 = [p for p in props if is_verified_solution(p)]\n return len(props2)\n\n\n<mask token>\n\n\ndef get_num_computed(filtered):\n return len(filtered)\n\n\ndef fun_successRate_full(filtered):\n if len(filtered) == 0:\n return '-'\n num_opt = get_num_optimal(filtered)\n return '{0}/{1}'.format(str(num_opt), str(len(filtered)))\n\n\ndef get_successRate(filtered):\n num_opt = get_num_optimal(filtered)\n return float(num_opt) / float(len(filtered))\n\n\ndef fun_successRateMseOnly(filtered):\n if len(filtered) == 0:\n return '-'\n n = get_num_optimalOnlyMse(filtered)\n if n == 0:\n return '-'\n else:\n sr = n / float(len(filtered))\n return '{0}'.format('%0.2f' % round(sr, 2))\n\n\ndef fun_average_mse(filtered):\n res = 0.0\n num = 0\n for p in filtered:\n x = float(p['result.best.mse'])\n if not 'n' in str(x):\n res += x\n num += 1\n else:\n print('Nan encountered')\n if num == 0:\n return '-'\n else:\n return res / num\n\n\ndef fun_average_mse_sd(filtered):\n \"\"\"Returns average together with standard deviation.\"\"\"\n res = 0.0\n num = 0\n for p in filtered:\n x = float(p['result.best.mse'])\n if not 'n' in str(x):\n res += x\n num += 1\n else:\n print('Nan encountered')\n avg = res / num\n sd = 0.0\n for p in filtered:\n x = float(p['result.best.mse'])\n if not 'n' in str(x):\n sd += (x - avg) ** 2.0\n sd = math.sqrt(sd / num)\n if num == 0:\n return '-'\n else:\n return '${0} \\\\pm{1}$'.format(avg, sd)\n\n\n<mask token>\n\n\ndef get_stats_size(props):\n vals = [float(p['result.best.size']) for p in props]\n if len(vals) == 0:\n return '-'\n else:\n return str(int(round(np.mean(vals))))\n\n\ndef get_stats_sizeOnlySuccessful(props):\n vals = [float(p['result.best.size']) for p in props if\n is_verified_solution(p)]\n if len(vals) == 0:\n return '-'\n else:\n return str(int(round(np.mean(vals))))\n\n\ndef get_stats_maxSolverTime(props):\n if len(props) == 0 or 'solver.allTimesCountMap' not in props[0]:\n return '-'\n times = []\n for p in props:\n timesMap = p['solver.allTimesCountMap']\n parts = timesMap.split(', ')[-1].split(',')\n times.append(float(parts[0].replace('(', '')))\n return '%0.3f' % max(times)\n\n\ndef get_stats_avgSolverTime(props):\n if len(props) == 0 or 'solver.allTimesCountMap' not in props[0] or props[0\n ]['method'] != 'CDGP':\n return '-'\n sum = 0.0\n sumWeights = 0.0\n for p in props:\n timesMap = p['solver.allTimesCountMap']\n pairs = timesMap.split(', ')\n if len(pairs) == 0:\n continue\n for x in pairs:\n time = float(x.split(',')[0].replace('(', ''))\n weight = float(x.split(',')[1].replace(')', ''))\n sum += time * weight\n sumWeights += weight\n if sumWeights == 0.0:\n return '%0.3f' % 0.0\n else:\n return '%0.3f' % (sum / sumWeights)\n\n\ndef get_avgSolverTotalCalls(props):\n if len(props) == 0 or 'solver.totalCalls' not in props[0]:\n return '-'\n vals = [(float(p['solver.totalCalls']) / 1000.0) for p in props]\n return '%0.1f' % round(np.mean(vals), 1)\n\n\n<mask token>\n\n\ndef get_avg_totalTests(props):\n vals = [float(p['tests.total']) for p in props]\n if len(vals) == 0:\n return '-'\n else:\n x = np.mean(vals)\n if x < 1e-05:\n x = 0.0\n return str(int(round(x)))\n\n\n<mask token>\n\n\ndef get_avg_runtime_helper(vals):\n if len(vals) == 0:\n return 'n/a'\n else:\n x = np.mean(vals)\n if 
x >= 10.0:\n return '%d' % x\n else:\n return '%0.1f' % x\n\n\ndef get_avg_runtimeOnlySuccessful(props):\n if len(props) == 0:\n return '-'\n else:\n vals = [(float(normalized_total_time(p, max_time=1800000)) / 1000.0\n ) for p in props if is_verified_solution(p)]\n return get_avg_runtime_helper(vals)\n\n\n<mask token>\n\n\ndef get_avg_generation(props):\n if len(props) == 0:\n return '-'\n if len(props) > 0 and 'result.totalGenerations' not in props[0]:\n return '-'\n vals = [float(p['result.totalGenerations']) for p in props]\n if len(vals) == 0:\n return '-'\n else:\n return str(int(round(np.mean(vals))))\n\n\n<mask token>\n\n\ndef get_avg_evaluated(props):\n if len(props) == 0:\n return '-'\n vals = []\n for p in props:\n if p['evolutionMode'] == 'steadyState':\n vals.append(float(p['result.totalGenerations']))\n else:\n vals.append(float(p['result.totalGenerations']) * float(p[\n 'populationSize']))\n return str(int(round(np.mean(vals))))\n\n\ndef get_avg_evaluatedSuccessful(props):\n if len(props) == 0:\n return '-'\n vals = []\n for p in props:\n if is_verified_solution(p):\n if p['evolutionMode'] == 'steadyState':\n vals.append(float(p['result.totalGenerations']))\n else:\n vals.append(float(p['result.totalGenerations']) * float(p[\n 'populationSize']))\n if len(vals) == 0:\n return 'n/a'\n else:\n return str(int(round(np.mean(vals))))\n\n\n<mask token>\n\n\ndef get_sum_solverRestarts(props):\n if len(props) == 0:\n return '-'\n vals = [int(p['solver.totalRestarts']) for p in props if \n 'solver.totalRestarts' in p]\n if len(vals) != len(props):\n print('WARNING: solver.totalRestarts was not present in all files.')\n if len(vals) == 0:\n return '0'\n else:\n return str(np.sum(vals))\n\n\ndef print_solved_in_time(props, upper_time):\n if len(props) == 0:\n return\n solved = 0\n solvedRuns = 0\n num = 0\n for p in props:\n if p['result.best.isOptimal'] == 'false':\n continue\n num += 1\n if int(normalized_total_time(p, max_time=1800000)) <= upper_time:\n solved += 1\n for p in props:\n if int(normalized_total_time(p, max_time=1800000)) <= upper_time:\n solvedRuns += 1\n print('\\nRuns which ended under {0} s: {1} / {2} ({3} %)'.format(\n upper_time / 1000.0, solvedRuns, len(props), solvedRuns / len(props)))\n print('Optimal solutions found under {0} s: {1} / {2} ({3} %)\\n'.\n format(upper_time / 1000.0, solved, num, solved / num))\n",
"step-3": "<mask token>\n\n\nclass TableGenerator:\n \"\"\"Generates table from data.\"\"\"\n\n def __init__(self, f_cell, dim_rows, dim_cols, headerRowNames, title='',\n color_scheme=None, table_postprocessor=None, vertical_border=1,\n table_variants=None, default_color_thresholds=None,\n layered_headline=True, only_nonempty_rows=True, **kwargs):\n self.f_cell = f_cell\n self.dim_rows = dim_rows\n self.dim_cols = dim_cols\n self.title = title\n self.color_scheme = color_scheme\n self.table_postprocessor = table_postprocessor\n self.vertical_border = vertical_border\n self.headerRowNames = headerRowNames\n self.table_variants = (table_variants if table_variants is not None\n else [lambda p: True])\n self.default_color_thresholds = default_color_thresholds\n self.layered_headline = layered_headline\n self.only_nonempty_rows = only_nonempty_rows\n self.init_kwargs = kwargs.copy()\n\n def apply(self, props, new_color_thresholds=None):\n text = ''\n for variant in self.table_variants:\n props_variant = [p for p in props if variant(p)]\n if self.only_nonempty_rows:\n dim_rows_variant = Dim([c for c in self.dim_rows.configs if\n len(c.filter_props(props_variant)) > 0])\n else:\n dim_rows_variant = self.dim_rows\n txt = printer.latex_table(props_variant, dim_rows_variant, self\n .dim_cols, self.f_cell, layered_headline=self.\n layered_headline, vertical_border=self.vertical_border,\n headerRowNames=self.headerRowNames, **self.init_kwargs)\n txt = self.table_postprocessor(txt)\n ct = (new_color_thresholds if new_color_thresholds is not None else\n self.default_color_thresholds)\n if self.color_scheme is not None and ct is not None:\n cv0, cv1, cv2 = ct\n txt = printer.table_color_map(txt, cv0, cv1, cv2,\n 'colorLow', 'colorMedium', 'colorHigh')\n text += '\\\\noindent'\n text += txt\n return text\n\n\nclass Experiment:\n\n def __init__(self):\n self.tables = []\n self.listings = []\n\n\n<mask token>\n\n\ndef print_props_filenames(props):\n for p in props:\n if 'thisFileName' in p:\n print(p['thisFileName'])\n else:\n print(\n \"'thisFileName' not specified! Printing content instead: \" +\n str(p))\n\n\n<mask token>\n\n\ndef create_errors_solver_listing(error_props, filename, pred=None):\n if pred is None:\n pred = lambda x: True\n f = open('results/listings/{0}'.format(filename), 'w')\n print('Creating log of errors ({0})...'.format(filename))\n for i, p in enumerate(error_props):\n if not pred(p):\n continue\n if i > 0:\n f.write('\\n' + '-' * 50 + '\\n\\n')\n with open(p['evoplotter.file'], 'r') as content_file:\n content = content_file.read()\n f.write(content)\n f.close()\n\n\n<mask token>\n\n\ndef produce_status_matrix(dim, props):\n \"\"\"Generates a status data in the form of a python list. It can be\n later used to retry missing runs.\n\n :param dim: (Dimension) dimensions on which data are to be divided.\n :param props: (dict[str,str]) properties files.\n :return: (str) Python code of a list containing specified data.\n \"\"\"\n text = '['\n for config in dim:\n numRuns = len(config.filter_props(props))\n text += '({0}, {1}), '.format(config.stored_values, numRuns)\n return text + ']'\n\n\n<mask token>\n\n\ndef normalized_total_time(p, max_time=3600000):\n \"\"\"If time was longer than max_time, then return max_time, otherwise return time. 
Time is counted in miliseconds.\"\"\"\n if 'cdgp.wasTimeout' in p and p['cdgp.wasTimeout'] == 'true':\n v = 3600000\n else:\n v = int(float(p['result.totalTimeSystem']))\n return max_time if v > max_time else v\n\n\n<mask token>\n\n\ndef is_approximated_solution(p):\n \"\"\"Checks if the MSE was below the threshold.\"\"\"\n tr = float(p['optThreshold'])\n k = 'result.best.verificationDecision'\n return p['result.best.isOptimal'] == 'true' and p[k] == 'unsat'\n\n\ndef get_num_optimal(props):\n props2 = [p for p in props if is_verified_solution(p)]\n return len(props2)\n\n\n<mask token>\n\n\ndef get_num_computed(filtered):\n return len(filtered)\n\n\ndef fun_successRate_full(filtered):\n if len(filtered) == 0:\n return '-'\n num_opt = get_num_optimal(filtered)\n return '{0}/{1}'.format(str(num_opt), str(len(filtered)))\n\n\ndef get_successRate(filtered):\n num_opt = get_num_optimal(filtered)\n return float(num_opt) / float(len(filtered))\n\n\ndef fun_successRateMseOnly(filtered):\n if len(filtered) == 0:\n return '-'\n n = get_num_optimalOnlyMse(filtered)\n if n == 0:\n return '-'\n else:\n sr = n / float(len(filtered))\n return '{0}'.format('%0.2f' % round(sr, 2))\n\n\ndef fun_average_mse(filtered):\n res = 0.0\n num = 0\n for p in filtered:\n x = float(p['result.best.mse'])\n if not 'n' in str(x):\n res += x\n num += 1\n else:\n print('Nan encountered')\n if num == 0:\n return '-'\n else:\n return res / num\n\n\ndef fun_average_mse_sd(filtered):\n \"\"\"Returns average together with standard deviation.\"\"\"\n res = 0.0\n num = 0\n for p in filtered:\n x = float(p['result.best.mse'])\n if not 'n' in str(x):\n res += x\n num += 1\n else:\n print('Nan encountered')\n avg = res / num\n sd = 0.0\n for p in filtered:\n x = float(p['result.best.mse'])\n if not 'n' in str(x):\n sd += (x - avg) ** 2.0\n sd = math.sqrt(sd / num)\n if num == 0:\n return '-'\n else:\n return '${0} \\\\pm{1}$'.format(avg, sd)\n\n\ndef fun_successRate(filtered):\n if len(filtered) == 0:\n return '-'\n sr = get_successRate(filtered)\n return '{0}'.format('%0.2f' % round(sr, 2))\n\n\n<mask token>\n\n\ndef get_stats_size(props):\n vals = [float(p['result.best.size']) for p in props]\n if len(vals) == 0:\n return '-'\n else:\n return str(int(round(np.mean(vals))))\n\n\ndef get_stats_sizeOnlySuccessful(props):\n vals = [float(p['result.best.size']) for p in props if\n is_verified_solution(p)]\n if len(vals) == 0:\n return '-'\n else:\n return str(int(round(np.mean(vals))))\n\n\ndef get_stats_maxSolverTime(props):\n if len(props) == 0 or 'solver.allTimesCountMap' not in props[0]:\n return '-'\n times = []\n for p in props:\n timesMap = p['solver.allTimesCountMap']\n parts = timesMap.split(', ')[-1].split(',')\n times.append(float(parts[0].replace('(', '')))\n return '%0.3f' % max(times)\n\n\ndef get_stats_avgSolverTime(props):\n if len(props) == 0 or 'solver.allTimesCountMap' not in props[0] or props[0\n ]['method'] != 'CDGP':\n return '-'\n sum = 0.0\n sumWeights = 0.0\n for p in props:\n timesMap = p['solver.allTimesCountMap']\n pairs = timesMap.split(', ')\n if len(pairs) == 0:\n continue\n for x in pairs:\n time = float(x.split(',')[0].replace('(', ''))\n weight = float(x.split(',')[1].replace(')', ''))\n sum += time * weight\n sumWeights += weight\n if sumWeights == 0.0:\n return '%0.3f' % 0.0\n else:\n return '%0.3f' % (sum / sumWeights)\n\n\ndef get_avgSolverTotalCalls(props):\n if len(props) == 0 or 'solver.totalCalls' not in props[0]:\n return '-'\n vals = [(float(p['solver.totalCalls']) / 1000.0) for p in props]\n 
return '%0.1f' % round(np.mean(vals), 1)\n\n\ndef get_numSolverCallsOverXs(props):\n if len(props) == 0 or 'solver.allTimesCountMap' not in props[0]:\n return '-'\n TRESHOLD = 0.5\n sum = 0\n for p in props:\n timesMap = p['solver.allTimesCountMap']\n pairs = timesMap.split(', ')\n if len(pairs) == 0:\n continue\n for x in pairs:\n time = float(x.split(',')[0].replace('(', ''))\n if time > TRESHOLD:\n weight = int(x.split(',')[1].replace(')', ''))\n sum += weight\n return sum\n\n\ndef get_avg_totalTests(props):\n vals = [float(p['tests.total']) for p in props]\n if len(vals) == 0:\n return '-'\n else:\n x = np.mean(vals)\n if x < 1e-05:\n x = 0.0\n return str(int(round(x)))\n\n\ndef get_avg_mse(props):\n vals = []\n for p in props:\n vals.append(float(p['result.best.mse']))\n if len(vals) == 0:\n return '-'\n else:\n return '%0.5f' % np.mean(vals)\n\n\ndef get_avg_runtime_helper(vals):\n if len(vals) == 0:\n return 'n/a'\n else:\n x = np.mean(vals)\n if x >= 10.0:\n return '%d' % x\n else:\n return '%0.1f' % x\n\n\ndef get_avg_runtimeOnlySuccessful(props):\n if len(props) == 0:\n return '-'\n else:\n vals = [(float(normalized_total_time(p, max_time=1800000)) / 1000.0\n ) for p in props if is_verified_solution(p)]\n return get_avg_runtime_helper(vals)\n\n\ndef get_avg_runtime(props):\n if len(props) == 0:\n return '-'\n else:\n vals = [(float(normalized_total_time(p, max_time=1800000)) / 1000.0\n ) for p in props]\n return get_avg_runtime_helper(vals)\n\n\ndef get_avg_generation(props):\n if len(props) == 0:\n return '-'\n if len(props) > 0 and 'result.totalGenerations' not in props[0]:\n return '-'\n vals = [float(p['result.totalGenerations']) for p in props]\n if len(vals) == 0:\n return '-'\n else:\n return str(int(round(np.mean(vals))))\n\n\ndef get_avg_generationSuccessful(props):\n if len(props) == 0:\n return '-'\n else:\n vals = [float(p['result.best.generation']) for p in props if\n is_verified_solution(p)]\n if len(vals) == 0:\n return 'n/a'\n else:\n return str(int(round(np.mean(vals))))\n\n\ndef get_avg_evaluated(props):\n if len(props) == 0:\n return '-'\n vals = []\n for p in props:\n if p['evolutionMode'] == 'steadyState':\n vals.append(float(p['result.totalGenerations']))\n else:\n vals.append(float(p['result.totalGenerations']) * float(p[\n 'populationSize']))\n return str(int(round(np.mean(vals))))\n\n\ndef get_avg_evaluatedSuccessful(props):\n if len(props) == 0:\n return '-'\n vals = []\n for p in props:\n if is_verified_solution(p):\n if p['evolutionMode'] == 'steadyState':\n vals.append(float(p['result.totalGenerations']))\n else:\n vals.append(float(p['result.totalGenerations']) * float(p[\n 'populationSize']))\n if len(vals) == 0:\n return 'n/a'\n else:\n return str(int(round(np.mean(vals))))\n\n\ndef get_avg_runtimePerProgram(props):\n if len(props) == 0:\n return '-'\n sAvgGen = get_avg_generation(props)\n if sAvgGen == '-' or sAvgGen is None:\n return '-'\n avgGen = float(sAvgGen)\n avgRuntime = float(get_avg_runtime(props))\n populationSize = float(props[0]['populationSize'])\n if props[0]['evolutionMode'] == 'steadyState':\n approxNumPrograms = populationSize + avgGen\n else:\n approxNumPrograms = populationSize * avgGen\n approxTimePerProgram = avgRuntime / approxNumPrograms\n return '%0.3f' % approxTimePerProgram\n\n\ndef get_sum_solverRestarts(props):\n if len(props) == 0:\n return '-'\n vals = [int(p['solver.totalRestarts']) for p in props if \n 'solver.totalRestarts' in p]\n if len(vals) != len(props):\n print('WARNING: solver.totalRestarts was not present 
in all files.')\n if len(vals) == 0:\n return '0'\n else:\n return str(np.sum(vals))\n\n\ndef print_solved_in_time(props, upper_time):\n if len(props) == 0:\n return\n solved = 0\n solvedRuns = 0\n num = 0\n for p in props:\n if p['result.best.isOptimal'] == 'false':\n continue\n num += 1\n if int(normalized_total_time(p, max_time=1800000)) <= upper_time:\n solved += 1\n for p in props:\n if int(normalized_total_time(p, max_time=1800000)) <= upper_time:\n solvedRuns += 1\n print('\\nRuns which ended under {0} s: {1} / {2} ({3} %)'.format(\n upper_time / 1000.0, solvedRuns, len(props), solvedRuns / len(props)))\n print('Optimal solutions found under {0} s: {1} / {2} ({3} %)\\n'.\n format(upper_time / 1000.0, solved, num, solved / num))\n",
"step-4": "<mask token>\n\n\nclass TableGenerator:\n \"\"\"Generates table from data.\"\"\"\n\n def __init__(self, f_cell, dim_rows, dim_cols, headerRowNames, title='',\n color_scheme=None, table_postprocessor=None, vertical_border=1,\n table_variants=None, default_color_thresholds=None,\n layered_headline=True, only_nonempty_rows=True, **kwargs):\n self.f_cell = f_cell\n self.dim_rows = dim_rows\n self.dim_cols = dim_cols\n self.title = title\n self.color_scheme = color_scheme\n self.table_postprocessor = table_postprocessor\n self.vertical_border = vertical_border\n self.headerRowNames = headerRowNames\n self.table_variants = (table_variants if table_variants is not None\n else [lambda p: True])\n self.default_color_thresholds = default_color_thresholds\n self.layered_headline = layered_headline\n self.only_nonempty_rows = only_nonempty_rows\n self.init_kwargs = kwargs.copy()\n\n def apply(self, props, new_color_thresholds=None):\n text = ''\n for variant in self.table_variants:\n props_variant = [p for p in props if variant(p)]\n if self.only_nonempty_rows:\n dim_rows_variant = Dim([c for c in self.dim_rows.configs if\n len(c.filter_props(props_variant)) > 0])\n else:\n dim_rows_variant = self.dim_rows\n txt = printer.latex_table(props_variant, dim_rows_variant, self\n .dim_cols, self.f_cell, layered_headline=self.\n layered_headline, vertical_border=self.vertical_border,\n headerRowNames=self.headerRowNames, **self.init_kwargs)\n txt = self.table_postprocessor(txt)\n ct = (new_color_thresholds if new_color_thresholds is not None else\n self.default_color_thresholds)\n if self.color_scheme is not None and ct is not None:\n cv0, cv1, cv2 = ct\n txt = printer.table_color_map(txt, cv0, cv1, cv2,\n 'colorLow', 'colorMedium', 'colorHigh')\n text += '\\\\noindent'\n text += txt\n return text\n\n\nclass Experiment:\n\n def __init__(self):\n self.tables = []\n self.listings = []\n\n\ndef delete_logs(props, pred, verbose=True, simulate=False):\n for p in props:\n if 'evoplotter.file' in p and pred(p):\n path = p['evoplotter.file']\n if not simulate:\n os.remove(path)\n if verbose:\n print('File removed: {0}'.format(path))\n\n\ndef print_props_filenames(props):\n for p in props:\n if 'thisFileName' in p:\n print(p['thisFileName'])\n else:\n print(\n \"'thisFileName' not specified! 
Printing content instead: \" +\n str(p))\n\n\ndef create_errors_listing(error_props, filename):\n f = open('results/listings/{0}'.format(filename), 'w')\n print('Creating log of errors ({0})...'.format(filename))\n for i, p in enumerate(error_props):\n if i > 0:\n f.write('\\n' + '-' * 50 + '\\n')\n for k in sorted(p.keys()):\n v = p[k]\n f.write('{0} = {1}\\n'.format(k, v))\n f.close()\n\n\ndef create_errors_solver_listing(error_props, filename, pred=None):\n if pred is None:\n pred = lambda x: True\n f = open('results/listings/{0}'.format(filename), 'w')\n print('Creating log of errors ({0})...'.format(filename))\n for i, p in enumerate(error_props):\n if not pred(p):\n continue\n if i > 0:\n f.write('\\n' + '-' * 50 + '\\n\\n')\n with open(p['evoplotter.file'], 'r') as content_file:\n content = content_file.read()\n f.write(content)\n f.close()\n\n\ndef load_correct_props(folders):\n props_cdgpError = utils.load_properties_dirs(folders, exts=[\n '.cdgp.error'], add_file_path=True)\n exts = ['.cdgp']\n props0 = utils.load_properties_dirs(folders, exts=exts, add_file_path=True)\n\n def is_correct(p):\n return 'result.best.verificationDecision' in p\n props = [p for p in props0 if is_correct(p)]\n create_errors_solver_listing(props_cdgpError, 'errors_solver.txt')\n if CHECK_CORRECTNESS_OF_FILES:\n props_errors = [p for p in props0 if not is_correct(p)]\n create_errors_listing(props_errors, 'errors_run.txt')\n if len(props_errors) > 0:\n print('Files with error status:')\n print_props_filenames(props_errors)\n print(\n 'Loaded: {0} correct property files, {1} incorrect; All log files: {2}'\n .format(len(props), len(props_errors), len(props) + len(\n props_errors)))\n print(\"Runs that ended with '.cdgp.error': {0}\".format(len(\n props_cdgpError)))\n print_props_filenames(props_cdgpError)\n return props\n\n\ndef produce_status_matrix(dim, props):\n \"\"\"Generates a status data in the form of a python list. It can be\n later used to retry missing runs.\n\n :param dim: (Dimension) dimensions on which data are to be divided.\n :param props: (dict[str,str]) properties files.\n :return: (str) Python code of a list containing specified data.\n \"\"\"\n text = '['\n for config in dim:\n numRuns = len(config.filter_props(props))\n text += '({0}, {1}), '.format(config.stored_values, numRuns)\n return text + ']'\n\n\ndef save_listings(props, dim_rows, dim_cols):\n \"\"\"Saves listings of various useful info to separate text files.\"\"\"\n assert isinstance(dim_rows, Dim)\n assert isinstance(dim_cols, Dim)\n utils.ensure_dir('results/listings/errors/')\n for dr in dim_rows:\n bench = dr.get_caption()\n bench = bench[:bench.rfind('.')] if '.' 
in bench else bench\n f = open('results/listings/verified_{0}.txt'.format(bench), 'w')\n f_errors = open('results/listings/errors/verified_{0}.txt'.format(\n bench), 'w')\n props_bench = dr.filter_props(props)\n for dc in dim_cols:\n f.write('{0}\\n'.format(dc.get_caption()))\n f_errors.write('{0}\\n'.format(dc.get_caption()))\n props_final = [p for p in dc.filter_props(props_bench) if\n is_verified_solution(p)]\n for p in props_final:\n fname = p['thisFileName'].replace(\n '/home/ibladek/workspace/GECCO19/gecco19/', '')\n best = p['result.best']\n fit = float(p['result.best.mse'])\n if fit >= 1e-15:\n f.write('{0}\\t\\t\\t(FILE: {1}) (MSE: {2})\\n'.format(best,\n fname, fit))\n else:\n f.write('{0}\\t\\t\\t(FILE: {1})\\n'.format(best, fname))\n f.write('\\n\\n')\n f.close()\n f_errors.close()\n\n\ndef normalized_total_time(p, max_time=3600000):\n \"\"\"If time was longer than max_time, then return max_time, otherwise return time. Time is counted in miliseconds.\"\"\"\n if 'cdgp.wasTimeout' in p and p['cdgp.wasTimeout'] == 'true':\n v = 3600000\n else:\n v = int(float(p['result.totalTimeSystem']))\n return max_time if v > max_time else v\n\n\n<mask token>\n\n\ndef is_approximated_solution(p):\n \"\"\"Checks if the MSE was below the threshold.\"\"\"\n tr = float(p['optThreshold'])\n k = 'result.best.verificationDecision'\n return p['result.best.isOptimal'] == 'true' and p[k] == 'unsat'\n\n\ndef get_num_optimal(props):\n props2 = [p for p in props if is_verified_solution(p)]\n return len(props2)\n\n\ndef get_num_optimalOnlyMse(props):\n for p in props:\n if 'optThreshold' not in p:\n print(str(p))\n num = 0\n for p in props:\n if 'optThreshold' in p:\n tr = p['optThreshold']\n elif 'optThreshold' in p:\n tr = p['cdgp.optThreshold']\n else:\n raise Exception('No optThreshold in log file')\n if float(p['result.best.mse']) <= tr:\n num += 1\n return num\n\n\ndef get_num_allPropertiesMet(props):\n props2 = [p for p in props if p['result.best.verificationDecision'] ==\n 'unsat']\n return len(props2)\n\n\ndef get_num_computed(filtered):\n return len(filtered)\n\n\ndef fun_successRate_full(filtered):\n if len(filtered) == 0:\n return '-'\n num_opt = get_num_optimal(filtered)\n return '{0}/{1}'.format(str(num_opt), str(len(filtered)))\n\n\ndef get_successRate(filtered):\n num_opt = get_num_optimal(filtered)\n return float(num_opt) / float(len(filtered))\n\n\ndef fun_successRateMseOnly(filtered):\n if len(filtered) == 0:\n return '-'\n n = get_num_optimalOnlyMse(filtered)\n if n == 0:\n return '-'\n else:\n sr = n / float(len(filtered))\n return '{0}'.format('%0.2f' % round(sr, 2))\n\n\ndef fun_average_mse(filtered):\n res = 0.0\n num = 0\n for p in filtered:\n x = float(p['result.best.mse'])\n if not 'n' in str(x):\n res += x\n num += 1\n else:\n print('Nan encountered')\n if num == 0:\n return '-'\n else:\n return res / num\n\n\ndef fun_average_mse_sd(filtered):\n \"\"\"Returns average together with standard deviation.\"\"\"\n res = 0.0\n num = 0\n for p in filtered:\n x = float(p['result.best.mse'])\n if not 'n' in str(x):\n res += x\n num += 1\n else:\n print('Nan encountered')\n avg = res / num\n sd = 0.0\n for p in filtered:\n x = float(p['result.best.mse'])\n if not 'n' in str(x):\n sd += (x - avg) ** 2.0\n sd = math.sqrt(sd / num)\n if num == 0:\n return '-'\n else:\n return '${0} \\\\pm{1}$'.format(avg, sd)\n\n\ndef fun_successRate(filtered):\n if len(filtered) == 0:\n return '-'\n sr = get_successRate(filtered)\n return '{0}'.format('%0.2f' % round(sr, 2))\n\n\ndef 
fun_allPropertiesMet(filtered):\n if len(filtered) == 0:\n return '-'\n num_opt = get_num_allPropertiesMet(filtered)\n sr = float(num_opt) / float(len(filtered))\n return '{0}'.format('%0.2f' % round(sr, 2))\n\n\ndef get_stats_size(props):\n vals = [float(p['result.best.size']) for p in props]\n if len(vals) == 0:\n return '-'\n else:\n return str(int(round(np.mean(vals))))\n\n\ndef get_stats_sizeOnlySuccessful(props):\n vals = [float(p['result.best.size']) for p in props if\n is_verified_solution(p)]\n if len(vals) == 0:\n return '-'\n else:\n return str(int(round(np.mean(vals))))\n\n\ndef get_stats_maxSolverTime(props):\n if len(props) == 0 or 'solver.allTimesCountMap' not in props[0]:\n return '-'\n times = []\n for p in props:\n timesMap = p['solver.allTimesCountMap']\n parts = timesMap.split(', ')[-1].split(',')\n times.append(float(parts[0].replace('(', '')))\n return '%0.3f' % max(times)\n\n\ndef get_stats_avgSolverTime(props):\n if len(props) == 0 or 'solver.allTimesCountMap' not in props[0] or props[0\n ]['method'] != 'CDGP':\n return '-'\n sum = 0.0\n sumWeights = 0.0\n for p in props:\n timesMap = p['solver.allTimesCountMap']\n pairs = timesMap.split(', ')\n if len(pairs) == 0:\n continue\n for x in pairs:\n time = float(x.split(',')[0].replace('(', ''))\n weight = float(x.split(',')[1].replace(')', ''))\n sum += time * weight\n sumWeights += weight\n if sumWeights == 0.0:\n return '%0.3f' % 0.0\n else:\n return '%0.3f' % (sum / sumWeights)\n\n\ndef get_avgSolverTotalCalls(props):\n if len(props) == 0 or 'solver.totalCalls' not in props[0]:\n return '-'\n vals = [(float(p['solver.totalCalls']) / 1000.0) for p in props]\n return '%0.1f' % round(np.mean(vals), 1)\n\n\ndef get_numSolverCallsOverXs(props):\n if len(props) == 0 or 'solver.allTimesCountMap' not in props[0]:\n return '-'\n TRESHOLD = 0.5\n sum = 0\n for p in props:\n timesMap = p['solver.allTimesCountMap']\n pairs = timesMap.split(', ')\n if len(pairs) == 0:\n continue\n for x in pairs:\n time = float(x.split(',')[0].replace('(', ''))\n if time > TRESHOLD:\n weight = int(x.split(',')[1].replace(')', ''))\n sum += weight\n return sum\n\n\ndef get_avg_totalTests(props):\n vals = [float(p['tests.total']) for p in props]\n if len(vals) == 0:\n return '-'\n else:\n x = np.mean(vals)\n if x < 1e-05:\n x = 0.0\n return str(int(round(x)))\n\n\ndef get_avg_mse(props):\n vals = []\n for p in props:\n vals.append(float(p['result.best.mse']))\n if len(vals) == 0:\n return '-'\n else:\n return '%0.5f' % np.mean(vals)\n\n\ndef get_avg_runtime_helper(vals):\n if len(vals) == 0:\n return 'n/a'\n else:\n x = np.mean(vals)\n if x >= 10.0:\n return '%d' % x\n else:\n return '%0.1f' % x\n\n\ndef get_avg_runtimeOnlySuccessful(props):\n if len(props) == 0:\n return '-'\n else:\n vals = [(float(normalized_total_time(p, max_time=1800000)) / 1000.0\n ) for p in props if is_verified_solution(p)]\n return get_avg_runtime_helper(vals)\n\n\ndef get_avg_runtime(props):\n if len(props) == 0:\n return '-'\n else:\n vals = [(float(normalized_total_time(p, max_time=1800000)) / 1000.0\n ) for p in props]\n return get_avg_runtime_helper(vals)\n\n\ndef get_avg_generation(props):\n if len(props) == 0:\n return '-'\n if len(props) > 0 and 'result.totalGenerations' not in props[0]:\n return '-'\n vals = [float(p['result.totalGenerations']) for p in props]\n if len(vals) == 0:\n return '-'\n else:\n return str(int(round(np.mean(vals))))\n\n\ndef get_avg_generationSuccessful(props):\n if len(props) == 0:\n return '-'\n else:\n vals = 
[float(p['result.best.generation']) for p in props if\n is_verified_solution(p)]\n if len(vals) == 0:\n return 'n/a'\n else:\n return str(int(round(np.mean(vals))))\n\n\ndef get_avg_evaluated(props):\n if len(props) == 0:\n return '-'\n vals = []\n for p in props:\n if p['evolutionMode'] == 'steadyState':\n vals.append(float(p['result.totalGenerations']))\n else:\n vals.append(float(p['result.totalGenerations']) * float(p[\n 'populationSize']))\n return str(int(round(np.mean(vals))))\n\n\ndef get_avg_evaluatedSuccessful(props):\n if len(props) == 0:\n return '-'\n vals = []\n for p in props:\n if is_verified_solution(p):\n if p['evolutionMode'] == 'steadyState':\n vals.append(float(p['result.totalGenerations']))\n else:\n vals.append(float(p['result.totalGenerations']) * float(p[\n 'populationSize']))\n if len(vals) == 0:\n return 'n/a'\n else:\n return str(int(round(np.mean(vals))))\n\n\ndef get_avg_runtimePerProgram(props):\n if len(props) == 0:\n return '-'\n sAvgGen = get_avg_generation(props)\n if sAvgGen == '-' or sAvgGen is None:\n return '-'\n avgGen = float(sAvgGen)\n avgRuntime = float(get_avg_runtime(props))\n populationSize = float(props[0]['populationSize'])\n if props[0]['evolutionMode'] == 'steadyState':\n approxNumPrograms = populationSize + avgGen\n else:\n approxNumPrograms = populationSize * avgGen\n approxTimePerProgram = avgRuntime / approxNumPrograms\n return '%0.3f' % approxTimePerProgram\n\n\ndef get_sum_solverRestarts(props):\n if len(props) == 0:\n return '-'\n vals = [int(p['solver.totalRestarts']) for p in props if \n 'solver.totalRestarts' in p]\n if len(vals) != len(props):\n print('WARNING: solver.totalRestarts was not present in all files.')\n if len(vals) == 0:\n return '0'\n else:\n return str(np.sum(vals))\n\n\ndef print_solved_in_time(props, upper_time):\n if len(props) == 0:\n return\n solved = 0\n solvedRuns = 0\n num = 0\n for p in props:\n if p['result.best.isOptimal'] == 'false':\n continue\n num += 1\n if int(normalized_total_time(p, max_time=1800000)) <= upper_time:\n solved += 1\n for p in props:\n if int(normalized_total_time(p, max_time=1800000)) <= upper_time:\n solvedRuns += 1\n print('\\nRuns which ended under {0} s: {1} / {2} ({3} %)'.format(\n upper_time / 1000.0, solvedRuns, len(props), solvedRuns / len(props)))\n print('Optimal solutions found under {0} s: {1} / {2} ({3} %)\\n'.\n format(upper_time / 1000.0, solved, num, solved / num))\n",
"step-5": "import os\nimport math\nimport shutil\nfrom evoplotter import utils\nfrom evoplotter.dims import *\nfrom evoplotter import printer\nimport numpy as np\n\n\nCHECK_CORRECTNESS_OF_FILES = 1\nSTATUS_FILE_NAME = \"results/status.txt\"\nOPT_SOLUTIONS_FILE_NAME = \"opt_solutions.txt\"\n\n\n\nclass TableGenerator:\n \"\"\"Generates table from data.\"\"\"\n def __init__(self, f_cell, dim_rows, dim_cols, headerRowNames, title=\"\", color_scheme=None,\n table_postprocessor=None, vertical_border=1, table_variants=None,\n default_color_thresholds=None, layered_headline=True,\n only_nonempty_rows=True, **kwargs):\n self.f_cell = f_cell\n self.dim_rows = dim_rows\n self.dim_cols = dim_cols\n self.title = title\n self.color_scheme = color_scheme\n self.table_postprocessor = table_postprocessor\n self.vertical_border = vertical_border\n self.headerRowNames = headerRowNames\n # create a table for each variant and put them next to each other\n self.table_variants = table_variants if table_variants is not None else [lambda p: True]\n self.default_color_thresholds = default_color_thresholds\n self.layered_headline = layered_headline\n self.only_nonempty_rows = only_nonempty_rows\n self.init_kwargs = kwargs.copy()\n\n\n def apply(self, props, new_color_thresholds=None):\n text = \"\"\n for variant in self.table_variants: # each variant is some predicate on data\n props_variant = [p for p in props if variant(p)]\n if self.only_nonempty_rows:\n dim_rows_variant = Dim([c for c in self.dim_rows.configs if len(c.filter_props(props_variant)) > 0])\n else:\n dim_rows_variant = self.dim_rows\n\n txt = printer.latex_table(props_variant, dim_rows_variant, self.dim_cols, self.f_cell,\n layered_headline=self.layered_headline, vertical_border=self.vertical_border,\n headerRowNames=self.headerRowNames, **self.init_kwargs)\n txt = self.table_postprocessor(txt)\n ct = new_color_thresholds if new_color_thresholds is not None else self.default_color_thresholds\n if self.color_scheme is not None and ct is not None:\n cv0, cv1, cv2 = ct\n txt = printer.table_color_map(txt, cv0, cv1, cv2, \"colorLow\", \"colorMedium\", \"colorHigh\")\n\n text += r\"\\noindent\"\n text += txt\n return text\n\n\n\nclass Experiment:\n def __init__(self):\n self.tables = []\n self.listings = []\n\n\n\ndef delete_logs(props, pred, verbose=True, simulate=False):\n for p in props:\n if \"evoplotter.file\" in p and pred(p):\n path = p[\"evoplotter.file\"]\n if not simulate:\n os.remove(path)\n if verbose:\n print(\"File removed: {0}\".format(path))\n\n\ndef print_props_filenames(props):\n for p in props:\n if \"thisFileName\" in p:\n print(p[\"thisFileName\"])\n else:\n print(\"'thisFileName' not specified! 
Printing content instead: \" + str(p))\n\n\ndef create_errors_listing(error_props, filename):\n f = open(\"results/listings/{0}\".format(filename), \"w\")\n print(\"Creating log of errors ({0})...\".format(filename))\n for i, p in enumerate(error_props):\n if i > 0:\n f.write(\"\\n\" + (\"-\" * 50) + \"\\n\")\n for k in sorted(p.keys()):\n v = p[k]\n f.write(\"{0} = {1}\\n\".format(k, v))\n f.close()\n\n\ndef create_errors_solver_listing(error_props, filename, pred=None):\n if pred is None:\n pred = lambda x: True\n f = open(\"results/listings/{0}\".format(filename), \"w\")\n print(\"Creating log of errors ({0})...\".format(filename))\n for i, p in enumerate(error_props):\n if not pred(p): # ignore properties with certain features, e.g., types of errors\n continue\n\n if i > 0:\n f.write(\"\\n\" + (\"-\" * 50) + \"\\n\\n\")\n\n # read the whole original file, because multiline error messages are not preserved in dicts\n with open(p[\"evoplotter.file\"], 'r') as content_file:\n content = content_file.read()\n f.write(content)\n f.close()\n\n\ndef load_correct_props(folders):\n props_cdgpError = utils.load_properties_dirs(folders, exts=[\".cdgp.error\"], add_file_path=True)\n exts = [\".cdgp\"]\n props0 = utils.load_properties_dirs(folders, exts=exts, add_file_path=True)\n\n def is_correct(p):\n return \"result.best.verificationDecision\" in p\n\n # Filtering props so only correct ones are left\n props = [p for p in props0 if is_correct(p)]\n\n # print(\"Filtered (props):\")\n # for p in props:\n # if \"resistance_par3_c1_10\" in p[\"benchmark\"] and p[\"method\"] == \"CDGP\":\n # print(p[\"evoplotter.file\"])\n # print(\"Filtered (props_cdgpError):\")\n # for p in props_cdgpError:\n # if \"resistance_par3_c1_10\" in p[\"benchmark\"] and p[\"method\"] == \"CDGP\":\n # print(p[\"evoplotter.file\"])\n\n # Clear log file\n # print(\"[del] props\")\n # fun = lambda p: p[\"method\"] == \"CDGP\" and p[\"partialConstraintsInFitness\"] == \"true\"\n # delete_logs(props, fun, simulate=True)\n # print(\"[del] props_cdgpError\")\n # delete_logs(props_cdgpError, fun, simulate=True)\n\n\n create_errors_solver_listing(props_cdgpError, \"errors_solver.txt\")\n\n # Printing names of files which finished with error status or are incomplete.\n if CHECK_CORRECTNESS_OF_FILES:\n props_errors = [p for p in props0 if not is_correct(p)]\n create_errors_listing(props_errors, \"errors_run.txt\")\n if len(props_errors) > 0:\n print(\"Files with error status:\")\n print_props_filenames(props_errors)\n print(\"Loaded: {0} correct property files, {1} incorrect; All log files: {2}\".format(len(props), len(props_errors), len(props) + len\n (props_errors)))\n print(\"Runs that ended with '.cdgp.error': {0}\".format(len(props_cdgpError)))\n print_props_filenames(props_cdgpError)\n return props\n\n\ndef produce_status_matrix(dim, props):\n \"\"\"Generates a status data in the form of a python list. 
It can be\n later used to retry missing runs.\n\n :param dim: (Dimension) dimensions on which data are to be divided.\n :param props: (dict[str,str]) properties files.\n :return: (str) Python code of a list containing specified data.\n \"\"\"\n text = \"[\"\n for config in dim:\n numRuns = len(config.filter_props(props))\n text += \"({0}, {1}), \".format(config.stored_values, numRuns)\n return text + \"]\"\n\n\n\ndef save_listings(props, dim_rows, dim_cols):\n \"\"\"Saves listings of various useful info to separate text files.\"\"\"\n assert isinstance(dim_rows, Dim)\n assert isinstance(dim_cols, Dim)\n utils.ensure_dir(\"results/listings/errors/\")\n\n # Saving optimal verified solutions\n for dr in dim_rows:\n bench = dr.get_caption()\n bench = bench[:bench.rfind(\".\")] if \".\" in bench else bench\n f = open(\"results/listings/verified_{0}.txt\".format(bench), \"w\")\n f_errors = open(\"results/listings/errors/verified_{0}.txt\".format(bench), \"w\")\n\n props_bench = dr.filter_props(props)\n for dc in dim_cols:\n f.write(\"{0}\\n\".format(dc.get_caption()))\n f_errors.write(\"{0}\\n\".format(dc.get_caption())) # TODO: finish\n props_final = [p for p in dc.filter_props(props_bench) if is_verified_solution(p)]\n\n for p in props_final:\n fname = p[\"thisFileName\"].replace(\"/home/ibladek/workspace/GECCO19/gecco19/\", \"\")\n best = p[\"result.best\"]\n fit = float(p[\"result.best.mse\"])\n if fit >= 1e-15:\n f.write(\"{0}\\t\\t\\t(FILE: {1}) (MSE: {2})\\n\".format(best, fname, fit))\n else:\n f.write(\"{0}\\t\\t\\t(FILE: {1})\\n\".format(best, fname))\n\n f.write(\"\\n\\n\")\n f.close()\n f_errors.close()\n\n\n\ndef normalized_total_time(p, max_time=3600000):\n \"\"\"If time was longer than max_time, then return max_time, otherwise return time. 
Time is counted in miliseconds.\"\"\"\n if \"cdgp.wasTimeout\" in p and p[\"cdgp.wasTimeout\"] == \"true\":\n v = 3600000\n else:\n v = int(float(p[\"result.totalTimeSystem\"]))\n return max_time if v > max_time else v\n\ndef is_verified_solution(p):\n k = \"result.best.verificationDecision\"\n return p[\"result.best.isOptimal\"] == \"true\" and p[k] == \"unsat\"\n\ndef is_approximated_solution(p):\n \"\"\"Checks if the MSE was below the threshold.\"\"\"\n tr = float(p[\"optThreshold\"])\n # TODO: finish\n k = \"result.best.verificationDecision\"\n return p[\"result.best.isOptimal\"] == \"true\" and p[k] == \"unsat\"\n\ndef get_num_optimal(props):\n props2 = [p for p in props if is_verified_solution(p)]\n return len(props2)\ndef get_num_optimalOnlyMse(props):\n # \"cdgp.optThreshold\" in p and\n for p in props:\n if \"optThreshold\" not in p:\n print(str(p))\n # Sometimes it is 'optThreshold', and sometimes 'cdgp.optThreshold'...\n # props2 = [p for p in props if float(p[\"result.best.mse\"]) <= float(p[\"optThreshold\"])]\n num = 0\n for p in props:\n if \"optThreshold\" in p:\n tr = p[\"optThreshold\"]\n elif \"optThreshold\" in p:\n tr = p[\"cdgp.optThreshold\"]\n else:\n raise Exception(\"No optThreshold in log file\")\n if float(p[\"result.best.mse\"]) <= tr:\n num += 1\n return num\n\ndef get_num_allPropertiesMet(props):\n props2 = [p for p in props if p[\"result.best.verificationDecision\"] == \"unsat\"]\n return len(props2)\n\ndef get_num_computed(filtered):\n return len(filtered)\ndef fun_successRate_full(filtered):\n if len(filtered) == 0:\n return \"-\"\n num_opt = get_num_optimal(filtered)\n return \"{0}/{1}\".format(str(num_opt), str(len(filtered)))\ndef get_successRate(filtered):\n num_opt = get_num_optimal(filtered)\n return float(num_opt) / float(len(filtered))\ndef fun_successRateMseOnly(filtered):\n if len(filtered) == 0:\n return \"-\"\n n = get_num_optimalOnlyMse(filtered)\n if n == 0:\n return \"-\"\n else:\n sr = n / float(len(filtered))\n return \"{0}\".format(\"%0.2f\" % round(sr, 2))\ndef fun_average_mse(filtered):\n res = 0.0\n num = 0\n # Sometimes there was \"inf\" in the results. We will ignore those elements.\n for p in filtered:\n x = float(p[\"result.best.mse\"])\n if not \"n\" in str(x):\n res += x\n num += 1\n else:\n print(\"Nan encountered\")\n if num == 0:\n return \"-\"\n else:\n return res / num\ndef fun_average_mse_sd(filtered):\n \"\"\"Returns average together with standard deviation.\"\"\"\n res = 0.0\n num = 0\n # Sometimes there was \"inf\" in the results. 
We will ignore those elements.\n for p in filtered:\n x = float(p[\"result.best.mse\"])\n if not \"n\" in str(x):\n res += x\n num += 1\n else:\n print(\"Nan encountered\")\n avg = res / num\n sd = 0.0\n for p in filtered:\n x = float(p[\"result.best.mse\"])\n if not \"n\" in str(x):\n sd += (x - avg) ** 2.0\n sd = math.sqrt(sd / num)\n if num == 0:\n return \"-\"\n else:\n return r\"${0} \\pm{1}$\".format(avg, sd)\n\ndef fun_successRate(filtered):\n if len(filtered) == 0:\n return \"-\"\n sr = get_successRate(filtered)\n return \"{0}\".format(\"%0.2f\" % round(sr, 2))\ndef fun_allPropertiesMet(filtered):\n if len(filtered) == 0:\n return \"-\"\n num_opt = get_num_allPropertiesMet(filtered)\n sr = float(num_opt) / float(len(filtered))\n return \"{0}\".format(\"%0.2f\" % round(sr, 2))\ndef get_stats_size(props):\n vals = [float(p[\"result.best.size\"]) for p in props]\n if len(vals) == 0:\n return \"-\"#-1.0, -1.0\n else:\n return str(int(round(np.mean(vals)))) #, np.std(vals)\ndef get_stats_sizeOnlySuccessful(props):\n vals = [float(p[\"result.best.size\"]) for p in props if is_verified_solution(p)]\n if len(vals) == 0:\n return \"-\"#-1.0, -1.0\n else:\n return str(int(round(np.mean(vals)))) #, np.std(vals)\ndef get_stats_maxSolverTime(props):\n if len(props) == 0 or \"solver.allTimesCountMap\" not in props[0]:\n return \"-\"\n times = []\n for p in props:\n timesMap = p[\"solver.allTimesCountMap\"]\n parts = timesMap.split(\", \")[-1].split(\",\")\n times.append(float(parts[0].replace(\"(\", \"\")))\n return \"%0.3f\" % max(times)\ndef get_stats_avgSolverTime(props):\n if len(props) == 0 or \"solver.allTimesCountMap\" not in props[0] or props[0][\"method\"] != \"CDGP\":\n return \"-\"\n sum = 0.0\n sumWeights = 0.0\n for p in props:\n timesMap = p[\"solver.allTimesCountMap\"]\n pairs = timesMap.split(\", \")\n if len(pairs) == 0:\n continue\n for x in pairs:\n time = float(x.split(\",\")[0].replace(\"(\", \"\"))\n weight = float(x.split(\",\")[1].replace(\")\", \"\"))\n sum += time * weight\n sumWeights += weight\n if sumWeights == 0.0:\n return \"%0.3f\" % 0.0\n else:\n return \"%0.3f\" % (sum / sumWeights)\ndef get_avgSolverTotalCalls(props):\n if len(props) == 0 or \"solver.totalCalls\" not in props[0]:\n return \"-\"\n vals = [float(p[\"solver.totalCalls\"]) / 1000.0 for p in props]\n return \"%0.1f\" % round(np.mean(vals), 1) # \"%d\"\ndef get_numSolverCallsOverXs(props):\n if len(props) == 0 or \"solver.allTimesCountMap\" not in props[0]:\n return \"-\"\n TRESHOLD = 0.5\n sum = 0\n for p in props:\n timesMap = p[\"solver.allTimesCountMap\"]\n pairs = timesMap.split(\", \")\n if len(pairs) == 0:\n continue\n for x in pairs:\n time = float(x.split(\",\")[0].replace(\"(\", \"\"))\n if time > TRESHOLD:\n # print(\"Name of file: \" + p[\"thisFileName\"])\n weight = int(x.split(\",\")[1].replace(\")\", \"\"))\n sum += weight\n return sum\ndef get_avg_totalTests(props):\n vals = [float(p[\"tests.total\"]) for p in props]\n if len(vals) == 0:\n return \"-\" # -1.0, -1.0\n else:\n x = np.mean(vals)\n if x < 1e-5:\n x = 0.0\n return str(int(round(x))) #\"%0.1f\" % x\ndef get_avg_mse(props):\n vals = []\n for p in props:\n vals.append(float(p[\"result.best.mse\"]))\n if len(vals) == 0:\n return \"-\" # -1.0, -1.0\n else:\n return \"%0.5f\" % np.mean(vals) # , np.std(vals)\ndef get_avg_runtime_helper(vals):\n if len(vals) == 0:\n return \"n/a\" # -1.0, -1.0\n else:\n x = np.mean(vals)\n if x >= 10.0:\n return \"%d\" % x\n else:\n return \"%0.1f\" % x # , np.std(vals)\ndef 
get_avg_runtimeOnlySuccessful(props):\n if len(props) == 0:\n return \"-\"\n else:\n vals = [float(normalized_total_time(p, max_time=1800000)) / 1000.0 for p in props if is_verified_solution(p)]\n return get_avg_runtime_helper(vals)\ndef get_avg_runtime(props):\n if len(props) == 0:\n return \"-\"\n else:\n vals = [float(normalized_total_time(p, max_time=1800000)) / 1000.0 for p in props]\n return get_avg_runtime_helper(vals)\ndef get_avg_generation(props):\n if len(props) == 0:\n return \"-\"\n if len(props) > 0 and \"result.totalGenerations\" not in props[0]:\n return \"-\"\n vals = [float(p[\"result.totalGenerations\"]) for p in props]\n if len(vals) == 0:\n return \"-\"\n else:\n return str(int(round(np.mean(vals)))) #\"%0.1f\" % np.mean(vals) # , np.std(vals)\ndef get_avg_generationSuccessful(props):\n if len(props) == 0:\n return \"-\"\n else:\n vals = [float(p[\"result.best.generation\"]) for p in props if is_verified_solution(p)]\n if len(vals) == 0:\n return \"n/a\" # -1.0, -1.0\n else:\n return str(int(round(np.mean(vals)))) # \"%0.1f\" % np.mean(vals) # , np.std(vals)\ndef get_avg_evaluated(props):\n if len(props) == 0:\n return \"-\"\n vals = []\n for p in props:\n if p[\"evolutionMode\"] == \"steadyState\":\n vals.append(float(p[\"result.totalGenerations\"]))\n else:\n vals.append(float(p[\"result.totalGenerations\"]) * float(p[\"populationSize\"]))\n return str(int(round(np.mean(vals)))) #\"%0.1f\" % np.mean(vals) # , np.std(vals)\ndef get_avg_evaluatedSuccessful(props):\n if len(props) == 0:\n return \"-\"\n vals = []\n for p in props:\n if is_verified_solution(p):\n if p[\"evolutionMode\"] == \"steadyState\":\n vals.append(float(p[\"result.totalGenerations\"]))\n else:\n vals.append(float(p[\"result.totalGenerations\"]) * float(p[\"populationSize\"]))\n if len(vals) == 0:\n return \"n/a\" # -1.0, -1.0\n else:\n return str(int(round(np.mean(vals)))) # \"%0.1f\" % np.mean(vals) # , np.std(vals)\ndef get_avg_runtimePerProgram(props):\n if len(props) == 0:\n return \"-\" # -1.0, -1.0\n sAvgGen = get_avg_generation(props)\n if sAvgGen == \"-\" or sAvgGen is None:\n return \"-\"\n avgGen = float(sAvgGen) # avg number of generations in all runs\n avgRuntime = float(get_avg_runtime(props)) # avg runtime of all runs\n populationSize = float(props[0][\"populationSize\"])\n if props[0][\"evolutionMode\"] == \"steadyState\":\n approxNumPrograms = populationSize + avgGen # in steady state we have many generations, but in each of them created is one new program\n else:\n approxNumPrograms = populationSize * avgGen\n approxTimePerProgram = avgRuntime / approxNumPrograms\n return \"%0.3f\" % approxTimePerProgram\ndef get_sum_solverRestarts(props):\n if len(props) == 0:\n return \"-\"\n vals = [int(p[\"solver.totalRestarts\"]) for p in props if \"solver.totalRestarts\" in p]\n if len(vals) != len(props):\n print(\"WARNING: solver.totalRestarts was not present in all files.\")\n if len(vals) == 0:\n return \"0\"\n else:\n return str(np.sum(vals))\n\ndef print_solved_in_time(props, upper_time):\n if len(props) == 0:\n return\n # totalTimeSystem is in miliseconds\n solved = 0\n solvedRuns = 0\n num = 0\n for p in props:\n if p[\"result.best.isOptimal\"] == \"false\":\n continue\n num += 1\n if int(normalized_total_time(p, max_time=1800000)) <= upper_time:\n solved += 1\n\n for p in props:\n if int(normalized_total_time(p, max_time=1800000)) <= upper_time:\n solvedRuns += 1\n print(\"\\nRuns which ended under {0} s: {1} / {2} ({3} %)\".format(upper_time / 1000.0, solvedRuns, len(props), 
solvedRuns / len(props)))\n print(\"Optimal solutions found under {0} s: {1} / {2} ({3} %)\\n\".format(upper_time / 1000.0, solved, num, solved / num))\n",
"step-ids": [
22,
30,
37,
44,
48
]
}
|
[
22,
30,
37,
44,
48
] |
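Editorial note: the record above aggregates CDGP experiment logs stored as flat dicts of string-valued properties. Below is a minimal sketch of how its success-rate helpers compose, with invented property dicts standing in for real log files (only the two keys used here are taken from the record):

def is_verified_solution(p):
    # A run counts as solved when the best solution is optimal and verified.
    return (p["result.best.isOptimal"] == "true"
            and p["result.best.verificationDecision"] == "unsat")

def fun_successRate(props):
    if len(props) == 0:
        return "-"
    num_opt = sum(1 for p in props if is_verified_solution(p))
    return "%0.2f" % round(num_opt / float(len(props)), 2)

# Three fabricated runs: one verified optimum out of three -> prints "0.33".
runs = [
    {"result.best.isOptimal": "true",  "result.best.verificationDecision": "unsat"},
    {"result.best.isOptimal": "true",  "result.best.verificationDecision": "sat"},
    {"result.best.isOptimal": "false", "result.best.verificationDecision": "unsat"},
]
print(fun_successRate(runs))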
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Feb 1 17:08:06 2023
@author: Alice Wells
Plotting script for Figure 10 in Wells et al., 2023
Aerosol extinction coefficient vertical profile averaged longitudinally.
Averaged monthly CALIOP (centre) aerosol extinction coefficient vertical
profiles (night retrievals only) with monthly average tropopause height
(solid black). UKESM1 SO2 only (left) and SO2+ash (right) simulations with
imposed CALIOP minimum retrieval limits and mask.
"""
# =============================================================================
# Import functions
# =============================================================================
import numpy as np
import matplotlib.pyplot as plt
import calendar
import matplotlib.colors as colors
import matplotlib.cm as mpl_cm
# =============================================================================
# Load data
# =============================================================================
#CALIOP observations
caliop = np.load('caliop_perturbation_daily_zonal_average_extinction_532nm.npy')
#CALIOP tropopause height
caliop_tph = np.load('calipso_daily_zonal_average_tropopause_height.npy')
#Model SO2+ash with CALIOP limits imposed
so2_ash = np.load('SO2_ash_perturbation_monthly_zonal_average_extinction_caliop_limits_532nm_1x1deg.npy')
#Model SO2only with CALIOP limits imposed
so2_only = np.load('SO2_only_perturbation_monthly_zonal_average_extinction_caliop_limits_532nm_1x1deg.npy')
#Model altitude profile
model_alts = np.load('Model_altitude.npy')
model_alts[0] = 0
#Model tropopause height
model_tph = np.load('Model_monthly_zonal_average_tropopause_height.npy')
# =============================================================================
# Create the caliop model mask
# =============================================================================
#Find model points only where calipso data exists
caliop_mask = np.nanmean(caliop, axis = (1,2))
mask = np.ones( (181, 12) )
mask[np.isnan(caliop_mask)] = np.nan
#Mask the model data
so2_ash_masked = np.zeros( (181, 85, 12) )
so2_only_masked = np.zeros( (181, 85, 12) )
for i in range(85):
so2_ash_masked[:, i, :] = so2_ash[:, i, :] * mask
so2_only_masked[:, i, :] = so2_only[:, i, :] * mask
masked_tph = model_tph * mask
# =============================================================================
# Define altitude profile
# =============================================================================
alts1 = np.linspace(-500, 20200, 346)
alts2 = np.linspace(20380, 29740, 53)
caliop_alts = np.hstack( (alts1, alts2) )/1000
#Define latitude coordinates
latitude = range(-90, 91)
#Create months for plotting dates
months = calendar.month_name[6:13] + calendar.month_name[1:6]
#Calculate monthly average for CALIOP
caliop_monthly_mean = np.nanmean(caliop[:, :, :, :], axis = 2)
caliop_monthly_tph = np.nanmean(caliop_tph, axis = 1)
# =============================================================================
# Plotting
# =============================================================================
params = {'legend.fontsize': 25,
'axes.labelsize': 30,
'axes.titlesize':35,
'axes.linewidth':3,
'axes.grid': True,
'xtick.labelsize':25,
'ytick.labelsize':25,
'xtick.major.size': 8,
'xtick.minor.size': 5,
'xtick.minor.visible':True,
'ytick.major.size':8,
'ytick.minor.size':5,
'ytick.minor.visible':True,
'lines.linewidth': 4}
plt.rcParams.update(params)
fig = plt.figure(figsize = (37, 38))
gs = fig.add_gridspec(6, 4, width_ratios = [25, 25, 25, 5])
fig.text(0.5, 0.08, 'Latitude', ha = 'center', va = 'center', fontsize = 35, fontweight = 'bold')
fig.text(0.08, 0.5, 'Altitude [km]', ha = 'center', va = 'center', rotation = 'vertical', fontsize = 35, fontweight = 'bold')
col_map = mpl_cm.get_cmap('plasma')
lvs = np.linspace(0, 1.2, 13)
norm = colors.BoundaryNorm(lvs, col_map.N)
i = 1
for n in range(6):
ax1 = fig.add_subplot(gs[n, 0])
ax1.contourf(latitude, model_alts, np.transpose(so2_only_masked[:, :, n+1]*100), cmap = col_map, levels = lvs, norm = norm, extend = 'both')
ax1.plot(latitude, masked_tph[:, n+1]/1000, linewidth = 4, color = 'k')
ax1.set_xlim([25, 85])
ax1.set_ylim([5, 20])
ax1.grid(which = 'minor', axis = 'y', alpha = 0.2)
ax1.grid(which = 'minor', axis = 'x', alpha = 0.2)
ax1.set_title('UKESM1 SO2only ' + months[n+1], fontweight = 'bold', fontsize = 25)
ax2 = fig.add_subplot(gs[n, 1])
ax2.contourf(latitude, caliop_alts, np.transpose(caliop_monthly_mean[:, :, n+1]*100000), cmap = col_map, levels = lvs, norm = norm, extend = 'both')
ax2.plot(latitude, caliop_monthly_tph[:, n+1], linewidth = 4, color = 'k')
ax2.set_xlim([25, 85])
ax2.set_ylim([5, 20])
ax2.grid(which = 'minor', axis = 'y', alpha = 0.2)
ax2.grid(which = 'minor', axis = 'x', alpha = 0.2)
ax2.set_title('CALIOP ' + months[n+1], fontweight = 'bold', fontsize = 25)
ax3 = fig.add_subplot(gs[n, 2])
cb = ax3.contourf(latitude, model_alts, np.transpose(so2_ash_masked[:, :, n+1]*100), cmap = col_map, levels = lvs, norm = norm, extend = 'both')
ax3.plot(latitude, masked_tph[:, n+1]/1000, linewidth = 4, color = 'k')
ax3.set_xlim([25, 85])
ax3.set_ylim([5, 20])
ax3.grid(which = 'minor', axis = 'y', alpha = 0.2)
ax3.grid(which = 'minor', axis = 'x', alpha = 0.2)
ax3.set_title('UKESM1 SO2+ash ' + months[n+1], fontweight = 'bold', fontsize = 25)
cax = fig.add_subplot(gs[:, -1])
plt.colorbar(cb, cax=cax, orientation = 'vertical', label = 'Aerosol extinction coefficient [$x10^{-2}$ km$^{-1}$]')
i = i + 4
plt.savefig('Figure10.png', dpi = 300)
plt.show()
|
normal
|
{
"blob_id": "e77c855ba87bc36ab09b0a3eca5c1b7123535794",
"index": 2802,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nfor i in range(85):\n so2_ash_masked[:, i, :] = so2_ash[:, i, :] * mask\n so2_only_masked[:, i, :] = so2_only[:, i, :] * mask\n<mask token>\nplt.rcParams.update(params)\n<mask token>\nfig.text(0.5, 0.08, 'Latitude', ha='center', va='center', fontsize=35,\n fontweight='bold')\nfig.text(0.08, 0.5, 'Altitude [km]', ha='center', va='center', rotation=\n 'vertical', fontsize=35, fontweight='bold')\n<mask token>\nfor n in range(6):\n ax1 = fig.add_subplot(gs[n, 0])\n ax1.contourf(latitude, model_alts, np.transpose(so2_only_masked[:, :, n +\n 1] * 100), cmap=col_map, levels=lvs, norm=norm, extend='both')\n ax1.plot(latitude, masked_tph[:, n + 1] / 1000, linewidth=4, color='k')\n ax1.set_xlim([25, 85])\n ax1.set_ylim([5, 20])\n ax1.grid(which='minor', axis='y', alpha=0.2)\n ax1.grid(which='minor', axis='x', alpha=0.2)\n ax1.set_title('UKESM1 SO2only ' + months[n + 1], fontweight='bold',\n fontsize=25)\n ax2 = fig.add_subplot(gs[n, 1])\n ax2.contourf(latitude, caliop_alts, np.transpose(caliop_monthly_mean[:,\n :, n + 1] * 100000), cmap=col_map, levels=lvs, norm=norm, extend='both'\n )\n ax2.plot(latitude, caliop_monthly_tph[:, n + 1], linewidth=4, color='k')\n ax2.set_xlim([25, 85])\n ax2.set_ylim([5, 20])\n ax2.grid(which='minor', axis='y', alpha=0.2)\n ax2.grid(which='minor', axis='x', alpha=0.2)\n ax2.set_title('CALIOP ' + months[n + 1], fontweight='bold', fontsize=25)\n ax3 = fig.add_subplot(gs[n, 2])\n cb = ax3.contourf(latitude, model_alts, np.transpose(so2_ash_masked[:,\n :, n + 1] * 100), cmap=col_map, levels=lvs, norm=norm, extend='both')\n ax3.plot(latitude, masked_tph[:, n + 1] / 1000, linewidth=4, color='k')\n ax3.set_xlim([25, 85])\n ax3.set_ylim([5, 20])\n ax3.grid(which='minor', axis='y', alpha=0.2)\n ax3.grid(which='minor', axis='x', alpha=0.2)\n ax3.set_title('UKESM1 SO2+ash ' + months[n + 1], fontweight='bold',\n fontsize=25)\n cax = fig.add_subplot(gs[:, -1])\n plt.colorbar(cb, cax=cax, orientation='vertical', label=\n 'Aerosol extinction coefficient [$x10^{-2}$ km$^{-1}$]')\n i = i + 4\nplt.savefig('Figure10.png', dpi=300)\nplt.show()\n",
"step-3": "<mask token>\ncaliop = np.load('caliop_perturbation_daily_zonal_average_extinction_532nm.npy'\n )\ncaliop_tph = np.load('calipso_daily_zonal_average_tropopause_height.npy')\nso2_ash = np.load(\n 'SO2_ash_perturbation_monthly_zonal_average_extinction_caliop_limits_532nm_1x1deg.npy'\n )\nso2_only = np.load(\n 'SO2_only_perturbation_monthly_zonal_average_extinction_caliop_limits_532nm_1x1deg.npy'\n )\nmodel_alts = np.load('Model_altitude.npy')\nmodel_alts[0] = 0\nmodel_tph = np.load('Model_monthly_zonal_average_tropopause_height.npy')\ncaliop_mask = np.nanmean(caliop, axis=(1, 2))\nmask = np.ones((181, 12))\nmask[np.isnan(caliop_mask)] = np.nan\nso2_ash_masked = np.zeros((181, 85, 12))\nso2_only_masked = np.zeros((181, 85, 12))\nfor i in range(85):\n so2_ash_masked[:, i, :] = so2_ash[:, i, :] * mask\n so2_only_masked[:, i, :] = so2_only[:, i, :] * mask\nmasked_tph = model_tph * mask\nalts1 = np.linspace(-500, 20200, 346)\nalts2 = np.linspace(20380, 29740, 53)\ncaliop_alts = np.hstack((alts1, alts2)) / 1000\nlatitude = range(-90, 91)\nmonths = calendar.month_name[6:13] + calendar.month_name[1:6]\ncaliop_monthly_mean = np.nanmean(caliop[:, :, :, :], axis=2)\ncaliop_monthly_tph = np.nanmean(caliop_tph, axis=1)\nparams = {'legend.fontsize': 25, 'axes.labelsize': 30, 'axes.titlesize': 35,\n 'axes.linewidth': 3, 'axes.grid': True, 'xtick.labelsize': 25,\n 'ytick.labelsize': 25, 'xtick.major.size': 8, 'xtick.minor.size': 5,\n 'xtick.minor.visible': True, 'ytick.major.size': 8, 'ytick.minor.size':\n 5, 'ytick.minor.visible': True, 'lines.linewidth': 4}\nplt.rcParams.update(params)\nfig = plt.figure(figsize=(37, 38))\ngs = fig.add_gridspec(6, 4, width_ratios=[25, 25, 25, 5])\nfig.text(0.5, 0.08, 'Latitude', ha='center', va='center', fontsize=35,\n fontweight='bold')\nfig.text(0.08, 0.5, 'Altitude [km]', ha='center', va='center', rotation=\n 'vertical', fontsize=35, fontweight='bold')\ncol_map = mpl_cm.get_cmap('plasma')\nlvs = np.linspace(0, 1.2, 13)\nnorm = colors.BoundaryNorm(lvs, col_map.N)\ni = 1\nfor n in range(6):\n ax1 = fig.add_subplot(gs[n, 0])\n ax1.contourf(latitude, model_alts, np.transpose(so2_only_masked[:, :, n +\n 1] * 100), cmap=col_map, levels=lvs, norm=norm, extend='both')\n ax1.plot(latitude, masked_tph[:, n + 1] / 1000, linewidth=4, color='k')\n ax1.set_xlim([25, 85])\n ax1.set_ylim([5, 20])\n ax1.grid(which='minor', axis='y', alpha=0.2)\n ax1.grid(which='minor', axis='x', alpha=0.2)\n ax1.set_title('UKESM1 SO2only ' + months[n + 1], fontweight='bold',\n fontsize=25)\n ax2 = fig.add_subplot(gs[n, 1])\n ax2.contourf(latitude, caliop_alts, np.transpose(caliop_monthly_mean[:,\n :, n + 1] * 100000), cmap=col_map, levels=lvs, norm=norm, extend='both'\n )\n ax2.plot(latitude, caliop_monthly_tph[:, n + 1], linewidth=4, color='k')\n ax2.set_xlim([25, 85])\n ax2.set_ylim([5, 20])\n ax2.grid(which='minor', axis='y', alpha=0.2)\n ax2.grid(which='minor', axis='x', alpha=0.2)\n ax2.set_title('CALIOP ' + months[n + 1], fontweight='bold', fontsize=25)\n ax3 = fig.add_subplot(gs[n, 2])\n cb = ax3.contourf(latitude, model_alts, np.transpose(so2_ash_masked[:,\n :, n + 1] * 100), cmap=col_map, levels=lvs, norm=norm, extend='both')\n ax3.plot(latitude, masked_tph[:, n + 1] / 1000, linewidth=4, color='k')\n ax3.set_xlim([25, 85])\n ax3.set_ylim([5, 20])\n ax3.grid(which='minor', axis='y', alpha=0.2)\n ax3.grid(which='minor', axis='x', alpha=0.2)\n ax3.set_title('UKESM1 SO2+ash ' + months[n + 1], fontweight='bold',\n fontsize=25)\n cax = fig.add_subplot(gs[:, -1])\n plt.colorbar(cb, cax=cax, 
orientation='vertical', label=\n 'Aerosol extinction coefficient [$x10^{-2}$ km$^{-1}$]')\n i = i + 4\nplt.savefig('Figure10.png', dpi=300)\nplt.show()\n",
"step-4": "<mask token>\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport calendar\nimport matplotlib.colors as colors\nimport matplotlib.cm as mpl_cm\ncaliop = np.load('caliop_perturbation_daily_zonal_average_extinction_532nm.npy'\n )\ncaliop_tph = np.load('calipso_daily_zonal_average_tropopause_height.npy')\nso2_ash = np.load(\n 'SO2_ash_perturbation_monthly_zonal_average_extinction_caliop_limits_532nm_1x1deg.npy'\n )\nso2_only = np.load(\n 'SO2_only_perturbation_monthly_zonal_average_extinction_caliop_limits_532nm_1x1deg.npy'\n )\nmodel_alts = np.load('Model_altitude.npy')\nmodel_alts[0] = 0\nmodel_tph = np.load('Model_monthly_zonal_average_tropopause_height.npy')\ncaliop_mask = np.nanmean(caliop, axis=(1, 2))\nmask = np.ones((181, 12))\nmask[np.isnan(caliop_mask)] = np.nan\nso2_ash_masked = np.zeros((181, 85, 12))\nso2_only_masked = np.zeros((181, 85, 12))\nfor i in range(85):\n so2_ash_masked[:, i, :] = so2_ash[:, i, :] * mask\n so2_only_masked[:, i, :] = so2_only[:, i, :] * mask\nmasked_tph = model_tph * mask\nalts1 = np.linspace(-500, 20200, 346)\nalts2 = np.linspace(20380, 29740, 53)\ncaliop_alts = np.hstack((alts1, alts2)) / 1000\nlatitude = range(-90, 91)\nmonths = calendar.month_name[6:13] + calendar.month_name[1:6]\ncaliop_monthly_mean = np.nanmean(caliop[:, :, :, :], axis=2)\ncaliop_monthly_tph = np.nanmean(caliop_tph, axis=1)\nparams = {'legend.fontsize': 25, 'axes.labelsize': 30, 'axes.titlesize': 35,\n 'axes.linewidth': 3, 'axes.grid': True, 'xtick.labelsize': 25,\n 'ytick.labelsize': 25, 'xtick.major.size': 8, 'xtick.minor.size': 5,\n 'xtick.minor.visible': True, 'ytick.major.size': 8, 'ytick.minor.size':\n 5, 'ytick.minor.visible': True, 'lines.linewidth': 4}\nplt.rcParams.update(params)\nfig = plt.figure(figsize=(37, 38))\ngs = fig.add_gridspec(6, 4, width_ratios=[25, 25, 25, 5])\nfig.text(0.5, 0.08, 'Latitude', ha='center', va='center', fontsize=35,\n fontweight='bold')\nfig.text(0.08, 0.5, 'Altitude [km]', ha='center', va='center', rotation=\n 'vertical', fontsize=35, fontweight='bold')\ncol_map = mpl_cm.get_cmap('plasma')\nlvs = np.linspace(0, 1.2, 13)\nnorm = colors.BoundaryNorm(lvs, col_map.N)\ni = 1\nfor n in range(6):\n ax1 = fig.add_subplot(gs[n, 0])\n ax1.contourf(latitude, model_alts, np.transpose(so2_only_masked[:, :, n +\n 1] * 100), cmap=col_map, levels=lvs, norm=norm, extend='both')\n ax1.plot(latitude, masked_tph[:, n + 1] / 1000, linewidth=4, color='k')\n ax1.set_xlim([25, 85])\n ax1.set_ylim([5, 20])\n ax1.grid(which='minor', axis='y', alpha=0.2)\n ax1.grid(which='minor', axis='x', alpha=0.2)\n ax1.set_title('UKESM1 SO2only ' + months[n + 1], fontweight='bold',\n fontsize=25)\n ax2 = fig.add_subplot(gs[n, 1])\n ax2.contourf(latitude, caliop_alts, np.transpose(caliop_monthly_mean[:,\n :, n + 1] * 100000), cmap=col_map, levels=lvs, norm=norm, extend='both'\n )\n ax2.plot(latitude, caliop_monthly_tph[:, n + 1], linewidth=4, color='k')\n ax2.set_xlim([25, 85])\n ax2.set_ylim([5, 20])\n ax2.grid(which='minor', axis='y', alpha=0.2)\n ax2.grid(which='minor', axis='x', alpha=0.2)\n ax2.set_title('CALIOP ' + months[n + 1], fontweight='bold', fontsize=25)\n ax3 = fig.add_subplot(gs[n, 2])\n cb = ax3.contourf(latitude, model_alts, np.transpose(so2_ash_masked[:,\n :, n + 1] * 100), cmap=col_map, levels=lvs, norm=norm, extend='both')\n ax3.plot(latitude, masked_tph[:, n + 1] / 1000, linewidth=4, color='k')\n ax3.set_xlim([25, 85])\n ax3.set_ylim([5, 20])\n ax3.grid(which='minor', axis='y', alpha=0.2)\n ax3.grid(which='minor', axis='x', alpha=0.2)\n 
ax3.set_title('UKESM1 SO2+ash ' + months[n + 1], fontweight='bold',\n fontsize=25)\n cax = fig.add_subplot(gs[:, -1])\n plt.colorbar(cb, cax=cax, orientation='vertical', label=\n 'Aerosol extinction coefficient [$x10^{-2}$ km$^{-1}$]')\n i = i + 4\nplt.savefig('Figure10.png', dpi=300)\nplt.show()\n",
"step-5": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Feb 1 17:08:06 2023\n\n@author: Alice Wells\n\nPlotting script for Figure 10 in Wells et al., 2023\n\nAerosol extinction coefficient vertical profile averaged longitudinally. \nAveraged monthly CALIOP (centre) aerosol extinction coefficient vertical \nprofiles (night retrievals only) with monthly average tropopause height \n(solid black). UKESM1 SO2 only (left) and SO2+ash (right) simulations with \nimposed CALIOP minimum retrieval limits and mask. \n\n\"\"\"\n# =============================================================================\n# Import functions\n# =============================================================================\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport calendar\nimport matplotlib.colors as colors\nimport matplotlib.cm as mpl_cm\n\n# =============================================================================\n# Load data\n# =============================================================================\n\n#CALIOP observations\ncaliop = np.load('caliop_perturbation_daily_zonal_average_extinction_532nm.npy')\n#CALIOP tropopause height \ncaliop_tph = np.load('calipso_daily_zonal_average_tropopause_height.npy')\n#Model SO2+ash with CALIOP limits imposed\nso2_ash = np.load('SO2_ash_perturbation_monthly_zonal_average_extinction_caliop_limits_532nm_1x1deg.npy')\n#Model SO2only with CALIOP limits imposed\nso2_only = np.load('SO2_only_perturbation_monthly_zonal_average_extinction_caliop_limits_532nm_1x1deg.npy')\n#Model altitude profile\nmodel_alts = np.load('Model_altitude.npy')\nmodel_alts[0] = 0\n#Model tropopause height\nmodel_tph = np.load('Model_monthly_zonal_average_tropopause_height.npy')\n\n# =============================================================================\n# Create the caliop model mask\n# =============================================================================\n\n#Find model points only where calipso data exists\ncaliop_mask = np.nanmean(caliop, axis = (1,2))\nmask = np.ones( (181, 12) )\nmask[np.isnan(caliop_mask)] = np.nan\n\n#Mask the model data\nso2_ash_masked = np.zeros( (181, 85, 12) )\nso2_only_masked = np.zeros( (181, 85, 12) )\nfor i in range(85):\n so2_ash_masked[:, i, :] = so2_ash[:, i, :] * mask\n so2_only_masked[:, i, :] = so2_only[:, i, :] * mask\n\nmasked_tph = model_tph * mask \n\n# =============================================================================\n# Define altitude profile\n# =============================================================================\n \nalts1 = np.linspace(-500, 20200, 346)\nalts2 = np.linspace(20380, 29740, 53)\ncaliop_alts = np.hstack( (alts1, alts2) )/1000\n \n#Define latitude coordinates\nlatitude = range(-90, 91)\n#Create months for plotting dates\nmonths = calendar.month_name[6:13] + calendar.month_name[1:6]\n\n#Calculate monthly average for CALIOP\ncaliop_monthly_mean = np.nanmean(caliop[:, :, :, :], axis = 2)\ncaliop_monthly_tph = np.nanmean(caliop_tph, axis = 1)\n\n# =============================================================================\n# Plotting\n# =============================================================================\n\nparams = {'legend.fontsize': 25,\n 'axes.labelsize': 30,\n 'axes.titlesize':35,\n 'axes.linewidth':3,\n 'axes.grid': True,\n 'xtick.labelsize':25,\n 'ytick.labelsize':25,\n 'xtick.major.size': 8,\n 'xtick.minor.size': 5,\n 'xtick.minor.visible':True,\n 'ytick.major.size':8,\n 'ytick.minor.size':5,\n 'ytick.minor.visible':True,\n 'lines.linewidth': 4} 
\n\nplt.rcParams.update(params)\n\nfig = plt.figure(figsize = (37, 38))\ngs = fig.add_gridspec(6, 4, width_ratios = [25, 25, 25, 5])\n\nfig.text(0.5, 0.08, 'Latitude', ha = 'center', va = 'center', fontsize = 35, fontweight = 'bold')\nfig.text(0.08, 0.5, 'Altitude [km]', ha = 'center', va = 'center', rotation = 'vertical', fontsize = 35, fontweight = 'bold')\n\ncol_map = mpl_cm.get_cmap('plasma')\nlvs = np.linspace(0, 1.2, 13)\nnorm = colors.BoundaryNorm(lvs, col_map.N)\n\ni = 1\n\nfor n in range(6):\n \n ax1 = fig.add_subplot(gs[n, 0])\n ax1.contourf(latitude, model_alts, np.transpose(so2_only_masked[:, :, n+1]*100), cmap = col_map, levels = lvs, norm = norm, extend = 'both')\n ax1.plot(latitude, masked_tph[:, n+1]/1000, linewidth = 4, color = 'k')\n\n ax1.set_xlim([25, 85])\n ax1.set_ylim([5, 20])\n ax1.grid(which = 'minor', axis = 'y', alpha = 0.2)\n ax1.grid(which = 'minor', axis = 'x', alpha = 0.2)\n ax1.set_title('UKESM1 SO2only ' + months[n+1], fontweight = 'bold', fontsize = 25)\n \n ax2 = fig.add_subplot(gs[n, 1])\n ax2.contourf(latitude, caliop_alts, np.transpose(caliop_monthly_mean[:, :, n+1]*100000), cmap = col_map, levels = lvs, norm = norm, extend = 'both')\n ax2.plot(latitude, caliop_monthly_tph[:, n+1], linewidth = 4, color = 'k') \n \n ax2.set_xlim([25, 85])\n ax2.set_ylim([5, 20])\n ax2.grid(which = 'minor', axis = 'y', alpha = 0.2)\n ax2.grid(which = 'minor', axis = 'x', alpha = 0.2) \n ax2.set_title('CALIOP ' + months[n+1], fontweight = 'bold', fontsize = 25)\n \n ax3 = fig.add_subplot(gs[n, 2])\n cb = ax3.contourf(latitude, model_alts, np.transpose(so2_ash_masked[:, :, n+1]*100), cmap = col_map, levels = lvs, norm = norm, extend = 'both')\n ax3.plot(latitude, masked_tph[:, n+1]/1000, linewidth = 4, color = 'k')\n \n ax3.set_xlim([25, 85])\n ax3.set_ylim([5, 20])\n ax3.grid(which = 'minor', axis = 'y', alpha = 0.2)\n ax3.grid(which = 'minor', axis = 'x', alpha = 0.2)\n ax3.set_title('UKESM1 SO2+ash ' + months[n+1], fontweight = 'bold', fontsize = 25)\n \n cax = fig.add_subplot(gs[:, -1])\n plt.colorbar(cb, cax=cax, orientation = 'vertical', label = 'Aerosol extinction coefficient [$x10^{-2}$ km$^{-1}$]')\n \n i = i + 4\n\nplt.savefig('Figure10.png', dpi = 300)\nplt.show()",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
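Editorial note: the central step in the plotting script above is deriving a latitude-by-month NaN mask from the CALIOP array and applying it to every model level. The standalone sketch below reproduces that step on toy arrays, using broadcasting in place of the explicit altitude loop (the small shapes are stand-ins for the real 181x85x12 and 181xNalt xNday x12 grids):

import numpy as np

obs = np.random.rand(4, 3, 5, 2)        # toy (lat, alt, day, month) observations
model = np.random.rand(4, 3, 2)         # toy (lat, alt, month) model field
obs[0, :, :, 0] = np.nan                # pretend one lat/month cell has no retrievals

mask = np.ones((obs.shape[0], obs.shape[3]))             # (lat, month)
mask[np.isnan(np.nanmean(obs, axis=(1, 2)))] = np.nan    # NaN wherever obs are entirely missing

masked_model = model * mask[:, None, :]                  # broadcast the mask over altitude
print(np.isnan(masked_model[0, :, 0]).all())             # True: model hidden where obs are absent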
#
# abc088 c
#
import sys
from io import StringIO
import unittest
class TestClass(unittest.TestCase):
def assertIO(self, input, output):
stdout, stdin = sys.stdout, sys.stdin
sys.stdout, sys.stdin = StringIO(), StringIO(input)
resolve()
sys.stdout.seek(0)
out = sys.stdout.read()[:-1]
sys.stdout, sys.stdin = stdout, stdin
self.assertEqual(out, output)
def test_入力例_1(self):
input = """1 0 1
2 1 2
1 0 1"""
output = """Yes"""
self.assertIO(input, output)
def test_入力例_2(self):
input = """2 2 2
2 1 2
2 2 2"""
output = """No"""
self.assertIO(input, output)
def test_入力例_3(self):
input = """0 8 8
0 8 8
0 8 8"""
output = """Yes"""
self.assertIO(input, output)
def test_入力例_4(self):
input = """1 8 6
2 9 7
0 7 7"""
output = """No"""
self.assertIO(input, output)
def resolve():
c = []
for _ in range(3):
c.append(list(map(int, input().split())))
a1 = 0
b1 = c[0][0] - a1
b2 = c[0][1] - a1
b3 = c[0][2] - a1
a2 = c[1][0] - b1
a3 = c[2][0] - b1
if a2+b2 == c[1][1] and a2+b3 == c[1][2] and a3+b2 == c[2][1] and a3+b3 == c[2][2]:
print("Yes")
else:
print("No")
if __name__ == "__main__":
# unittest.main()
resolve()
|
normal
|
{
"blob_id": "8b97c1e14adfcb09806e2d37e2f5c4f0b356c009",
"index": 2742,
"step-1": "<mask token>\n\n\nclass TestClass(unittest.TestCase):\n <mask token>\n\n def test_入力例_1(self):\n input = '1 0 1\\n2 1 2\\n1 0 1'\n output = 'Yes'\n self.assertIO(input, output)\n <mask token>\n <mask token>\n <mask token>\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass TestClass(unittest.TestCase):\n\n def assertIO(self, input, output):\n stdout, stdin = sys.stdout, sys.stdin\n sys.stdout, sys.stdin = StringIO(), StringIO(input)\n resolve()\n sys.stdout.seek(0)\n out = sys.stdout.read()[:-1]\n sys.stdout, sys.stdin = stdout, stdin\n self.assertEqual(out, output)\n\n def test_入力例_1(self):\n input = '1 0 1\\n2 1 2\\n1 0 1'\n output = 'Yes'\n self.assertIO(input, output)\n\n def test_入力例_2(self):\n input = '2 2 2\\n2 1 2\\n2 2 2'\n output = 'No'\n self.assertIO(input, output)\n\n def test_入力例_3(self):\n input = '0 8 8\\n0 8 8\\n0 8 8'\n output = 'Yes'\n self.assertIO(input, output)\n\n def test_入力例_4(self):\n input = '1 8 6\\n2 9 7\\n0 7 7'\n output = 'No'\n self.assertIO(input, output)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass TestClass(unittest.TestCase):\n\n def assertIO(self, input, output):\n stdout, stdin = sys.stdout, sys.stdin\n sys.stdout, sys.stdin = StringIO(), StringIO(input)\n resolve()\n sys.stdout.seek(0)\n out = sys.stdout.read()[:-1]\n sys.stdout, sys.stdin = stdout, stdin\n self.assertEqual(out, output)\n\n def test_入力例_1(self):\n input = '1 0 1\\n2 1 2\\n1 0 1'\n output = 'Yes'\n self.assertIO(input, output)\n\n def test_入力例_2(self):\n input = '2 2 2\\n2 1 2\\n2 2 2'\n output = 'No'\n self.assertIO(input, output)\n\n def test_入力例_3(self):\n input = '0 8 8\\n0 8 8\\n0 8 8'\n output = 'Yes'\n self.assertIO(input, output)\n\n def test_入力例_4(self):\n input = '1 8 6\\n2 9 7\\n0 7 7'\n output = 'No'\n self.assertIO(input, output)\n\n\ndef resolve():\n c = []\n for _ in range(3):\n c.append(list(map(int, input().split())))\n a1 = 0\n b1 = c[0][0] - a1\n b2 = c[0][1] - a1\n b3 = c[0][2] - a1\n a2 = c[1][0] - b1\n a3 = c[2][0] - b1\n if a2 + b2 == c[1][1] and a2 + b3 == c[1][2] and a3 + b2 == c[2][1\n ] and a3 + b3 == c[2][2]:\n print('Yes')\n else:\n print('No')\n\n\nif __name__ == '__main__':\n resolve()\n",
"step-4": "import sys\nfrom io import StringIO\nimport unittest\n\n\nclass TestClass(unittest.TestCase):\n\n def assertIO(self, input, output):\n stdout, stdin = sys.stdout, sys.stdin\n sys.stdout, sys.stdin = StringIO(), StringIO(input)\n resolve()\n sys.stdout.seek(0)\n out = sys.stdout.read()[:-1]\n sys.stdout, sys.stdin = stdout, stdin\n self.assertEqual(out, output)\n\n def test_入力例_1(self):\n input = '1 0 1\\n2 1 2\\n1 0 1'\n output = 'Yes'\n self.assertIO(input, output)\n\n def test_入力例_2(self):\n input = '2 2 2\\n2 1 2\\n2 2 2'\n output = 'No'\n self.assertIO(input, output)\n\n def test_入力例_3(self):\n input = '0 8 8\\n0 8 8\\n0 8 8'\n output = 'Yes'\n self.assertIO(input, output)\n\n def test_入力例_4(self):\n input = '1 8 6\\n2 9 7\\n0 7 7'\n output = 'No'\n self.assertIO(input, output)\n\n\ndef resolve():\n c = []\n for _ in range(3):\n c.append(list(map(int, input().split())))\n a1 = 0\n b1 = c[0][0] - a1\n b2 = c[0][1] - a1\n b3 = c[0][2] - a1\n a2 = c[1][0] - b1\n a3 = c[2][0] - b1\n if a2 + b2 == c[1][1] and a2 + b3 == c[1][2] and a3 + b2 == c[2][1\n ] and a3 + b3 == c[2][2]:\n print('Yes')\n else:\n print('No')\n\n\nif __name__ == '__main__':\n resolve()\n",
"step-5": "#\n# abc088 c\n#\nimport sys\nfrom io import StringIO\nimport unittest\n\n\nclass TestClass(unittest.TestCase):\n def assertIO(self, input, output):\n stdout, stdin = sys.stdout, sys.stdin\n sys.stdout, sys.stdin = StringIO(), StringIO(input)\n resolve()\n sys.stdout.seek(0)\n out = sys.stdout.read()[:-1]\n sys.stdout, sys.stdin = stdout, stdin\n self.assertEqual(out, output)\n\n def test_入力例_1(self):\n input = \"\"\"1 0 1\n2 1 2\n1 0 1\"\"\"\n output = \"\"\"Yes\"\"\"\n self.assertIO(input, output)\n\n def test_入力例_2(self):\n input = \"\"\"2 2 2\n2 1 2\n2 2 2\"\"\"\n output = \"\"\"No\"\"\"\n self.assertIO(input, output)\n\n def test_入力例_3(self):\n input = \"\"\"0 8 8\n0 8 8\n0 8 8\"\"\"\n output = \"\"\"Yes\"\"\"\n self.assertIO(input, output)\n\n def test_入力例_4(self):\n input = \"\"\"1 8 6\n2 9 7\n0 7 7\"\"\"\n output = \"\"\"No\"\"\"\n self.assertIO(input, output)\n\n\ndef resolve():\n c = []\n for _ in range(3):\n c.append(list(map(int, input().split())))\n\n a1 = 0\n b1 = c[0][0] - a1\n b2 = c[0][1] - a1\n b3 = c[0][2] - a1\n a2 = c[1][0] - b1\n a3 = c[2][0] - b1\n\n if a2+b2 == c[1][1] and a2+b3 == c[1][2] and a3+b2 == c[2][1] and a3+b3 == c[2][2]:\n print(\"Yes\")\n else:\n print(\"No\")\n\n\nif __name__ == \"__main__\":\n # unittest.main()\n resolve()\n",
"step-ids": [
2,
6,
8,
9,
10
]
}
|
[
2,
6,
8,
9,
10
] |
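Editorial note: resolve() above hinges on the fact that if c_ij = a_i + b_j has any solution, one also exists with a1 = 0; fixing a1 = 0 determines every b_j from the first row and every a_i from the first column, leaving only a consistency check over the remaining cells. A compact restatement of that check (the grids are the sample inputs from the record's first two tests):

def decomposable(c):
    b = c[0][:]                            # with a1 = 0, b_j = c[0][j]
    a = [row[0] - b[0] for row in c]       # a_i = c[i][0] - b_1
    return all(c[i][j] == a[i] + b[j] for i in range(3) for j in range(3))

print(decomposable([[1, 0, 1], [2, 1, 2], [1, 0, 1]]))   # True  ("Yes")
print(decomposable([[2, 2, 2], [2, 1, 2], [2, 2, 2]]))   # False ("No")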
from flask_restful import Resource, reqparse
import sqlite3
from flask_jwt import jwt_required
from models.item_model import ItemModel
from flask_sqlalchemy import SQLAlchemy
from d import db
from models.store_model import StoreModel
class Modell(Resource):
def get(self, name):
item = StoreModel.find_by_name(name)
return item.json()
def post(self, name):
if StoreModel.find_by_name(name):
return {"message": "sorry no store available in this name"}
#data = Modell.requested.parse_args()
item = StoreModel(name)
item.save_to_db()
return item.json()
def put(self, name):
# data = Modell.requested.parse_args()
item = StoreModel.find_by_name(name)
item.save_to_db()
return item.json()
def delete(self, name):
item=StoreModel.find_by_name(name)
if item:
item.delete_from_db()
return {"m":"delted successfully"}
class Storelist(Resource):
def get(self):
return {"item":[x for x in StoreModel.query.all()]}
|
normal
|
{
"blob_id": "5616ec135a2233e742ff3b2b1f378ec12298b935",
"index": 9578,
"step-1": "<mask token>\n\n\nclass Modell(Resource):\n <mask token>\n <mask token>\n\n def put(self, name):\n item = StoreModel.find_by_name(name)\n item.save_to_db()\n return item.json()\n <mask token>\n\n\nclass Storelist(Resource):\n\n def get(self):\n return {'item': [x for x in StoreModel.query.all()]}\n",
"step-2": "<mask token>\n\n\nclass Modell(Resource):\n <mask token>\n\n def post(self, name):\n if StoreModel.find_by_name(name):\n return {'message': 'sorry no store available in this name'}\n item = StoreModel(name)\n item.save_to_db()\n return item.json()\n\n def put(self, name):\n item = StoreModel.find_by_name(name)\n item.save_to_db()\n return item.json()\n <mask token>\n\n\nclass Storelist(Resource):\n\n def get(self):\n return {'item': [x for x in StoreModel.query.all()]}\n",
"step-3": "<mask token>\n\n\nclass Modell(Resource):\n\n def get(self, name):\n item = StoreModel.find_by_name(name)\n return item.json()\n\n def post(self, name):\n if StoreModel.find_by_name(name):\n return {'message': 'sorry no store available in this name'}\n item = StoreModel(name)\n item.save_to_db()\n return item.json()\n\n def put(self, name):\n item = StoreModel.find_by_name(name)\n item.save_to_db()\n return item.json()\n <mask token>\n\n\nclass Storelist(Resource):\n\n def get(self):\n return {'item': [x for x in StoreModel.query.all()]}\n",
"step-4": "from flask_restful import Resource, reqparse\nimport sqlite3\nfrom flask_jwt import jwt_required\nfrom models.item_model import ItemModel\nfrom flask_sqlalchemy import SQLAlchemy\nfrom d import db\nfrom models.store_model import StoreModel\n\n\nclass Modell(Resource):\n\n def get(self, name):\n item = StoreModel.find_by_name(name)\n return item.json()\n\n def post(self, name):\n if StoreModel.find_by_name(name):\n return {'message': 'sorry no store available in this name'}\n item = StoreModel(name)\n item.save_to_db()\n return item.json()\n\n def put(self, name):\n item = StoreModel.find_by_name(name)\n item.save_to_db()\n return item.json()\n\n def delete(self, name):\n item = StoreModel.find_by_name(name)\n if item:\n item.delete_from_db()\n return {'m': 'delted successfully'}\n\n\nclass Storelist(Resource):\n\n def get(self):\n return {'item': [x for x in StoreModel.query.all()]}\n",
"step-5": "from flask_restful import Resource, reqparse\r\nimport sqlite3\r\nfrom flask_jwt import jwt_required\r\nfrom models.item_model import ItemModel\r\nfrom flask_sqlalchemy import SQLAlchemy\r\nfrom d import db\r\nfrom models.store_model import StoreModel\r\n\r\n\r\nclass Modell(Resource):\r\n\r\n\r\n def get(self, name):\r\n item = StoreModel.find_by_name(name)\r\n return item.json()\r\n\r\n\r\n def post(self, name):\r\n if StoreModel.find_by_name(name):\r\n return {\"message\": \"sorry no store available in this name\"}\r\n #data = Modell.requested.parse_args()\r\n item = StoreModel(name)\r\n item.save_to_db()\r\n return item.json()\r\n\r\n\r\n def put(self, name):\r\n# data = Modell.requested.parse_args()\r\n item = StoreModel.find_by_name(name)\r\n\r\n\r\n\r\n item.save_to_db()\r\n return item.json()\r\n\r\n\r\n def delete(self, name):\r\n item=StoreModel.find_by_name(name)\r\n if item:\r\n item.delete_from_db()\r\n return {\"m\":\"delted successfully\"}\r\n\r\n\r\n\r\nclass Storelist(Resource):\r\n\r\n def get(self):\r\n return {\"item\":[x for x in StoreModel.query.all()]}",
"step-ids": [
4,
5,
6,
8,
9
]
}
|
[
4,
5,
6,
8,
9
] |
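Editorial note: the resource above assumes a StoreModel exposing find_by_name, save_to_db, delete_from_db and json; a hedged sketch of what such a flask_sqlalchemy model could look like follows (table name, columns and the json shape are guesses, not taken from the record). Note also that Storelist.get presumably intends [x.json() for x in StoreModel.query.all()], since raw model instances are not JSON-serialisable.

from d import db   # the same SQLAlchemy handle the record imports

class StoreModel(db.Model):                      # hypothetical companion model
    __tablename__ = 'stores'
    id = db.Column(db.Integer, primary_key=True)
    name = db.Column(db.String(80))

    def __init__(self, name):
        self.name = name

    def json(self):
        return {'id': self.id, 'name': self.name}

    @classmethod
    def find_by_name(cls, name):
        return cls.query.filter_by(name=name).first()

    def save_to_db(self):
        db.session.add(self)
        db.session.commit()

    def delete_from_db(self):
        db.session.delete(self)
        db.session.commit()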
'''
Aaditya Upadhyay
oooo$$$$$$$$$$$
oo$$$$$$$$$$$$$$$$$$$$$$$o
oo$$$$$$$$$$$$$$$$$$$$$$$$$$$$$o o$ $$ o$
o $ oo o$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$o $$ $$ $o$
oo $ $ "$ o$$$$$$$$$ $$$$$$$$$$$$$ $$$$$$$$o $$o$o$
"$$$$$o$ o$$$$$$$$$ $$$$$$$$$$$ $$$$$$$$$o $$$$$$$$
$$$$$$$ $$$$$$$$$$$ $$$$$$$$$$$ $$$$$$$$$$$$$$$$$$$$$$$
$$$$$$$$$$$$$$$$$$$$$$$ $$$$$$$$$$$$$ $$$$$$$$$$$$$$ """$$$
"$$$""""$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$ "$$$
$$$ o$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$ "$$o
o$$" $$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$ $$o
$$$ $$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$" "$$$$$ooooo$$$o
o$$
$$$$$ $$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$ o$$$$$$$$$$$$$$$$$
$$$$$$$$"$$$$ $$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$ $$$$""""""""
"""" $$$$ "$$$$$$$$$$$$$$$$$$$$$$$$$$$$" o$$$
"$$o """$$$$$$$$$$$$$$$$$$"$$" $$$
$$o "$$""$$$$$$"""" o$$$
$$$o o$$$"
"$$$o o$$$$$o"$$$o o$$$$
"$$$$oo ""$$$o$$$$o o$$$$""
""$$$$
"$$o$$$$$$$$$"""
""$$$$$$oo $$$$$$$$$$
""""$$$$$$$$$$$
$$$$$$$$$$$$
$$$$$$$$$$"
"$$$""""
'''
from sys import stdin, stdout
from collections import *
from math import gcd, floor, ceil
def st(): return list(stdin.readline().strip())
def li(): return list(map(int, stdin.readline().split()))
def mp(): return map(int, stdin.readline().split())
def inp(): return int(stdin.readline())
def pr(n): return stdout.write(str(n)+"\n")
mod = 1000000007
INF = float('inf')
def solve():
def check(n):
temp = n
while temp:
x = temp % 10
temp //= 10
if x != 0:
if n % x != 0:
return False
return True
n = inp()
while True:
if check(n):
pr(n)
return
n += 1
for _ in range(inp()):
solve()
|
normal
|
{
"blob_id": "9cd1cb84c457db64019fa542efcf6500aa8d6d42",
"index": 9275,
"step-1": "<mask token>\n\n\ndef li():\n return list(map(int, stdin.readline().split()))\n\n\ndef mp():\n return map(int, stdin.readline().split())\n\n\n<mask token>\n\n\ndef pr(n):\n return stdout.write(str(n) + '\\n')\n\n\n<mask token>\n\n\ndef solve():\n\n def check(n):\n temp = n\n while temp:\n x = temp % 10\n temp //= 10\n if x != 0:\n if n % x != 0:\n return False\n return True\n n = inp()\n while True:\n if check(n):\n pr(n)\n return\n n += 1\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef st():\n return list(stdin.readline().strip())\n\n\ndef li():\n return list(map(int, stdin.readline().split()))\n\n\ndef mp():\n return map(int, stdin.readline().split())\n\n\ndef inp():\n return int(stdin.readline())\n\n\ndef pr(n):\n return stdout.write(str(n) + '\\n')\n\n\n<mask token>\n\n\ndef solve():\n\n def check(n):\n temp = n\n while temp:\n x = temp % 10\n temp //= 10\n if x != 0:\n if n % x != 0:\n return False\n return True\n n = inp()\n while True:\n if check(n):\n pr(n)\n return\n n += 1\n\n\nfor _ in range(inp()):\n solve()\n",
"step-3": "<mask token>\n\n\ndef st():\n return list(stdin.readline().strip())\n\n\ndef li():\n return list(map(int, stdin.readline().split()))\n\n\ndef mp():\n return map(int, stdin.readline().split())\n\n\ndef inp():\n return int(stdin.readline())\n\n\ndef pr(n):\n return stdout.write(str(n) + '\\n')\n\n\nmod = 1000000007\nINF = float('inf')\n\n\ndef solve():\n\n def check(n):\n temp = n\n while temp:\n x = temp % 10\n temp //= 10\n if x != 0:\n if n % x != 0:\n return False\n return True\n n = inp()\n while True:\n if check(n):\n pr(n)\n return\n n += 1\n\n\nfor _ in range(inp()):\n solve()\n",
"step-4": "<mask token>\nfrom sys import stdin, stdout\nfrom collections import *\nfrom math import gcd, floor, ceil\n\n\ndef st():\n return list(stdin.readline().strip())\n\n\ndef li():\n return list(map(int, stdin.readline().split()))\n\n\ndef mp():\n return map(int, stdin.readline().split())\n\n\ndef inp():\n return int(stdin.readline())\n\n\ndef pr(n):\n return stdout.write(str(n) + '\\n')\n\n\nmod = 1000000007\nINF = float('inf')\n\n\ndef solve():\n\n def check(n):\n temp = n\n while temp:\n x = temp % 10\n temp //= 10\n if x != 0:\n if n % x != 0:\n return False\n return True\n n = inp()\n while True:\n if check(n):\n pr(n)\n return\n n += 1\n\n\nfor _ in range(inp()):\n solve()\n",
"step-5": "'''\nAaditya Upadhyay\n\n oooo$$$$$$$$$$$\n \n oo$$$$$$$$$$$$$$$$$$$$$$$o\n oo$$$$$$$$$$$$$$$$$$$$$$$$$$$$$o o$ $$ o$\n o $ oo o$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$o $$ $$ $o$\noo $ $ \"$ o$$$$$$$$$ $$$$$$$$$$$$$ $$$$$$$$o $$o$o$\n\"$$$$$o$ o$$$$$$$$$ $$$$$$$$$$$ $$$$$$$$$o $$$$$$$$\n $$$$$$$ $$$$$$$$$$$ $$$$$$$$$$$ $$$$$$$$$$$$$$$$$$$$$$$\n $$$$$$$$$$$$$$$$$$$$$$$ $$$$$$$$$$$$$ $$$$$$$$$$$$$$ \"\"\"$$$\n \"$$$\"\"\"\"$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$ \"$$$\n $$$ o$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$ \"$$o\n o$$\" $$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$ $$o\n $$$ $$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$\" \"$$$$$ooooo$$$o\n o$$\n $$$$$ $$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$ o$$$$$$$$$$$$$$$$$\n $$$$$$$$\"$$$$ $$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$ $$$$\"\"\"\"\"\"\"\"\n \"\"\"\" $$$$ \"$$$$$$$$$$$$$$$$$$$$$$$$$$$$\" o$$$\n \"$$o \"\"\"$$$$$$$$$$$$$$$$$$\"$$\" $$$\n $$o \"$$\"\"$$$$$$\"\"\"\" o$$$\n $$$o o$$$\"\n \"$$$o o$$$$$o\"$$$o o$$$$\n \"$$$$oo \"\"$$$o$$$$o o$$$$\"\"\n \"\"$$$$\n \"$$o$$$$$$$$$\"\"\"\n \"\"$$$$$$oo $$$$$$$$$$\n \"\"\"\"$$$$$$$$$$$\n $$$$$$$$$$$$\n $$$$$$$$$$\"\n \"$$$\"\"\"\"\n\n'''\n\nfrom sys import stdin, stdout\nfrom collections import *\nfrom math import gcd, floor, ceil\ndef st(): return list(stdin.readline().strip())\n\n\ndef li(): return list(map(int, stdin.readline().split()))\ndef mp(): return map(int, stdin.readline().split())\ndef inp(): return int(stdin.readline())\ndef pr(n): return stdout.write(str(n)+\"\\n\")\n\n\nmod = 1000000007\nINF = float('inf')\n\n\ndef solve():\n def check(n):\n temp = n\n while temp:\n x = temp % 10\n temp //= 10\n if x != 0:\n if n % x != 0:\n return False\n return True\n\n n = inp()\n while True:\n if check(n):\n pr(n)\n return\n n += 1\n\n\nfor _ in range(inp()):\n solve()\n",
"step-ids": [
4,
7,
8,
9,
10
]
}
|
[
4,
7,
8,
9,
10
] |
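Editorial note: check(n) above accepts n exactly when every nonzero decimal digit of n divides n, and solve() simply walks upward from the input until that holds. A standalone illustration of the digit test (the probe values are invented):

def check(n):
    temp = n
    while temp:
        d = temp % 10
        temp //= 10
        if d != 0 and n % d != 0:
            return False
    return True

print(check(127))   # False: 2 does not divide 127
print(check(128))   # True: 1, 2 and 8 all divide 128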
from keras.preprocessing.image import img_to_array
from keras.models import load_model
import tensorflow as tf
import numpy as np
import argparse
import imutils
import pickle
import cv2
# USAGE
# python classify.py --model output/fashion.model --categorybin output/category_lb.pickle
# --colorbin output/color_lb.pickle --image examples/black_dress.jpg
# construct the argument parse and parse the arguments
ap = argparse.ArgumentParser()
ap.add_argument("-m", "--model", required=True, help="path to trained model model")
ap.add_argument("-l", "--categorybin", required=True, help="path to output category label binarizer")
ap.add_argument("-c", "--colorbin", required=True, help="path to output color label binarizer")
ap.add_argument("-i", "--image", required=True, help="path to input image")
args = vars(ap.parse_args())
# load the image
image = cv2.imread(args["image"])
output = imutils.resize(image, width=400)
image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
# pre-process the image for classification
image = cv2.resize(image, (96, 96))
image = image.astype("float") / 255.0
image = img_to_array(image)
image = np.expand_dims(image, axis=0)
# load the trained convolutional neural network from disk, followed
# by the category and color label binarizers, respectively
print("[INFO] loading network...")
model = load_model(args["model"], custom_objects={"tf": tf})
categoryLB = pickle.loads(open(args["categorybin"], "rb").read())
colorLB = pickle.loads(open(args["colorbin"], "rb").read())
# classify the input image using Keras' multi-output functionality
print("[INFO] classifying image...")
(categoryProba, colorProba) = model.predict(image)
# find indexes of both the category and color outputs with the
# largest probabilities, then determine the corresponding class
# labels
categoryIdx = categoryProba[0].argmax()
colorIdx = colorProba[0].argmax()
categoryLabel = categoryLB.classes_[categoryIdx]
colorLabel = colorLB.classes_[colorIdx]
# draw the category label and color label on the image
categoryText = "category: {} ({:.2f}%)".format(categoryLabel, categoryProba[0][categoryIdx] * 100)
colorText = "color: {} ({:.2f}%)".format(colorLabel, colorProba[0][colorIdx] * 100)
cv2.putText(output, categoryText, (10, 25), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 255, 0), 2)
cv2.putText(output, colorText, (10, 55), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 255, 0), 2)
# display the predictions to the terminal as well
print("[INFO] {}".format(categoryText))
print("[INFO] {}".format(colorText))
# show the output image
cv2.imshow("Output", output)
cv2.waitKey(0)
|
normal
|
{
"blob_id": "8ff9961c1415c04899bbc15ba64811a1b3ade262",
"index": 3082,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nap.add_argument('-m', '--model', required=True, help=\n 'path to trained model model')\nap.add_argument('-l', '--categorybin', required=True, help=\n 'path to output category label binarizer')\nap.add_argument('-c', '--colorbin', required=True, help=\n 'path to output color label binarizer')\nap.add_argument('-i', '--image', required=True, help='path to input image')\n<mask token>\nprint('[INFO] loading network...')\n<mask token>\nprint('[INFO] classifying image...')\n<mask token>\ncv2.putText(output, categoryText, (10, 25), cv2.FONT_HERSHEY_SIMPLEX, 0.7,\n (0, 255, 0), 2)\ncv2.putText(output, colorText, (10, 55), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0,\n 255, 0), 2)\nprint('[INFO] {}'.format(categoryText))\nprint('[INFO] {}'.format(colorText))\ncv2.imshow('Output', output)\ncv2.waitKey(0)\n",
"step-3": "<mask token>\nap = argparse.ArgumentParser()\nap.add_argument('-m', '--model', required=True, help=\n 'path to trained model model')\nap.add_argument('-l', '--categorybin', required=True, help=\n 'path to output category label binarizer')\nap.add_argument('-c', '--colorbin', required=True, help=\n 'path to output color label binarizer')\nap.add_argument('-i', '--image', required=True, help='path to input image')\nargs = vars(ap.parse_args())\nimage = cv2.imread(args['image'])\noutput = imutils.resize(image, width=400)\nimage = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)\nimage = cv2.resize(image, (96, 96))\nimage = image.astype('float') / 255.0\nimage = img_to_array(image)\nimage = np.expand_dims(image, axis=0)\nprint('[INFO] loading network...')\nmodel = load_model(args['model'], custom_objects={'tf': tf})\ncategoryLB = pickle.loads(open(args['categorybin'], 'rb').read())\ncolorLB = pickle.loads(open(args['colorbin'], 'rb').read())\nprint('[INFO] classifying image...')\ncategoryProba, colorProba = model.predict(image)\ncategoryIdx = categoryProba[0].argmax()\ncolorIdx = colorProba[0].argmax()\ncategoryLabel = categoryLB.classes_[categoryIdx]\ncolorLabel = colorLB.classes_[colorIdx]\ncategoryText = 'category: {} ({:.2f}%)'.format(categoryLabel, categoryProba\n [0][categoryIdx] * 100)\ncolorText = 'color: {} ({:.2f}%)'.format(colorLabel, colorProba[0][colorIdx\n ] * 100)\ncv2.putText(output, categoryText, (10, 25), cv2.FONT_HERSHEY_SIMPLEX, 0.7,\n (0, 255, 0), 2)\ncv2.putText(output, colorText, (10, 55), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0,\n 255, 0), 2)\nprint('[INFO] {}'.format(categoryText))\nprint('[INFO] {}'.format(colorText))\ncv2.imshow('Output', output)\ncv2.waitKey(0)\n",
"step-4": "from keras.preprocessing.image import img_to_array\nfrom keras.models import load_model\nimport tensorflow as tf\nimport numpy as np\nimport argparse\nimport imutils\nimport pickle\nimport cv2\nap = argparse.ArgumentParser()\nap.add_argument('-m', '--model', required=True, help=\n 'path to trained model model')\nap.add_argument('-l', '--categorybin', required=True, help=\n 'path to output category label binarizer')\nap.add_argument('-c', '--colorbin', required=True, help=\n 'path to output color label binarizer')\nap.add_argument('-i', '--image', required=True, help='path to input image')\nargs = vars(ap.parse_args())\nimage = cv2.imread(args['image'])\noutput = imutils.resize(image, width=400)\nimage = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)\nimage = cv2.resize(image, (96, 96))\nimage = image.astype('float') / 255.0\nimage = img_to_array(image)\nimage = np.expand_dims(image, axis=0)\nprint('[INFO] loading network...')\nmodel = load_model(args['model'], custom_objects={'tf': tf})\ncategoryLB = pickle.loads(open(args['categorybin'], 'rb').read())\ncolorLB = pickle.loads(open(args['colorbin'], 'rb').read())\nprint('[INFO] classifying image...')\ncategoryProba, colorProba = model.predict(image)\ncategoryIdx = categoryProba[0].argmax()\ncolorIdx = colorProba[0].argmax()\ncategoryLabel = categoryLB.classes_[categoryIdx]\ncolorLabel = colorLB.classes_[colorIdx]\ncategoryText = 'category: {} ({:.2f}%)'.format(categoryLabel, categoryProba\n [0][categoryIdx] * 100)\ncolorText = 'color: {} ({:.2f}%)'.format(colorLabel, colorProba[0][colorIdx\n ] * 100)\ncv2.putText(output, categoryText, (10, 25), cv2.FONT_HERSHEY_SIMPLEX, 0.7,\n (0, 255, 0), 2)\ncv2.putText(output, colorText, (10, 55), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0,\n 255, 0), 2)\nprint('[INFO] {}'.format(categoryText))\nprint('[INFO] {}'.format(colorText))\ncv2.imshow('Output', output)\ncv2.waitKey(0)\n",
"step-5": "from keras.preprocessing.image import img_to_array\nfrom keras.models import load_model\nimport tensorflow as tf\nimport numpy as np\nimport argparse\nimport imutils\nimport pickle\nimport cv2\n\n# USAGE\n# python classify.py --model output/fashion.model --categorybin output/category_lb.pickle\n# --colorbin output/color_lb.pickle --image examples/black_dress.jpg\n\n# construct the argument parse and parse the arguments\nap = argparse.ArgumentParser()\nap.add_argument(\"-m\", \"--model\", required=True, help=\"path to trained model model\")\nap.add_argument(\"-l\", \"--categorybin\", required=True, help=\"path to output category label binarizer\")\nap.add_argument(\"-c\", \"--colorbin\", required=True, help=\"path to output color label binarizer\")\nap.add_argument(\"-i\", \"--image\", required=True, help=\"path to input image\")\nargs = vars(ap.parse_args())\n\n# load the image\nimage = cv2.imread(args[\"image\"])\noutput = imutils.resize(image, width=400)\nimage = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)\n\n# pre-process the image for classification\nimage = cv2.resize(image, (96, 96))\nimage = image.astype(\"float\") / 255.0\nimage = img_to_array(image)\nimage = np.expand_dims(image, axis=0)\n\n# load the trained convolutional neural network from disk, followed\n# by the category and color label binarizers, respectively\nprint(\"[INFO] loading network...\")\nmodel = load_model(args[\"model\"], custom_objects={\"tf\": tf})\ncategoryLB = pickle.loads(open(args[\"categorybin\"], \"rb\").read())\ncolorLB = pickle.loads(open(args[\"colorbin\"], \"rb\").read())\n\n# classify the input image using Keras' multi-output functionality\nprint(\"[INFO] classifying image...\")\n(categoryProba, colorProba) = model.predict(image)\n\n# find indexes of both the category and color outputs with the\n# largest probabilities, then determine the corresponding class\n# labels\ncategoryIdx = categoryProba[0].argmax()\ncolorIdx = colorProba[0].argmax()\ncategoryLabel = categoryLB.classes_[categoryIdx]\ncolorLabel = colorLB.classes_[colorIdx]\n\n# draw the category label and color label on the image\ncategoryText = \"category: {} ({:.2f}%)\".format(categoryLabel, categoryProba[0][categoryIdx] * 100)\ncolorText = \"color: {} ({:.2f}%)\".format(colorLabel, colorProba[0][colorIdx] * 100)\ncv2.putText(output, categoryText, (10, 25), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 255, 0), 2)\ncv2.putText(output, colorText, (10, 55), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 255, 0), 2)\n\n# display the predictions to the terminal as well\nprint(\"[INFO] {}\".format(categoryText))\nprint(\"[INFO] {}\".format(colorText))\n\n# show the output image\ncv2.imshow(\"Output\", output)\ncv2.waitKey(0)\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
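Editorial note: the classifier above relies on a two-headed Keras model whose predict call returns one probability array per output head. The sketch below shows only the unpacking and label lookup, on fabricated arrays (class names and probabilities are invented; the real ones come from the pickled binarizers):

import numpy as np

category_proba = np.array([[0.10, 0.70, 0.20]])   # stand-in for model.predict(image)[0]
color_proba    = np.array([[0.90, 0.10]])         # stand-in for model.predict(image)[1]
category_classes = ["dress", "jeans", "shirt"]    # hypothetical binarizer class order
color_classes    = ["black", "red"]

cat_idx, col_idx = category_proba[0].argmax(), color_proba[0].argmax()
print("category: {} ({:.2f}%)".format(category_classes[cat_idx], category_proba[0][cat_idx] * 100))
print("color: {} ({:.2f}%)".format(color_classes[col_idx], color_proba[0][col_idx] * 100))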
def pantip(k, n, arr, path,len):
if len == 0:
if sum(path)==k:
path.reverse()
print(path)
return
path.append(arr[len-1])
pantip(k,n,arr,path,len-1)
path.pop()
#backtrack
pantip(k,n,arr,path,len-1)
inp = input('Enter Input (Money, Product) : ').split('/')
arr = [int(i) for i in inp[1].split()]
len = len(arr)
pattern = pantip(int(inp[0]), 0, arr, [],len)
print("Krisada can purchase Product: {0} with: {1} Baht | {2} Pattern".format(arr, inp[0], pattern))
|
normal
|
{
"blob_id": "6cdaf89d97be8f5ef37ab35f2916a36b4c75ddbe",
"index": 7513,
"step-1": "<mask token>\n",
"step-2": "def pantip(k, n, arr, path, len):\n if len == 0:\n if sum(path) == k:\n path.reverse()\n print(path)\n return\n path.append(arr[len - 1])\n pantip(k, n, arr, path, len - 1)\n path.pop()\n pantip(k, n, arr, path, len - 1)\n\n\n<mask token>\n",
"step-3": "def pantip(k, n, arr, path, len):\n if len == 0:\n if sum(path) == k:\n path.reverse()\n print(path)\n return\n path.append(arr[len - 1])\n pantip(k, n, arr, path, len - 1)\n path.pop()\n pantip(k, n, arr, path, len - 1)\n\n\n<mask token>\nprint('Krisada can purchase Product: {0} with: {1} Baht | {2} Pattern'.\n format(arr, inp[0], pattern))\n",
"step-4": "def pantip(k, n, arr, path, len):\n if len == 0:\n if sum(path) == k:\n path.reverse()\n print(path)\n return\n path.append(arr[len - 1])\n pantip(k, n, arr, path, len - 1)\n path.pop()\n pantip(k, n, arr, path, len - 1)\n\n\ninp = input('Enter Input (Money, Product) : ').split('/')\narr = [int(i) for i in inp[1].split()]\nlen = len(arr)\npattern = pantip(int(inp[0]), 0, arr, [], len)\nprint('Krisada can purchase Product: {0} with: {1} Baht | {2} Pattern'.\n format(arr, inp[0], pattern))\n",
"step-5": "def pantip(k, n, arr, path,len):\r\n if len == 0:\r\n if sum(path)==k:\r\n path.reverse()\r\n print(path)\r\n return \r\n path.append(arr[len-1])\r\n pantip(k,n,arr,path,len-1)\r\n path.pop()\r\n #backtrack\r\n pantip(k,n,arr,path,len-1)\r\ninp = input('Enter Input (Money, Product) : ').split('/')\r\narr = [int(i) for i in inp[1].split()]\r\nlen = len(arr)\r\npattern = pantip(int(inp[0]), 0, arr, [],len)\r\nprint(\"Krisada can purchase Product: {0} with: {1} Baht | {2} Pattern\".format(arr, inp[0], pattern))",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
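Editorial note: pantip() above prints every subset of the prices that sums exactly to the budget, but it returns None, so the final format string prints "None Pattern". A variant that keeps the same include/exclude backtracking and additionally returns the number of matching subsets is sketched below (the counting behaviour is an assumed intent, not taken from the record):

def count_patterns(k, arr, path, n):
    if n == 0:
        if sum(path) == k:
            print(list(reversed(path)))
            return 1
        return 0
    path.append(arr[n - 1])              # include arr[n-1]
    with_item = count_patterns(k, arr, path, n - 1)
    path.pop()                           # backtrack, then exclude arr[n-1]
    return with_item + count_patterns(k, arr, path, n - 1)

print(count_patterns(10, [5, 5, 10], [], 3))   # prints [10] and [5, 5], then 2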
# NumPy (Numerical Python) is an extension library for the Python language.
# It supports large multi-dimensional arrays and matrix operations, and also provides a large collection of mathematical functions for array computation.
|
normal
|
{
"blob_id": "94348aed0585024c70062e9201fb41aae2122625",
"index": 9331,
"step-1": "# NumPy(Numerical Python) 是 Python 语言的一个扩展程序库,\r\n# 支持大量的维度数组与矩阵运算,此外也针对数组运算提供大量的数学函数库。",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
1
]
}
|
[
1
] |
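Editorial note: a short illustration of the claim in the comment above (multi-dimensional arrays plus vectorised math functions); generic NumPy usage, not taken from the record:

import numpy as np

a = np.arange(6).reshape(2, 3)     # a small 2-D array
print(a @ a.T)                     # matrix multiplication
print(np.sin(a).round(3))          # a vectorised math function applied element-wise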
'''
MDSANIMA Setup
'''
import sys
import pathlib
from setuptools import setup, find_packages
HERE = pathlib.Path(__file__).parent
CURRENT_PYTHON = sys.version_info[:2]
REQUIRED_PYTHON = (3, 6)
# This check and everything above must remain compatible with Python 2.7.
if CURRENT_PYTHON < REQUIRED_PYTHON:
sys.stderr.write("""==========================
Unsupported Python Version
==========================
This version of MDSANIMA requires Python {}.{}
but you're trying to install it on Python {}.{}
""".format(*(REQUIRED_PYTHON + CURRENT_PYTHON)))
sys.exit(1)
VERSION = '0.2.0'
PACKAGE_NAME = 'mdsanima'
AUTHOR = 'Marcin Rozewski'
AUTHOR_EMAIL = '[email protected]'
URL = 'https://github.com/mdsanima/mdsanima'
LICENSE = 'MIT'
DESCRIPTION = 'The package contains modules that will help in calculating rendering time.'
LONG_DESCRIPTION = (HERE / "README.rst").read_text()
LONG_DESC_TYPE = "text/x-rst"
INSTALL_REQUIRES = [
'humanfriendly'
]
KEYWORDS = [
'mdsanima',
'render time',
'calculator render time',
'blender',
'blener3d',
'rendering',
'houdini',
'sidefx',
'vfx',
'cinema4d',
'cycles',
'redshift',
'render engine',
'octane render',
'mantra',
'vray',
'clarisse ifx'
]
setup(name=PACKAGE_NAME,
version=VERSION,
description=DESCRIPTION,
long_description=LONG_DESCRIPTION,
long_description_content_type=LONG_DESC_TYPE,
author=AUTHOR,
license=LICENSE,
author_email=AUTHOR_EMAIL,
url=URL,
install_requires=INSTALL_REQUIRES,
packages=find_packages(),
extras_require={
"docs": [
'sphinx',
'sphinx-autoapi',
'sphinx-rtd-theme',
'sphinx-bootstrap-theme',
'sphinx-prompt',
'sphinx-tabs',
'recommonmark'
],
},
python_requires='>=3.6',
keywords=KEYWORDS,
classifiers=[
'Development Status :: 5 - Production/Stable',
'Environment :: Console',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Natural Language :: English',
'Operating System :: POSIX :: Linux',
'Operating System :: Microsoft :: Windows :: Windows 10',
'Operating System :: MacOS',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Topic :: Multimedia :: Graphics :: 3D Rendering',
'Topic :: Multimedia :: Graphics :: 3D Modeling',
],
)
|
normal
|
{
"blob_id": "2827a56c12c1e15a6fe26ce182aa07d76735d77f",
"index": 407,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nif CURRENT_PYTHON < REQUIRED_PYTHON:\n sys.stderr.write(\n \"\"\"==========================\nUnsupported Python Version\n==========================\nThis version of MDSANIMA requires Python {}.{}\nbut you're trying to install it on Python {}.{}\n\"\"\"\n .format(*(REQUIRED_PYTHON + CURRENT_PYTHON)))\n sys.exit(1)\n<mask token>\nsetup(name=PACKAGE_NAME, version=VERSION, description=DESCRIPTION,\n long_description=LONG_DESCRIPTION, long_description_content_type=\n LONG_DESC_TYPE, author=AUTHOR, license=LICENSE, author_email=\n AUTHOR_EMAIL, url=URL, install_requires=INSTALL_REQUIRES, packages=\n find_packages(), extras_require={'docs': ['sphinx', 'sphinx-autoapi',\n 'sphinx-rtd-theme', 'sphinx-bootstrap-theme', 'sphinx-prompt',\n 'sphinx-tabs', 'recommonmark']}, python_requires='>=3.6', keywords=\n KEYWORDS, classifiers=['Development Status :: 5 - Production/Stable',\n 'Environment :: Console', 'Intended Audience :: Developers',\n 'License :: OSI Approved :: MIT License', 'Natural Language :: English',\n 'Operating System :: POSIX :: Linux',\n 'Operating System :: Microsoft :: Windows :: Windows 10',\n 'Operating System :: MacOS', 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n 'Topic :: Multimedia :: Graphics :: 3D Rendering',\n 'Topic :: Multimedia :: Graphics :: 3D Modeling'])\n",
"step-3": "<mask token>\nHERE = pathlib.Path(__file__).parent\nCURRENT_PYTHON = sys.version_info[:2]\nREQUIRED_PYTHON = 3, 6\nif CURRENT_PYTHON < REQUIRED_PYTHON:\n sys.stderr.write(\n \"\"\"==========================\nUnsupported Python Version\n==========================\nThis version of MDSANIMA requires Python {}.{}\nbut you're trying to install it on Python {}.{}\n\"\"\"\n .format(*(REQUIRED_PYTHON + CURRENT_PYTHON)))\n sys.exit(1)\nVERSION = '0.2.0'\nPACKAGE_NAME = 'mdsanima'\nAUTHOR = 'Marcin Rozewski'\nAUTHOR_EMAIL = '[email protected]'\nURL = 'https://github.com/mdsanima/mdsanima'\nLICENSE = 'MIT'\nDESCRIPTION = (\n 'The package contains modules that will help in calculating rendering time.'\n )\nLONG_DESCRIPTION = (HERE / 'README.rst').read_text()\nLONG_DESC_TYPE = 'text/x-rst'\nINSTALL_REQUIRES = ['humanfriendly']\nKEYWORDS = ['mdsanima', 'render time', 'calculator render time', 'blender',\n 'blener3d', 'rendering', 'houdini', 'sidefx', 'vfx', 'cinema4d',\n 'cycles', 'redshift', 'render engine', 'octane render', 'mantra',\n 'vray', 'clarisse ifx']\nsetup(name=PACKAGE_NAME, version=VERSION, description=DESCRIPTION,\n long_description=LONG_DESCRIPTION, long_description_content_type=\n LONG_DESC_TYPE, author=AUTHOR, license=LICENSE, author_email=\n AUTHOR_EMAIL, url=URL, install_requires=INSTALL_REQUIRES, packages=\n find_packages(), extras_require={'docs': ['sphinx', 'sphinx-autoapi',\n 'sphinx-rtd-theme', 'sphinx-bootstrap-theme', 'sphinx-prompt',\n 'sphinx-tabs', 'recommonmark']}, python_requires='>=3.6', keywords=\n KEYWORDS, classifiers=['Development Status :: 5 - Production/Stable',\n 'Environment :: Console', 'Intended Audience :: Developers',\n 'License :: OSI Approved :: MIT License', 'Natural Language :: English',\n 'Operating System :: POSIX :: Linux',\n 'Operating System :: Microsoft :: Windows :: Windows 10',\n 'Operating System :: MacOS', 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n 'Topic :: Multimedia :: Graphics :: 3D Rendering',\n 'Topic :: Multimedia :: Graphics :: 3D Modeling'])\n",
"step-4": "<mask token>\nimport sys\nimport pathlib\nfrom setuptools import setup, find_packages\nHERE = pathlib.Path(__file__).parent\nCURRENT_PYTHON = sys.version_info[:2]\nREQUIRED_PYTHON = 3, 6\nif CURRENT_PYTHON < REQUIRED_PYTHON:\n sys.stderr.write(\n \"\"\"==========================\nUnsupported Python Version\n==========================\nThis version of MDSANIMA requires Python {}.{}\nbut you're trying to install it on Python {}.{}\n\"\"\"\n .format(*(REQUIRED_PYTHON + CURRENT_PYTHON)))\n sys.exit(1)\nVERSION = '0.2.0'\nPACKAGE_NAME = 'mdsanima'\nAUTHOR = 'Marcin Rozewski'\nAUTHOR_EMAIL = '[email protected]'\nURL = 'https://github.com/mdsanima/mdsanima'\nLICENSE = 'MIT'\nDESCRIPTION = (\n 'The package contains modules that will help in calculating rendering time.'\n )\nLONG_DESCRIPTION = (HERE / 'README.rst').read_text()\nLONG_DESC_TYPE = 'text/x-rst'\nINSTALL_REQUIRES = ['humanfriendly']\nKEYWORDS = ['mdsanima', 'render time', 'calculator render time', 'blender',\n 'blener3d', 'rendering', 'houdini', 'sidefx', 'vfx', 'cinema4d',\n 'cycles', 'redshift', 'render engine', 'octane render', 'mantra',\n 'vray', 'clarisse ifx']\nsetup(name=PACKAGE_NAME, version=VERSION, description=DESCRIPTION,\n long_description=LONG_DESCRIPTION, long_description_content_type=\n LONG_DESC_TYPE, author=AUTHOR, license=LICENSE, author_email=\n AUTHOR_EMAIL, url=URL, install_requires=INSTALL_REQUIRES, packages=\n find_packages(), extras_require={'docs': ['sphinx', 'sphinx-autoapi',\n 'sphinx-rtd-theme', 'sphinx-bootstrap-theme', 'sphinx-prompt',\n 'sphinx-tabs', 'recommonmark']}, python_requires='>=3.6', keywords=\n KEYWORDS, classifiers=['Development Status :: 5 - Production/Stable',\n 'Environment :: Console', 'Intended Audience :: Developers',\n 'License :: OSI Approved :: MIT License', 'Natural Language :: English',\n 'Operating System :: POSIX :: Linux',\n 'Operating System :: Microsoft :: Windows :: Windows 10',\n 'Operating System :: MacOS', 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n 'Topic :: Multimedia :: Graphics :: 3D Rendering',\n 'Topic :: Multimedia :: Graphics :: 3D Modeling'])\n",
"step-5": "'''\nMDSANIMA Setup\n'''\n\nimport sys\nimport pathlib\nfrom setuptools import setup, find_packages\n\nHERE = pathlib.Path(__file__).parent\n\nCURRENT_PYTHON = sys.version_info[:2]\nREQUIRED_PYTHON = (3, 6)\n\n# This check and everything above must remain compatible with Python 2.7.\nif CURRENT_PYTHON < REQUIRED_PYTHON:\n sys.stderr.write(\"\"\"==========================\nUnsupported Python Version\n==========================\nThis version of MDSANIMA requires Python {}.{}\nbut you're trying to install it on Python {}.{}\n\"\"\".format(*(REQUIRED_PYTHON + CURRENT_PYTHON)))\n sys.exit(1)\n\nVERSION = '0.2.0'\nPACKAGE_NAME = 'mdsanima'\nAUTHOR = 'Marcin Rozewski'\nAUTHOR_EMAIL = '[email protected]'\nURL = 'https://github.com/mdsanima/mdsanima'\n\nLICENSE = 'MIT'\nDESCRIPTION = 'The package contains modules that will help in calculating rendering time.'\nLONG_DESCRIPTION = (HERE / \"README.rst\").read_text()\nLONG_DESC_TYPE = \"text/x-rst\"\n\nINSTALL_REQUIRES = [\n 'humanfriendly'\n]\n\nKEYWORDS = [\n 'mdsanima',\n 'render time',\n 'calculator render time',\n 'blender',\n 'blener3d',\n 'rendering',\n 'houdini',\n 'sidefx',\n 'vfx',\n 'cinema4d',\n 'cycles',\n 'redshift',\n 'render engine',\n 'octane render',\n 'mantra',\n 'vray',\n 'clarisse ifx'\n]\n\nsetup(name=PACKAGE_NAME,\n version=VERSION,\n description=DESCRIPTION,\n long_description=LONG_DESCRIPTION,\n long_description_content_type=LONG_DESC_TYPE,\n author=AUTHOR,\n license=LICENSE,\n author_email=AUTHOR_EMAIL,\n url=URL,\n install_requires=INSTALL_REQUIRES,\n packages=find_packages(),\n extras_require={\n \"docs\": [\n 'sphinx', \n 'sphinx-autoapi', \n 'sphinx-rtd-theme', \n 'sphinx-bootstrap-theme', \n 'sphinx-prompt', \n 'sphinx-tabs', \n 'recommonmark'\n ],\n },\n python_requires='>=3.6',\n keywords=KEYWORDS,\n classifiers=[\n 'Development Status :: 5 - Production/Stable',\n 'Environment :: Console',\n 'Intended Audience :: Developers',\n 'License :: OSI Approved :: MIT License',\n 'Natural Language :: English',\n 'Operating System :: POSIX :: Linux',\n 'Operating System :: Microsoft :: Windows :: Windows 10',\n 'Operating System :: MacOS',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n 'Topic :: Multimedia :: Graphics :: 3D Rendering',\n 'Topic :: Multimedia :: Graphics :: 3D Modeling',\n ],\n )",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
from __future__ import print_function
from __future__ import division
import os
import sys
import time
import datetime
import os.path as osp
from collections import defaultdict
import numpy as np
import math
from functools import partial
from tqdm import tqdm
import glog as log
import torch
import torch.nn as nn
import torch.backends.cudnn as cudnn
from args import argument_parser, image_dataset_kwargs, optimizer_kwargs
from torchreid.data_manager import ImageDataManager
from torchreid import models
from torchreid.utils.iotools import save_checkpoint, check_isfile
from torchreid.utils.avgmeter import AverageMeter
from torchreid.utils.loggers import Logger
from torchreid.utils.torchtools import count_num_param
from torchreid.utils.reidtools import visualize_ranked_results, distmat_hist, calc_distmat
from torchreid.eval_metrics import test
from torchreid.utils.load_weights import load_weights
from torchreid.utils.absorb_bn import search_absorbed_bn
from torchreid.evaluate_lfw import evaluate, compute_embeddings_lfw
# global variables
parser = argument_parser()
args = parser.parse_args()
def main():
global args
torch.manual_seed(args.seed)
if not args.use_avai_gpus: os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu_devices
use_gpu = torch.cuda.is_available()
if args.use_cpu: use_gpu = False
log_name = 'log_test.txt'
sys.stdout = Logger(osp.join(args.save_dir, log_name))
print("==========\nArgs:{}\n==========".format(args))
if use_gpu:
print("Currently using GPU {}".format(args.gpu_devices))
cudnn.benchmark = True
torch.cuda.manual_seed_all(args.seed)
else:
print("Currently using CPU, however, GPU is highly recommended")
print("Initializing image data manager")
if not args.convert_to_onnx: # and not args.infer:
dm = ImageDataManager(use_gpu, **image_dataset_kwargs(args))
trainloader, trainloader_dict, testloader_dict = dm.return_dataloaders()
num_train_pids = 100
print("Initializing model: {}".format(args.arch))
model = models.init_model(name=args.arch, num_classes=num_train_pids, loss={'xent', 'htri'},
pretrained=False if args.load_weights else 'imagenet', grayscale=args.grayscale,
ceil_mode=not args.convert_to_onnx, infer=True, bits=args.bits,
normalize_embeddings=args.normalize_embeddings, normalize_fc=args.normalize_fc, convbn=args.convbn)
print("Model size: {:.3f} M".format(count_num_param(model)))
if args.load_weights and check_isfile(args.load_weights):
# load pretrained weights but ignore layers that don't match in size
load_weights(model, args.load_weights)
print("Loaded pretrained weights from '{}'".format(args.load_weights))
if args.absorb_bn:
search_absorbed_bn(model)
if args.quantization or args.save_quantized_model:
from gap_quantization.quantization import ModelQuantizer
from gap_quantization.dump_utils import dump_quant_params, remove_extra_dump, remove_cat_files
if args.quant_data_dir is None:
raise AttributeError('quant-data-dir argument is required.')
num_channels = 1 if args.grayscale else 3
cfg = {
"bits": args.bits, # number of bits to store weights and activations
"accum_bits": 32, # number of bits to store intermediate convolution result
"signed": True, # use signed numbers
"save_folder": args.save_dir, # folder to save results
"data_source": args.quant_data_dir, # folder with images to collect dataset statistics
"use_gpu": False, # use GPU for inference
"batch_size": 1,
"num_workers": 0, # number of workers for PyTorch dataloader
"verbose": True,
"save_params": args.save_quantized_model, # save quantization parameters to the file
"quantize_forward": True, # replace usual convs, poolings, ... with GAP-like ones
"num_input_channels": num_channels,
"raw_input": args.no_normalize,
"double_precision": args.double_precision # use double precision convolutions
}
model = model.cpu()
quantizer = ModelQuantizer(model, cfg, dm.transform_test) # transform test is OK if we use args.no_normalize
quantizer.quantize_model() # otherwise we need to add QuantizeInput operation
if args.infer:
if args.image_path == '':
raise AttributeError('Image for inference is required')
quantizer.dump_activations(args.image_path, dm.transform_test,
save_dir=os.path.join(args.save_dir, 'activations_dump'))
dump_quant_params(args.save_dir, args.convbn)
if args.convbn:
remove_extra_dump(os.path.join(args.save_dir, 'activations_dump'))
remove_cat_files(args.save_dir)
if use_gpu:
model = nn.DataParallel(model).cuda()
if args.evaluate:
print("Evaluate only")
for name in args.target_names:
if not 'lfw' in name.lower():
print("Evaluating {} ...".format(name))
queryloader = testloader_dict[name]['query']
galleryloader = testloader_dict[name]['gallery']
distmat = test(args, model, queryloader, galleryloader, use_gpu, return_distmat=True)
if args.visualize_ranks:
visualize_ranked_results(
distmat, dm.return_testdataset_by_name(name),
save_dir=osp.join(args.save_dir, 'ranked_results', name),
topk=20
)
else:
model.eval()
same_acc, diff_acc, all_acc, auc, thresh = evaluate(args, dm.lfw_dataset, model, compute_embeddings_lfw,
args.test_batch_size, verbose=False, show_failed=args.show_failed, load_embeddings=args.load_embeddings)
log.info('Validation accuracy: {0:.4f}, {1:.4f}'.format(same_acc, diff_acc))
log.info('Validation accuracy mean: {0:.4f}'.format(all_acc))
log.info('Validation AUC: {0:.4f}'.format(auc))
log.info('Estimated threshold: {0:.4f}'.format(thresh))
#roc_auc(model, '/home/maxim/data/lfw/pairsTest.txt', '/media/slow_drive/cropped_lfw', args, use_gpu)
return
if __name__ == '__main__':
main()
|
normal
|
{
"blob_id": "0ad529298f321d2f3a63cde8179a50cf2881ee00",
"index": 2162,
"step-1": "<mask token>\n\n\ndef main():\n global args\n torch.manual_seed(args.seed)\n if not args.use_avai_gpus:\n os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu_devices\n use_gpu = torch.cuda.is_available()\n if args.use_cpu:\n use_gpu = False\n log_name = 'log_test.txt'\n sys.stdout = Logger(osp.join(args.save_dir, log_name))\n print('==========\\nArgs:{}\\n=========='.format(args))\n if use_gpu:\n print('Currently using GPU {}'.format(args.gpu_devices))\n cudnn.benchmark = True\n torch.cuda.manual_seed_all(args.seed)\n else:\n print('Currently using CPU, however, GPU is highly recommended')\n print('Initializing image data manager')\n if not args.convert_to_onnx:\n dm = ImageDataManager(use_gpu, **image_dataset_kwargs(args))\n trainloader, trainloader_dict, testloader_dict = dm.return_dataloaders(\n )\n num_train_pids = 100\n print('Initializing model: {}'.format(args.arch))\n model = models.init_model(name=args.arch, num_classes=num_train_pids,\n loss={'xent', 'htri'}, pretrained=False if args.load_weights else\n 'imagenet', grayscale=args.grayscale, ceil_mode=not args.\n convert_to_onnx, infer=True, bits=args.bits, normalize_embeddings=\n args.normalize_embeddings, normalize_fc=args.normalize_fc, convbn=\n args.convbn)\n print('Model size: {:.3f} M'.format(count_num_param(model)))\n if args.load_weights and check_isfile(args.load_weights):\n load_weights(model, args.load_weights)\n print(\"Loaded pretrained weights from '{}'\".format(args.load_weights))\n if args.absorb_bn:\n search_absorbed_bn(model)\n if args.quantization or args.save_quantized_model:\n from gap_quantization.quantization import ModelQuantizer\n from gap_quantization.dump_utils import dump_quant_params, remove_extra_dump, remove_cat_files\n if args.quant_data_dir is None:\n raise AttributeError('quant-data-dir argument is required.')\n num_channels = 1 if args.grayscale else 3\n cfg = {'bits': args.bits, 'accum_bits': 32, 'signed': True,\n 'save_folder': args.save_dir, 'data_source': args.\n quant_data_dir, 'use_gpu': False, 'batch_size': 1,\n 'num_workers': 0, 'verbose': True, 'save_params': args.\n save_quantized_model, 'quantize_forward': True,\n 'num_input_channels': num_channels, 'raw_input': args.\n no_normalize, 'double_precision': args.double_precision}\n model = model.cpu()\n quantizer = ModelQuantizer(model, cfg, dm.transform_test)\n quantizer.quantize_model()\n if args.infer:\n if args.image_path == '':\n raise AttributeError('Image for inference is required')\n quantizer.dump_activations(args.image_path, dm.transform_test,\n save_dir=os.path.join(args.save_dir, 'activations_dump'))\n dump_quant_params(args.save_dir, args.convbn)\n if args.convbn:\n remove_extra_dump(os.path.join(args.save_dir,\n 'activations_dump'))\n remove_cat_files(args.save_dir)\n if use_gpu:\n model = nn.DataParallel(model).cuda()\n if args.evaluate:\n print('Evaluate only')\n for name in args.target_names:\n if not 'lfw' in name.lower():\n print('Evaluating {} ...'.format(name))\n queryloader = testloader_dict[name]['query']\n galleryloader = testloader_dict[name]['gallery']\n distmat = test(args, model, queryloader, galleryloader,\n use_gpu, return_distmat=True)\n if args.visualize_ranks:\n visualize_ranked_results(distmat, dm.\n return_testdataset_by_name(name), save_dir=osp.join\n (args.save_dir, 'ranked_results', name), topk=20)\n else:\n model.eval()\n same_acc, diff_acc, all_acc, auc, thresh = evaluate(args,\n dm.lfw_dataset, model, compute_embeddings_lfw, args.\n test_batch_size, verbose=False, show_failed=args.\n 
show_failed, load_embeddings=args.load_embeddings)\n log.info('Validation accuracy: {0:.4f}, {1:.4f}'.format(\n same_acc, diff_acc))\n log.info('Validation accuracy mean: {0:.4f}'.format(all_acc))\n log.info('Validation AUC: {0:.4f}'.format(auc))\n log.info('Estimated threshold: {0:.4f}'.format(thresh))\n return\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef main():\n global args\n torch.manual_seed(args.seed)\n if not args.use_avai_gpus:\n os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu_devices\n use_gpu = torch.cuda.is_available()\n if args.use_cpu:\n use_gpu = False\n log_name = 'log_test.txt'\n sys.stdout = Logger(osp.join(args.save_dir, log_name))\n print('==========\\nArgs:{}\\n=========='.format(args))\n if use_gpu:\n print('Currently using GPU {}'.format(args.gpu_devices))\n cudnn.benchmark = True\n torch.cuda.manual_seed_all(args.seed)\n else:\n print('Currently using CPU, however, GPU is highly recommended')\n print('Initializing image data manager')\n if not args.convert_to_onnx:\n dm = ImageDataManager(use_gpu, **image_dataset_kwargs(args))\n trainloader, trainloader_dict, testloader_dict = dm.return_dataloaders(\n )\n num_train_pids = 100\n print('Initializing model: {}'.format(args.arch))\n model = models.init_model(name=args.arch, num_classes=num_train_pids,\n loss={'xent', 'htri'}, pretrained=False if args.load_weights else\n 'imagenet', grayscale=args.grayscale, ceil_mode=not args.\n convert_to_onnx, infer=True, bits=args.bits, normalize_embeddings=\n args.normalize_embeddings, normalize_fc=args.normalize_fc, convbn=\n args.convbn)\n print('Model size: {:.3f} M'.format(count_num_param(model)))\n if args.load_weights and check_isfile(args.load_weights):\n load_weights(model, args.load_weights)\n print(\"Loaded pretrained weights from '{}'\".format(args.load_weights))\n if args.absorb_bn:\n search_absorbed_bn(model)\n if args.quantization or args.save_quantized_model:\n from gap_quantization.quantization import ModelQuantizer\n from gap_quantization.dump_utils import dump_quant_params, remove_extra_dump, remove_cat_files\n if args.quant_data_dir is None:\n raise AttributeError('quant-data-dir argument is required.')\n num_channels = 1 if args.grayscale else 3\n cfg = {'bits': args.bits, 'accum_bits': 32, 'signed': True,\n 'save_folder': args.save_dir, 'data_source': args.\n quant_data_dir, 'use_gpu': False, 'batch_size': 1,\n 'num_workers': 0, 'verbose': True, 'save_params': args.\n save_quantized_model, 'quantize_forward': True,\n 'num_input_channels': num_channels, 'raw_input': args.\n no_normalize, 'double_precision': args.double_precision}\n model = model.cpu()\n quantizer = ModelQuantizer(model, cfg, dm.transform_test)\n quantizer.quantize_model()\n if args.infer:\n if args.image_path == '':\n raise AttributeError('Image for inference is required')\n quantizer.dump_activations(args.image_path, dm.transform_test,\n save_dir=os.path.join(args.save_dir, 'activations_dump'))\n dump_quant_params(args.save_dir, args.convbn)\n if args.convbn:\n remove_extra_dump(os.path.join(args.save_dir,\n 'activations_dump'))\n remove_cat_files(args.save_dir)\n if use_gpu:\n model = nn.DataParallel(model).cuda()\n if args.evaluate:\n print('Evaluate only')\n for name in args.target_names:\n if not 'lfw' in name.lower():\n print('Evaluating {} ...'.format(name))\n queryloader = testloader_dict[name]['query']\n galleryloader = testloader_dict[name]['gallery']\n distmat = test(args, model, queryloader, galleryloader,\n use_gpu, return_distmat=True)\n if args.visualize_ranks:\n visualize_ranked_results(distmat, dm.\n return_testdataset_by_name(name), save_dir=osp.join\n (args.save_dir, 'ranked_results', name), topk=20)\n else:\n model.eval()\n same_acc, diff_acc, all_acc, auc, thresh = evaluate(args,\n dm.lfw_dataset, model, compute_embeddings_lfw, args.\n test_batch_size, verbose=False, show_failed=args.\n 
show_failed, load_embeddings=args.load_embeddings)\n log.info('Validation accuracy: {0:.4f}, {1:.4f}'.format(\n same_acc, diff_acc))\n log.info('Validation accuracy mean: {0:.4f}'.format(all_acc))\n log.info('Validation AUC: {0:.4f}'.format(auc))\n log.info('Estimated threshold: {0:.4f}'.format(thresh))\n return\n\n\nif __name__ == '__main__':\n main()\n",
"step-3": "<mask token>\nparser = argument_parser()\nargs = parser.parse_args()\n\n\ndef main():\n global args\n torch.manual_seed(args.seed)\n if not args.use_avai_gpus:\n os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu_devices\n use_gpu = torch.cuda.is_available()\n if args.use_cpu:\n use_gpu = False\n log_name = 'log_test.txt'\n sys.stdout = Logger(osp.join(args.save_dir, log_name))\n print('==========\\nArgs:{}\\n=========='.format(args))\n if use_gpu:\n print('Currently using GPU {}'.format(args.gpu_devices))\n cudnn.benchmark = True\n torch.cuda.manual_seed_all(args.seed)\n else:\n print('Currently using CPU, however, GPU is highly recommended')\n print('Initializing image data manager')\n if not args.convert_to_onnx:\n dm = ImageDataManager(use_gpu, **image_dataset_kwargs(args))\n trainloader, trainloader_dict, testloader_dict = dm.return_dataloaders(\n )\n num_train_pids = 100\n print('Initializing model: {}'.format(args.arch))\n model = models.init_model(name=args.arch, num_classes=num_train_pids,\n loss={'xent', 'htri'}, pretrained=False if args.load_weights else\n 'imagenet', grayscale=args.grayscale, ceil_mode=not args.\n convert_to_onnx, infer=True, bits=args.bits, normalize_embeddings=\n args.normalize_embeddings, normalize_fc=args.normalize_fc, convbn=\n args.convbn)\n print('Model size: {:.3f} M'.format(count_num_param(model)))\n if args.load_weights and check_isfile(args.load_weights):\n load_weights(model, args.load_weights)\n print(\"Loaded pretrained weights from '{}'\".format(args.load_weights))\n if args.absorb_bn:\n search_absorbed_bn(model)\n if args.quantization or args.save_quantized_model:\n from gap_quantization.quantization import ModelQuantizer\n from gap_quantization.dump_utils import dump_quant_params, remove_extra_dump, remove_cat_files\n if args.quant_data_dir is None:\n raise AttributeError('quant-data-dir argument is required.')\n num_channels = 1 if args.grayscale else 3\n cfg = {'bits': args.bits, 'accum_bits': 32, 'signed': True,\n 'save_folder': args.save_dir, 'data_source': args.\n quant_data_dir, 'use_gpu': False, 'batch_size': 1,\n 'num_workers': 0, 'verbose': True, 'save_params': args.\n save_quantized_model, 'quantize_forward': True,\n 'num_input_channels': num_channels, 'raw_input': args.\n no_normalize, 'double_precision': args.double_precision}\n model = model.cpu()\n quantizer = ModelQuantizer(model, cfg, dm.transform_test)\n quantizer.quantize_model()\n if args.infer:\n if args.image_path == '':\n raise AttributeError('Image for inference is required')\n quantizer.dump_activations(args.image_path, dm.transform_test,\n save_dir=os.path.join(args.save_dir, 'activations_dump'))\n dump_quant_params(args.save_dir, args.convbn)\n if args.convbn:\n remove_extra_dump(os.path.join(args.save_dir,\n 'activations_dump'))\n remove_cat_files(args.save_dir)\n if use_gpu:\n model = nn.DataParallel(model).cuda()\n if args.evaluate:\n print('Evaluate only')\n for name in args.target_names:\n if not 'lfw' in name.lower():\n print('Evaluating {} ...'.format(name))\n queryloader = testloader_dict[name]['query']\n galleryloader = testloader_dict[name]['gallery']\n distmat = test(args, model, queryloader, galleryloader,\n use_gpu, return_distmat=True)\n if args.visualize_ranks:\n visualize_ranked_results(distmat, dm.\n return_testdataset_by_name(name), save_dir=osp.join\n (args.save_dir, 'ranked_results', name), topk=20)\n else:\n model.eval()\n same_acc, diff_acc, all_acc, auc, thresh = evaluate(args,\n dm.lfw_dataset, model, compute_embeddings_lfw, args.\n 
test_batch_size, verbose=False, show_failed=args.\n show_failed, load_embeddings=args.load_embeddings)\n log.info('Validation accuracy: {0:.4f}, {1:.4f}'.format(\n same_acc, diff_acc))\n log.info('Validation accuracy mean: {0:.4f}'.format(all_acc))\n log.info('Validation AUC: {0:.4f}'.format(auc))\n log.info('Estimated threshold: {0:.4f}'.format(thresh))\n return\n\n\nif __name__ == '__main__':\n main()\n",
"step-4": "from __future__ import print_function\nfrom __future__ import division\nimport os\nimport sys\nimport time\nimport datetime\nimport os.path as osp\nfrom collections import defaultdict\nimport numpy as np\nimport math\nfrom functools import partial\nfrom tqdm import tqdm\nimport glog as log\nimport torch\nimport torch.nn as nn\nimport torch.backends.cudnn as cudnn\nfrom args import argument_parser, image_dataset_kwargs, optimizer_kwargs\nfrom torchreid.data_manager import ImageDataManager\nfrom torchreid import models\nfrom torchreid.utils.iotools import save_checkpoint, check_isfile\nfrom torchreid.utils.avgmeter import AverageMeter\nfrom torchreid.utils.loggers import Logger\nfrom torchreid.utils.torchtools import count_num_param\nfrom torchreid.utils.reidtools import visualize_ranked_results, distmat_hist, calc_distmat\nfrom torchreid.eval_metrics import test\nfrom torchreid.utils.load_weights import load_weights\nfrom torchreid.utils.absorb_bn import search_absorbed_bn\nfrom torchreid.evaluate_lfw import evaluate, compute_embeddings_lfw\nparser = argument_parser()\nargs = parser.parse_args()\n\n\ndef main():\n global args\n torch.manual_seed(args.seed)\n if not args.use_avai_gpus:\n os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu_devices\n use_gpu = torch.cuda.is_available()\n if args.use_cpu:\n use_gpu = False\n log_name = 'log_test.txt'\n sys.stdout = Logger(osp.join(args.save_dir, log_name))\n print('==========\\nArgs:{}\\n=========='.format(args))\n if use_gpu:\n print('Currently using GPU {}'.format(args.gpu_devices))\n cudnn.benchmark = True\n torch.cuda.manual_seed_all(args.seed)\n else:\n print('Currently using CPU, however, GPU is highly recommended')\n print('Initializing image data manager')\n if not args.convert_to_onnx:\n dm = ImageDataManager(use_gpu, **image_dataset_kwargs(args))\n trainloader, trainloader_dict, testloader_dict = dm.return_dataloaders(\n )\n num_train_pids = 100\n print('Initializing model: {}'.format(args.arch))\n model = models.init_model(name=args.arch, num_classes=num_train_pids,\n loss={'xent', 'htri'}, pretrained=False if args.load_weights else\n 'imagenet', grayscale=args.grayscale, ceil_mode=not args.\n convert_to_onnx, infer=True, bits=args.bits, normalize_embeddings=\n args.normalize_embeddings, normalize_fc=args.normalize_fc, convbn=\n args.convbn)\n print('Model size: {:.3f} M'.format(count_num_param(model)))\n if args.load_weights and check_isfile(args.load_weights):\n load_weights(model, args.load_weights)\n print(\"Loaded pretrained weights from '{}'\".format(args.load_weights))\n if args.absorb_bn:\n search_absorbed_bn(model)\n if args.quantization or args.save_quantized_model:\n from gap_quantization.quantization import ModelQuantizer\n from gap_quantization.dump_utils import dump_quant_params, remove_extra_dump, remove_cat_files\n if args.quant_data_dir is None:\n raise AttributeError('quant-data-dir argument is required.')\n num_channels = 1 if args.grayscale else 3\n cfg = {'bits': args.bits, 'accum_bits': 32, 'signed': True,\n 'save_folder': args.save_dir, 'data_source': args.\n quant_data_dir, 'use_gpu': False, 'batch_size': 1,\n 'num_workers': 0, 'verbose': True, 'save_params': args.\n save_quantized_model, 'quantize_forward': True,\n 'num_input_channels': num_channels, 'raw_input': args.\n no_normalize, 'double_precision': args.double_precision}\n model = model.cpu()\n quantizer = ModelQuantizer(model, cfg, dm.transform_test)\n quantizer.quantize_model()\n if args.infer:\n if args.image_path == '':\n raise 
AttributeError('Image for inference is required')\n quantizer.dump_activations(args.image_path, dm.transform_test,\n save_dir=os.path.join(args.save_dir, 'activations_dump'))\n dump_quant_params(args.save_dir, args.convbn)\n if args.convbn:\n remove_extra_dump(os.path.join(args.save_dir,\n 'activations_dump'))\n remove_cat_files(args.save_dir)\n if use_gpu:\n model = nn.DataParallel(model).cuda()\n if args.evaluate:\n print('Evaluate only')\n for name in args.target_names:\n if not 'lfw' in name.lower():\n print('Evaluating {} ...'.format(name))\n queryloader = testloader_dict[name]['query']\n galleryloader = testloader_dict[name]['gallery']\n distmat = test(args, model, queryloader, galleryloader,\n use_gpu, return_distmat=True)\n if args.visualize_ranks:\n visualize_ranked_results(distmat, dm.\n return_testdataset_by_name(name), save_dir=osp.join\n (args.save_dir, 'ranked_results', name), topk=20)\n else:\n model.eval()\n same_acc, diff_acc, all_acc, auc, thresh = evaluate(args,\n dm.lfw_dataset, model, compute_embeddings_lfw, args.\n test_batch_size, verbose=False, show_failed=args.\n show_failed, load_embeddings=args.load_embeddings)\n log.info('Validation accuracy: {0:.4f}, {1:.4f}'.format(\n same_acc, diff_acc))\n log.info('Validation accuracy mean: {0:.4f}'.format(all_acc))\n log.info('Validation AUC: {0:.4f}'.format(auc))\n log.info('Estimated threshold: {0:.4f}'.format(thresh))\n return\n\n\nif __name__ == '__main__':\n main()\n",
"step-5": "from __future__ import print_function\nfrom __future__ import division\n\nimport os\nimport sys\nimport time\nimport datetime\nimport os.path as osp\nfrom collections import defaultdict\nimport numpy as np\nimport math\nfrom functools import partial\nfrom tqdm import tqdm\nimport glog as log\n\nimport torch\nimport torch.nn as nn\nimport torch.backends.cudnn as cudnn\n\n\nfrom args import argument_parser, image_dataset_kwargs, optimizer_kwargs\nfrom torchreid.data_manager import ImageDataManager\nfrom torchreid import models\nfrom torchreid.utils.iotools import save_checkpoint, check_isfile\nfrom torchreid.utils.avgmeter import AverageMeter\nfrom torchreid.utils.loggers import Logger\nfrom torchreid.utils.torchtools import count_num_param\nfrom torchreid.utils.reidtools import visualize_ranked_results, distmat_hist, calc_distmat\nfrom torchreid.eval_metrics import test\nfrom torchreid.utils.load_weights import load_weights\nfrom torchreid.utils.absorb_bn import search_absorbed_bn\nfrom torchreid.evaluate_lfw import evaluate, compute_embeddings_lfw\n\n\n# global variables\nparser = argument_parser()\nargs = parser.parse_args()\n\n\ndef main():\n global args\n\n torch.manual_seed(args.seed)\n if not args.use_avai_gpus: os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu_devices\n use_gpu = torch.cuda.is_available()\n if args.use_cpu: use_gpu = False\n log_name = 'log_test.txt'\n sys.stdout = Logger(osp.join(args.save_dir, log_name))\n print(\"==========\\nArgs:{}\\n==========\".format(args))\n\n if use_gpu:\n print(\"Currently using GPU {}\".format(args.gpu_devices))\n cudnn.benchmark = True\n torch.cuda.manual_seed_all(args.seed)\n else:\n print(\"Currently using CPU, however, GPU is highly recommended\")\n\n print(\"Initializing image data manager\")\n if not args.convert_to_onnx: # and not args.infer:\n dm = ImageDataManager(use_gpu, **image_dataset_kwargs(args))\n trainloader, trainloader_dict, testloader_dict = dm.return_dataloaders()\n\n num_train_pids = 100\n\n print(\"Initializing model: {}\".format(args.arch))\n model = models.init_model(name=args.arch, num_classes=num_train_pids, loss={'xent', 'htri'},\n pretrained=False if args.load_weights else 'imagenet', grayscale=args.grayscale,\n ceil_mode=not args.convert_to_onnx, infer=True, bits=args.bits,\n normalize_embeddings=args.normalize_embeddings, normalize_fc=args.normalize_fc, convbn=args.convbn)\n print(\"Model size: {:.3f} M\".format(count_num_param(model)))\n\n if args.load_weights and check_isfile(args.load_weights):\n # load pretrained weights but ignore layers that don't match in size\n load_weights(model, args.load_weights)\n print(\"Loaded pretrained weights from '{}'\".format(args.load_weights))\n\n if args.absorb_bn:\n search_absorbed_bn(model)\n\n if args.quantization or args.save_quantized_model:\n from gap_quantization.quantization import ModelQuantizer\n from gap_quantization.dump_utils import dump_quant_params, remove_extra_dump, remove_cat_files\n\n if args.quant_data_dir is None:\n raise AttributeError('quant-data-dir argument is required.')\n\n num_channels = 1 if args.grayscale else 3\n cfg = {\n \"bits\": args.bits, # number of bits to store weights and activations\n \"accum_bits\": 32, # number of bits to store intermediate convolution result\n \"signed\": True, # use signed numbers\n \"save_folder\": args.save_dir, # folder to save results\n \"data_source\": args.quant_data_dir, # folder with images to collect dataset statistics\n \"use_gpu\": False, # use GPU for inference\n \"batch_size\": 1,\n 
\"num_workers\": 0, # number of workers for PyTorch dataloader\n \"verbose\": True,\n \"save_params\": args.save_quantized_model, # save quantization parameters to the file\n \"quantize_forward\": True, # replace usual convs, poolings, ... with GAP-like ones\n \"num_input_channels\": num_channels,\n \"raw_input\": args.no_normalize,\n \"double_precision\": args.double_precision # use double precision convolutions\n }\n\n model = model.cpu()\n quantizer = ModelQuantizer(model, cfg, dm.transform_test) # transform test is OK if we use args.no_normalize\n quantizer.quantize_model() # otherwise we need to add QuantizeInput operation\n\n if args.infer:\n if args.image_path == '':\n raise AttributeError('Image for inference is required')\n\n quantizer.dump_activations(args.image_path, dm.transform_test,\n save_dir=os.path.join(args.save_dir, 'activations_dump'))\n dump_quant_params(args.save_dir, args.convbn)\n if args.convbn:\n remove_extra_dump(os.path.join(args.save_dir, 'activations_dump'))\n remove_cat_files(args.save_dir)\n\n\n if use_gpu:\n model = nn.DataParallel(model).cuda()\n\n\n if args.evaluate:\n print(\"Evaluate only\")\n\n for name in args.target_names:\n if not 'lfw' in name.lower():\n print(\"Evaluating {} ...\".format(name))\n queryloader = testloader_dict[name]['query']\n galleryloader = testloader_dict[name]['gallery']\n distmat = test(args, model, queryloader, galleryloader, use_gpu, return_distmat=True)\n\n if args.visualize_ranks:\n visualize_ranked_results(\n distmat, dm.return_testdataset_by_name(name),\n save_dir=osp.join(args.save_dir, 'ranked_results', name),\n topk=20\n )\n\n else:\n model.eval()\n same_acc, diff_acc, all_acc, auc, thresh = evaluate(args, dm.lfw_dataset, model, compute_embeddings_lfw,\n args.test_batch_size, verbose=False, show_failed=args.show_failed, load_embeddings=args.load_embeddings)\n log.info('Validation accuracy: {0:.4f}, {1:.4f}'.format(same_acc, diff_acc))\n log.info('Validation accuracy mean: {0:.4f}'.format(all_acc))\n log.info('Validation AUC: {0:.4f}'.format(auc))\n log.info('Estimated threshold: {0:.4f}'.format(thresh))\n #roc_auc(model, '/home/maxim/data/lfw/pairsTest.txt', '/media/slow_drive/cropped_lfw', args, use_gpu)\n return\n\nif __name__ == '__main__':\n main()",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
import tensorflow as tf
optimizer = tf.train.GradientDescentOptimizer(0.001).minimize(loss)
_, l = sess.run([optimizer, loss], feed_dict={X:x, Y:y})
The session looks at all trainable variables that the loss depends on and updates them.
tf.Variable(initializer=None, trainable=True, collections=None, validate_shape=True, caching_device=None,
name=None, variable_def=None, dtype=None, expected_shape=None, import_scope=None)
List of optimizers in TF
1. tf.train.GradientDescentOptimizer
2. tf.train.AdagradOptimizer
3. tf.train.MomentumOptimizer
4. tf.train.AdamOptimizer
5. tf.train.ProximalGradientDescentOptimizer
6. tf.train.ProximalAdagradOptimizer
7. tf.train.RMSPropOptimizer
And more
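
A minimal end-to-end sketch of the pattern above (hedged: assumes TensorFlow 1.x graph mode, or tf.compat.v1 with v2 behaviour disabled; the data and the names X, Y, w, b, loss are illustrative only, not taken from any particular codebase):

import numpy as np
import tensorflow as tf

x = np.linspace(0.0, 1.0, 50).astype(np.float32)
y = 3.0 * x + 1.0

X = tf.placeholder(tf.float32, name='X')
Y = tf.placeholder(tf.float32, name='Y')
w = tf.Variable(0.0, name='w')   # trainable=True by default, so minimize() will update it
b = tf.Variable(0.0, name='b')
loss = tf.reduce_mean(tf.square(w * X + b - Y))

# minimize() builds gradient/update ops for every trainable variable the loss depends on (w, b)
optimizer = tf.train.GradientDescentOptimizer(0.001).minimize(loss)

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    for _ in range(1000):
        _, l = sess.run([optimizer, loss], feed_dict={X: x, Y: y})
    print(sess.run([w, b]), l)

Swapping in any optimizer from the list above, e.g. tf.train.AdamOptimizer(0.001), only changes the line that builds optimizer.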
|
normal
|
{
"blob_id": "edb206a8cd5bc48e831142d5632fd7eb90abd209",
"index": 72,
"step-1": "import tensorflow as tf\noptimizer = tf.train.GradientDescentOptimizer(0.001).minimize(loss)\n_, l = sess.run([optimizer, loss], feed_dict={X:x, Y:y})\n\nSession looks at all trainable variables that loss depends on and update them\ntf.Variable(initializer=None, trainable=True, collections=None, validate_shape=True, caching_device=None,\n name=None, variable_def=None, dtype=None, expected_shape=None, import_scope=None)\n\nList of optimizers in TF\n1. tf.train.GradientDescentOptimizer\n2. tf.train.AdagradOptimizer\n3. tf.train.MomentumOptimizer\n4. tf.train.AdamOptimizer\n5. tf.train.ProximalGradientDescentOptimizer\n6. tf.train.ProximalAdagradOptimizer\n7. tf.train.RMSPropOptimizer\nAnd more",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
from collections import deque
def solution(play_time, adv_time, logs):
'''
    Strategy :
    slide adv_start_time from the first log start time up to 995959 (99:59:59) - adv_time
    (sliding window over a prefix-sum array)

    Step 1.
    string time -> integer time (seconds)

    Step 2. pseudo code : sliding-window / two-pointer scan
    keep max viewer-seconds = 0 and update it for every candidate window

    return the start time of the best window
'''
## Step 1.
MAX = str2int(play_time)
max_view = 0
ans_time = 0
adv_time = str2int(adv_time)
logs = [[str2int(log.split("-")[0]),str2int(log.split("-")[1])] for log in logs]
view_list = [0] * (MAX+1)
## Step 2.
    ## difference array (discrete derivative of the viewer count)
for start_time,end_time in logs:
view_list[start_time] += 1
view_list[end_time] -= 1
    ## prefix sum -> concurrent viewers at each second
for i in range(1,MAX+1):
view_list[i] = view_list[i]+view_list[i-1]
    ## prefix sum again -> cumulative viewer-seconds
for i in range(1,MAX+1):
view_list[i] = view_list[i]+view_list[i-1]
for start_time in range(MAX-adv_time+1):
## start time 0,1,2,... MAX-adv_time
## end time adv_time, ... MAX
end_time = start_time + adv_time
temp_view = view_list[end_time] - view_list[start_time]
if temp_view > max_view:
max_view = temp_view
ans_time = start_time
if ans_time != 0:
ans_time += 1
return int2str(ans_time)
def str2int(strtime:str):
hh,mm,ss = strtime.split(":")
return 3600*int(hh)+60*int(mm)+int(ss)
def int2str(inttime:int):
hh = inttime//3600
mm = (inttime%3600)//60
ss = inttime%60
return str(hh).zfill(2)+":"+str(mm).zfill(2)+":"+str(ss).zfill(2)
if __name__ == "__main__":
play_time = "02:03:55"
adv_time = "00:14:15"
logs = ["01:20:15-01:45:14", "00:25:50-00:48:29", "00:40:31-01:00:00", "01:37:44-02:02:30", "01:30:59-01:53:29"]
result = "01:30:59"
print(solution(play_time, adv_time, logs))
print(result)
play_time = "99:59:59"
adv_time = "25:00:00"
logs = ["69:59:59-89:59:59", "01:00:00-21:00:00", "79:59:59-99:59:59", "11:00:00-31:00:00"]
result = "01:00:00"
print(solution(play_time, adv_time, logs))
print(result)
play_time = "50:00:00"
adv_time = "50:00:00"
logs = ["15:36:51-38:21:49", "10:14:18-15:36:51", "38:21:49-42:51:45"]
result = "00:00:00"
print(solution(play_time, adv_time, logs))
print(result)
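
# A standalone toy illustration of the difference-array / double prefix-sum trick used in
# solution() above; the interval numbers below are made up (hedged sketch, not a test case).
intervals = [(1, 4), (2, 6)]      # half-open [start, end) viewing intervals, in seconds
MAX_T = 8
ad_len = 3
view = [0] * (MAX_T + 1)
for s, e in intervals:            # +1/-1 markers (the "derivative" step)
    view[s] += 1
    view[e] -= 1
for i in range(1, MAX_T + 1):     # prefix sum -> concurrent viewers at each second
    view[i] += view[i - 1]
for i in range(1, MAX_T + 1):     # prefix sum again -> cumulative viewer-seconds
    view[i] += view[i - 1]
# same window convention as solution(): score = view[start + ad_len] - view[start]
best_start = max(range(MAX_T - ad_len + 1), key=lambda s: view[s + ad_len] - view[s])
print(best_start, view[best_start + ad_len] - view[best_start])   # -> 0 5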
|
normal
|
{
"blob_id": "cb50a5352b0ad7b04dee9393c50da54fdf507376",
"index": 2018,
"step-1": "<mask token>\n\n\ndef str2int(strtime: str):\n hh, mm, ss = strtime.split(':')\n return 3600 * int(hh) + 60 * int(mm) + int(ss)\n\n\ndef int2str(inttime: int):\n hh = inttime // 3600\n mm = inttime % 3600 // 60\n ss = inttime % 60\n return str(hh).zfill(2) + ':' + str(mm).zfill(2) + ':' + str(ss).zfill(2)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef solution(play_time, adv_time, logs):\n \"\"\"\n Strategy : \n adv_start_time을 log start time 부터 < 995959 - adv time\n sliding window \n\n Step 1. \n String time -> integer time\n\n Step 2. pseudo code : Two pointer algorithm\n max time = 0\n \n return max time\n \"\"\"\n MAX = str2int(play_time)\n max_view = 0\n ans_time = 0\n adv_time = str2int(adv_time)\n logs = [[str2int(log.split('-')[0]), str2int(log.split('-')[1])] for\n log in logs]\n view_list = [0] * (MAX + 1)\n for start_time, end_time in logs:\n view_list[start_time] += 1\n view_list[end_time] -= 1\n for i in range(1, MAX + 1):\n view_list[i] = view_list[i] + view_list[i - 1]\n for i in range(1, MAX + 1):\n view_list[i] = view_list[i] + view_list[i - 1]\n for start_time in range(MAX - adv_time + 1):\n end_time = start_time + adv_time\n temp_view = view_list[end_time] - view_list[start_time]\n if temp_view > max_view:\n max_view = temp_view\n ans_time = start_time\n if ans_time != 0:\n ans_time += 1\n return int2str(ans_time)\n\n\ndef str2int(strtime: str):\n hh, mm, ss = strtime.split(':')\n return 3600 * int(hh) + 60 * int(mm) + int(ss)\n\n\ndef int2str(inttime: int):\n hh = inttime // 3600\n mm = inttime % 3600 // 60\n ss = inttime % 60\n return str(hh).zfill(2) + ':' + str(mm).zfill(2) + ':' + str(ss).zfill(2)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef solution(play_time, adv_time, logs):\n \"\"\"\n Strategy : \n adv_start_time을 log start time 부터 < 995959 - adv time\n sliding window \n\n Step 1. \n String time -> integer time\n\n Step 2. pseudo code : Two pointer algorithm\n max time = 0\n \n return max time\n \"\"\"\n MAX = str2int(play_time)\n max_view = 0\n ans_time = 0\n adv_time = str2int(adv_time)\n logs = [[str2int(log.split('-')[0]), str2int(log.split('-')[1])] for\n log in logs]\n view_list = [0] * (MAX + 1)\n for start_time, end_time in logs:\n view_list[start_time] += 1\n view_list[end_time] -= 1\n for i in range(1, MAX + 1):\n view_list[i] = view_list[i] + view_list[i - 1]\n for i in range(1, MAX + 1):\n view_list[i] = view_list[i] + view_list[i - 1]\n for start_time in range(MAX - adv_time + 1):\n end_time = start_time + adv_time\n temp_view = view_list[end_time] - view_list[start_time]\n if temp_view > max_view:\n max_view = temp_view\n ans_time = start_time\n if ans_time != 0:\n ans_time += 1\n return int2str(ans_time)\n\n\ndef str2int(strtime: str):\n hh, mm, ss = strtime.split(':')\n return 3600 * int(hh) + 60 * int(mm) + int(ss)\n\n\ndef int2str(inttime: int):\n hh = inttime // 3600\n mm = inttime % 3600 // 60\n ss = inttime % 60\n return str(hh).zfill(2) + ':' + str(mm).zfill(2) + ':' + str(ss).zfill(2)\n\n\nif __name__ == '__main__':\n play_time = '02:03:55'\n adv_time = '00:14:15'\n logs = ['01:20:15-01:45:14', '00:25:50-00:48:29', '00:40:31-01:00:00',\n '01:37:44-02:02:30', '01:30:59-01:53:29']\n result = '01:30:59'\n print(solution(play_time, adv_time, logs))\n print(result)\n play_time = '99:59:59'\n adv_time = '25:00:00'\n logs = ['69:59:59-89:59:59', '01:00:00-21:00:00', '79:59:59-99:59:59',\n '11:00:00-31:00:00']\n result = '01:00:00'\n print(solution(play_time, adv_time, logs))\n print(result)\n play_time = '50:00:00'\n adv_time = '50:00:00'\n logs = ['15:36:51-38:21:49', '10:14:18-15:36:51', '38:21:49-42:51:45']\n result = '00:00:00'\n print(solution(play_time, adv_time, logs))\n print(result)\n",
"step-4": "from collections import deque\n\n\ndef solution(play_time, adv_time, logs):\n \"\"\"\n Strategy : \n adv_start_time을 log start time 부터 < 995959 - adv time\n sliding window \n\n Step 1. \n String time -> integer time\n\n Step 2. pseudo code : Two pointer algorithm\n max time = 0\n \n return max time\n \"\"\"\n MAX = str2int(play_time)\n max_view = 0\n ans_time = 0\n adv_time = str2int(adv_time)\n logs = [[str2int(log.split('-')[0]), str2int(log.split('-')[1])] for\n log in logs]\n view_list = [0] * (MAX + 1)\n for start_time, end_time in logs:\n view_list[start_time] += 1\n view_list[end_time] -= 1\n for i in range(1, MAX + 1):\n view_list[i] = view_list[i] + view_list[i - 1]\n for i in range(1, MAX + 1):\n view_list[i] = view_list[i] + view_list[i - 1]\n for start_time in range(MAX - adv_time + 1):\n end_time = start_time + adv_time\n temp_view = view_list[end_time] - view_list[start_time]\n if temp_view > max_view:\n max_view = temp_view\n ans_time = start_time\n if ans_time != 0:\n ans_time += 1\n return int2str(ans_time)\n\n\ndef str2int(strtime: str):\n hh, mm, ss = strtime.split(':')\n return 3600 * int(hh) + 60 * int(mm) + int(ss)\n\n\ndef int2str(inttime: int):\n hh = inttime // 3600\n mm = inttime % 3600 // 60\n ss = inttime % 60\n return str(hh).zfill(2) + ':' + str(mm).zfill(2) + ':' + str(ss).zfill(2)\n\n\nif __name__ == '__main__':\n play_time = '02:03:55'\n adv_time = '00:14:15'\n logs = ['01:20:15-01:45:14', '00:25:50-00:48:29', '00:40:31-01:00:00',\n '01:37:44-02:02:30', '01:30:59-01:53:29']\n result = '01:30:59'\n print(solution(play_time, adv_time, logs))\n print(result)\n play_time = '99:59:59'\n adv_time = '25:00:00'\n logs = ['69:59:59-89:59:59', '01:00:00-21:00:00', '79:59:59-99:59:59',\n '11:00:00-31:00:00']\n result = '01:00:00'\n print(solution(play_time, adv_time, logs))\n print(result)\n play_time = '50:00:00'\n adv_time = '50:00:00'\n logs = ['15:36:51-38:21:49', '10:14:18-15:36:51', '38:21:49-42:51:45']\n result = '00:00:00'\n print(solution(play_time, adv_time, logs))\n print(result)\n",
"step-5": "from collections import deque\ndef solution(play_time, adv_time, logs):\n\n '''\n Strategy : \n adv_start_time을 log start time 부터 < 995959 - adv time\n sliding window \n\n Step 1. \n String time -> integer time\n\n Step 2. pseudo code : Two pointer algorithm\n max time = 0\n \n return max time\n '''\n ## Step 1.\n MAX = str2int(play_time)\n max_view = 0\n ans_time = 0\n adv_time = str2int(adv_time)\n logs = [[str2int(log.split(\"-\")[0]),str2int(log.split(\"-\")[1])] for log in logs]\n view_list = [0] * (MAX+1)\n ## Step 2.\n ## 도함수\n for start_time,end_time in logs:\n view_list[start_time] += 1\n view_list[end_time] -= 1\n\n ## 함수\n for i in range(1,MAX+1):\n view_list[i] = view_list[i]+view_list[i-1]\n\n ## 누적 합\n for i in range(1,MAX+1):\n view_list[i] = view_list[i]+view_list[i-1]\n \n\n for start_time in range(MAX-adv_time+1):\n ## start time 0,1,2,... MAX-adv_time\n ## end time adv_time, ... MAX\n end_time = start_time + adv_time\n temp_view = view_list[end_time] - view_list[start_time]\n if temp_view > max_view:\n max_view = temp_view\n ans_time = start_time\n if ans_time != 0:\n ans_time += 1\n return int2str(ans_time)\n\ndef str2int(strtime:str):\n hh,mm,ss = strtime.split(\":\")\n return 3600*int(hh)+60*int(mm)+int(ss)\n\ndef int2str(inttime:int):\n hh = inttime//3600\n mm = (inttime%3600)//60\n ss = inttime%60\n return str(hh).zfill(2)+\":\"+str(mm).zfill(2)+\":\"+str(ss).zfill(2)\n\n\nif __name__ == \"__main__\":\n play_time = \"02:03:55\"\n adv_time = \"00:14:15\"\n logs = [\"01:20:15-01:45:14\", \"00:25:50-00:48:29\", \"00:40:31-01:00:00\", \"01:37:44-02:02:30\", \"01:30:59-01:53:29\"]\n result = \"01:30:59\"\n print(solution(play_time, adv_time, logs))\n print(result)\n play_time = \"99:59:59\"\n adv_time = \"25:00:00\"\n logs = [\"69:59:59-89:59:59\", \"01:00:00-21:00:00\", \"79:59:59-99:59:59\", \"11:00:00-31:00:00\"]\n result = \"01:00:00\"\n print(solution(play_time, adv_time, logs))\n print(result)\n play_time = \"50:00:00\"\n adv_time = \"50:00:00\"\n logs = [\"15:36:51-38:21:49\", \"10:14:18-15:36:51\", \"38:21:49-42:51:45\"]\n result = \"00:00:00\"\n print(solution(play_time, adv_time, logs))\n print(result)",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
import times_series_learning as tsl
import numpy as np
import time
import datetime as dt
import sortedcontainers
import pandas as pd
from collections import defaultdict
class ServerProfileLearning(object):
def __init__(self, data, parameters, distribution, distribution_period, level_threshold,
processus=True, moving_window=60,train_mode=True, verbose=False):
self.label_number = 1 #len(np.unique(data['label'].values))
self.label = 1 #np.unique(data['label'].values)
self.data = data
self.parameters = np.ones((self.label_number + 1, 4)) * parameters # see parameters in times_series_learning
self.data_prep = None
self.hostname = self.data.iloc[0, 1]
self.server_profile = dict()
        self.distribution = distribution  # distribution of the distance list, shared across all servers and clusters; be careful: sorted containers
self.distribution_period = distribution_period # distribution period where we compute metrics
self.level_threshold = level_threshold # level we consider for outliers
self.verbose = verbose
self.processus = processus
self.moving_window = moving_window
self.train_mode = train_mode
self.measures = self.initdict()
self.timestamp_anomaly = pd.DataFrame(columns=['Timestamp','Area_Difference'])
def initdict(self):
d = defaultdict(dict)
for i in range(int((24*6*60)/self.distribution_period)+1):
d[i] = {}
d[i]['Area_Difference'] = []
d[i]['Max_Spread'] = []
return d
# sortedcontainers.SortedDict(sortedcontainers.SortedList())
def preprocess_data(self, data):
data_prep = data.drop(self.data.columns[1:len(self.data.columns) - 1], axis=1)
data_prep = data_prep.groupby(['label'])
return data_prep
def set_profile(self):
t0 = time.time()
t = tsl.TimesSeriesLearning(self.parameters[0, :],
self.distribution_period,
self.level_threshold, self.timestamp_anomaly, self.processus)
t.set_profile(self.data)
self.server_profile[self.hostname + "_general"] = t
#self.data_prep = self.preprocess_data(self.data)
# i = 0
# for k, v in self.data_prep:
# t = tsl.TimesSeriesLearning(self.parameters[i, :],
# self.distribution_period, self.level_threshold, self.processus)
# t.set_profile(v)
# self.server_profile[self.hostname + "_" + str(k)] = t
# print('cluster number ' + str(k) + ' of hostname: ' + self.hostname)
# i += 1
print("Learning Server" + self.hostname + " Done in " + str(time.time() - t0))
# Process distance and update distribution
def process_distance(self, streaming_data):
t0 = time.time()
cluster_name = self.hostname + "_general"
t = self.server_profile[cluster_name]
anomaly, max_spread, min_spread, d, date, threshold, quant = t.compute_distance_profile(streaming_data,
self.distribution,
self.measures,
self.train_mode,
self.verbose)
#streaming_data_prep = self.preprocess_data(streaming_data)
# for k, v in streaming_data_prep:
# cluster_name = self.hostname + "_" + str(k)
# if cluster_name in self.server_profile.keys():
# t = self.server_profile[cluster_name]
# anomaly, max_spread, min_spread, d, date, threshold, quant = t.compute_distance_profile(v,
# self.distribution,
# self.train_mode,
# self.verbose)
# #if anomaly:
# # break
# else:
# print('cluster: ',k)
# print("Logs does not belong to any cluster")
# break
#print("stream proccessed in :", time.time()-t0)
return anomaly, max_spread, min_spread, d, date, threshold, quant
# def simulate_streaming(self, streaming_data,date_start):
# streaming_data.index = pd.to_datetime(streaming_data.timestamp, format='%Y-%m-%d %H:%M:%S')
# streaming_data = streaming_data.sort_index()
# data_list = []
# date = streaming_data.index[0]
# while date < streaming_data.index[-1]:
# data_to_add = streaming_data.loc[date.isoformat():
# (date + dt.timedelta(minutes=self.parameters[2, 0]))].reset_index(drop=True)
# if data_to_add.shape[0]>0:
# data_list.append(data_to_add)
# date += dt.timedelta(minutes=self.parameters[0, 2])
#
# return data
|
normal
|
{
"blob_id": "53dd753356d8a8d60975c8f4cdaf20de66c2db46",
"index": 3486,
"step-1": "<mask token>\n\n\nclass ServerProfileLearning(object):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def process_distance(self, streaming_data):\n t0 = time.time()\n cluster_name = self.hostname + '_general'\n t = self.server_profile[cluster_name]\n anomaly, max_spread, min_spread, d, date, threshold, quant = (t.\n compute_distance_profile(streaming_data, self.distribution,\n self.measures, self.train_mode, self.verbose))\n return anomaly, max_spread, min_spread, d, date, threshold, quant\n",
"step-2": "<mask token>\n\n\nclass ServerProfileLearning(object):\n <mask token>\n <mask token>\n\n def preprocess_data(self, data):\n data_prep = data.drop(self.data.columns[1:len(self.data.columns) - \n 1], axis=1)\n data_prep = data_prep.groupby(['label'])\n return data_prep\n <mask token>\n\n def process_distance(self, streaming_data):\n t0 = time.time()\n cluster_name = self.hostname + '_general'\n t = self.server_profile[cluster_name]\n anomaly, max_spread, min_spread, d, date, threshold, quant = (t.\n compute_distance_profile(streaming_data, self.distribution,\n self.measures, self.train_mode, self.verbose))\n return anomaly, max_spread, min_spread, d, date, threshold, quant\n",
"step-3": "<mask token>\n\n\nclass ServerProfileLearning(object):\n <mask token>\n\n def initdict(self):\n d = defaultdict(dict)\n for i in range(int(24 * 6 * 60 / self.distribution_period) + 1):\n d[i] = {}\n d[i]['Area_Difference'] = []\n d[i]['Max_Spread'] = []\n return d\n\n def preprocess_data(self, data):\n data_prep = data.drop(self.data.columns[1:len(self.data.columns) - \n 1], axis=1)\n data_prep = data_prep.groupby(['label'])\n return data_prep\n <mask token>\n\n def process_distance(self, streaming_data):\n t0 = time.time()\n cluster_name = self.hostname + '_general'\n t = self.server_profile[cluster_name]\n anomaly, max_spread, min_spread, d, date, threshold, quant = (t.\n compute_distance_profile(streaming_data, self.distribution,\n self.measures, self.train_mode, self.verbose))\n return anomaly, max_spread, min_spread, d, date, threshold, quant\n",
"step-4": "<mask token>\n\n\nclass ServerProfileLearning(object):\n <mask token>\n\n def initdict(self):\n d = defaultdict(dict)\n for i in range(int(24 * 6 * 60 / self.distribution_period) + 1):\n d[i] = {}\n d[i]['Area_Difference'] = []\n d[i]['Max_Spread'] = []\n return d\n\n def preprocess_data(self, data):\n data_prep = data.drop(self.data.columns[1:len(self.data.columns) - \n 1], axis=1)\n data_prep = data_prep.groupby(['label'])\n return data_prep\n\n def set_profile(self):\n t0 = time.time()\n t = tsl.TimesSeriesLearning(self.parameters[0, :], self.\n distribution_period, self.level_threshold, self.\n timestamp_anomaly, self.processus)\n t.set_profile(self.data)\n self.server_profile[self.hostname + '_general'] = t\n print('Learning Server' + self.hostname + ' Done in ' + str(time.\n time() - t0))\n\n def process_distance(self, streaming_data):\n t0 = time.time()\n cluster_name = self.hostname + '_general'\n t = self.server_profile[cluster_name]\n anomaly, max_spread, min_spread, d, date, threshold, quant = (t.\n compute_distance_profile(streaming_data, self.distribution,\n self.measures, self.train_mode, self.verbose))\n return anomaly, max_spread, min_spread, d, date, threshold, quant\n",
"step-5": "import times_series_learning as tsl\nimport numpy as np\nimport time\nimport datetime as dt\nimport sortedcontainers\nimport pandas as pd\nfrom collections import defaultdict\n\n\nclass ServerProfileLearning(object):\n\n def __init__(self, data, parameters, distribution, distribution_period, level_threshold,\n processus=True, moving_window=60,train_mode=True, verbose=False):\n self.label_number = 1 #len(np.unique(data['label'].values))\n self.label = 1 #np.unique(data['label'].values)\n self.data = data\n self.parameters = np.ones((self.label_number + 1, 4)) * parameters # see parameters in times_series_learning\n self.data_prep = None\n self.hostname = self.data.iloc[0, 1]\n self.server_profile = dict()\n self.distribution = distribution # distribution of distance list same for all servers all clusters be carefull sorted containers\n self.distribution_period = distribution_period # distribution period where we compute metrics\n self.level_threshold = level_threshold # level we consider for outliers\n self.verbose = verbose\n self.processus = processus\n self.moving_window = moving_window\n self.train_mode = train_mode\n self.measures = self.initdict()\n self.timestamp_anomaly = pd.DataFrame(columns=['Timestamp','Area_Difference'])\n\n def initdict(self):\n d = defaultdict(dict)\n for i in range(int((24*6*60)/self.distribution_period)+1):\n d[i] = {}\n d[i]['Area_Difference'] = []\n d[i]['Max_Spread'] = []\n return d\n\n\n # sortedcontainers.SortedDict(sortedcontainers.SortedList())\n\n def preprocess_data(self, data):\n data_prep = data.drop(self.data.columns[1:len(self.data.columns) - 1], axis=1)\n data_prep = data_prep.groupby(['label'])\n return data_prep\n\n def set_profile(self):\n t0 = time.time()\n t = tsl.TimesSeriesLearning(self.parameters[0, :],\n self.distribution_period,\n self.level_threshold, self.timestamp_anomaly, self.processus)\n t.set_profile(self.data)\n self.server_profile[self.hostname + \"_general\"] = t\n #self.data_prep = self.preprocess_data(self.data)\n # i = 0\n # for k, v in self.data_prep:\n # t = tsl.TimesSeriesLearning(self.parameters[i, :],\n # self.distribution_period, self.level_threshold, self.processus)\n # t.set_profile(v)\n # self.server_profile[self.hostname + \"_\" + str(k)] = t\n # print('cluster number ' + str(k) + ' of hostname: ' + self.hostname)\n # i += 1\n print(\"Learning Server\" + self.hostname + \" Done in \" + str(time.time() - t0))\n\n # Process distance and update distribution\n def process_distance(self, streaming_data):\n t0 = time.time()\n cluster_name = self.hostname + \"_general\"\n t = self.server_profile[cluster_name]\n anomaly, max_spread, min_spread, d, date, threshold, quant = t.compute_distance_profile(streaming_data,\n self.distribution,\n self.measures,\n self.train_mode,\n self.verbose)\n #streaming_data_prep = self.preprocess_data(streaming_data)\n # for k, v in streaming_data_prep:\n # cluster_name = self.hostname + \"_\" + str(k)\n # if cluster_name in self.server_profile.keys():\n # t = self.server_profile[cluster_name]\n # anomaly, max_spread, min_spread, d, date, threshold, quant = t.compute_distance_profile(v,\n # self.distribution,\n # self.train_mode,\n # self.verbose)\n # #if anomaly:\n # # break\n # else:\n # print('cluster: ',k)\n # print(\"Logs does not belong to any cluster\")\n # break\n #print(\"stream proccessed in :\", time.time()-t0)\n return anomaly, max_spread, min_spread, d, date, threshold, quant\n\n # def simulate_streaming(self, streaming_data,date_start):\n # streaming_data.index = 
pd.to_datetime(streaming_data.timestamp, format='%Y-%m-%d %H:%M:%S')\n # streaming_data = streaming_data.sort_index()\n # data_list = []\n # date = streaming_data.index[0]\n # while date < streaming_data.index[-1]:\n # data_to_add = streaming_data.loc[date.isoformat():\n # (date + dt.timedelta(minutes=self.parameters[2, 0]))].reset_index(drop=True)\n # if data_to_add.shape[0]>0:\n # data_list.append(data_to_add)\n # date += dt.timedelta(minutes=self.parameters[0, 2])\n #\n # return data\n",
"step-ids": [
2,
3,
4,
5,
8
]
}
|
[
2,
3,
4,
5,
8
] |
from compas.geometry import Line
# This import is use to test __repr__.
from compas.geometry import Point # noqa: F401
def test_line():
p1 = [0, 0, 0]
p2 = [1, 0, 0]
line = Line(p1, p2)
assert line.start == p1
assert line.end == p2
def test_equality():
p1 = [0, 0, 0]
p2 = [1, 0, 0]
line = Line(p1, p2)
assert (p1, p2) == line
assert line == Line(p1, p2)
assert line != (p2, p1)
assert line != 1
def test___repr__():
p1 = [0, 0, 0]
p2 = [1, 0, 0]
line = Line(p1, p2)
assert line == eval(repr(line))
def test___getitem__():
p1 = [0, 0, 0]
p2 = [1, 0, 0]
line = Line(p1, p2)
assert line[0] == p1
assert line[1] == p2
|
normal
|
{
"blob_id": "03629e62b11e66eeb0e111fee551c75c8463cbb8",
"index": 1059,
"step-1": "<mask token>\n\n\ndef test_line():\n p1 = [0, 0, 0]\n p2 = [1, 0, 0]\n line = Line(p1, p2)\n assert line.start == p1\n assert line.end == p2\n\n\n<mask token>\n\n\ndef test___getitem__():\n p1 = [0, 0, 0]\n p2 = [1, 0, 0]\n line = Line(p1, p2)\n assert line[0] == p1\n assert line[1] == p2\n",
"step-2": "<mask token>\n\n\ndef test_line():\n p1 = [0, 0, 0]\n p2 = [1, 0, 0]\n line = Line(p1, p2)\n assert line.start == p1\n assert line.end == p2\n\n\ndef test_equality():\n p1 = [0, 0, 0]\n p2 = [1, 0, 0]\n line = Line(p1, p2)\n assert (p1, p2) == line\n assert line == Line(p1, p2)\n assert line != (p2, p1)\n assert line != 1\n\n\n<mask token>\n\n\ndef test___getitem__():\n p1 = [0, 0, 0]\n p2 = [1, 0, 0]\n line = Line(p1, p2)\n assert line[0] == p1\n assert line[1] == p2\n",
"step-3": "<mask token>\n\n\ndef test_line():\n p1 = [0, 0, 0]\n p2 = [1, 0, 0]\n line = Line(p1, p2)\n assert line.start == p1\n assert line.end == p2\n\n\ndef test_equality():\n p1 = [0, 0, 0]\n p2 = [1, 0, 0]\n line = Line(p1, p2)\n assert (p1, p2) == line\n assert line == Line(p1, p2)\n assert line != (p2, p1)\n assert line != 1\n\n\ndef test___repr__():\n p1 = [0, 0, 0]\n p2 = [1, 0, 0]\n line = Line(p1, p2)\n assert line == eval(repr(line))\n\n\ndef test___getitem__():\n p1 = [0, 0, 0]\n p2 = [1, 0, 0]\n line = Line(p1, p2)\n assert line[0] == p1\n assert line[1] == p2\n",
"step-4": "from compas.geometry import Line\nfrom compas.geometry import Point\n\n\ndef test_line():\n p1 = [0, 0, 0]\n p2 = [1, 0, 0]\n line = Line(p1, p2)\n assert line.start == p1\n assert line.end == p2\n\n\ndef test_equality():\n p1 = [0, 0, 0]\n p2 = [1, 0, 0]\n line = Line(p1, p2)\n assert (p1, p2) == line\n assert line == Line(p1, p2)\n assert line != (p2, p1)\n assert line != 1\n\n\ndef test___repr__():\n p1 = [0, 0, 0]\n p2 = [1, 0, 0]\n line = Line(p1, p2)\n assert line == eval(repr(line))\n\n\ndef test___getitem__():\n p1 = [0, 0, 0]\n p2 = [1, 0, 0]\n line = Line(p1, p2)\n assert line[0] == p1\n assert line[1] == p2\n",
"step-5": "from compas.geometry import Line\n\n# This import is use to test __repr__.\nfrom compas.geometry import Point # noqa: F401\n\n\ndef test_line():\n p1 = [0, 0, 0]\n p2 = [1, 0, 0]\n line = Line(p1, p2)\n assert line.start == p1\n assert line.end == p2\n\n\ndef test_equality():\n p1 = [0, 0, 0]\n p2 = [1, 0, 0]\n line = Line(p1, p2)\n assert (p1, p2) == line\n assert line == Line(p1, p2)\n assert line != (p2, p1)\n assert line != 1\n\n\ndef test___repr__():\n p1 = [0, 0, 0]\n p2 = [1, 0, 0]\n line = Line(p1, p2)\n assert line == eval(repr(line))\n\n\ndef test___getitem__():\n p1 = [0, 0, 0]\n p2 = [1, 0, 0]\n line = Line(p1, p2)\n assert line[0] == p1\n assert line[1] == p2\n",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
class Region:
"""
A region (represented by a list of long/lat coordinates).
"""
def __init__(self, coords, r_votes, d_votes, o_votes):
self.coords = coords
def lats(self):
"Return a list of the latitudes of all the coordinates in the region"
return [y for x,y in self.coords]
def longs(self):
"Return a list of the longitudes of all the coordinates in the region"
return [x for x,y in self.coords]
def min_lat(self):
"Return the minimum latitude of the region"
return min(self.lats())
def min_long(self):
"Return the minimum longitude of the region"
return min(self.longs())
def max_lat(self):
"Return the maximum latitude of the region"
return max(self.lats())
def max_long(self):
"Return the maximum longitude of the region"
return max(self.longs())
|
normal
|
{
"blob_id": "517436d61ac9993bee5ecfd932f272dbb8bec60b",
"index": 7608,
"step-1": "class Region:\n <mask token>\n\n def __init__(self, coords, r_votes, d_votes, o_votes):\n self.coords = coords\n\n def lats(self):\n \"\"\"Return a list of the latitudes of all the coordinates in the region\"\"\"\n return [y for x, y in self.coords]\n <mask token>\n\n def min_lat(self):\n \"\"\"Return the minimum latitude of the region\"\"\"\n return min(self.lats())\n\n def min_long(self):\n \"\"\"Return the minimum longitude of the region\"\"\"\n return min(self.longs())\n\n def max_lat(self):\n \"\"\"Return the maximum latitude of the region\"\"\"\n return max(self.lats())\n <mask token>\n",
"step-2": "class Region:\n <mask token>\n\n def __init__(self, coords, r_votes, d_votes, o_votes):\n self.coords = coords\n\n def lats(self):\n \"\"\"Return a list of the latitudes of all the coordinates in the region\"\"\"\n return [y for x, y in self.coords]\n\n def longs(self):\n \"\"\"Return a list of the longitudes of all the coordinates in the region\"\"\"\n return [x for x, y in self.coords]\n\n def min_lat(self):\n \"\"\"Return the minimum latitude of the region\"\"\"\n return min(self.lats())\n\n def min_long(self):\n \"\"\"Return the minimum longitude of the region\"\"\"\n return min(self.longs())\n\n def max_lat(self):\n \"\"\"Return the maximum latitude of the region\"\"\"\n return max(self.lats())\n <mask token>\n",
"step-3": "class Region:\n <mask token>\n\n def __init__(self, coords, r_votes, d_votes, o_votes):\n self.coords = coords\n\n def lats(self):\n \"\"\"Return a list of the latitudes of all the coordinates in the region\"\"\"\n return [y for x, y in self.coords]\n\n def longs(self):\n \"\"\"Return a list of the longitudes of all the coordinates in the region\"\"\"\n return [x for x, y in self.coords]\n\n def min_lat(self):\n \"\"\"Return the minimum latitude of the region\"\"\"\n return min(self.lats())\n\n def min_long(self):\n \"\"\"Return the minimum longitude of the region\"\"\"\n return min(self.longs())\n\n def max_lat(self):\n \"\"\"Return the maximum latitude of the region\"\"\"\n return max(self.lats())\n\n def max_long(self):\n \"\"\"Return the maximum longitude of the region\"\"\"\n return max(self.longs())\n",
"step-4": "class Region:\n \"\"\"\n A region (represented by a list of long/lat coordinates).\n \"\"\"\n\n def __init__(self, coords, r_votes, d_votes, o_votes):\n self.coords = coords\n\n def lats(self):\n \"\"\"Return a list of the latitudes of all the coordinates in the region\"\"\"\n return [y for x, y in self.coords]\n\n def longs(self):\n \"\"\"Return a list of the longitudes of all the coordinates in the region\"\"\"\n return [x for x, y in self.coords]\n\n def min_lat(self):\n \"\"\"Return the minimum latitude of the region\"\"\"\n return min(self.lats())\n\n def min_long(self):\n \"\"\"Return the minimum longitude of the region\"\"\"\n return min(self.longs())\n\n def max_lat(self):\n \"\"\"Return the maximum latitude of the region\"\"\"\n return max(self.lats())\n\n def max_long(self):\n \"\"\"Return the maximum longitude of the region\"\"\"\n return max(self.longs())\n",
"step-5": "class Region:\n \"\"\"\n A region (represented by a list of long/lat coordinates).\n \"\"\"\n\n def __init__(self, coords, r_votes, d_votes, o_votes):\n self.coords = coords\n\n def lats(self):\n \"Return a list of the latitudes of all the coordinates in the region\"\n return [y for x,y in self.coords]\n\n def longs(self):\n \"Return a list of the longitudes of all the coordinates in the region\"\n return [x for x,y in self.coords]\n\n def min_lat(self):\n \"Return the minimum latitude of the region\"\n return min(self.lats())\n\n def min_long(self):\n \"Return the minimum longitude of the region\"\n return min(self.longs())\n\n def max_lat(self):\n \"Return the maximum latitude of the region\"\n return max(self.lats())\n\n def max_long(self):\n \"Return the maximum longitude of the region\"\n return max(self.longs())\n",
"step-ids": [
6,
7,
8,
9,
10
]
}
|
[
6,
7,
8,
9,
10
] |
#!/bin/python3
def word_ladder(start_word, end_word, dictionary_file='words5.dict'):
'''
Returns a list satisfying the following properties:
1. the first element is `start_word`
2. the last element is `end_word`
3. elements at index i and i+1 are `_adjacent`
4. all elements are entries in the `dictionary_file` file
For example, running the command
```
word_ladder('stone','money')
```
may give the output
```
['stone', 'shone', 'phone', 'phony', 'peony', 'penny', 'benny', 'bonny', 'boney', 'money']
```
but the possible outputs are not unique,
so you may also get the output
```
['stone', 'shone', 'shote', 'shots', 'soots', 'hoots', 'hooty', 'hooey', 'honey', 'money']
```
(We cannot use doctests here because the outputs are not unique.)
Whenever it is impossible to generate a word ladder between the two words,
the function returns `None`.
HINT:
See <https://github.com/mikeizbicki/cmc-csci046/issues/472> for a discussion about a common memory management bug that causes the generated word ladders to be too long in some cases.
'''
def verify_word_ladder(ladder):
'''
Returns True if each entry of the input list is adjacent to its neighbors;
otherwise returns False.
>>> verify_word_ladder(['stone', 'shone', 'phone', 'phony'])
True
>>> verify_word_ladder(['stone', 'shone', 'phony'])
False
'''
def _adjacent(word1, word2):
'''
Returns True if the input words differ by only a single character;
returns False otherwise.
>>> _adjacent('phone','phony')
True
>>> _adjacent('stone','money')
False
'''
|
normal
|
{
"blob_id": "631323e79f4fb32611d7094af92cff8f923fa996",
"index": 303,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef _adjacent(word1, word2):\n \"\"\"\n Returns True if the input words differ by only a single character;\n returns False otherwise.\n\n >>> _adjacent('phone','phony')\n True\n >>> _adjacent('stone','money')\n False\n \"\"\"\n",
"step-3": "def word_ladder(start_word, end_word, dictionary_file='words5.dict'):\n \"\"\"\n Returns a list satisfying the following properties:\n\n 1. the first element is `start_word`\n 2. the last element is `end_word`\n 3. elements at index i and i+1 are `_adjacent`\n 4. all elements are entries in the `dictionary_file` file\n\n For example, running the command\n ```\n word_ladder('stone','money')\n ```\n may give the output\n ```\n ['stone', 'shone', 'phone', 'phony', 'peony', 'penny', 'benny', 'bonny', 'boney', 'money']\n ```\n but the possible outputs are not unique,\n so you may also get the output\n ```\n ['stone', 'shone', 'shote', 'shots', 'soots', 'hoots', 'hooty', 'hooey', 'honey', 'money']\n ```\n (We cannot use doctests here because the outputs are not unique.)\n\n Whenever it is impossible to generate a word ladder between the two words,\n the function returns `None`.\n\n HINT:\n See <https://github.com/mikeizbicki/cmc-csci046/issues/472> for a discussion about a common memory management bug that causes the generated word ladders to be too long in some cases.\n \"\"\"\n\n\n<mask token>\n\n\ndef _adjacent(word1, word2):\n \"\"\"\n Returns True if the input words differ by only a single character;\n returns False otherwise.\n\n >>> _adjacent('phone','phony')\n True\n >>> _adjacent('stone','money')\n False\n \"\"\"\n",
"step-4": "def word_ladder(start_word, end_word, dictionary_file='words5.dict'):\n \"\"\"\n Returns a list satisfying the following properties:\n\n 1. the first element is `start_word`\n 2. the last element is `end_word`\n 3. elements at index i and i+1 are `_adjacent`\n 4. all elements are entries in the `dictionary_file` file\n\n For example, running the command\n ```\n word_ladder('stone','money')\n ```\n may give the output\n ```\n ['stone', 'shone', 'phone', 'phony', 'peony', 'penny', 'benny', 'bonny', 'boney', 'money']\n ```\n but the possible outputs are not unique,\n so you may also get the output\n ```\n ['stone', 'shone', 'shote', 'shots', 'soots', 'hoots', 'hooty', 'hooey', 'honey', 'money']\n ```\n (We cannot use doctests here because the outputs are not unique.)\n\n Whenever it is impossible to generate a word ladder between the two words,\n the function returns `None`.\n\n HINT:\n See <https://github.com/mikeizbicki/cmc-csci046/issues/472> for a discussion about a common memory management bug that causes the generated word ladders to be too long in some cases.\n \"\"\"\n\n\ndef verify_word_ladder(ladder):\n \"\"\"\n Returns True if each entry of the input list is adjacent to its neighbors;\n otherwise returns False.\n\n >>> verify_word_ladder(['stone', 'shone', 'phone', 'phony'])\n True\n >>> verify_word_ladder(['stone', 'shone', 'phony'])\n False\n \"\"\"\n\n\ndef _adjacent(word1, word2):\n \"\"\"\n Returns True if the input words differ by only a single character;\n returns False otherwise.\n\n >>> _adjacent('phone','phony')\n True\n >>> _adjacent('stone','money')\n False\n \"\"\"\n",
"step-5": "#!/bin/python3\n\n\ndef word_ladder(start_word, end_word, dictionary_file='words5.dict'):\n '''\n Returns a list satisfying the following properties:\n\n 1. the first element is `start_word`\n 2. the last element is `end_word`\n 3. elements at index i and i+1 are `_adjacent`\n 4. all elements are entries in the `dictionary_file` file\n\n For example, running the command\n ```\n word_ladder('stone','money')\n ```\n may give the output\n ```\n ['stone', 'shone', 'phone', 'phony', 'peony', 'penny', 'benny', 'bonny', 'boney', 'money']\n ```\n but the possible outputs are not unique,\n so you may also get the output\n ```\n ['stone', 'shone', 'shote', 'shots', 'soots', 'hoots', 'hooty', 'hooey', 'honey', 'money']\n ```\n (We cannot use doctests here because the outputs are not unique.)\n\n Whenever it is impossible to generate a word ladder between the two words,\n the function returns `None`.\n\n HINT:\n See <https://github.com/mikeizbicki/cmc-csci046/issues/472> for a discussion about a common memory management bug that causes the generated word ladders to be too long in some cases.\n '''\n\n\ndef verify_word_ladder(ladder):\n '''\n Returns True if each entry of the input list is adjacent to its neighbors;\n otherwise returns False.\n\n >>> verify_word_ladder(['stone', 'shone', 'phone', 'phony'])\n True\n >>> verify_word_ladder(['stone', 'shone', 'phony'])\n False\n '''\n\n\ndef _adjacent(word1, word2):\n '''\n Returns True if the input words differ by only a single character;\n returns False otherwise.\n\n >>> _adjacent('phone','phony')\n True\n >>> _adjacent('stone','money')\n False\n '''\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
#### As an example below shell script can be used to execute this every 300s.
####!/bin/bash
####while true
####do
#### /usr/bin/sudo python3 /path/of/the/python/script.sh
####done
#!/usr/bin/python
import sys
import time
import paho.mqtt.client as mqtt
broker_url = "<IP_Address_of_MQTT_broker>"
broker_port = <MQTT_Broker_port>
def on_connect(client, userdata, flags, rc):
print("Connected With Result Code: {}".format(rc))
def on_message(client, userdata, message):
print("Message Recieved: "+message.payload.decode())
file_name=message.payload.decode()
file_path="/home/demouser/nagios/node-check/logs/"+file_name+".ok"
file1 = open(file_path, 'w')
file1.write(message.payload.decode()+" is up and running\n")
file1.close()
def on_disconnect(client, userdata, rc):
print("Client Got Disconnected")
client = mqtt.Client("Nagios_NodeChecker")
client.on_connect = on_connect
client.on_disconnect = on_disconnect
client.on_message = on_message
client.username_pw_set(username="<mqtt_username>",password="<mqtt_password>")
client.connect(broker_url, broker_port)
client.subscribe(topic="nagios/node_check", qos=2)
client.message_callback_add("nagios/node_check", on_message)
client.loop_start()
time.sleep(300)
client.loop_stop()
|
normal
|
{
"blob_id": "f311b803d8c0ee68bc43526f56e6b14f3a2836b8",
"index": 7309,
"step-1": "#### As an example below shell script can be used to execute this every 300s.\r\n####!/bin/bash\r\n####while true\r\n####do\r\n#### /usr/bin/sudo python3 /path/of/the/python/script.sh\r\n####done\r\n\r\n#!/usr/bin/python\r\nimport sys\r\nimport time\r\nimport paho.mqtt.client as mqtt\r\n\r\nbroker_url = \"<IP_Address_of_MQTT_broker>\"\r\nbroker_port = <MQTT_Broker_port>\r\n\r\ndef on_connect(client, userdata, flags, rc):\r\n print(\"Connected With Result Code: {}\".format(rc))\r\n\r\ndef on_message(client, userdata, message):\r\n print(\"Message Recieved: \"+message.payload.decode())\r\n file_name=message.payload.decode()\r\n file_path=\"/home/demouser/nagios/node-check/logs/\"+file_name+\".ok\"\r\n file1 = open(file_path, 'w')\r\n file1.write(message.payload.decode()+\" is up and running\\n\")\r\n file1.close()\r\n\r\ndef on_disconnect(client, userdata, rc):\r\n print(\"Client Got Disconnected\")\r\n\r\nclient = mqtt.Client(\"Nagios_NodeChecker\")\r\nclient.on_connect = on_connect\r\nclient.on_disconnect = on_disconnect\r\nclient.on_message = on_message\r\nclient.username_pw_set(username=\"<mqtt_username>\",password=\"<mqtt_password>\")\r\n\r\nclient.connect(broker_url, broker_port)\r\nclient.subscribe(topic=\"nagios/node_check\", qos=2)\r\nclient.message_callback_add(\"nagios/node_check\", on_message)\r\n\r\nclient.loop_start()\r\ntime.sleep(300)\r\nclient.loop_stop()\r\n\r\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
#!/usr/bin/env python
from time import sleep
from org.mustadroid.python.interleech import processPage
import MySQLdb
from org.mustadroid.python.interleech.item import Item
import re
import time
if __name__ == "__main__":
db = MySQLdb.connect(host = "localhost",
user = "interleech",
passwd = "abc123",
db = "interleech")
cur = db.cursor()
sqlQuery = "SELECT * FROM interleech ORDER by id ASC"
while True:
cur.execute(sqlQuery)
results = cur.fetchall()
print results
rows = [results[5]]
for row in results:
print "\n################## PROCESSING " + row[1] + " #######################\n"
processPage.attrList = set()
processPage.dbHintsDict = dict()
processPage.tableName = row[1]
processPage.abortProcessing = False
processPage.xmlBase = re.compile('(.*)/(.*)').search(row[3]).group(1) + "/"
processPage.base = re.compile('(.*).se/(.*)').search(row[2]).group(1) + ".se"
processPage.timeStamp = int(time.time() * 100)
itemList = []
processPage.processPage(row[2], row[3], itemList, Item)
Item.CreateTable(processPage.tableName, processPage.attrList, processPage.dbHintsDict)
i = 0
for item in itemList:
if item.id is not None:
item.Save(processPage.tableName, processPage.attrList, processPage.timeStamp - i)
i += 1
print "\n################### FINISHED PROCESSING #######################\n"
sleep(60)
db.close()
|
normal
|
{
"blob_id": "598c634aac1df951f544127e102a1e2d61cac0b0",
"index": 4323,
"step-1": "#!/usr/bin/env python\nfrom time import sleep\nfrom org.mustadroid.python.interleech import processPage\nimport MySQLdb\nfrom org.mustadroid.python.interleech.item import Item\nimport re\nimport time\n\nif __name__ == \"__main__\":\n db = MySQLdb.connect(host = \"localhost\",\n user = \"interleech\",\n passwd = \"abc123\",\n db = \"interleech\")\n cur = db.cursor()\n sqlQuery = \"SELECT * FROM interleech ORDER by id ASC\"\n \n while True:\n cur.execute(sqlQuery)\n results = cur.fetchall()\n print results\n rows = [results[5]]\n for row in results:\n print \"\\n################## PROCESSING \" + row[1] + \" #######################\\n\"\n processPage.attrList = set()\n processPage.dbHintsDict = dict()\n processPage.tableName = row[1]\n processPage.abortProcessing = False\n processPage.xmlBase = re.compile('(.*)/(.*)').search(row[3]).group(1) + \"/\"\n processPage.base = re.compile('(.*).se/(.*)').search(row[2]).group(1) + \".se\"\n processPage.timeStamp = int(time.time() * 100)\n itemList = []\n processPage.processPage(row[2], row[3], itemList, Item)\n Item.CreateTable(processPage.tableName, processPage.attrList, processPage.dbHintsDict)\n i = 0\n for item in itemList:\n if item.id is not None:\n item.Save(processPage.tableName, processPage.attrList, processPage.timeStamp - i)\n i += 1\n \n print \"\\n################### FINISHED PROCESSING #######################\\n\" \n \n sleep(60)\n \n db.close()\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
#1.25.2019 - shashi
#Program that accepts an array of different elements
#And moves all the integer 0s to the end of it. String 0s like "0" or "0.0" remain untouched.
def shiftZeroesToEnd(myArray): #function starts here
zeroCounter = 0 #counter to keep track of how many 0s exist.
shiftedArray = [] #array to hold final output
for item in myArray: #loop through each item in array
if (str(item) == "0" or str(item) == "0.0") and type(item) is not str:
zeroCounter += 1 #if numeric string found, incremenet zero counter
else:
shiftedArray.append(item) #else add item from original list as is (same position)
#end of loop
zeroStore = [0 for i in range(zeroCounter)] #declare an array of 0s of the size of zeroCounter
shiftedArray.extend(zeroStore) #append it to final output list (adds it to the end)
return shiftedArray #return final output back
#testing function
print(shiftZeroesToEnd([True, 3, 3, 0, 23, 0, 112, "b", "a"]))
print(shiftZeroesToEnd([0.0, 23, -3, False, "xxx", 0, 112, True , 9]))
|
normal
|
{
"blob_id": "4a9c42727a28e19cf1eebcf72784b85bbae695bf",
"index": 3429,
"step-1": "<mask token>\n",
"step-2": "def shiftZeroesToEnd(myArray):\n zeroCounter = 0\n shiftedArray = []\n for item in myArray:\n if (str(item) == '0' or str(item) == '0.0') and type(item) is not str:\n zeroCounter += 1\n else:\n shiftedArray.append(item)\n zeroStore = [(0) for i in range(zeroCounter)]\n shiftedArray.extend(zeroStore)\n return shiftedArray\n\n\n<mask token>\n",
"step-3": "def shiftZeroesToEnd(myArray):\n zeroCounter = 0\n shiftedArray = []\n for item in myArray:\n if (str(item) == '0' or str(item) == '0.0') and type(item) is not str:\n zeroCounter += 1\n else:\n shiftedArray.append(item)\n zeroStore = [(0) for i in range(zeroCounter)]\n shiftedArray.extend(zeroStore)\n return shiftedArray\n\n\nprint(shiftZeroesToEnd([True, 3, 3, 0, 23, 0, 112, 'b', 'a']))\nprint(shiftZeroesToEnd([0.0, 23, -3, False, 'xxx', 0, 112, True, 9]))\n",
"step-4": "#1.25.2019 - shashi\n#Program that accepts an array of different elements\n#And moves all the integer 0s to the end of it. String 0s like \"0\" or \"0.0\" remain untouched.\n\n\ndef shiftZeroesToEnd(myArray): #function starts here\n zeroCounter = 0 #counter to keep track of how many 0s exist.\n shiftedArray = [] #array to hold final output\n \n for item in myArray: #loop through each item in array\n if (str(item) == \"0\" or str(item) == \"0.0\") and type(item) is not str:\n zeroCounter += 1 #if numeric string found, incremenet zero counter\n else:\n shiftedArray.append(item) #else add item from original list as is (same position)\n #end of loop \n zeroStore = [0 for i in range(zeroCounter)] #declare an array of 0s of the size of zeroCounter\n shiftedArray.extend(zeroStore) #append it to final output list (adds it to the end)\n \n return shiftedArray #return final output back\n\n#testing function\nprint(shiftZeroesToEnd([True, 3, 3, 0, 23, 0, 112, \"b\", \"a\"]))\nprint(shiftZeroesToEnd([0.0, 23, -3, False, \"xxx\", 0, 112, True , 9]))\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
# Generated by Django 3.1.1 on 2020-10-10 07:38
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('socialapp', '0004_mesage_creation_date'),
]
operations = [
migrations.CreateModel(
name='Notification',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('content', models.TextField(max_length=200)),
('creation_date', models.DateTimeField(auto_now_add=True)),
('receiver', models.OneToOneField(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='receiver_not', to=settings.AUTH_USER_MODEL)),
('sender', models.OneToOneField(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='sender_not', to=settings.AUTH_USER_MODEL)),
],
),
]
|
normal
|
{
"blob_id": "38751da57ad7c786e9fc0722faf065380e5f7e60",
"index": 4994,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass Migration(migrations.Migration):\n <mask token>\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass Migration(migrations.Migration):\n dependencies = [migrations.swappable_dependency(settings.\n AUTH_USER_MODEL), ('socialapp', '0004_mesage_creation_date')]\n operations = [migrations.CreateModel(name='Notification', fields=[('id',\n models.AutoField(auto_created=True, primary_key=True, serialize=\n False, verbose_name='ID')), ('content', models.TextField(max_length\n =200)), ('creation_date', models.DateTimeField(auto_now_add=True)),\n ('receiver', models.OneToOneField(null=True, on_delete=django.db.\n models.deletion.SET_NULL, related_name='receiver_not', to=settings.\n AUTH_USER_MODEL)), ('sender', models.OneToOneField(null=True,\n on_delete=django.db.models.deletion.SET_NULL, related_name=\n 'sender_not', to=settings.AUTH_USER_MODEL))])]\n",
"step-4": "from django.conf import settings\nfrom django.db import migrations, models\nimport django.db.models.deletion\n\n\nclass Migration(migrations.Migration):\n dependencies = [migrations.swappable_dependency(settings.\n AUTH_USER_MODEL), ('socialapp', '0004_mesage_creation_date')]\n operations = [migrations.CreateModel(name='Notification', fields=[('id',\n models.AutoField(auto_created=True, primary_key=True, serialize=\n False, verbose_name='ID')), ('content', models.TextField(max_length\n =200)), ('creation_date', models.DateTimeField(auto_now_add=True)),\n ('receiver', models.OneToOneField(null=True, on_delete=django.db.\n models.deletion.SET_NULL, related_name='receiver_not', to=settings.\n AUTH_USER_MODEL)), ('sender', models.OneToOneField(null=True,\n on_delete=django.db.models.deletion.SET_NULL, related_name=\n 'sender_not', to=settings.AUTH_USER_MODEL))])]\n",
"step-5": "# Generated by Django 3.1.1 on 2020-10-10 07:38\n\nfrom django.conf import settings\nfrom django.db import migrations, models\nimport django.db.models.deletion\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n migrations.swappable_dependency(settings.AUTH_USER_MODEL),\n ('socialapp', '0004_mesage_creation_date'),\n ]\n\n operations = [\n migrations.CreateModel(\n name='Notification',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('content', models.TextField(max_length=200)),\n ('creation_date', models.DateTimeField(auto_now_add=True)),\n ('receiver', models.OneToOneField(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='receiver_not', to=settings.AUTH_USER_MODEL)),\n ('sender', models.OneToOneField(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='sender_not', to=settings.AUTH_USER_MODEL)),\n ],\n ),\n ]\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
#!/usr/bin/python
import json, sys, getopt, re
# Usage: ./get_code.py -i <inputfile>
def main(argv):
inputfile = argv[0]
with open(inputfile) as json_data:
d=json.load(json_data)
json_data.close()
code_array = d["hits"]["hits"]
output_json = []
for element in code_array:
gistid = element["_id"]
e = element["_source"]
code = e["code"].encode('ascii', 'ignore')
author = e["userId"]
code = get_js_only(code)
if(code != None):
filename = 'data/' + author + '_' + gistid + '.html'
outfile = open(filename, 'w')
outfile.write(code)
simple_e = {}
simple_e["uid"] = author + '_' + gistid
simple_e["created_at"] = e["created_at"]
simple_e["updated_at"] = e["updated_at"]
simple_e["api"] = e["api"]
simple_e["readme"] = e["readme"]
simple_e["description"] = e["description"]
simple_e["code"] = code # e["code"]
output_json.append(simple_e)
print len(output_json)
with open('nodes.json', 'w') as datafile:
json.dump(output_json, datafile)
def get_js_only(code):
re.DOTALL
re.MULTILINE
match = re.search('<script>.*</script>', code, re.DOTALL)
if(match != None):
return match.group(0)
else:
# print "\n\n-------------------------------------------------------------"
# print code
return None
if __name__ == "__main__":
main(sys.argv[1:])
|
normal
|
{
"blob_id": "9594cda360847d2878aa2bd9c9c85fe50562b6ab",
"index": 5685,
"step-1": "#!/usr/bin/python\n\nimport json, sys, getopt, re\n\n# Usage: ./get_code.py -i <inputfile>\n\ndef main(argv): \n inputfile = argv[0]\n \n with open(inputfile) as json_data: \n d=json.load(json_data)\n json_data.close()\n code_array = d[\"hits\"][\"hits\"]\n \n output_json = []\n \n for element in code_array:\n gistid = element[\"_id\"]\n e = element[\"_source\"]\n code = e[\"code\"].encode('ascii', 'ignore')\n author = e[\"userId\"]\n \n code = get_js_only(code)\n if(code != None): \n filename = 'data/' + author + '_' + gistid + '.html'\n outfile = open(filename, 'w')\n outfile.write(code)\n simple_e = {}\n simple_e[\"uid\"] = author + '_' + gistid\n simple_e[\"created_at\"] = e[\"created_at\"]\n simple_e[\"updated_at\"] = e[\"updated_at\"]\n simple_e[\"api\"] = e[\"api\"]\n simple_e[\"readme\"] = e[\"readme\"]\n simple_e[\"description\"] = e[\"description\"]\n simple_e[\"code\"] = code # e[\"code\"]\n output_json.append(simple_e)\n \n \n print len(output_json)\n with open('nodes.json', 'w') as datafile:\n json.dump(output_json, datafile) \n \ndef get_js_only(code):\n re.DOTALL\n re.MULTILINE\n match = re.search('<script>.*</script>', code, re.DOTALL)\n if(match != None):\n return match.group(0)\n else:\n # print \"\\n\\n-------------------------------------------------------------\"\n # print code\n return None\n \nif __name__ == \"__main__\":\n main(sys.argv[1:])",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
"""Sophie Tan's special AI."""
from typing import Sequence, Tuple
from battleships import Player, ShotResult
from random import randint
class SophiesAI(Player):
"""Sophie Tan's Random Shot Magic."""
def ship_locations(self) -> Sequence[Tuple[int, int, int, bool]]:
return [(2, 0, 0, True)]
def drop_bomb(self) -> Tuple[int, int]:
return randint(0, self.size - 1), randint(0, self.size - 1)
def bomb_feedback(self, x: int, y: int, result: ShotResult):
pass
def bombed_feedback(self, x: int, y: int, result: ShotResult):
pass
|
normal
|
{
"blob_id": "127bf47de554dd397d18c6a70616a2a4d93cae80",
"index": 3659,
"step-1": "<mask token>\n\n\nclass SophiesAI(Player):\n <mask token>\n\n def ship_locations(self) ->Sequence[Tuple[int, int, int, bool]]:\n return [(2, 0, 0, True)]\n <mask token>\n\n def bomb_feedback(self, x: int, y: int, result: ShotResult):\n pass\n\n def bombed_feedback(self, x: int, y: int, result: ShotResult):\n pass\n",
"step-2": "<mask token>\n\n\nclass SophiesAI(Player):\n <mask token>\n\n def ship_locations(self) ->Sequence[Tuple[int, int, int, bool]]:\n return [(2, 0, 0, True)]\n\n def drop_bomb(self) ->Tuple[int, int]:\n return randint(0, self.size - 1), randint(0, self.size - 1)\n\n def bomb_feedback(self, x: int, y: int, result: ShotResult):\n pass\n\n def bombed_feedback(self, x: int, y: int, result: ShotResult):\n pass\n",
"step-3": "<mask token>\n\n\nclass SophiesAI(Player):\n \"\"\"Sophie Tan's Random Shot Magic.\"\"\"\n\n def ship_locations(self) ->Sequence[Tuple[int, int, int, bool]]:\n return [(2, 0, 0, True)]\n\n def drop_bomb(self) ->Tuple[int, int]:\n return randint(0, self.size - 1), randint(0, self.size - 1)\n\n def bomb_feedback(self, x: int, y: int, result: ShotResult):\n pass\n\n def bombed_feedback(self, x: int, y: int, result: ShotResult):\n pass\n",
"step-4": "<mask token>\nfrom typing import Sequence, Tuple\nfrom battleships import Player, ShotResult\nfrom random import randint\n\n\nclass SophiesAI(Player):\n \"\"\"Sophie Tan's Random Shot Magic.\"\"\"\n\n def ship_locations(self) ->Sequence[Tuple[int, int, int, bool]]:\n return [(2, 0, 0, True)]\n\n def drop_bomb(self) ->Tuple[int, int]:\n return randint(0, self.size - 1), randint(0, self.size - 1)\n\n def bomb_feedback(self, x: int, y: int, result: ShotResult):\n pass\n\n def bombed_feedback(self, x: int, y: int, result: ShotResult):\n pass\n",
"step-5": "\"\"\"Sophie Tan's special AI.\"\"\"\nfrom typing import Sequence, Tuple\n\nfrom battleships import Player, ShotResult\nfrom random import randint\n\n\nclass SophiesAI(Player):\n \"\"\"Sophie Tan's Random Shot Magic.\"\"\"\n\n def ship_locations(self) -> Sequence[Tuple[int, int, int, bool]]:\n return [(2, 0, 0, True)]\n\n def drop_bomb(self) -> Tuple[int, int]:\n return randint(0, self.size - 1), randint(0, self.size - 1)\n\n def bomb_feedback(self, x: int, y: int, result: ShotResult):\n pass\n\n def bombed_feedback(self, x: int, y: int, result: ShotResult):\n pass\n",
"step-ids": [
4,
5,
6,
7,
8
]
}
|
[
4,
5,
6,
7,
8
] |
import sys
filepath = 'input.txt'
def intersection(list1, list2):
return set(list1).intersection(list2)
def computeSteps(x, y, step, steps):
# build dictionary with steps for each point
curr = 0
if (x,y) in steps:
curr = steps.get((x,y))
steps[(x,y)] = step + curr
def buildPoints(wire, steps):
points = []
x, y = 0, 0
s = 0
for p in wire:
direction = p[0]
step = int(p[1:])
if direction == 'D':
for i in range(0, step):
y -= 1
points.append((x,y))
s += 1
computeSteps(x, y, s, steps)
elif direction == 'U':
for i in range(0, step):
y += 1
points.append((x,y))
s += 1
computeSteps(x, y, s, steps)
elif direction == 'L':
for i in range(0, step):
x -= 1
points.append((x,y))
s += 1
computeSteps(x, y, s, steps)
elif direction == 'R':
for i in range(0, step):
x += 1
points.append((x,y))
s += 1
computeSteps(x, y, s, steps)
#end for
return points
with open(filepath) as fp:
steps = {}
port = (0,0)
wire1 = fp.readline().strip().split(',')
wire2 = fp.readline().strip().split(',')
point1 = buildPoints(wire1, steps)
point2 = buildPoints(wire2, steps)
commonPoints = intersection(point1, point2)
min = sys.maxsize
for k in commonPoints:
val = steps.get(k)
if val < min:
min = val
print(min)
|
normal
|
{
"blob_id": "e9e119dd69f9416e007e748d7f494741140efc8e",
"index": 8182,
"step-1": "<mask token>\n\n\ndef intersection(list1, list2):\n return set(list1).intersection(list2)\n\n\ndef computeSteps(x, y, step, steps):\n curr = 0\n if (x, y) in steps:\n curr = steps.get((x, y))\n steps[x, y] = step + curr\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef intersection(list1, list2):\n return set(list1).intersection(list2)\n\n\ndef computeSteps(x, y, step, steps):\n curr = 0\n if (x, y) in steps:\n curr = steps.get((x, y))\n steps[x, y] = step + curr\n\n\ndef buildPoints(wire, steps):\n points = []\n x, y = 0, 0\n s = 0\n for p in wire:\n direction = p[0]\n step = int(p[1:])\n if direction == 'D':\n for i in range(0, step):\n y -= 1\n points.append((x, y))\n s += 1\n computeSteps(x, y, s, steps)\n elif direction == 'U':\n for i in range(0, step):\n y += 1\n points.append((x, y))\n s += 1\n computeSteps(x, y, s, steps)\n elif direction == 'L':\n for i in range(0, step):\n x -= 1\n points.append((x, y))\n s += 1\n computeSteps(x, y, s, steps)\n elif direction == 'R':\n for i in range(0, step):\n x += 1\n points.append((x, y))\n s += 1\n computeSteps(x, y, s, steps)\n return points\n\n\nwith open(filepath) as fp:\n steps = {}\n port = 0, 0\n wire1 = fp.readline().strip().split(',')\n wire2 = fp.readline().strip().split(',')\n point1 = buildPoints(wire1, steps)\n point2 = buildPoints(wire2, steps)\n commonPoints = intersection(point1, point2)\n min = sys.maxsize\n for k in commonPoints:\n val = steps.get(k)\n if val < min:\n min = val\n print(min)\n",
"step-3": "<mask token>\nfilepath = 'input.txt'\n\n\ndef intersection(list1, list2):\n return set(list1).intersection(list2)\n\n\ndef computeSteps(x, y, step, steps):\n curr = 0\n if (x, y) in steps:\n curr = steps.get((x, y))\n steps[x, y] = step + curr\n\n\ndef buildPoints(wire, steps):\n points = []\n x, y = 0, 0\n s = 0\n for p in wire:\n direction = p[0]\n step = int(p[1:])\n if direction == 'D':\n for i in range(0, step):\n y -= 1\n points.append((x, y))\n s += 1\n computeSteps(x, y, s, steps)\n elif direction == 'U':\n for i in range(0, step):\n y += 1\n points.append((x, y))\n s += 1\n computeSteps(x, y, s, steps)\n elif direction == 'L':\n for i in range(0, step):\n x -= 1\n points.append((x, y))\n s += 1\n computeSteps(x, y, s, steps)\n elif direction == 'R':\n for i in range(0, step):\n x += 1\n points.append((x, y))\n s += 1\n computeSteps(x, y, s, steps)\n return points\n\n\nwith open(filepath) as fp:\n steps = {}\n port = 0, 0\n wire1 = fp.readline().strip().split(',')\n wire2 = fp.readline().strip().split(',')\n point1 = buildPoints(wire1, steps)\n point2 = buildPoints(wire2, steps)\n commonPoints = intersection(point1, point2)\n min = sys.maxsize\n for k in commonPoints:\n val = steps.get(k)\n if val < min:\n min = val\n print(min)\n",
"step-4": "import sys\nfilepath = 'input.txt'\n\n\ndef intersection(list1, list2):\n return set(list1).intersection(list2)\n\n\ndef computeSteps(x, y, step, steps):\n curr = 0\n if (x, y) in steps:\n curr = steps.get((x, y))\n steps[x, y] = step + curr\n\n\ndef buildPoints(wire, steps):\n points = []\n x, y = 0, 0\n s = 0\n for p in wire:\n direction = p[0]\n step = int(p[1:])\n if direction == 'D':\n for i in range(0, step):\n y -= 1\n points.append((x, y))\n s += 1\n computeSteps(x, y, s, steps)\n elif direction == 'U':\n for i in range(0, step):\n y += 1\n points.append((x, y))\n s += 1\n computeSteps(x, y, s, steps)\n elif direction == 'L':\n for i in range(0, step):\n x -= 1\n points.append((x, y))\n s += 1\n computeSteps(x, y, s, steps)\n elif direction == 'R':\n for i in range(0, step):\n x += 1\n points.append((x, y))\n s += 1\n computeSteps(x, y, s, steps)\n return points\n\n\nwith open(filepath) as fp:\n steps = {}\n port = 0, 0\n wire1 = fp.readline().strip().split(',')\n wire2 = fp.readline().strip().split(',')\n point1 = buildPoints(wire1, steps)\n point2 = buildPoints(wire2, steps)\n commonPoints = intersection(point1, point2)\n min = sys.maxsize\n for k in commonPoints:\n val = steps.get(k)\n if val < min:\n min = val\n print(min)\n",
"step-5": "import sys\r\nfilepath = 'input.txt' \r\n\r\ndef intersection(list1, list2): \r\n return set(list1).intersection(list2) \r\n\r\ndef computeSteps(x, y, step, steps):\r\n # build dictionary with steps for each point\r\n curr = 0\r\n if (x,y) in steps:\r\n curr = steps.get((x,y)) \r\n steps[(x,y)] = step + curr\r\n\r\n \r\ndef buildPoints(wire, steps):\r\n points = []\r\n x, y = 0, 0\r\n s = 0\r\n for p in wire:\r\n direction = p[0]\r\n step = int(p[1:])\r\n if direction == 'D':\r\n for i in range(0, step):\r\n y -= 1\r\n points.append((x,y)) \r\n s += 1 \r\n computeSteps(x, y, s, steps) \r\n elif direction == 'U':\r\n for i in range(0, step):\r\n y += 1\r\n points.append((x,y)) \r\n s += 1 \r\n computeSteps(x, y, s, steps) \r\n elif direction == 'L':\r\n for i in range(0, step):\r\n x -= 1\r\n points.append((x,y))\r\n s += 1 \r\n computeSteps(x, y, s, steps) \r\n elif direction == 'R':\r\n for i in range(0, step):\r\n x += 1\r\n points.append((x,y))\r\n s += 1 \r\n computeSteps(x, y, s, steps) \r\n \r\n #end for\r\n return points\r\n\r\nwith open(filepath) as fp: \t \r\n steps = {} \r\n port = (0,0)\r\n wire1 = fp.readline().strip().split(',')\r\n wire2 = fp.readline().strip().split(',')\r\n point1 = buildPoints(wire1, steps)\r\n point2 = buildPoints(wire2, steps)\r\n \r\n commonPoints = intersection(point1, point2)\r\n\r\n min = sys.maxsize\r\n for k in commonPoints:\r\n val = steps.get(k)\r\n if val < min:\r\n min = val\r\n \r\n print(min)\r\n \r\n",
"step-ids": [
2,
4,
5,
6,
7
]
}
|
[
2,
4,
5,
6,
7
] |
#Takes - Contact Name(Must be saved in phone's contact list), Message, Time as input
# and sends message to the given contact at given time
# Accuracy Level ~ Seconds. (Also depends on your network speed)
from selenium import webdriver
PATH = 'C:\Program Files (x86)\chromedriver.exe'
driver = webdriver.Chrome(PATH)
from selenium.webdriver.common.keys import Keys
import time
from threading import Timer
from datetime import datetime
driver.get("https://web.whatsapp.com/")
print("Scan the QR code to Log in...")
time.sleep(10)
nameofcontact = input('Give name of contact: ')
msg = input("Type the message you want to send: ")
print("Enter Time of sending Message (Hrs, Min & Sec...)")
hrs = int(input("Hrs: "))
mins = int(input("Min: "))
secs = int(input("Sec: "))
x=datetime.today()
y=x.replace(day=x.day+1, hour=hrs, minute=mins, second=secs, microsecond=0)
delta_t=y-x
secs=delta_t.seconds+1
def send_msg():
global nameofcontact, msg
css_path = 'span[title="' + nameofcontact + '"]'
nameofcontact = driver.find_element_by_css_selector(css_path)
nameofcontact.click()
chatbox = driver.find_element_by_xpath('//*[@id="main"]/footer/div[1]/div/div/div[2]/div[1]/div/div[2]')
chatbox.send_keys(msg)
chatbox.send_keys(Keys.RETURN)
t = Timer(secs, send_msg)
t.start()
|
normal
|
{
"blob_id": "1811c0c5aca9d209638e2221cad2c30e80ee5199",
"index": 3116,
"step-1": "<mask token>\n\n\ndef send_msg():\n global nameofcontact, msg\n css_path = 'span[title=\"' + nameofcontact + '\"]'\n nameofcontact = driver.find_element_by_css_selector(css_path)\n nameofcontact.click()\n chatbox = driver.find_element_by_xpath(\n '//*[@id=\"main\"]/footer/div[1]/div/div/div[2]/div[1]/div/div[2]')\n chatbox.send_keys(msg)\n chatbox.send_keys(Keys.RETURN)\n\n\n<mask token>\n",
"step-2": "<mask token>\ndriver.get('https://web.whatsapp.com/')\nprint('Scan the QR code to Log in...')\ntime.sleep(10)\n<mask token>\nprint('Enter Time of sending Message (Hrs, Min & Sec...)')\n<mask token>\n\n\ndef send_msg():\n global nameofcontact, msg\n css_path = 'span[title=\"' + nameofcontact + '\"]'\n nameofcontact = driver.find_element_by_css_selector(css_path)\n nameofcontact.click()\n chatbox = driver.find_element_by_xpath(\n '//*[@id=\"main\"]/footer/div[1]/div/div/div[2]/div[1]/div/div[2]')\n chatbox.send_keys(msg)\n chatbox.send_keys(Keys.RETURN)\n\n\n<mask token>\nt.start()\n",
"step-3": "<mask token>\nPATH = 'C:\\\\Program Files (x86)\\\\chromedriver.exe'\ndriver = webdriver.Chrome(PATH)\n<mask token>\ndriver.get('https://web.whatsapp.com/')\nprint('Scan the QR code to Log in...')\ntime.sleep(10)\nnameofcontact = input('Give name of contact: ')\nmsg = input('Type the message you want to send: ')\nprint('Enter Time of sending Message (Hrs, Min & Sec...)')\nhrs = int(input('Hrs: '))\nmins = int(input('Min: '))\nsecs = int(input('Sec: '))\nx = datetime.today()\ny = x.replace(day=x.day + 1, hour=hrs, minute=mins, second=secs, microsecond=0)\ndelta_t = y - x\nsecs = delta_t.seconds + 1\n\n\ndef send_msg():\n global nameofcontact, msg\n css_path = 'span[title=\"' + nameofcontact + '\"]'\n nameofcontact = driver.find_element_by_css_selector(css_path)\n nameofcontact.click()\n chatbox = driver.find_element_by_xpath(\n '//*[@id=\"main\"]/footer/div[1]/div/div/div[2]/div[1]/div/div[2]')\n chatbox.send_keys(msg)\n chatbox.send_keys(Keys.RETURN)\n\n\nt = Timer(secs, send_msg)\nt.start()\n",
"step-4": "from selenium import webdriver\nPATH = 'C:\\\\Program Files (x86)\\\\chromedriver.exe'\ndriver = webdriver.Chrome(PATH)\nfrom selenium.webdriver.common.keys import Keys\nimport time\nfrom threading import Timer\nfrom datetime import datetime\ndriver.get('https://web.whatsapp.com/')\nprint('Scan the QR code to Log in...')\ntime.sleep(10)\nnameofcontact = input('Give name of contact: ')\nmsg = input('Type the message you want to send: ')\nprint('Enter Time of sending Message (Hrs, Min & Sec...)')\nhrs = int(input('Hrs: '))\nmins = int(input('Min: '))\nsecs = int(input('Sec: '))\nx = datetime.today()\ny = x.replace(day=x.day + 1, hour=hrs, minute=mins, second=secs, microsecond=0)\ndelta_t = y - x\nsecs = delta_t.seconds + 1\n\n\ndef send_msg():\n global nameofcontact, msg\n css_path = 'span[title=\"' + nameofcontact + '\"]'\n nameofcontact = driver.find_element_by_css_selector(css_path)\n nameofcontact.click()\n chatbox = driver.find_element_by_xpath(\n '//*[@id=\"main\"]/footer/div[1]/div/div/div[2]/div[1]/div/div[2]')\n chatbox.send_keys(msg)\n chatbox.send_keys(Keys.RETURN)\n\n\nt = Timer(secs, send_msg)\nt.start()\n",
"step-5": "#Takes - Contact Name(Must be saved in phone's contact list), Message, Time as input\n# and sends message to the given contact at given time\n# Accuracy Level ~ Seconds. (Also depends on your network speed)\n\nfrom selenium import webdriver\nPATH = 'C:\\Program Files (x86)\\chromedriver.exe'\ndriver = webdriver.Chrome(PATH)\nfrom selenium.webdriver.common.keys import Keys\nimport time\nfrom threading import Timer\nfrom datetime import datetime\n\ndriver.get(\"https://web.whatsapp.com/\")\nprint(\"Scan the QR code to Log in...\")\ntime.sleep(10)\n\nnameofcontact = input('Give name of contact: ')\nmsg = input(\"Type the message you want to send: \")\nprint(\"Enter Time of sending Message (Hrs, Min & Sec...)\")\nhrs = int(input(\"Hrs: \"))\nmins = int(input(\"Min: \"))\nsecs = int(input(\"Sec: \"))\n\n\nx=datetime.today()\ny=x.replace(day=x.day+1, hour=hrs, minute=mins, second=secs, microsecond=0)\ndelta_t=y-x\n\nsecs=delta_t.seconds+1\n\ndef send_msg():\n global nameofcontact, msg\n css_path = 'span[title=\"' + nameofcontact + '\"]'\n nameofcontact = driver.find_element_by_css_selector(css_path)\n nameofcontact.click()\n\n chatbox = driver.find_element_by_xpath('//*[@id=\"main\"]/footer/div[1]/div/div/div[2]/div[1]/div/div[2]')\n chatbox.send_keys(msg)\n chatbox.send_keys(Keys.RETURN)\n\nt = Timer(secs, send_msg)\nt.start()\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
#!/usr/bin/env python
###############################################################################
#
#
# Project:
# Purpose:
#
#
# Author: Massimo Di Stefano , [email protected]
#
###############################################################################
# Copyright (c) 2009, Massimo Di Stefano <[email protected]>
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
###############################################################################
__author__ = "Massimo Di Stefano"
__copyright__ = "Copyright 2009, gfoss.it"
__credits__ = [""]
__license__ = "GPL V3"
__version__ = "1.0.0"
__maintainer__ = "Massimo Di Stefano"
__email__ = "[email protected]"
__status__ = "Production"
__date__ = ""
try:
from osgeo import osr, ogr, gdal
except ImportError:
import osr, ogr, gdal
import string
import sys
def GeomType2Name(type):
if type == ogr.wkbUnknown:
return 'wkbUnknown'
elif type == ogr.wkbPoint:
return 'wkbPoint'
elif type == ogr.wkbLineString:
return 'wkbLineString'
elif type == ogr.wkbPolygon:
return 'wkbPolygon'
elif type == ogr.wkbMultiPoint:
return 'wkbMultiPoint'
elif type == ogr.wkbMultiLineString:
return 'wkbMultiLineString'
elif type == ogr.wkbMultiPolygon:
return 'wkbMultiPolygon'
elif type == ogr.wkbGeometryCollection:
return 'wkbGeometryCollection'
elif type == ogr.wkbNone:
return 'wkbNone'
elif type == ogr.wkbLinearRing:
return 'wkbLinearRing'
else:
return 'wkbUnknown'
def Esc(x):
return gdal.EscapeString(x, gdal.CPLES_XML)
def makestile(outfile, brush, pen, size, fill, thickness):
brush = brush.split(',')
pen = pen.split(',')
size = size.split(',')
outfile = outfile.replace('.vrt', '')
outfile = outfile + '.omd'
omd = '// vector file rendering options\n'
omd += 'brush_color: %s %s %s \n' % (brush[0], brush[1], brush[2])
omd += 'pen_color: %s %s %s \n' % (pen[0], pen[1], pen[2])
omd += 'point_width_height: %s %s \n' % (size[0], size[1])
omd += 'fill_flag: %s \n' % (fill)
omd += 'thickness: %s \n' % (thickness)
open(outfile, 'w').write(omd)
def ogrvrt(infile, outfile):
layer_list = []
relative = "0"
schema = 0
print infile
src_ds = ogr.Open(infile, update=0)
if len(layer_list) == 0:
for layer in src_ds:
layer_list.append(layer.GetLayerDefn().GetName())
vrt = '<OGRVRTDataSource>\n'
for name in layer_list:
layer = src_ds.GetLayerByName(name)
layerdef = layer.GetLayerDefn()
vrt += ' <OGRVRTLayer name="%s">\n' % Esc(name)
vrt += ' <SrcDataSource relativeToVRT="%s" shared="%d">%s</SrcDataSource>\n' \
% (relative, not schema, Esc(infile))
if schema:
vrt += ' <SrcLayer>@dummy@</SrcLayer>\n'
else:
vrt += ' <SrcLayer>%s</SrcLayer>\n' % Esc(name)
vrt += ' <GeometryType>%s</GeometryType>\n' \
% GeomType2Name(layerdef.GetGeomType())
srs = layer.GetSpatialRef()
if srs is not None:
vrt += ' <LayerSRS>%s</LayerSRS>\n' \
% (Esc(srs.ExportToWkt()))
# Process all the fields.
for fld_index in range(layerdef.GetFieldCount()):
src_fd = layerdef.GetFieldDefn(fld_index)
if src_fd.GetType() == ogr.OFTInteger:
type = 'Integer'
elif src_fd.GetType() == ogr.OFTString:
type = 'String'
elif src_fd.GetType() == ogr.OFTReal:
type = 'Real'
elif src_fd.GetType() == ogr.OFTStringList:
type = 'StringList'
elif src_fd.GetType() == ogr.OFTIntegerList:
type = 'IntegerList'
elif src_fd.GetType() == ogr.OFTRealList:
type = 'RealList'
elif src_fd.GetType() == ogr.OFTBinary:
type = 'Binary'
elif src_fd.GetType() == ogr.OFTDate:
type = 'Date'
elif src_fd.GetType() == ogr.OFTTime:
type = 'Time'
elif src_fd.GetType() == ogr.OFTDateTime:
type = 'DateTime'
else:
type = 'String'
vrt += ' <Field name="%s" type="%s"' \
% (Esc(src_fd.GetName()), type)
if not schema:
vrt += ' src="%s"' % Esc(src_fd.GetName())
if src_fd.GetWidth() > 0:
vrt += ' width="%d"' % src_fd.GetWidth()
if src_fd.GetPrecision() > 0:
vrt += ' precision="%d"' % src_fd.GetPrecision()
vrt += '/>\n'
vrt += ' </OGRVRTLayer>\n'
vrt += '</OGRVRTDataSource>\n'
file = open(outfile, 'w')
file.write(vrt)
file.close()
print 'vrt wroted'
|
normal
|
{
"blob_id": "59338170b44be037f749790a7942c2bcca1fc078",
"index": 2434,
"step-1": "#!/usr/bin/env python\n###############################################################################\n#\n#\n# Project:\n# Purpose:\n#\n#\n# Author: Massimo Di Stefano , [email protected]\n#\n###############################################################################\n# Copyright (c) 2009, Massimo Di Stefano <[email protected]>\n#\n# Permission is hereby granted, free of charge, to any person obtaining a\n# copy of this software and associated documentation files (the \"Software\"),\n# to deal in the Software without restriction, including without limitation\n# the rights to use, copy, modify, merge, publish, distribute, sublicense,\n# and/or sell copies of the Software, and to permit persons to whom the\n# Software is furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included\n# in all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS\n# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL\n# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER\n# DEALINGS IN THE SOFTWARE.\n###############################################################################\n\n__author__ = \"Massimo Di Stefano\"\n__copyright__ = \"Copyright 2009, gfoss.it\"\n__credits__ = [\"\"]\n__license__ = \"GPL V3\"\n__version__ = \"1.0.0\"\n__maintainer__ = \"Massimo Di Stefano\"\n__email__ = \"[email protected]\"\n__status__ = \"Production\"\n__date__ = \"\"\n\ntry:\n from osgeo import osr, ogr, gdal\nexcept ImportError:\n import osr, ogr, gdal\n\nimport string\nimport sys\n\n\ndef GeomType2Name(type):\n if type == ogr.wkbUnknown:\n return 'wkbUnknown'\n elif type == ogr.wkbPoint:\n return 'wkbPoint'\n elif type == ogr.wkbLineString:\n return 'wkbLineString'\n elif type == ogr.wkbPolygon:\n return 'wkbPolygon'\n elif type == ogr.wkbMultiPoint:\n return 'wkbMultiPoint'\n elif type == ogr.wkbMultiLineString:\n return 'wkbMultiLineString'\n elif type == ogr.wkbMultiPolygon:\n return 'wkbMultiPolygon'\n elif type == ogr.wkbGeometryCollection:\n return 'wkbGeometryCollection'\n elif type == ogr.wkbNone:\n return 'wkbNone'\n elif type == ogr.wkbLinearRing:\n return 'wkbLinearRing'\n else:\n return 'wkbUnknown'\n\n\ndef Esc(x):\n return gdal.EscapeString(x, gdal.CPLES_XML)\n\n\ndef makestile(outfile, brush, pen, size, fill, thickness):\n brush = brush.split(',')\n pen = pen.split(',')\n size = size.split(',')\n outfile = outfile.replace('.vrt', '')\n outfile = outfile + '.omd'\n omd = '// vector file rendering options\\n'\n omd += 'brush_color: %s %s %s \\n' % (brush[0], brush[1], brush[2])\n omd += 'pen_color: %s %s %s \\n' % (pen[0], pen[1], pen[2])\n omd += 'point_width_height: %s %s \\n' % (size[0], size[1])\n omd += 'fill_flag: %s \\n' % (fill)\n omd += 'thickness: %s \\n' % (thickness)\n open(outfile, 'w').write(omd)\n\n\ndef ogrvrt(infile, outfile):\n layer_list = []\n relative = \"0\"\n schema = 0\n print infile\n src_ds = ogr.Open(infile, update=0)\n if len(layer_list) == 0:\n for layer in src_ds:\n layer_list.append(layer.GetLayerDefn().GetName())\n vrt = '<OGRVRTDataSource>\\n'\n for name in layer_list:\n layer = src_ds.GetLayerByName(name)\n layerdef = layer.GetLayerDefn()\n vrt += ' 
<OGRVRTLayer name=\"%s\">\\n' % Esc(name)\n vrt += ' <SrcDataSource relativeToVRT=\"%s\" shared=\"%d\">%s</SrcDataSource>\\n' \\\n % (relative, not schema, Esc(infile))\n if schema:\n vrt += ' <SrcLayer>@dummy@</SrcLayer>\\n'\n else:\n vrt += ' <SrcLayer>%s</SrcLayer>\\n' % Esc(name)\n vrt += ' <GeometryType>%s</GeometryType>\\n' \\\n % GeomType2Name(layerdef.GetGeomType())\n srs = layer.GetSpatialRef()\n if srs is not None:\n vrt += ' <LayerSRS>%s</LayerSRS>\\n' \\\n % (Esc(srs.ExportToWkt()))\n # Process all the fields.\n for fld_index in range(layerdef.GetFieldCount()):\n src_fd = layerdef.GetFieldDefn(fld_index)\n if src_fd.GetType() == ogr.OFTInteger:\n type = 'Integer'\n elif src_fd.GetType() == ogr.OFTString:\n type = 'String'\n elif src_fd.GetType() == ogr.OFTReal:\n type = 'Real'\n elif src_fd.GetType() == ogr.OFTStringList:\n type = 'StringList'\n elif src_fd.GetType() == ogr.OFTIntegerList:\n type = 'IntegerList'\n elif src_fd.GetType() == ogr.OFTRealList:\n type = 'RealList'\n elif src_fd.GetType() == ogr.OFTBinary:\n type = 'Binary'\n elif src_fd.GetType() == ogr.OFTDate:\n type = 'Date'\n elif src_fd.GetType() == ogr.OFTTime:\n type = 'Time'\n elif src_fd.GetType() == ogr.OFTDateTime:\n type = 'DateTime'\n else:\n type = 'String'\n\n vrt += ' <Field name=\"%s\" type=\"%s\"' \\\n % (Esc(src_fd.GetName()), type)\n if not schema:\n vrt += ' src=\"%s\"' % Esc(src_fd.GetName())\n if src_fd.GetWidth() > 0:\n vrt += ' width=\"%d\"' % src_fd.GetWidth()\n if src_fd.GetPrecision() > 0:\n vrt += ' precision=\"%d\"' % src_fd.GetPrecision()\n vrt += '/>\\n'\n vrt += ' </OGRVRTLayer>\\n'\n vrt += '</OGRVRTDataSource>\\n'\n file = open(outfile, 'w')\n file.write(vrt)\n file.close()\n print 'vrt wroted'\n\t\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
class State(object):
def __init__(self, stateName, stateLevel):
        self.stateName = stateName
        self.stateLevel = stateLevel
|
normal
|
{
"blob_id": "73082ed2824ee65f7f4cbac47b9ebad19cec4196",
"index": 7226,
"step-1": "class State(object):\ndef __init__(self, stateName, stateLevel):\n self.stateName = stateName;\n self.stateLevel = stateLevel;\t\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
"""
Creates a ResNeXt Model as defined in:
Xie, S., Girshick, R., Dollar, P., Tu, Z., & He, K. (2016).
Aggregated residual transformations for deep neural networks.
arXiv preprint arXiv:1611.05431.
import from https://github.com/prlz77/ResNeXt.pytorch/blob/master/models/model.py
"""
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn import init
__all__ = ['resnext']
class ResNeXtBottleneck(nn.Module):
"""
    ResNeXt bottleneck type C (https://github.com/facebookresearch/ResNeXt/blob/master/models/resnext.lua)
"""
def __init__(self, in_channels, out_channels, stride, cardinality, widen_factor):
""" Constructor
Args:
in_channels: input channel dimensionality
out_channels: output channel dimensionality
stride: conv stride. Replaces pooling layer.
cardinality: num of convolution groups.
widen_factor: factor to reduce the input dimensionality before convolution.
"""
super(ResNeXtBottleneck, self).__init__()
D = cardinality * out_channels // widen_factor
self.conv_reduce = nn.Conv2d(in_channels, D, kernel_size=1, stride=1, padding=0, bias=False)
self.bn_reduce = nn.BatchNorm2d(D)
self.conv_conv = nn.Conv2d(D, D, kernel_size=3, stride=stride, padding=1, groups=cardinality, bias=False)
self.bn = nn.BatchNorm2d(D)
self.conv_expand = nn.Conv2d(D, out_channels, kernel_size=1, stride=1, padding=0, bias=False)
self.bn_expand = nn.BatchNorm2d(out_channels)
self.shortcut = nn.Sequential()
if in_channels != out_channels:
self.shortcut.add_module('shortcut_conv', nn.Conv2d(in_channels, out_channels, kernel_size=1, stride=stride, padding=0, bias=False))
self.shortcut.add_module('shortcut_bn', nn.BatchNorm2d(out_channels))
def forward(self, x):
bottleneck = self.conv_reduce.forward(x)
bottleneck = F.relu(self.bn_reduce.forward(bottleneck), inplace=True)
bottleneck = self.conv_conv.forward(bottleneck)
bottleneck = F.relu(self.bn.forward(bottleneck), inplace=True)
bottleneck = self.conv_expand.forward(bottleneck)
bottleneck = self.bn_expand.forward(bottleneck)
residual = self.shortcut.forward(x)
return F.relu(residual + bottleneck, inplace=True)
class CifarResNeXt(nn.Module):
"""
ResNext optimized for the Cifar dataset, as specified in
https://arxiv.org/pdf/1611.05431.pdf
"""
def __init__(self, cardinality, depth, num_classes, widen_factor=4, dropRate=0):
""" Constructor
Args:
cardinality: number of convolution groups.
depth: number of layers.
num_classes: number of classes
widen_factor: factor to adjust the channel dimensionality
"""
super(CifarResNeXt, self).__init__()
self.cardinality = cardinality
self.depth = depth
self.block_depth = (self.depth - 2) // 9
self.widen_factor = widen_factor
self.num_classes = num_classes
self.output_size = 64
self.stages = [64, 64 * self.widen_factor, 128 * self.widen_factor, 256 * self.widen_factor]
self.conv_1_3x3 = nn.Conv2d(3, 64, 3, 1, 1, bias=False)
self.bn_1 = nn.BatchNorm2d(64)
self.stage_1 = self.block('stage_1', self.stages[0], self.stages[1], 1)
self.stage_2 = self.block('stage_2', self.stages[1], self.stages[2], 2)
self.stage_3 = self.block('stage_3', self.stages[2], self.stages[3], 2)
self.classifier = nn.Linear(1024, num_classes)
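        # Attention branch head: an extra ResNeXt stage plus 1x1/3x3 convolutions that produce the attention map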
self.stage_att = self.block('stage_att', self.stages[2], self.stages[3], 1)
self.bn_att = nn.BatchNorm2d(self.stages[3])
self.att_conv = nn.Conv2d(self.stages[3], num_classes, kernel_size=1, padding=0,
bias=False)
self.bn_att2 = nn.BatchNorm2d(num_classes)
self.att_conv2 = nn.Conv2d(num_classes, num_classes, kernel_size=1, padding=0,
bias=False)
self.att_conv3 = nn.Conv2d(num_classes, 1, kernel_size=3, padding=1,
bias=False)
self.bn_att3 = nn.BatchNorm2d(1)
self.att_gap = nn.AvgPool2d(16)
self.sigmoid = nn.Sigmoid()
self.relu = nn.ReLU(inplace=True)
init.kaiming_normal(self.classifier.weight)
for key in self.state_dict():
if key.split('.')[-1] == 'weight':
if 'conv' in key:
init.kaiming_normal(self.state_dict()[key], mode='fan_out')
if 'bn' in key:
self.state_dict()[key][...] = 1
elif key.split('.')[-1] == 'bias':
self.state_dict()[key][...] = 0
def block(self, name, in_channels, out_channels, pool_stride=2):
""" Stack n bottleneck modules where n is inferred from the depth of the network.
Args:
name: string name of the current block.
in_channels: number of input channels
out_channels: number of output channels
pool_stride: factor to reduce the spatial dimensionality in the first bottleneck of the block.
Returns: a Module consisting of n sequential bottlenecks.
"""
block = nn.Sequential()
for bottleneck in range(self.block_depth):
name_ = '%s_bottleneck_%d' % (name, bottleneck)
if bottleneck == 0:
block.add_module(name_, ResNeXtBottleneck(in_channels, out_channels, pool_stride, self.cardinality,
self.widen_factor))
else:
block.add_module(name_,
ResNeXtBottleneck(out_channels, out_channels, 1, self.cardinality, self.widen_factor))
return block
def forward(self, x):
x = self.conv_1_3x3.forward(x)
x = F.relu(self.bn_1.forward(x), inplace=True)
x = self.stage_1.forward(x)
x = self.stage_2.forward(x)
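        # Attention branch: compute a spatial attention map and an auxiliary class prediction (ax)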
ax = self.stage_att(x)
ax = self.relu(self.bn_att2(self.att_conv(ax)))
bs, cs, ys, xs = ax.shape
self.att = self.sigmoid(self.bn_att3(self.att_conv3(ax)))
# self.att = self.att.view(bs, 1, ys, xs)
ax = self.att_conv2(ax)
ax = self.att_gap(ax)
ax = ax.view(ax.size(0), -1)
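        # Perception branch: reweight the shared features with the attention map (residual attention), then classify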
rx = x * self.att
rx = rx + x
rx = self.stage_3.forward(rx)
rx = F.avg_pool2d(rx, 8, 1)
rx = rx.view(-1, 1024)
rx = self.classifier(rx)
return ax, rx, self.att
def resnext(**kwargs):
"""Constructs a ResNeXt.
"""
model = CifarResNeXt(**kwargs)
return model
# """
# resneXt for cifar with pytorch
# Reference:
# [1] S. Xie, G. Ross, P. Dollar, Z. Tu and K. He Aggregated residual transformations for deep neural networks. In CVPR, 2017
# """
#
# import torch
# import torch.nn as nn
# import math
#
#
# class Bottleneck(nn.Module):
# expansion = 4
#
# def __init__(self, inplanes, planes, cardinality, baseWidth, stride=1, downsample=None):
# super(Bottleneck, self).__init__()
# D = int(planes * (baseWidth / 64.))
# C = cardinality
# self.conv1 = nn.Conv2d(inplanes, D * C, kernel_size=1, bias=False)
# self.bn1 = nn.BatchNorm2d(D * C)
# self.conv2 = nn.Conv2d(D * C, D * C, kernel_size=3, stride=stride, padding=1, groups=C, bias=False)
# self.bn2 = nn.BatchNorm2d(D * C)
# self.conv3 = nn.Conv2d(D * C, planes * 4, kernel_size=1, bias=False)
# self.bn3 = nn.BatchNorm2d(planes * 4)
# self.relu = nn.ReLU(inplace=True)
# self.downsample = downsample
# self.stride = stride
#
# def forward(self, x):
# residual = x
#
# out = self.conv1(x)
# out = self.bn1(out)
# out = self.relu(out)
#
# out = self.conv2(out)
# out = self.bn2(out)
# out = self.relu(out)
#
# out = self.conv3(out)
# out = self.bn3(out)
#
# if self.downsample is not None:
# residual = self.downsample(x)
#
# if residual.size() != out.size():
# print(out.size(), residual.size())
# out += residual
# out = self.relu(out)
#
# return out
#
#
# class ResNeXt_Cifar(nn.Module):
#
# def __init__(self, block, layers, cardinality, baseWidth, num_classes=10):
# super(ResNeXt_Cifar, self).__init__()
# self.inplanes = 64
# self.cardinality = cardinality
# self.baseWidth = baseWidth
# self.conv1 = nn.Conv2d(3, 64, kernel_size=3, stride=1, padding=1, bias=False)
# self.bn1 = nn.BatchNorm2d(64)
# self.relu = nn.ReLU(inplace=True)
# self.layer1 = self._make_layer(block, 64, layers[0])
# self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
# self.layer3 = self._make_layer(block, 256, layers[2], stride=2)
# self.avgpool = nn.AvgPool2d(8, stride=1)
# self.fc = nn.Linear(256 * block.expansion, num_classes)
#
# for m in self.modules():
# if isinstance(m, nn.Conv2d):
# n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
# m.weight.data.normal_(0, math.sqrt(2. / n))
# elif isinstance(m, nn.BatchNorm2d):
# m.weight.data.fill_(1)
# m.bias.data.zero_()
#
# def _make_layer(self, block, planes, blocks, stride=1):
# downsample = None
# if stride != 1 or self.inplanes != planes * block.expansion:
# downsample = nn.Sequential(
# nn.Conv2d(self.inplanes, planes * block.expansion, kernel_size=1, stride=stride, bias=False),
# nn.BatchNorm2d(planes * block.expansion)
# )
#
# layers = []
# layers.append(block(self.inplanes, planes, self.cardinality, self.baseWidth, stride, downsample))
# self.inplanes = planes * block.expansion
# for _ in range(1, blocks):
# layers.append(block(self.inplanes, planes, self.cardinality, self.baseWidth))
#
# return nn.Sequential(*layers)
#
# def forward(self, x):
# x = self.conv1(x)
# x = self.bn1(x)
# x = self.relu(x)
#
# x = self.layer1(x)
# x = self.layer2(x)
# x = self.layer3(x)
#
# x = self.avgpool(x)
# x = x.view(x.size(0), -1)
# x = self.fc(x)
#
# return x
#
#
# def resneXt_cifar(depth, cardinality, baseWidth, **kwargs):
# assert (depth - 2) % 9 == 0
# n = int((depth - 2) / 9)
# model = ResNeXt_Cifar(Bottleneck, [n, n, n], cardinality, baseWidth, **kwargs)
# return model
# if __name__ == '__main__':
# net = resneXt_cifar(29, 16, 64)
# y = net(torch.randn(1, 3, 32, 32))
# print(net)
# print(y.size())
|
normal
|
{
"blob_id": "50ed1512b0e6ff8e01f5d4aa034406fa78850176",
"index": 2293,
"step-1": "<mask token>\n\n\nclass CifarResNeXt(nn.Module):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass ResNeXtBottleneck(nn.Module):\n <mask token>\n\n def __init__(self, in_channels, out_channels, stride, cardinality,\n widen_factor):\n \"\"\" Constructor\n Args:\n in_channels: input channel dimensionality\n out_channels: output channel dimensionality\n stride: conv stride. Replaces pooling layer.\n cardinality: num of convolution groups.\n widen_factor: factor to reduce the input dimensionality before convolution.\n \"\"\"\n super(ResNeXtBottleneck, self).__init__()\n D = cardinality * out_channels // widen_factor\n self.conv_reduce = nn.Conv2d(in_channels, D, kernel_size=1, stride=\n 1, padding=0, bias=False)\n self.bn_reduce = nn.BatchNorm2d(D)\n self.conv_conv = nn.Conv2d(D, D, kernel_size=3, stride=stride,\n padding=1, groups=cardinality, bias=False)\n self.bn = nn.BatchNorm2d(D)\n self.conv_expand = nn.Conv2d(D, out_channels, kernel_size=1, stride\n =1, padding=0, bias=False)\n self.bn_expand = nn.BatchNorm2d(out_channels)\n self.shortcut = nn.Sequential()\n if in_channels != out_channels:\n self.shortcut.add_module('shortcut_conv', nn.Conv2d(in_channels,\n out_channels, kernel_size=1, stride=stride, padding=0, bias\n =False))\n self.shortcut.add_module('shortcut_bn', nn.BatchNorm2d(\n out_channels))\n\n def forward(self, x):\n bottleneck = self.conv_reduce.forward(x)\n bottleneck = F.relu(self.bn_reduce.forward(bottleneck), inplace=True)\n bottleneck = self.conv_conv.forward(bottleneck)\n bottleneck = F.relu(self.bn.forward(bottleneck), inplace=True)\n bottleneck = self.conv_expand.forward(bottleneck)\n bottleneck = self.bn_expand.forward(bottleneck)\n residual = self.shortcut.forward(x)\n return F.relu(residual + bottleneck, inplace=True)\n\n\nclass CifarResNeXt(nn.Module):\n \"\"\"\n ResNext optimized for the Cifar dataset, as specified in\n https://arxiv.org/pdf/1611.05431.pdf\n \"\"\"\n\n def __init__(self, cardinality, depth, num_classes, widen_factor=4,\n dropRate=0):\n \"\"\" Constructor\n Args:\n cardinality: number of convolution groups.\n depth: number of layers.\n num_classes: number of classes\n widen_factor: factor to adjust the channel dimensionality\n \"\"\"\n super(CifarResNeXt, self).__init__()\n self.cardinality = cardinality\n self.depth = depth\n self.block_depth = (self.depth - 2) // 9\n self.widen_factor = widen_factor\n self.num_classes = num_classes\n self.output_size = 64\n self.stages = [64, 64 * self.widen_factor, 128 * self.widen_factor,\n 256 * self.widen_factor]\n self.conv_1_3x3 = nn.Conv2d(3, 64, 3, 1, 1, bias=False)\n self.bn_1 = nn.BatchNorm2d(64)\n self.stage_1 = self.block('stage_1', self.stages[0], self.stages[1], 1)\n self.stage_2 = self.block('stage_2', self.stages[1], self.stages[2], 2)\n self.stage_3 = self.block('stage_3', self.stages[2], self.stages[3], 2)\n self.classifier = nn.Linear(1024, num_classes)\n self.stage_att = self.block('stage_att', self.stages[2], self.\n stages[3], 1)\n self.bn_att = nn.BatchNorm2d(self.stages[3])\n self.att_conv = nn.Conv2d(self.stages[3], num_classes, kernel_size=\n 1, padding=0, bias=False)\n self.bn_att2 = nn.BatchNorm2d(num_classes)\n self.att_conv2 = nn.Conv2d(num_classes, num_classes, kernel_size=1,\n padding=0, bias=False)\n self.att_conv3 = nn.Conv2d(num_classes, 1, kernel_size=3, padding=1,\n bias=False)\n self.bn_att3 = nn.BatchNorm2d(1)\n self.att_gap = nn.AvgPool2d(16)\n self.sigmoid = nn.Sigmoid()\n self.relu = nn.ReLU(inplace=True)\n init.kaiming_normal(self.classifier.weight)\n for key in self.state_dict():\n if key.split('.')[-1] == 'weight':\n if 'conv' in 
key:\n init.kaiming_normal(self.state_dict()[key], mode='fan_out')\n if 'bn' in key:\n self.state_dict()[key][...] = 1\n elif key.split('.')[-1] == 'bias':\n self.state_dict()[key][...] = 0\n\n def block(self, name, in_channels, out_channels, pool_stride=2):\n \"\"\" Stack n bottleneck modules where n is inferred from the depth of the network.\n Args:\n name: string name of the current block.\n in_channels: number of input channels\n out_channels: number of output channels\n pool_stride: factor to reduce the spatial dimensionality in the first bottleneck of the block.\n Returns: a Module consisting of n sequential bottlenecks.\n \"\"\"\n block = nn.Sequential()\n for bottleneck in range(self.block_depth):\n name_ = '%s_bottleneck_%d' % (name, bottleneck)\n if bottleneck == 0:\n block.add_module(name_, ResNeXtBottleneck(in_channels,\n out_channels, pool_stride, self.cardinality, self.\n widen_factor))\n else:\n block.add_module(name_, ResNeXtBottleneck(out_channels,\n out_channels, 1, self.cardinality, self.widen_factor))\n return block\n\n def forward(self, x):\n x = self.conv_1_3x3.forward(x)\n x = F.relu(self.bn_1.forward(x), inplace=True)\n x = self.stage_1.forward(x)\n x = self.stage_2.forward(x)\n ax = self.stage_att(x)\n ax = self.relu(self.bn_att2(self.att_conv(ax)))\n bs, cs, ys, xs = ax.shape\n self.att = self.sigmoid(self.bn_att3(self.att_conv3(ax)))\n ax = self.att_conv2(ax)\n ax = self.att_gap(ax)\n ax = ax.view(ax.size(0), -1)\n rx = x * self.att\n rx = rx + x\n rx = self.stage_3.forward(rx)\n rx = F.avg_pool2d(rx, 8, 1)\n rx = rx.view(-1, 1024)\n rx = self.classifier(rx)\n return ax, rx, self.att\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass ResNeXtBottleneck(nn.Module):\n \"\"\"\n RexNeXt bottleneck type C (https://github.com/facebookresearch/ResNeXt/blob/master/models/resnext.lua)\n \"\"\"\n\n def __init__(self, in_channels, out_channels, stride, cardinality,\n widen_factor):\n \"\"\" Constructor\n Args:\n in_channels: input channel dimensionality\n out_channels: output channel dimensionality\n stride: conv stride. Replaces pooling layer.\n cardinality: num of convolution groups.\n widen_factor: factor to reduce the input dimensionality before convolution.\n \"\"\"\n super(ResNeXtBottleneck, self).__init__()\n D = cardinality * out_channels // widen_factor\n self.conv_reduce = nn.Conv2d(in_channels, D, kernel_size=1, stride=\n 1, padding=0, bias=False)\n self.bn_reduce = nn.BatchNorm2d(D)\n self.conv_conv = nn.Conv2d(D, D, kernel_size=3, stride=stride,\n padding=1, groups=cardinality, bias=False)\n self.bn = nn.BatchNorm2d(D)\n self.conv_expand = nn.Conv2d(D, out_channels, kernel_size=1, stride\n =1, padding=0, bias=False)\n self.bn_expand = nn.BatchNorm2d(out_channels)\n self.shortcut = nn.Sequential()\n if in_channels != out_channels:\n self.shortcut.add_module('shortcut_conv', nn.Conv2d(in_channels,\n out_channels, kernel_size=1, stride=stride, padding=0, bias\n =False))\n self.shortcut.add_module('shortcut_bn', nn.BatchNorm2d(\n out_channels))\n\n def forward(self, x):\n bottleneck = self.conv_reduce.forward(x)\n bottleneck = F.relu(self.bn_reduce.forward(bottleneck), inplace=True)\n bottleneck = self.conv_conv.forward(bottleneck)\n bottleneck = F.relu(self.bn.forward(bottleneck), inplace=True)\n bottleneck = self.conv_expand.forward(bottleneck)\n bottleneck = self.bn_expand.forward(bottleneck)\n residual = self.shortcut.forward(x)\n return F.relu(residual + bottleneck, inplace=True)\n\n\nclass CifarResNeXt(nn.Module):\n \"\"\"\n ResNext optimized for the Cifar dataset, as specified in\n https://arxiv.org/pdf/1611.05431.pdf\n \"\"\"\n\n def __init__(self, cardinality, depth, num_classes, widen_factor=4,\n dropRate=0):\n \"\"\" Constructor\n Args:\n cardinality: number of convolution groups.\n depth: number of layers.\n num_classes: number of classes\n widen_factor: factor to adjust the channel dimensionality\n \"\"\"\n super(CifarResNeXt, self).__init__()\n self.cardinality = cardinality\n self.depth = depth\n self.block_depth = (self.depth - 2) // 9\n self.widen_factor = widen_factor\n self.num_classes = num_classes\n self.output_size = 64\n self.stages = [64, 64 * self.widen_factor, 128 * self.widen_factor,\n 256 * self.widen_factor]\n self.conv_1_3x3 = nn.Conv2d(3, 64, 3, 1, 1, bias=False)\n self.bn_1 = nn.BatchNorm2d(64)\n self.stage_1 = self.block('stage_1', self.stages[0], self.stages[1], 1)\n self.stage_2 = self.block('stage_2', self.stages[1], self.stages[2], 2)\n self.stage_3 = self.block('stage_3', self.stages[2], self.stages[3], 2)\n self.classifier = nn.Linear(1024, num_classes)\n self.stage_att = self.block('stage_att', self.stages[2], self.\n stages[3], 1)\n self.bn_att = nn.BatchNorm2d(self.stages[3])\n self.att_conv = nn.Conv2d(self.stages[3], num_classes, kernel_size=\n 1, padding=0, bias=False)\n self.bn_att2 = nn.BatchNorm2d(num_classes)\n self.att_conv2 = nn.Conv2d(num_classes, num_classes, kernel_size=1,\n padding=0, bias=False)\n self.att_conv3 = nn.Conv2d(num_classes, 1, kernel_size=3, padding=1,\n bias=False)\n self.bn_att3 = nn.BatchNorm2d(1)\n self.att_gap = nn.AvgPool2d(16)\n self.sigmoid = nn.Sigmoid()\n self.relu = nn.ReLU(inplace=True)\n 
init.kaiming_normal(self.classifier.weight)\n for key in self.state_dict():\n if key.split('.')[-1] == 'weight':\n if 'conv' in key:\n init.kaiming_normal(self.state_dict()[key], mode='fan_out')\n if 'bn' in key:\n self.state_dict()[key][...] = 1\n elif key.split('.')[-1] == 'bias':\n self.state_dict()[key][...] = 0\n\n def block(self, name, in_channels, out_channels, pool_stride=2):\n \"\"\" Stack n bottleneck modules where n is inferred from the depth of the network.\n Args:\n name: string name of the current block.\n in_channels: number of input channels\n out_channels: number of output channels\n pool_stride: factor to reduce the spatial dimensionality in the first bottleneck of the block.\n Returns: a Module consisting of n sequential bottlenecks.\n \"\"\"\n block = nn.Sequential()\n for bottleneck in range(self.block_depth):\n name_ = '%s_bottleneck_%d' % (name, bottleneck)\n if bottleneck == 0:\n block.add_module(name_, ResNeXtBottleneck(in_channels,\n out_channels, pool_stride, self.cardinality, self.\n widen_factor))\n else:\n block.add_module(name_, ResNeXtBottleneck(out_channels,\n out_channels, 1, self.cardinality, self.widen_factor))\n return block\n\n def forward(self, x):\n x = self.conv_1_3x3.forward(x)\n x = F.relu(self.bn_1.forward(x), inplace=True)\n x = self.stage_1.forward(x)\n x = self.stage_2.forward(x)\n ax = self.stage_att(x)\n ax = self.relu(self.bn_att2(self.att_conv(ax)))\n bs, cs, ys, xs = ax.shape\n self.att = self.sigmoid(self.bn_att3(self.att_conv3(ax)))\n ax = self.att_conv2(ax)\n ax = self.att_gap(ax)\n ax = ax.view(ax.size(0), -1)\n rx = x * self.att\n rx = rx + x\n rx = self.stage_3.forward(rx)\n rx = F.avg_pool2d(rx, 8, 1)\n rx = rx.view(-1, 1024)\n rx = self.classifier(rx)\n return ax, rx, self.att\n\n\ndef resnext(**kwargs):\n \"\"\"Constructs a ResNeXt.\n \"\"\"\n model = CifarResNeXt(**kwargs)\n return model\n",
"step-4": "<mask token>\n__all__ = ['resnext']\n\n\nclass ResNeXtBottleneck(nn.Module):\n \"\"\"\n RexNeXt bottleneck type C (https://github.com/facebookresearch/ResNeXt/blob/master/models/resnext.lua)\n \"\"\"\n\n def __init__(self, in_channels, out_channels, stride, cardinality,\n widen_factor):\n \"\"\" Constructor\n Args:\n in_channels: input channel dimensionality\n out_channels: output channel dimensionality\n stride: conv stride. Replaces pooling layer.\n cardinality: num of convolution groups.\n widen_factor: factor to reduce the input dimensionality before convolution.\n \"\"\"\n super(ResNeXtBottleneck, self).__init__()\n D = cardinality * out_channels // widen_factor\n self.conv_reduce = nn.Conv2d(in_channels, D, kernel_size=1, stride=\n 1, padding=0, bias=False)\n self.bn_reduce = nn.BatchNorm2d(D)\n self.conv_conv = nn.Conv2d(D, D, kernel_size=3, stride=stride,\n padding=1, groups=cardinality, bias=False)\n self.bn = nn.BatchNorm2d(D)\n self.conv_expand = nn.Conv2d(D, out_channels, kernel_size=1, stride\n =1, padding=0, bias=False)\n self.bn_expand = nn.BatchNorm2d(out_channels)\n self.shortcut = nn.Sequential()\n if in_channels != out_channels:\n self.shortcut.add_module('shortcut_conv', nn.Conv2d(in_channels,\n out_channels, kernel_size=1, stride=stride, padding=0, bias\n =False))\n self.shortcut.add_module('shortcut_bn', nn.BatchNorm2d(\n out_channels))\n\n def forward(self, x):\n bottleneck = self.conv_reduce.forward(x)\n bottleneck = F.relu(self.bn_reduce.forward(bottleneck), inplace=True)\n bottleneck = self.conv_conv.forward(bottleneck)\n bottleneck = F.relu(self.bn.forward(bottleneck), inplace=True)\n bottleneck = self.conv_expand.forward(bottleneck)\n bottleneck = self.bn_expand.forward(bottleneck)\n residual = self.shortcut.forward(x)\n return F.relu(residual + bottleneck, inplace=True)\n\n\nclass CifarResNeXt(nn.Module):\n \"\"\"\n ResNext optimized for the Cifar dataset, as specified in\n https://arxiv.org/pdf/1611.05431.pdf\n \"\"\"\n\n def __init__(self, cardinality, depth, num_classes, widen_factor=4,\n dropRate=0):\n \"\"\" Constructor\n Args:\n cardinality: number of convolution groups.\n depth: number of layers.\n num_classes: number of classes\n widen_factor: factor to adjust the channel dimensionality\n \"\"\"\n super(CifarResNeXt, self).__init__()\n self.cardinality = cardinality\n self.depth = depth\n self.block_depth = (self.depth - 2) // 9\n self.widen_factor = widen_factor\n self.num_classes = num_classes\n self.output_size = 64\n self.stages = [64, 64 * self.widen_factor, 128 * self.widen_factor,\n 256 * self.widen_factor]\n self.conv_1_3x3 = nn.Conv2d(3, 64, 3, 1, 1, bias=False)\n self.bn_1 = nn.BatchNorm2d(64)\n self.stage_1 = self.block('stage_1', self.stages[0], self.stages[1], 1)\n self.stage_2 = self.block('stage_2', self.stages[1], self.stages[2], 2)\n self.stage_3 = self.block('stage_3', self.stages[2], self.stages[3], 2)\n self.classifier = nn.Linear(1024, num_classes)\n self.stage_att = self.block('stage_att', self.stages[2], self.\n stages[3], 1)\n self.bn_att = nn.BatchNorm2d(self.stages[3])\n self.att_conv = nn.Conv2d(self.stages[3], num_classes, kernel_size=\n 1, padding=0, bias=False)\n self.bn_att2 = nn.BatchNorm2d(num_classes)\n self.att_conv2 = nn.Conv2d(num_classes, num_classes, kernel_size=1,\n padding=0, bias=False)\n self.att_conv3 = nn.Conv2d(num_classes, 1, kernel_size=3, padding=1,\n bias=False)\n self.bn_att3 = nn.BatchNorm2d(1)\n self.att_gap = nn.AvgPool2d(16)\n self.sigmoid = nn.Sigmoid()\n self.relu = 
nn.ReLU(inplace=True)\n init.kaiming_normal(self.classifier.weight)\n for key in self.state_dict():\n if key.split('.')[-1] == 'weight':\n if 'conv' in key:\n init.kaiming_normal(self.state_dict()[key], mode='fan_out')\n if 'bn' in key:\n self.state_dict()[key][...] = 1\n elif key.split('.')[-1] == 'bias':\n self.state_dict()[key][...] = 0\n\n def block(self, name, in_channels, out_channels, pool_stride=2):\n \"\"\" Stack n bottleneck modules where n is inferred from the depth of the network.\n Args:\n name: string name of the current block.\n in_channels: number of input channels\n out_channels: number of output channels\n pool_stride: factor to reduce the spatial dimensionality in the first bottleneck of the block.\n Returns: a Module consisting of n sequential bottlenecks.\n \"\"\"\n block = nn.Sequential()\n for bottleneck in range(self.block_depth):\n name_ = '%s_bottleneck_%d' % (name, bottleneck)\n if bottleneck == 0:\n block.add_module(name_, ResNeXtBottleneck(in_channels,\n out_channels, pool_stride, self.cardinality, self.\n widen_factor))\n else:\n block.add_module(name_, ResNeXtBottleneck(out_channels,\n out_channels, 1, self.cardinality, self.widen_factor))\n return block\n\n def forward(self, x):\n x = self.conv_1_3x3.forward(x)\n x = F.relu(self.bn_1.forward(x), inplace=True)\n x = self.stage_1.forward(x)\n x = self.stage_2.forward(x)\n ax = self.stage_att(x)\n ax = self.relu(self.bn_att2(self.att_conv(ax)))\n bs, cs, ys, xs = ax.shape\n self.att = self.sigmoid(self.bn_att3(self.att_conv3(ax)))\n ax = self.att_conv2(ax)\n ax = self.att_gap(ax)\n ax = ax.view(ax.size(0), -1)\n rx = x * self.att\n rx = rx + x\n rx = self.stage_3.forward(rx)\n rx = F.avg_pool2d(rx, 8, 1)\n rx = rx.view(-1, 1024)\n rx = self.classifier(rx)\n return ax, rx, self.att\n\n\ndef resnext(**kwargs):\n \"\"\"Constructs a ResNeXt.\n \"\"\"\n model = CifarResNeXt(**kwargs)\n return model\n",
"step-5": "\n\"\"\"\nCreates a ResNeXt Model as defined in:\nXie, S., Girshick, R., Dollar, P., Tu, Z., & He, K. (2016).\nAggregated residual transformations for deep neural networks.\narXiv preprint arXiv:1611.05431.\nimport from https://github.com/prlz77/ResNeXt.pytorch/blob/master/models/model.py\n\"\"\"\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch.nn import init\n\n__all__ = ['resnext']\n\nclass ResNeXtBottleneck(nn.Module):\n \"\"\"\n RexNeXt bottleneck type C (https://github.com/facebookresearch/ResNeXt/blob/master/models/resnext.lua)\n \"\"\"\n def __init__(self, in_channels, out_channels, stride, cardinality, widen_factor):\n \"\"\" Constructor\n Args:\n in_channels: input channel dimensionality\n out_channels: output channel dimensionality\n stride: conv stride. Replaces pooling layer.\n cardinality: num of convolution groups.\n widen_factor: factor to reduce the input dimensionality before convolution.\n \"\"\"\n super(ResNeXtBottleneck, self).__init__()\n D = cardinality * out_channels // widen_factor\n self.conv_reduce = nn.Conv2d(in_channels, D, kernel_size=1, stride=1, padding=0, bias=False)\n self.bn_reduce = nn.BatchNorm2d(D)\n self.conv_conv = nn.Conv2d(D, D, kernel_size=3, stride=stride, padding=1, groups=cardinality, bias=False)\n self.bn = nn.BatchNorm2d(D)\n self.conv_expand = nn.Conv2d(D, out_channels, kernel_size=1, stride=1, padding=0, bias=False)\n self.bn_expand = nn.BatchNorm2d(out_channels)\n\n self.shortcut = nn.Sequential()\n if in_channels != out_channels:\n self.shortcut.add_module('shortcut_conv', nn.Conv2d(in_channels, out_channels, kernel_size=1, stride=stride, padding=0, bias=False))\n self.shortcut.add_module('shortcut_bn', nn.BatchNorm2d(out_channels))\n\n def forward(self, x):\n bottleneck = self.conv_reduce.forward(x)\n bottleneck = F.relu(self.bn_reduce.forward(bottleneck), inplace=True)\n bottleneck = self.conv_conv.forward(bottleneck)\n bottleneck = F.relu(self.bn.forward(bottleneck), inplace=True)\n bottleneck = self.conv_expand.forward(bottleneck)\n bottleneck = self.bn_expand.forward(bottleneck)\n residual = self.shortcut.forward(x)\n return F.relu(residual + bottleneck, inplace=True)\n\n\nclass CifarResNeXt(nn.Module):\n \"\"\"\n ResNext optimized for the Cifar dataset, as specified in\n https://arxiv.org/pdf/1611.05431.pdf\n \"\"\"\n def __init__(self, cardinality, depth, num_classes, widen_factor=4, dropRate=0):\n \"\"\" Constructor\n Args:\n cardinality: number of convolution groups.\n depth: number of layers.\n num_classes: number of classes\n widen_factor: factor to adjust the channel dimensionality\n \"\"\"\n super(CifarResNeXt, self).__init__()\n self.cardinality = cardinality\n self.depth = depth\n self.block_depth = (self.depth - 2) // 9\n self.widen_factor = widen_factor\n self.num_classes = num_classes\n self.output_size = 64\n self.stages = [64, 64 * self.widen_factor, 128 * self.widen_factor, 256 * self.widen_factor]\n\n self.conv_1_3x3 = nn.Conv2d(3, 64, 3, 1, 1, bias=False)\n self.bn_1 = nn.BatchNorm2d(64)\n self.stage_1 = self.block('stage_1', self.stages[0], self.stages[1], 1)\n self.stage_2 = self.block('stage_2', self.stages[1], self.stages[2], 2)\n self.stage_3 = self.block('stage_3', self.stages[2], self.stages[3], 2)\n self.classifier = nn.Linear(1024, num_classes)\n\n self.stage_att = self.block('stage_att', self.stages[2], self.stages[3], 1)\n self.bn_att = nn.BatchNorm2d(self.stages[3])\n self.att_conv = nn.Conv2d(self.stages[3], num_classes, kernel_size=1, padding=0,\n 
bias=False)\n self.bn_att2 = nn.BatchNorm2d(num_classes)\n self.att_conv2 = nn.Conv2d(num_classes, num_classes, kernel_size=1, padding=0,\n bias=False)\n self.att_conv3 = nn.Conv2d(num_classes, 1, kernel_size=3, padding=1,\n bias=False)\n self.bn_att3 = nn.BatchNorm2d(1)\n self.att_gap = nn.AvgPool2d(16)\n self.sigmoid = nn.Sigmoid()\n self.relu = nn.ReLU(inplace=True)\n\n init.kaiming_normal(self.classifier.weight)\n\n for key in self.state_dict():\n if key.split('.')[-1] == 'weight':\n if 'conv' in key:\n init.kaiming_normal(self.state_dict()[key], mode='fan_out')\n if 'bn' in key:\n self.state_dict()[key][...] = 1\n elif key.split('.')[-1] == 'bias':\n self.state_dict()[key][...] = 0\n\n def block(self, name, in_channels, out_channels, pool_stride=2):\n \"\"\" Stack n bottleneck modules where n is inferred from the depth of the network.\n Args:\n name: string name of the current block.\n in_channels: number of input channels\n out_channels: number of output channels\n pool_stride: factor to reduce the spatial dimensionality in the first bottleneck of the block.\n Returns: a Module consisting of n sequential bottlenecks.\n \"\"\"\n block = nn.Sequential()\n for bottleneck in range(self.block_depth):\n name_ = '%s_bottleneck_%d' % (name, bottleneck)\n if bottleneck == 0:\n block.add_module(name_, ResNeXtBottleneck(in_channels, out_channels, pool_stride, self.cardinality,\n self.widen_factor))\n else:\n block.add_module(name_,\n ResNeXtBottleneck(out_channels, out_channels, 1, self.cardinality, self.widen_factor))\n return block\n\n def forward(self, x):\n x = self.conv_1_3x3.forward(x)\n x = F.relu(self.bn_1.forward(x), inplace=True)\n x = self.stage_1.forward(x)\n x = self.stage_2.forward(x)\n\n ax = self.stage_att(x)\n ax = self.relu(self.bn_att2(self.att_conv(ax)))\n bs, cs, ys, xs = ax.shape\n self.att = self.sigmoid(self.bn_att3(self.att_conv3(ax)))\n # self.att = self.att.view(bs, 1, ys, xs)\n ax = self.att_conv2(ax)\n ax = self.att_gap(ax)\n ax = ax.view(ax.size(0), -1)\n\n rx = x * self.att\n rx = rx + x\n rx = self.stage_3.forward(rx)\n rx = F.avg_pool2d(rx, 8, 1)\n rx = rx.view(-1, 1024)\n rx = self.classifier(rx)\n\n return ax, rx, self.att\n\ndef resnext(**kwargs):\n \"\"\"Constructs a ResNeXt.\n \"\"\"\n model = CifarResNeXt(**kwargs)\n return model\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n# \"\"\"\n# resneXt for cifar with pytorch\n# Reference:\n# [1] S. Xie, G. Ross, P. Dollar, Z. Tu and K. He Aggregated residual transformations for deep neural networks. 
In CVPR, 2017\n# \"\"\"\n#\n# import torch\n# import torch.nn as nn\n# import math\n#\n#\n# class Bottleneck(nn.Module):\n# expansion = 4\n#\n# def __init__(self, inplanes, planes, cardinality, baseWidth, stride=1, downsample=None):\n# super(Bottleneck, self).__init__()\n# D = int(planes * (baseWidth / 64.))\n# C = cardinality\n# self.conv1 = nn.Conv2d(inplanes, D * C, kernel_size=1, bias=False)\n# self.bn1 = nn.BatchNorm2d(D * C)\n# self.conv2 = nn.Conv2d(D * C, D * C, kernel_size=3, stride=stride, padding=1, groups=C, bias=False)\n# self.bn2 = nn.BatchNorm2d(D * C)\n# self.conv3 = nn.Conv2d(D * C, planes * 4, kernel_size=1, bias=False)\n# self.bn3 = nn.BatchNorm2d(planes * 4)\n# self.relu = nn.ReLU(inplace=True)\n# self.downsample = downsample\n# self.stride = stride\n#\n# def forward(self, x):\n# residual = x\n#\n# out = self.conv1(x)\n# out = self.bn1(out)\n# out = self.relu(out)\n#\n# out = self.conv2(out)\n# out = self.bn2(out)\n# out = self.relu(out)\n#\n# out = self.conv3(out)\n# out = self.bn3(out)\n#\n# if self.downsample is not None:\n# residual = self.downsample(x)\n#\n# if residual.size() != out.size():\n# print(out.size(), residual.size())\n# out += residual\n# out = self.relu(out)\n#\n# return out\n#\n#\n# class ResNeXt_Cifar(nn.Module):\n#\n# def __init__(self, block, layers, cardinality, baseWidth, num_classes=10):\n# super(ResNeXt_Cifar, self).__init__()\n# self.inplanes = 64\n# self.cardinality = cardinality\n# self.baseWidth = baseWidth\n# self.conv1 = nn.Conv2d(3, 64, kernel_size=3, stride=1, padding=1, bias=False)\n# self.bn1 = nn.BatchNorm2d(64)\n# self.relu = nn.ReLU(inplace=True)\n# self.layer1 = self._make_layer(block, 64, layers[0])\n# self.layer2 = self._make_layer(block, 128, layers[1], stride=2)\n# self.layer3 = self._make_layer(block, 256, layers[2], stride=2)\n# self.avgpool = nn.AvgPool2d(8, stride=1)\n# self.fc = nn.Linear(256 * block.expansion, num_classes)\n#\n# for m in self.modules():\n# if isinstance(m, nn.Conv2d):\n# n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels\n# m.weight.data.normal_(0, math.sqrt(2. / n))\n# elif isinstance(m, nn.BatchNorm2d):\n# m.weight.data.fill_(1)\n# m.bias.data.zero_()\n#\n# def _make_layer(self, block, planes, blocks, stride=1):\n# downsample = None\n# if stride != 1 or self.inplanes != planes * block.expansion:\n# downsample = nn.Sequential(\n# nn.Conv2d(self.inplanes, planes * block.expansion, kernel_size=1, stride=stride, bias=False),\n# nn.BatchNorm2d(planes * block.expansion)\n# )\n#\n# layers = []\n# layers.append(block(self.inplanes, planes, self.cardinality, self.baseWidth, stride, downsample))\n# self.inplanes = planes * block.expansion\n# for _ in range(1, blocks):\n# layers.append(block(self.inplanes, planes, self.cardinality, self.baseWidth))\n#\n# return nn.Sequential(*layers)\n#\n# def forward(self, x):\n# x = self.conv1(x)\n# x = self.bn1(x)\n# x = self.relu(x)\n#\n# x = self.layer1(x)\n# x = self.layer2(x)\n# x = self.layer3(x)\n#\n# x = self.avgpool(x)\n# x = x.view(x.size(0), -1)\n# x = self.fc(x)\n#\n# return x\n#\n#\n# def resneXt_cifar(depth, cardinality, baseWidth, **kwargs):\n# assert (depth - 2) % 9 == 0\n# n = int((depth - 2) / 9)\n# model = ResNeXt_Cifar(Bottleneck, [n, n, n], cardinality, baseWidth, **kwargs)\n# return model\n\n\n# if __name__ == '__main__':\n# net = resneXt_cifar(29, 16, 64)\n# y = net(torch.randn(1, 3, 32, 32))\n# print(net)\n# print(y.size())",
"step-ids": [
1,
8,
10,
11,
13
]
}
|
[
1,
8,
10,
11,
13
] |
# import tensorflow as tf
# from tensorflow.examples.tutorials.mnist import input_data
# mnist = input_data.read_data_sets('/tmp/data/',one_hot=True)
# def build_CNN_clasifier(x):
# x_image = tf.reshape (x, [-1,28,28,1])
#
# #layer1
# w_conv1 = tf.Variable(tf.truncated_normal(shape = [5,5,1,32],stddev= 5e-2))
# b_conv1 = tf.Variable(tf.constant(0.1,shape=[32]))
# h_conv1 = tf.nn.relu(tf.nn.conv2d(x_image,w_conv1,stride=[1,1,1,1,],padding='SAME')+b_conv1)
# h_pool1 = tf.nn.max_pool(h_conv1,ksize=[1,2,2,1],strides = [1,2,2,1],padding='SAME')
#
# #layer2
# w_conv2 = tf.Variable(tf.truncated_normal(shape=[5,5,32,64],stddev = 5e-2))
# b_conv2 = tf.Variable(tf.constant(0.1,shape=[64]))
# h_conv2 = tf.nn.relu(tf.nn.conv2d(h_conv1,w_conv2,strides=[1,1,1,1],padding='SAME')+b_conv2)
#
# h_pool2 = tf.nn.max_pool(h_conv2,ksize=[1,2,2,1],strides= [1,2,2,1],padding='SAME')
#
# #fully-connected layer
# w_fc_1 = tf.Variable(tf.truncated_normal(shape=[7*7*64,1024],stddev=5e-2))
# b_fc_1 = tf.Variable(tf.constant(0.1,shape=[1024]))
# h_pool2_flat= tf.reshape(h_pool2,[-1,7*7*64])
# h_fc_1 = tf.nn.relu(tf.matmul(h_pool2_flat,w_fc_1)+b_fc_1)
#
#
#
#
# with tf.Session() as sess:
# sess.run(x_image, feed_dict={x:mnist})
# print(x_image)
# print(x_image.shape)
import numpy as np
def conv1d(x, w, p=0, s=1):
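    # Naive 1D convolution: flip the kernel, zero-pad the input by p on each side, then slide with stride s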
w_rot = np.array(w[::-1])
x_padded = np.array(x)
if p > 0:
zero_pad = np.zeros(shape=p)
x_padded = np.concatenate([zero_pad, x_padded, zero_pad])
res = []
for i in range(0, int((len(x)+2*p-len(w))/s)+1):
        j = s * i
res.append(np.sum(x_padded[j:j+w_rot.shape[0]] * w_rot))
return np.array(res)
## Testing:
x = [1, 0, 2, 3, 0, 1, 1]
w = [2, 1, 3]
print('Conv1d Implementation: ', conv1d(x, w, p=0, s=1))
print('Numpy Results: ', np.convolve(x, w, mode='valid'))
import tensorflow as tf
i = tf.constant([1, 0, 2, 3, 0, 1, 1], dtype=tf.float32, name='i')
k = tf.constant([2, 1, 3], dtype=tf.float32, name='k')
print(i, '\n', k, '\n')
data = tf.reshape(i, [1, int(i.shape[0]), 1], name='data')
kernel = tf.reshape(k, [int(k.shape[0]), 1, 1], name='kernel')
print(data, '\n', kernel, '\n')
res = tf.squeeze(tf.nn.conv1d(data, kernel, 1, 'VALID'))
#res = tf.squeeze(tf.nn.conv1d(data, kernel, 1, 'SAME'))
#res = tf.squeeze(tf.nn.conv1d(data, kernel, 2, 'SAME'))
#res = tf.nn.conv1d(data, kernel, 2, 'SAME')
with tf.Session() as sess:
print(sess.run(res))
print(sess.run(data))
|
normal
|
{
"blob_id": "a336434abc526357db0536955885cf076ee60f59",
"index": 7220,
"step-1": "<mask token>\n\n\ndef conv1d(x, w, p=0, s=1):\n w_rot = np.array(w[::-1])\n x_padded = np.array(x)\n if p > 0:\n zero_pad = np.zeros(shape=p)\n x_padded = np.concatenate([zero_pad, x_padded, zero_pad])\n res = []\n for i in range(0, int((len(x) + 2 * p - len(w)) / s) + 1):\n j = s * i\n res.append(np.sum(x_padded[j:j + w_rot.shape[0]] * w_rot))\n return np.array(res)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef conv1d(x, w, p=0, s=1):\n w_rot = np.array(w[::-1])\n x_padded = np.array(x)\n if p > 0:\n zero_pad = np.zeros(shape=p)\n x_padded = np.concatenate([zero_pad, x_padded, zero_pad])\n res = []\n for i in range(0, int((len(x) + 2 * p - len(w)) / s) + 1):\n j = s * i\n res.append(np.sum(x_padded[j:j + w_rot.shape[0]] * w_rot))\n return np.array(res)\n\n\n<mask token>\nprint('Conv1d Implementation: ', conv1d(x, w, p=0, s=1))\nprint('Numpy Results: ', np.convolve(x, w, mode='valid'))\n<mask token>\nprint(i, '\\n', k, '\\n')\n<mask token>\nprint(data, '\\n', kernel, '\\n')\n<mask token>\nwith tf.Session() as sess:\n print(sess.run(res))\n print(sess.run(data))\n",
"step-3": "<mask token>\n\n\ndef conv1d(x, w, p=0, s=1):\n w_rot = np.array(w[::-1])\n x_padded = np.array(x)\n if p > 0:\n zero_pad = np.zeros(shape=p)\n x_padded = np.concatenate([zero_pad, x_padded, zero_pad])\n res = []\n for i in range(0, int((len(x) + 2 * p - len(w)) / s) + 1):\n j = s * i\n res.append(np.sum(x_padded[j:j + w_rot.shape[0]] * w_rot))\n return np.array(res)\n\n\nx = [1, 0, 2, 3, 0, 1, 1]\nw = [2, 1, 3]\nprint('Conv1d Implementation: ', conv1d(x, w, p=0, s=1))\nprint('Numpy Results: ', np.convolve(x, w, mode='valid'))\n<mask token>\ni = tf.constant([1, 0, 2, 3, 0, 1, 1], dtype=tf.float32, name='i')\nk = tf.constant([2, 1, 3], dtype=tf.float32, name='k')\nprint(i, '\\n', k, '\\n')\ndata = tf.reshape(i, [1, int(i.shape[0]), 1], name='data')\nkernel = tf.reshape(k, [int(k.shape[0]), 1, 1], name='kernel')\nprint(data, '\\n', kernel, '\\n')\nres = tf.squeeze(tf.nn.conv1d(data, kernel, 1, 'VALID'))\nwith tf.Session() as sess:\n print(sess.run(res))\n print(sess.run(data))\n",
"step-4": "import numpy as np\n\n\ndef conv1d(x, w, p=0, s=1):\n w_rot = np.array(w[::-1])\n x_padded = np.array(x)\n if p > 0:\n zero_pad = np.zeros(shape=p)\n x_padded = np.concatenate([zero_pad, x_padded, zero_pad])\n res = []\n for i in range(0, int((len(x) + 2 * p - len(w)) / s) + 1):\n j = s * i\n res.append(np.sum(x_padded[j:j + w_rot.shape[0]] * w_rot))\n return np.array(res)\n\n\nx = [1, 0, 2, 3, 0, 1, 1]\nw = [2, 1, 3]\nprint('Conv1d Implementation: ', conv1d(x, w, p=0, s=1))\nprint('Numpy Results: ', np.convolve(x, w, mode='valid'))\nimport tensorflow as tf\ni = tf.constant([1, 0, 2, 3, 0, 1, 1], dtype=tf.float32, name='i')\nk = tf.constant([2, 1, 3], dtype=tf.float32, name='k')\nprint(i, '\\n', k, '\\n')\ndata = tf.reshape(i, [1, int(i.shape[0]), 1], name='data')\nkernel = tf.reshape(k, [int(k.shape[0]), 1, 1], name='kernel')\nprint(data, '\\n', kernel, '\\n')\nres = tf.squeeze(tf.nn.conv1d(data, kernel, 1, 'VALID'))\nwith tf.Session() as sess:\n print(sess.run(res))\n print(sess.run(data))\n",
"step-5": "# import tensorflow as tf\n\n# from tensorflow.examples.tutorials.mnist import input_data\n# mnist = input_data.read_data_sets('/tmp/data/',one_hot=True)\n# def build_CNN_clasifier(x):\n# x_image = tf.reshape (x, [-1,28,28,1])\n#\n# #layer1\n# w_conv1 = tf.Variable(tf.truncated_normal(shape = [5,5,1,32],stddev= 5e-2))\n# b_conv1 = tf.Variable(tf.constant(0.1,shape=[32]))\n# h_conv1 = tf.nn.relu(tf.nn.conv2d(x_image,w_conv1,stride=[1,1,1,1,],padding='SAME')+b_conv1)\n# h_pool1 = tf.nn.max_pool(h_conv1,ksize=[1,2,2,1],strides = [1,2,2,1],padding='SAME')\n#\n# #layer2\n # w_conv2 = tf.Variable(tf.truncated_normal(shape=[5,5,32,64],stddev = 5e-2))\n # b_conv2 = tf.Variable(tf.constant(0.1,shape=[64]))\n # h_conv2 = tf.nn.relu(tf.nn.conv2d(h_conv1,w_conv2,strides=[1,1,1,1],padding='SAME')+b_conv2)\n #\n # h_pool2 = tf.nn.max_pool(h_conv2,ksize=[1,2,2,1],strides= [1,2,2,1],padding='SAME')\n #\n # #fully-connected layer\n # w_fc_1 = tf.Variable(tf.truncated_normal(shape=[7*7*64,1024],stddev=5e-2))\n # b_fc_1 = tf.Variable(tf.constant(0.1,shape=[1024]))\n # h_pool2_flat= tf.reshape(h_pool2,[-1,7*7*64])\n # h_fc_1 = tf.nn.relu(tf.matmul(h_pool2_flat,w_fc_1)+b_fc_1)\n #\n #\n #\n #\n # with tf.Session() as sess:\n # sess.run(x_image, feed_dict={x:mnist})\n # print(x_image)\n # print(x_image.shape)\n\n\nimport numpy as np\n\ndef conv1d(x, w, p=0, s=1):\n w_rot = np.array(w[::-1])\n\n x_padded = np.array(x)\n if p > 0:\n zero_pad = np.zeros(shape=p)\n x_padded = np.concatenate([zero_pad, x_padded, zero_pad])\n res = []\n for i in range(0, int((len(x)+2*p-len(w))/s)+1):\n j = s*i;\n res.append(np.sum(x_padded[j:j+w_rot.shape[0]] * w_rot))\n\n return np.array(res)\n## Testing:\nx = [1, 0, 2, 3, 0, 1, 1]\nw = [2, 1, 3]\nprint('Conv1d Implementation: ', conv1d(x, w, p=0, s=1))\nprint('Numpy Results: ', np.convolve(x, w, mode='valid'))\n\n\n\n\n\n\nimport tensorflow as tf\ni = tf.constant([1, 0, 2, 3, 0, 1, 1], dtype=tf.float32, name='i')\nk = tf.constant([2, 1, 3], dtype=tf.float32, name='k')\nprint(i, '\\n', k, '\\n')\ndata = tf.reshape(i, [1, int(i.shape[0]), 1], name='data')\nkernel = tf.reshape(k, [int(k.shape[0]), 1, 1], name='kernel')\nprint(data, '\\n', kernel, '\\n')\nres = tf.squeeze(tf.nn.conv1d(data, kernel, 1, 'VALID'))\n#res = tf.squeeze(tf.nn.conv1d(data, kernel, 1, 'SAME'))\n#res = tf.squeeze(tf.nn.conv1d(data, kernel, 2, 'SAME’))\n#res = tf.nn.conv1d(data, kernel, 2, 'SAME')\nwith tf.Session() as sess:\n print(sess.run(res))\n print(sess.run(data))",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
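# Count how many times each vowel appears in a phrase entered by the user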
vocales = "aeiou"
resultado = []
frase = input("Por favor ingrese la frase que desea verificar").lower()
print(frase)
for vocal in vocales:
conteo_vocales = frase.count(vocal)
mensaje = (f"En la frase hay {conteo_vocales} veces, la vocal{vocal}")
resultado.append(mensaje)
for elemento in resultado:
print(elemento)
|
normal
|
{
"blob_id": "f0a03f9a6dc78d01455913f7db3ab1948b19ea63",
"index": 6250,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nprint(frase)\nfor vocal in vocales:\n conteo_vocales = frase.count(vocal)\n mensaje = f'En la frase hay {conteo_vocales} veces, la vocal{vocal}'\n resultado.append(mensaje)\nfor elemento in resultado:\n print(elemento)\n",
"step-3": "vocales = 'aeiou'\nresultado = []\nfrase = input('Por favor ingrese la frase que desea verificar').lower()\nprint(frase)\nfor vocal in vocales:\n conteo_vocales = frase.count(vocal)\n mensaje = f'En la frase hay {conteo_vocales} veces, la vocal{vocal}'\n resultado.append(mensaje)\nfor elemento in resultado:\n print(elemento)\n",
"step-4": "vocales = \"aeiou\"\nresultado = []\n\nfrase = input(\"Por favor ingrese la frase que desea verificar\").lower()\nprint(frase)\n\nfor vocal in vocales:\n conteo_vocales = frase.count(vocal)\n mensaje = (f\"En la frase hay {conteo_vocales} veces, la vocal{vocal}\")\n resultado.append(mensaje)\n\nfor elemento in resultado:\n print(elemento)",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
import xarray as xr
def precip_stats_to_climatology(fili, start_year=1981, end_year=2015):
"""
Calculates average climatology for annual data - either Jan to Dec or accummulation period
"""
nyear = end_year - start_year + 1
ds = xr.open_dataset(fili)
year = ds['time'].dt.year
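    # Keep only the years in [start_year, end_year] and average over time; skipna=False propagates missing cells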
#dsMsk = ds.isel( time=( (year >= start_year) & (year <= end_year) ) ).count(dim='time')
dsClm = ds.isel( time=( (year >= start_year) & (year <= end_year) ) ).mean(dim='time', skipna=False)
#dsClm = dsClm.where(dsMsk == nyear)
#dsMsk.to_netcdf('era5.count.nc4')
print (dsClm)
filo = fili.replace('annual','annual.clm')
print (f'Writing climatology to {filo}')
dsClm.to_netcdf(filo)
return
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser( description='Calculates climatology from annual data' )
parser.add_argument('fili', type=str, help='path to annual file')
    parser.add_argument('--start_year', '-sy', type=int, default=1981,
                        help='First year for climatology')
    parser.add_argument('--end_year', '-ey', type=int, default=2015,
                        help='Last year for climatology')
args = parser.parse_args()
precip_stats_to_climatology(args.fili, start_year=args.start_year, end_year=args.end_year)
|
normal
|
{
"blob_id": "eb403fbb307332c18ffdcdf52589c714f0719960",
"index": 3052,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef precip_stats_to_climatology(fili, start_year=1981, end_year=2015):\n \"\"\"\n Calculates average climatology for annual data - either Jan to Dec or accummulation period\n \"\"\"\n nyear = end_year - start_year + 1\n ds = xr.open_dataset(fili)\n year = ds['time'].dt.year\n dsClm = ds.isel(time=(year >= start_year) & (year <= end_year)).mean(dim\n ='time', skipna=False)\n print(dsClm)\n filo = fili.replace('annual', 'annual.clm')\n print(f'Writing climatology to {filo}')\n dsClm.to_netcdf(filo)\n return\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef precip_stats_to_climatology(fili, start_year=1981, end_year=2015):\n \"\"\"\n Calculates average climatology for annual data - either Jan to Dec or accummulation period\n \"\"\"\n nyear = end_year - start_year + 1\n ds = xr.open_dataset(fili)\n year = ds['time'].dt.year\n dsClm = ds.isel(time=(year >= start_year) & (year <= end_year)).mean(dim\n ='time', skipna=False)\n print(dsClm)\n filo = fili.replace('annual', 'annual.clm')\n print(f'Writing climatology to {filo}')\n dsClm.to_netcdf(filo)\n return\n\n\nif __name__ == '__main__':\n import argparse\n parser = argparse.ArgumentParser(description=\n 'Calculates climatology from annual data')\n parser.add_argument('fili', type=str, help='path to annual file')\n parser.add_argument('--start_year', '-sy', default=1981, help=\n 'First year for climatology')\n parser.add_argument('--end_year', '-ey', default=2015, help=\n 'Last year for climatology')\n args = parser.parse_args()\n precip_stats_to_climatology(args.fili, start_year=args.start_year,\n end_year=args.end_year)\n",
"step-4": "import xarray as xr\n\n\ndef precip_stats_to_climatology(fili, start_year=1981, end_year=2015):\n \"\"\"\n Calculates average climatology for annual data - either Jan to Dec or accummulation period\n \"\"\"\n nyear = end_year - start_year + 1\n ds = xr.open_dataset(fili)\n year = ds['time'].dt.year\n dsClm = ds.isel(time=(year >= start_year) & (year <= end_year)).mean(dim\n ='time', skipna=False)\n print(dsClm)\n filo = fili.replace('annual', 'annual.clm')\n print(f'Writing climatology to {filo}')\n dsClm.to_netcdf(filo)\n return\n\n\nif __name__ == '__main__':\n import argparse\n parser = argparse.ArgumentParser(description=\n 'Calculates climatology from annual data')\n parser.add_argument('fili', type=str, help='path to annual file')\n parser.add_argument('--start_year', '-sy', default=1981, help=\n 'First year for climatology')\n parser.add_argument('--end_year', '-ey', default=2015, help=\n 'Last year for climatology')\n args = parser.parse_args()\n precip_stats_to_climatology(args.fili, start_year=args.start_year,\n end_year=args.end_year)\n",
"step-5": "import xarray as xr\n\ndef precip_stats_to_climatology(fili, start_year=1981, end_year=2015):\n \"\"\"\n Calculates average climatology for annual data - either Jan to Dec or accummulation period\n \"\"\"\n\n nyear = end_year - start_year + 1\n \n ds = xr.open_dataset(fili)\n\n year = ds['time'].dt.year\n #dsMsk = ds.isel( time=( (year >= start_year) & (year <= end_year) ) ).count(dim='time')\n dsClm = ds.isel( time=( (year >= start_year) & (year <= end_year) ) ).mean(dim='time', skipna=False)\n #dsClm = dsClm.where(dsMsk == nyear)\n \n #dsMsk.to_netcdf('era5.count.nc4')\n\n print (dsClm)\n \n filo = fili.replace('annual','annual.clm')\n print (f'Writing climatology to {filo}') \n dsClm.to_netcdf(filo)\n\n return\n\nif __name__ == \"__main__\":\n\n import argparse\n\n parser = argparse.ArgumentParser( description='Calculates climatology from annual data' )\n parser.add_argument('fili', type=str, help='path to annual file')\n parser.add_argument('--start_year', '-sy', default=1981,\n help='First year for climatology')\n parser.add_argument('--end_year', '-ey', default=2015,\n help='Last year for climatology')\n args = parser.parse_args()\n\n precip_stats_to_climatology(args.fili, start_year=args.start_year, end_year=args.end_year)\n \n\n \n \n \n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
# 12.02.17
"""
naming convention
a__b__c
a: parameter
    t-temperature
    tm-minimum temperature
    tM-maximum temperature
b: time interval
    a-year
c: source data table
    g-daily
"""
import db_02 as DB
def t_tm_tM__a__g(db, anno):
cmd = """
SELECT data, t, tmin, tmax
FROM Giornaliero
WHERE strftime('%Y', data) = '{}'
""".format(anno)
dati = db.cur.execute(cmd).fetchall()
ldate = []
lt = []
ltm = []
ltM = []
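    # Unpack the daily rows into parallel lists: dates, mean, minimum and maximum temperatures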
for data, t, tm , tM in dati:
ldate.append(data)
lt.append(t)
ltm.append(tm)
ltM.append(tM)
return ldate, lt, ltm, ltM
if __name__ == '__main__':
db = DB.DB()
db.crea_db()
t_tm_tM__a__g(db, 2017)
|
normal
|
{
"blob_id": "26b0a762b8eb30f0ef3c5a914f032c2a7d24f750",
"index": 5606,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef t_tm_tM__a__g(db, anno):\n cmd = (\n \"\\nSELECT data, t, tmin, tmax\\nFROM Giornaliero\\nWHERE strftime('%Y') = '{}'\\n \"\n .format(anno))\n dati = db.cur.execute(cmd).fetchall()\n ldate = []\n lt = []\n ltm = []\n ltM = []\n for data, t, tm, tM in dati:\n ldate.append(data)\n lt.append(t)\n ltm.append(tm)\n ltM.append(tM)\n return ldate, lt, ltm, ltM\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef t_tm_tM__a__g(db, anno):\n cmd = (\n \"\\nSELECT data, t, tmin, tmax\\nFROM Giornaliero\\nWHERE strftime('%Y') = '{}'\\n \"\n .format(anno))\n dati = db.cur.execute(cmd).fetchall()\n ldate = []\n lt = []\n ltm = []\n ltM = []\n for data, t, tm, tM in dati:\n ldate.append(data)\n lt.append(t)\n ltm.append(tm)\n ltM.append(tM)\n return ldate, lt, ltm, ltM\n\n\nif __name__ == '__main__':\n db = DB.DB()\n db.crea_db()\n t_tm_tM__a__g(db, 2017)\n",
"step-4": "<mask token>\nimport db_02 as DB\n\n\ndef t_tm_tM__a__g(db, anno):\n cmd = (\n \"\\nSELECT data, t, tmin, tmax\\nFROM Giornaliero\\nWHERE strftime('%Y') = '{}'\\n \"\n .format(anno))\n dati = db.cur.execute(cmd).fetchall()\n ldate = []\n lt = []\n ltm = []\n ltM = []\n for data, t, tm, tM in dati:\n ldate.append(data)\n lt.append(t)\n ltm.append(tm)\n ltM.append(tM)\n return ldate, lt, ltm, ltM\n\n\nif __name__ == '__main__':\n db = DB.DB()\n db.crea_db()\n t_tm_tM__a__g(db, 2017)\n",
"step-5": "# 12.02.17\n\n\"\"\"\nnomencalura\na__b__c\n\na: parametro\n t-temperatura\n tm-temperatura minima\n tM-teperatura massima\n\nb: intervallo di tempo\n a-anno\n\nc: tabella fonte dati\n g-giornaliero\n\"\"\"\n\nimport db_02 as DB\n\n\ndef t_tm_tM__a__g(db, anno):\n cmd = \"\"\"\nSELECT data, t, tmin, tmax\nFROM Giornaliero\nWHERE strftime('%Y') = '{}'\n \"\"\".format(anno)\n\n dati = db.cur.execute(cmd).fetchall()\n\n ldate = []\n lt = []\n ltm = []\n ltM = []\n for data, t, tm , tM in dati:\n ldate.append(data)\n lt.append(t)\n ltm.append(tm)\n ltM.append(tM)\n\n return ldate, lt, ltm, ltM\n\n\nif __name__ == '__main__':\n db = DB.DB()\n db.crea_db()\n\n t_tm_tM__a__g(db, 2017)\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
# -*- coding: utf-8-*-
import random
import re
from datetime import datetime, time
from phue import Bridge
import os
import glob
WORDS = []
def handle(text, mic, profile):
messages1 = ["Naturally Sir ","Of course Sir ","I'll get right at it"]
final = random.choice(messages1)
mic.say(final)
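	# Build an ssh command to the remote Pi (IP taken from the profile) and kill any running omxplayer process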
command = "ssh pi@"
ip = profile['piip']
command += ip
command += " pkill omxplayer"
os.system(command)
mic.say("The music process has successfully been killed")
def isValid(text):
"""
        Returns True if the input asks to stop or kill the alarm, clock, or music.
Arguments:
text -- user-input, typically transcribed speech
"""
return bool(re.search(r'\b((kill|stop) the (alarm|clock|music))\b', text, re.IGNORECASE))
|
normal
|
{
"blob_id": "668a8005f2f66190d588fb9289293d73a608f767",
"index": 926,
"step-1": "<mask token>\n\n\ndef handle(text, mic, profile):\n messages1 = ['Naturally Sir ', 'Of course Sir ', \"I'll get right at it\"]\n final = random.choice(messages1)\n mic.say(final)\n command = 'ssh pi@'\n ip = profile['piip']\n command += ip\n command += ' pkill omxplayer'\n os.system(command)\n mic.say('The music process has successfully been killed')\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef handle(text, mic, profile):\n messages1 = ['Naturally Sir ', 'Of course Sir ', \"I'll get right at it\"]\n final = random.choice(messages1)\n mic.say(final)\n command = 'ssh pi@'\n ip = profile['piip']\n command += ip\n command += ' pkill omxplayer'\n os.system(command)\n mic.say('The music process has successfully been killed')\n\n\ndef isValid(text):\n \"\"\"\n Returns True if the input is related to the meaning of life.\n\n Arguments:\n text -- user-input, typically transcribed speech\n \"\"\"\n return bool(re.search('\\\\b((kill|stop) the (alarm|clock|music))\\\\b',\n text, re.IGNORECASE))\n",
"step-3": "<mask token>\nWORDS = []\n\n\ndef handle(text, mic, profile):\n messages1 = ['Naturally Sir ', 'Of course Sir ', \"I'll get right at it\"]\n final = random.choice(messages1)\n mic.say(final)\n command = 'ssh pi@'\n ip = profile['piip']\n command += ip\n command += ' pkill omxplayer'\n os.system(command)\n mic.say('The music process has successfully been killed')\n\n\ndef isValid(text):\n \"\"\"\n Returns True if the input is related to the meaning of life.\n\n Arguments:\n text -- user-input, typically transcribed speech\n \"\"\"\n return bool(re.search('\\\\b((kill|stop) the (alarm|clock|music))\\\\b',\n text, re.IGNORECASE))\n",
"step-4": "import random\nimport re\nfrom datetime import datetime, time\nfrom phue import Bridge\nimport os\nimport glob\nWORDS = []\n\n\ndef handle(text, mic, profile):\n messages1 = ['Naturally Sir ', 'Of course Sir ', \"I'll get right at it\"]\n final = random.choice(messages1)\n mic.say(final)\n command = 'ssh pi@'\n ip = profile['piip']\n command += ip\n command += ' pkill omxplayer'\n os.system(command)\n mic.say('The music process has successfully been killed')\n\n\ndef isValid(text):\n \"\"\"\n Returns True if the input is related to the meaning of life.\n\n Arguments:\n text -- user-input, typically transcribed speech\n \"\"\"\n return bool(re.search('\\\\b((kill|stop) the (alarm|clock|music))\\\\b',\n text, re.IGNORECASE))\n",
"step-5": "# -*- coding: utf-8-*-\nimport random\nimport re\nfrom datetime import datetime, time\nfrom phue import Bridge\nimport os\nimport glob\n\nWORDS = []\n\n\ndef handle(text, mic, profile):\n \n\n\tmessages1 = [\"Naturally Sir \",\"Of course Sir \",\"I'll get right at it\"]\n\tfinal = random.choice(messages1)\n\tmic.say(final)\n\tcommand = \"ssh pi@\"\n\tip = profile['piip']\n\tcommand += ip\n\tcommand += \" pkill omxplayer\"\n\tos.system(command)\n\tmic.say(\"The music process has successfully been killed\")\n\n\n\ndef isValid(text):\n \"\"\"\n Returns True if the input is related to the meaning of life.\n\n Arguments:\n text -- user-input, typically transcribed speech\n \"\"\"\n return bool(re.search(r'\\b((kill|stop) the (alarm|clock|music))\\b', text, re.IGNORECASE))\n\n\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
from functools import update_wrapper
from django.db import models
# Create your models here.
class Product(models.Model):
product_id=models.AutoField
product_name=models.CharField(max_length=50)
category=models.CharField(max_length=50,default="")
subcategory=models.CharField(max_length=50,default="")
desc=models.CharField(max_length=300)
price=models.IntegerField(default=0)
pub_date=models.DateField()
image=models.ImageField(upload_to='shop/images',default="")
def __str__(self):
return self.product_name
class Contact(models.Model):
msg_id=models.AutoField(primary_key=True)
name=models.CharField(max_length=50,default="")
email=models.CharField(max_length=50,default="")
desc=models.CharField(max_length=1000,default="")
phone=models.CharField(max_length=50,default="")
def __str__(self):
return self.name
class Order(models.Model):
order_id=models.AutoField(primary_key=True)
item_json=models.CharField(max_length=10000,default="")
name=models.CharField(max_length=100,default="")
email=models.CharField(max_length=100,default="")
address=models.CharField(max_length=100,default="")
locality=models.CharField(max_length=50,default="")
city=models.CharField(max_length=1000,default="")
state=models.CharField(max_length=1000,default="")
zip=models.CharField(max_length=1000,default="")
phone=models.CharField(max_length=1000,default="")
class OrderUpdate(models.Model):
    update_id=models.AutoField(primary_key=True)
order_id=models.IntegerField(default=0)
update_desc=models.CharField(max_length=50000,default="")
timestamp=models.DateField(auto_now_add=True)
def __str__(self):
return self.update_desc[0:7] + "..."
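
# Minimal usage sketch (not part of this models module): the field values and
# the JSON layout of Order.item_json below are assumptions, shown only to
# illustrate how Order and OrderUpdate are meant to relate.
#   order = Order.objects.create(name="A. Buyer", email="a@example.com",
#                                address="1 Main St", locality="X", city="Y",
#                                state="Z", zip="00000", phone="0000000000",
#                                item_json='{"pr1": [1, 100, "T-shirt"]}')
#   OrderUpdate.objects.create(order_id=order.order_id,
#                              update_desc="Order placed")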
|
normal
|
{
"blob_id": "d123083358a4fd69f6f8de27fa177afac3bf80ce",
"index": 5680,
"step-1": "<mask token>\n\n\nclass Contact(models.Model):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def __str__(self):\n return self.name\n\n\nclass Order(models.Model):\n order_id = models.AutoField(primary_key=True)\n item_json = models.CharField(max_length=10000, default='')\n name = models.CharField(max_length=100, default='')\n email = models.CharField(max_length=100, default='')\n address = models.CharField(max_length=100, default='')\n locality = models.CharField(max_length=50, default='')\n city = models.CharField(max_length=1000, default='')\n state = models.CharField(max_length=1000, default='')\n zip = models.CharField(max_length=1000, default='')\n phone = models.CharField(max_length=1000, default='')\n\n\nclass OrderUpdate(models.Model):\n update_id = models.AutoField(primary_key=True)\n order_id = models.IntegerField(default=0)\n update_desc = models.CharField(max_length=50000, default='')\n timestamp = models.DateField(auto_now_add=True)\n\n def __str__(self):\n return self.update_desc[0:7] + '...'\n",
"step-2": "<mask token>\n\n\nclass Product(models.Model):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n\nclass Contact(models.Model):\n msg_id = models.AutoField(primary_key=True)\n name = models.CharField(max_length=50, default='')\n email = models.CharField(max_length=50, default='')\n desc = models.CharField(max_length=1000, default='')\n phone = models.CharField(max_length=50, default='')\n\n def __str__(self):\n return self.name\n\n\nclass Order(models.Model):\n order_id = models.AutoField(primary_key=True)\n item_json = models.CharField(max_length=10000, default='')\n name = models.CharField(max_length=100, default='')\n email = models.CharField(max_length=100, default='')\n address = models.CharField(max_length=100, default='')\n locality = models.CharField(max_length=50, default='')\n city = models.CharField(max_length=1000, default='')\n state = models.CharField(max_length=1000, default='')\n zip = models.CharField(max_length=1000, default='')\n phone = models.CharField(max_length=1000, default='')\n\n\nclass OrderUpdate(models.Model):\n update_id = models.AutoField(primary_key=True)\n order_id = models.IntegerField(default=0)\n update_desc = models.CharField(max_length=50000, default='')\n timestamp = models.DateField(auto_now_add=True)\n\n def __str__(self):\n return self.update_desc[0:7] + '...'\n",
"step-3": "<mask token>\n\n\nclass Product(models.Model):\n product_id = models.AutoField\n product_name = models.CharField(max_length=50)\n category = models.CharField(max_length=50, default='')\n subcategory = models.CharField(max_length=50, default='')\n desc = models.CharField(max_length=300)\n price = models.IntegerField(default=0)\n pub_date = models.DateField()\n image = models.ImageField(upload_to='shop/images', default='')\n\n def __str__(self):\n return self.product_name\n\n\nclass Contact(models.Model):\n msg_id = models.AutoField(primary_key=True)\n name = models.CharField(max_length=50, default='')\n email = models.CharField(max_length=50, default='')\n desc = models.CharField(max_length=1000, default='')\n phone = models.CharField(max_length=50, default='')\n\n def __str__(self):\n return self.name\n\n\nclass Order(models.Model):\n order_id = models.AutoField(primary_key=True)\n item_json = models.CharField(max_length=10000, default='')\n name = models.CharField(max_length=100, default='')\n email = models.CharField(max_length=100, default='')\n address = models.CharField(max_length=100, default='')\n locality = models.CharField(max_length=50, default='')\n city = models.CharField(max_length=1000, default='')\n state = models.CharField(max_length=1000, default='')\n zip = models.CharField(max_length=1000, default='')\n phone = models.CharField(max_length=1000, default='')\n\n\nclass OrderUpdate(models.Model):\n update_id = models.AutoField(primary_key=True)\n order_id = models.IntegerField(default=0)\n update_desc = models.CharField(max_length=50000, default='')\n timestamp = models.DateField(auto_now_add=True)\n\n def __str__(self):\n return self.update_desc[0:7] + '...'\n",
"step-4": "from functools import update_wrapper\nfrom django.db import models\n\n\nclass Product(models.Model):\n product_id = models.AutoField\n product_name = models.CharField(max_length=50)\n category = models.CharField(max_length=50, default='')\n subcategory = models.CharField(max_length=50, default='')\n desc = models.CharField(max_length=300)\n price = models.IntegerField(default=0)\n pub_date = models.DateField()\n image = models.ImageField(upload_to='shop/images', default='')\n\n def __str__(self):\n return self.product_name\n\n\nclass Contact(models.Model):\n msg_id = models.AutoField(primary_key=True)\n name = models.CharField(max_length=50, default='')\n email = models.CharField(max_length=50, default='')\n desc = models.CharField(max_length=1000, default='')\n phone = models.CharField(max_length=50, default='')\n\n def __str__(self):\n return self.name\n\n\nclass Order(models.Model):\n order_id = models.AutoField(primary_key=True)\n item_json = models.CharField(max_length=10000, default='')\n name = models.CharField(max_length=100, default='')\n email = models.CharField(max_length=100, default='')\n address = models.CharField(max_length=100, default='')\n locality = models.CharField(max_length=50, default='')\n city = models.CharField(max_length=1000, default='')\n state = models.CharField(max_length=1000, default='')\n zip = models.CharField(max_length=1000, default='')\n phone = models.CharField(max_length=1000, default='')\n\n\nclass OrderUpdate(models.Model):\n update_id = models.AutoField(primary_key=True)\n order_id = models.IntegerField(default=0)\n update_desc = models.CharField(max_length=50000, default='')\n timestamp = models.DateField(auto_now_add=True)\n\n def __str__(self):\n return self.update_desc[0:7] + '...'\n",
"step-5": "from functools import update_wrapper\nfrom django.db import models\n\n# Create your models here.\n\n\nclass Product(models.Model):\n product_id=models.AutoField\n product_name=models.CharField(max_length=50)\n category=models.CharField(max_length=50,default=\"\")\n subcategory=models.CharField(max_length=50,default=\"\")\n desc=models.CharField(max_length=300)\n price=models.IntegerField(default=0)\n pub_date=models.DateField()\n image=models.ImageField(upload_to='shop/images',default=\"\")\n\n def __str__(self):\n return self.product_name\nclass Contact(models.Model):\n msg_id=models.AutoField(primary_key=True)\n name=models.CharField(max_length=50,default=\"\")\n email=models.CharField(max_length=50,default=\"\")\n desc=models.CharField(max_length=1000,default=\"\")\n phone=models.CharField(max_length=50,default=\"\")\n \n\n def __str__(self):\n return self.name\nclass Order(models.Model):\n order_id=models.AutoField(primary_key=True)\n item_json=models.CharField(max_length=10000,default=\"\")\n name=models.CharField(max_length=100,default=\"\")\n email=models.CharField(max_length=100,default=\"\")\n address=models.CharField(max_length=100,default=\"\")\n locality=models.CharField(max_length=50,default=\"\")\n city=models.CharField(max_length=1000,default=\"\")\n state=models.CharField(max_length=1000,default=\"\")\n zip=models.CharField(max_length=1000,default=\"\")\n phone=models.CharField(max_length=1000,default=\"\")\n \n\nclass OrderUpdate(models.Model):\n update_id=models.AutoField(primary_key=True);\n order_id=models.IntegerField(default=0)\n update_desc=models.CharField(max_length=50000,default=\"\")\n timestamp=models.DateField(auto_now_add=True)\n\n def __str__(self):\n return self.update_desc[0:7] + \"...\"\n \n \n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n",
"step-ids": [
7,
9,
11,
12,
13
]
}
|
[
7,
9,
11,
12,
13
] |
from django.shortcuts import render,redirect
from . import download_function
from django.http import HttpResponse
# Create your views here.
def download(request):
if request.method == "GET":
session = request.GET['session']
title = request.GET['download_title']
download_quality = request.GET['download_quality']
file_url = download_function.download_generator(session,download_quality,title)
return HttpResponse(file_url)
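
# Hedged usage sketch: the URL pattern and parameter values below are
# assumptions about how this view is wired up (e.g. path('download/',
# views.download) in urls.py); the view itself only needs the three GET
# parameters it reads.
#   GET /download/?session=<id>&download_title=<title>&download_quality=720p
#   -> plain-text HttpResponse containing the file URL built by
#      download_function.download_generator()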
|
normal
|
{
"blob_id": "339506777f5471ec99b39c67c28df8ec3d06ce19",
"index": 3084,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef download(request):\n if request.method == 'GET':\n session = request.GET['session']\n title = request.GET['download_title']\n download_quality = request.GET['download_quality']\n file_url = download_function.download_generator(session,\n download_quality, title)\n return HttpResponse(file_url)\n",
"step-3": "from django.shortcuts import render, redirect\nfrom . import download_function\nfrom django.http import HttpResponse\n\n\ndef download(request):\n if request.method == 'GET':\n session = request.GET['session']\n title = request.GET['download_title']\n download_quality = request.GET['download_quality']\n file_url = download_function.download_generator(session,\n download_quality, title)\n return HttpResponse(file_url)\n",
"step-4": "from django.shortcuts import render,redirect\nfrom . import download_function\nfrom django.http import HttpResponse\n# Create your views here.\ndef download(request):\n if request.method == \"GET\":\n session = request.GET['session']\n title = request.GET['download_title']\n download_quality = request.GET['download_quality']\n\n file_url = download_function.download_generator(session,download_quality,title)\n return HttpResponse(file_url)\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
# -*- coding: utf-8 -*-
"""
Created on Tue Aug 18 18:53:02 2020
@author: vinhe
I followed below tutorial to push newly created csv to google sheets:
https://medium.com/craftsmenltd/from-csv-to-google-sheet-using-python-ef097cb014f9
"""
import gspread
from oauth2client.service_account import ServiceAccountCredentials
scope = ["https://spreadsheets.google.com/feeds",
"https://www.googleapis.com/auth/spreadsheets",
"https://www.googleapis.com/auth/drive.file",
"https://www.googleapis.com/auth/drive"]
credentials = ServiceAccountCredentials.from_json_keyfile_name('client_secret.json', scope)
client = gspread.authorize(credentials)
spreadsheet = client.open('golf-csv-to-sheets')
with open('C:/users/vinhe/code/projects/golf/golf_stats.csv', 'r') as file_obj:
content = file_obj.read()
client.import_csv(spreadsheet.id, data=content)
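
# Note (per gspread's documentation, worth confirming for the installed
# version): Client.import_csv() replaces the contents of the spreadsheet's
# first worksheet rather than appending, so each run overwrites the previous
# upload.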
|
normal
|
{
"blob_id": "ac2edcd6ea71ebdc5b1df5fd4211632b5d8e2704",
"index": 3019,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nwith open('C:/users/vinhe/code/projects/golf/golf_stats.csv', 'r') as file_obj:\n content = file_obj.read()\n client.import_csv(spreadsheet.id, data=content)\n",
"step-3": "<mask token>\nscope = ['https://spreadsheets.google.com/feeds',\n 'https://www.googleapis.com/auth/spreadsheets',\n 'https://www.googleapis.com/auth/drive.file',\n 'https://www.googleapis.com/auth/drive']\ncredentials = ServiceAccountCredentials.from_json_keyfile_name(\n 'client_secret.json', scope)\nclient = gspread.authorize(credentials)\nspreadsheet = client.open('golf-csv-to-sheets')\nwith open('C:/users/vinhe/code/projects/golf/golf_stats.csv', 'r') as file_obj:\n content = file_obj.read()\n client.import_csv(spreadsheet.id, data=content)\n",
"step-4": "<mask token>\nimport gspread\nfrom oauth2client.service_account import ServiceAccountCredentials\nscope = ['https://spreadsheets.google.com/feeds',\n 'https://www.googleapis.com/auth/spreadsheets',\n 'https://www.googleapis.com/auth/drive.file',\n 'https://www.googleapis.com/auth/drive']\ncredentials = ServiceAccountCredentials.from_json_keyfile_name(\n 'client_secret.json', scope)\nclient = gspread.authorize(credentials)\nspreadsheet = client.open('golf-csv-to-sheets')\nwith open('C:/users/vinhe/code/projects/golf/golf_stats.csv', 'r') as file_obj:\n content = file_obj.read()\n client.import_csv(spreadsheet.id, data=content)\n",
"step-5": "# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Tue Aug 18 18:53:02 2020\r\n\r\n@author: vinhe\r\n\r\nI followed below tutorial to push newly created csv to google sheets:\r\nhttps://medium.com/craftsmenltd/from-csv-to-google-sheet-using-python-ef097cb014f9\r\n\r\n\"\"\"\r\n\r\n\r\nimport gspread\r\nfrom oauth2client.service_account import ServiceAccountCredentials\r\n\r\nscope = [\"https://spreadsheets.google.com/feeds\", \r\n \"https://www.googleapis.com/auth/spreadsheets\",\r\n \"https://www.googleapis.com/auth/drive.file\", \r\n \"https://www.googleapis.com/auth/drive\"]\r\n\r\ncredentials = ServiceAccountCredentials.from_json_keyfile_name('client_secret.json', scope)\r\nclient = gspread.authorize(credentials)\r\n\r\nspreadsheet = client.open('golf-csv-to-sheets')\r\n\r\nwith open('C:/users/vinhe/code/projects/golf/golf_stats.csv', 'r') as file_obj:\r\n content = file_obj.read()\r\n client.import_csv(spreadsheet.id, data=content)\r\n\r\n\r\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
import asyncio
import logging
import os.path
from serial_asyncio import open_serial_connection
from typing import NewType, cast
# Type annotations and converters
AsciiBytes = NewType('AsciiBytes', bytes)
def to_ascii(s: str) -> AsciiBytes:
if s[-1] != '\n':
s += '\n'
return cast(AsciiBytes, s.encode(encoding='ascii'))
class USBHandler:
"""Reads from and writes to the underlying MDB USB board.
Users can either obtain an asyncio.Queue that the handler will push
messages to using listen(), or it can ask for a one-time read using read().
For sending messages, if no reply is expected or there is a poller waiting
for any response, send() can be used, otherwise sendread() will send the
message and wait for a one-time reply. Having a listener and waiting for a
single message at the same time is an error. See the Sniffer class for an
example of both usages."""
def __init__(self):
self.initialized = False
self.run_task = None
self.waiters = {}
self.queues = {}
self.logger = logging.getLogger('.'.join((__name__,
self.__class__.__name__)))
async def initialize(self, device_path: str) -> None:
assert os.path.exists(device_path)
self.logger.info("Initializing USBReader.")
self.logger.debug("Opening serial connection to device at %s",
device_path)
self.serial_reader, self.serial_writer = \
await open_serial_connection(url=device_path, baudrate=115200)
self.initialized = True
self.logger.debug("Connected to serial device at %s.", device_path)
async def _run(self) -> None:
while True:
message = await self.serial_reader.readuntil(separator=b'\r\n')
stripped_message = message.decode(encoding='ascii').rstrip('\n\r')
self.logger.debug("Read '%s' from MDB board.", stripped_message)
message_type = stripped_message[0]
if message_type in self.waiters:
self.waiters[message_type].set_result(stripped_message)
del self.waiters[message_type]
# Lets the waiter run.
await asyncio.sleep(0)
elif message_type in self.queues:
try:
self.queues[message_type].put_nowait(stripped_message)
except asyncio.QueueFull:
self.logger.warning('Queue for message type %s is full. '
'Scheduling the put in another task.',
message_type)
asyncio.create_task(
self.queues[message_type].put(stripped_message))
else:
self.logger.error("Unhandled message: %s", stripped_message)
async def run(self) -> None:
assert self.initialized
self.logger.info('Starting runner.')
self.run_task = asyncio.create_task(self._run())
try:
await self.run_task
except asyncio.CancelledError:
self.logger.info('Runner cancelled.')
async def send(self, message: AsciiBytes, _drain=True) -> None:
assert self.initialized
self.logger.info("Sending message to MDB board: %s", message)
self.serial_writer.write(message)
if _drain:
await self.serial_writer.drain()
self.logger.info("Sent message to MDB board: %s", message)
def _read_internal(self, prefix: str) -> asyncio.Future:
assert len(prefix) == 1
if prefix in self.queues or prefix in self.waiters:
raise RuntimeError(f"Tried to wait for message type {prefix}"
" when there was already a queue listening to "
"all messages")
fut = asyncio.get_running_loop().create_future()
self.waiters[prefix] = fut
return fut
async def sendread(self, message: AsciiBytes, prefix: str) -> str:
await self.send(message, _drain=False)
fut = self._read_internal(prefix)
self.logger.info("Waiting for a single message of type: %s", prefix)
try:
await self.serial_writer.drain()
self.logger.info("Sent message to MDB board: %s", message)
await fut
except asyncio.CancelledError as e:
self.logger.warning("Got cancelled while sending message %r or "
"waiting on prefix %s", message, prefix,
exc_info=e)
del self.waiters[prefix]
raise
self.logger.info("Got message: %s", fut.result())
return fut.result()
async def read(self, prefix: str) -> str:
fut = self._read_internal(prefix)
self.logger.info("Waiting for a single message of type: %s", prefix)
try:
await fut
except asyncio.CancelledError as e:
self.logger.warning("Got cancelled while waiting for message on "
"%s", prefix, exc_info=e)
del self.waiters[prefix]
raise
self.logger.info("Got message: %s", fut.result())
return fut.result()
def listen(self, prefix: str) -> asyncio.Queue:
assert len(prefix) == 1
if prefix in self.waiters or prefix in self.queues:
raise RuntimeError("Tried to get a queue for message type "
f"{prefix} when there was already someone"
"waiting on it.")
self.queues[prefix] = asyncio.Queue()
self.logger.info("Polling for messages of type: %s", prefix)
return self.queues[prefix]
def unlisten(self, prefix: str) -> None:
"""Stops pushing messages with this prefix character to a Queue."""
assert len(prefix) == 1
del self.queues[prefix]
self.logger.info("No longer polling for message type: %s", prefix)
async def shutdown(self):
if not self.initialized:
return
self.logger.info("Shutting down.")
if self.run_task:
self.run_task.cancel()
self.run_task = None
for fut in self.waiters.values():
fut.cancel()
self.serial_writer.close()
await self.serial_writer.wait_closed()
self.logger.info("Shutdown complete.")
self.initialized = False
__all__ = ('USBHandler', 'to_ascii')
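
# Hedged usage sketch (not part of the module): the device path, prefix
# characters and the 'R,RESET' command below are placeholders rather than real
# MDB board commands; they only illustrate the listen() vs. sendread() patterns
# described in the class docstring.
#
#   async def main():
#       handler = USBHandler()
#       await handler.initialize('/dev/ttyUSB0')
#       runner = asyncio.create_task(handler.run())
#       polls = handler.listen('p')                    # queue of every 'p...' message
#       reply = await handler.sendread(to_ascii('R,RESET'), 'r')  # one-shot reply
#       first_poll = await polls.get()
#       handler.unlisten('p')
#       await handler.shutdown()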
|
normal
|
{
"blob_id": "50b630b762251f8646044b234ac4b82b8e4b645b",
"index": 8460,
"step-1": "<mask token>\n\n\nclass USBHandler:\n <mask token>\n\n def __init__(self):\n self.initialized = False\n self.run_task = None\n self.waiters = {}\n self.queues = {}\n self.logger = logging.getLogger('.'.join((__name__, self.__class__.\n __name__)))\n\n async def initialize(self, device_path: str) ->None:\n assert os.path.exists(device_path)\n self.logger.info('Initializing USBReader.')\n self.logger.debug('Opening serial connection to device at %s',\n device_path)\n self.serial_reader, self.serial_writer = await open_serial_connection(\n url=device_path, baudrate=115200)\n self.initialized = True\n self.logger.debug('Connected to serial device at %s.', device_path)\n\n async def _run(self) ->None:\n while True:\n message = await self.serial_reader.readuntil(separator=b'\\r\\n')\n stripped_message = message.decode(encoding='ascii').rstrip('\\n\\r')\n self.logger.debug(\"Read '%s' from MDB board.\", stripped_message)\n message_type = stripped_message[0]\n if message_type in self.waiters:\n self.waiters[message_type].set_result(stripped_message)\n del self.waiters[message_type]\n await asyncio.sleep(0)\n elif message_type in self.queues:\n try:\n self.queues[message_type].put_nowait(stripped_message)\n except asyncio.QueueFull:\n self.logger.warning(\n 'Queue for message type %s is full. Scheduling the put in another task.'\n , message_type)\n asyncio.create_task(self.queues[message_type].put(\n stripped_message))\n else:\n self.logger.error('Unhandled message: %s', stripped_message)\n\n async def run(self) ->None:\n assert self.initialized\n self.logger.info('Starting runner.')\n self.run_task = asyncio.create_task(self._run())\n try:\n await self.run_task\n except asyncio.CancelledError:\n self.logger.info('Runner cancelled.')\n\n async def send(self, message: AsciiBytes, _drain=True) ->None:\n assert self.initialized\n self.logger.info('Sending message to MDB board: %s', message)\n self.serial_writer.write(message)\n if _drain:\n await self.serial_writer.drain()\n self.logger.info('Sent message to MDB board: %s', message)\n <mask token>\n\n async def sendread(self, message: AsciiBytes, prefix: str) ->str:\n await self.send(message, _drain=False)\n fut = self._read_internal(prefix)\n self.logger.info('Waiting for a single message of type: %s', prefix)\n try:\n await self.serial_writer.drain()\n self.logger.info('Sent message to MDB board: %s', message)\n await fut\n except asyncio.CancelledError as e:\n self.logger.warning(\n 'Got cancelled while sending message %r or waiting on prefix %s'\n , message, prefix, exc_info=e)\n del self.waiters[prefix]\n raise\n self.logger.info('Got message: %s', fut.result())\n return fut.result()\n\n async def read(self, prefix: str) ->str:\n fut = self._read_internal(prefix)\n self.logger.info('Waiting for a single message of type: %s', prefix)\n try:\n await fut\n except asyncio.CancelledError as e:\n self.logger.warning('Got cancelled while waiting for message on %s'\n , prefix, exc_info=e)\n del self.waiters[prefix]\n raise\n self.logger.info('Got message: %s', fut.result())\n return fut.result()\n\n def listen(self, prefix: str) ->asyncio.Queue:\n assert len(prefix) == 1\n if prefix in self.waiters or prefix in self.queues:\n raise RuntimeError(\n f'Tried to get a queue for message type {prefix} when there was already someonewaiting on it.'\n )\n self.queues[prefix] = asyncio.Queue()\n self.logger.info('Polling for messages of type: %s', prefix)\n return self.queues[prefix]\n <mask token>\n\n async def shutdown(self):\n if not 
self.initialized:\n return\n self.logger.info('Shutting down.')\n if self.run_task:\n self.run_task.cancel()\n self.run_task = None\n for fut in self.waiters.values():\n fut.cancel()\n self.serial_writer.close()\n await self.serial_writer.wait_closed()\n self.logger.info('Shutdown complete.')\n self.initialized = False\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef to_ascii(s: str) ->AsciiBytes:\n if s[-1] != '\\n':\n s += '\\n'\n return cast(AsciiBytes, s.encode(encoding='ascii'))\n\n\nclass USBHandler:\n \"\"\"Reads from and writes to the underlying MDB USB board.\n\n Users can either obtain an asyncio.Queue that the handler will push\n messages to using listen(), or it can ask for a one-time read using read().\n For sending messages, if no reply is expected or there is a poller waiting\n for any response, send() can be used, otherwise sendread() will send the\n message and wait for a one-time reply. Having a listener and waiting for a\n single message at the same time is an error. See the Sniffer class for an\n example of both usages.\"\"\"\n\n def __init__(self):\n self.initialized = False\n self.run_task = None\n self.waiters = {}\n self.queues = {}\n self.logger = logging.getLogger('.'.join((__name__, self.__class__.\n __name__)))\n\n async def initialize(self, device_path: str) ->None:\n assert os.path.exists(device_path)\n self.logger.info('Initializing USBReader.')\n self.logger.debug('Opening serial connection to device at %s',\n device_path)\n self.serial_reader, self.serial_writer = await open_serial_connection(\n url=device_path, baudrate=115200)\n self.initialized = True\n self.logger.debug('Connected to serial device at %s.', device_path)\n\n async def _run(self) ->None:\n while True:\n message = await self.serial_reader.readuntil(separator=b'\\r\\n')\n stripped_message = message.decode(encoding='ascii').rstrip('\\n\\r')\n self.logger.debug(\"Read '%s' from MDB board.\", stripped_message)\n message_type = stripped_message[0]\n if message_type in self.waiters:\n self.waiters[message_type].set_result(stripped_message)\n del self.waiters[message_type]\n await asyncio.sleep(0)\n elif message_type in self.queues:\n try:\n self.queues[message_type].put_nowait(stripped_message)\n except asyncio.QueueFull:\n self.logger.warning(\n 'Queue for message type %s is full. 
Scheduling the put in another task.'\n , message_type)\n asyncio.create_task(self.queues[message_type].put(\n stripped_message))\n else:\n self.logger.error('Unhandled message: %s', stripped_message)\n\n async def run(self) ->None:\n assert self.initialized\n self.logger.info('Starting runner.')\n self.run_task = asyncio.create_task(self._run())\n try:\n await self.run_task\n except asyncio.CancelledError:\n self.logger.info('Runner cancelled.')\n\n async def send(self, message: AsciiBytes, _drain=True) ->None:\n assert self.initialized\n self.logger.info('Sending message to MDB board: %s', message)\n self.serial_writer.write(message)\n if _drain:\n await self.serial_writer.drain()\n self.logger.info('Sent message to MDB board: %s', message)\n\n def _read_internal(self, prefix: str) ->asyncio.Future:\n assert len(prefix) == 1\n if prefix in self.queues or prefix in self.waiters:\n raise RuntimeError(\n f'Tried to wait for message type {prefix} when there was already a queue listening to all messages'\n )\n fut = asyncio.get_running_loop().create_future()\n self.waiters[prefix] = fut\n return fut\n\n async def sendread(self, message: AsciiBytes, prefix: str) ->str:\n await self.send(message, _drain=False)\n fut = self._read_internal(prefix)\n self.logger.info('Waiting for a single message of type: %s', prefix)\n try:\n await self.serial_writer.drain()\n self.logger.info('Sent message to MDB board: %s', message)\n await fut\n except asyncio.CancelledError as e:\n self.logger.warning(\n 'Got cancelled while sending message %r or waiting on prefix %s'\n , message, prefix, exc_info=e)\n del self.waiters[prefix]\n raise\n self.logger.info('Got message: %s', fut.result())\n return fut.result()\n\n async def read(self, prefix: str) ->str:\n fut = self._read_internal(prefix)\n self.logger.info('Waiting for a single message of type: %s', prefix)\n try:\n await fut\n except asyncio.CancelledError as e:\n self.logger.warning('Got cancelled while waiting for message on %s'\n , prefix, exc_info=e)\n del self.waiters[prefix]\n raise\n self.logger.info('Got message: %s', fut.result())\n return fut.result()\n\n def listen(self, prefix: str) ->asyncio.Queue:\n assert len(prefix) == 1\n if prefix in self.waiters or prefix in self.queues:\n raise RuntimeError(\n f'Tried to get a queue for message type {prefix} when there was already someonewaiting on it.'\n )\n self.queues[prefix] = asyncio.Queue()\n self.logger.info('Polling for messages of type: %s', prefix)\n return self.queues[prefix]\n\n def unlisten(self, prefix: str) ->None:\n \"\"\"Stops pushing messages with this prefix character to a Queue.\"\"\"\n assert len(prefix) == 1\n del self.queues[prefix]\n self.logger.info('No longer polling for message type: %s', prefix)\n\n async def shutdown(self):\n if not self.initialized:\n return\n self.logger.info('Shutting down.')\n if self.run_task:\n self.run_task.cancel()\n self.run_task = None\n for fut in self.waiters.values():\n fut.cancel()\n self.serial_writer.close()\n await self.serial_writer.wait_closed()\n self.logger.info('Shutdown complete.')\n self.initialized = False\n\n\n<mask token>\n",
"step-3": "<mask token>\nAsciiBytes = NewType('AsciiBytes', bytes)\n\n\ndef to_ascii(s: str) ->AsciiBytes:\n if s[-1] != '\\n':\n s += '\\n'\n return cast(AsciiBytes, s.encode(encoding='ascii'))\n\n\nclass USBHandler:\n \"\"\"Reads from and writes to the underlying MDB USB board.\n\n Users can either obtain an asyncio.Queue that the handler will push\n messages to using listen(), or it can ask for a one-time read using read().\n For sending messages, if no reply is expected or there is a poller waiting\n for any response, send() can be used, otherwise sendread() will send the\n message and wait for a one-time reply. Having a listener and waiting for a\n single message at the same time is an error. See the Sniffer class for an\n example of both usages.\"\"\"\n\n def __init__(self):\n self.initialized = False\n self.run_task = None\n self.waiters = {}\n self.queues = {}\n self.logger = logging.getLogger('.'.join((__name__, self.__class__.\n __name__)))\n\n async def initialize(self, device_path: str) ->None:\n assert os.path.exists(device_path)\n self.logger.info('Initializing USBReader.')\n self.logger.debug('Opening serial connection to device at %s',\n device_path)\n self.serial_reader, self.serial_writer = await open_serial_connection(\n url=device_path, baudrate=115200)\n self.initialized = True\n self.logger.debug('Connected to serial device at %s.', device_path)\n\n async def _run(self) ->None:\n while True:\n message = await self.serial_reader.readuntil(separator=b'\\r\\n')\n stripped_message = message.decode(encoding='ascii').rstrip('\\n\\r')\n self.logger.debug(\"Read '%s' from MDB board.\", stripped_message)\n message_type = stripped_message[0]\n if message_type in self.waiters:\n self.waiters[message_type].set_result(stripped_message)\n del self.waiters[message_type]\n await asyncio.sleep(0)\n elif message_type in self.queues:\n try:\n self.queues[message_type].put_nowait(stripped_message)\n except asyncio.QueueFull:\n self.logger.warning(\n 'Queue for message type %s is full. 
Scheduling the put in another task.'\n , message_type)\n asyncio.create_task(self.queues[message_type].put(\n stripped_message))\n else:\n self.logger.error('Unhandled message: %s', stripped_message)\n\n async def run(self) ->None:\n assert self.initialized\n self.logger.info('Starting runner.')\n self.run_task = asyncio.create_task(self._run())\n try:\n await self.run_task\n except asyncio.CancelledError:\n self.logger.info('Runner cancelled.')\n\n async def send(self, message: AsciiBytes, _drain=True) ->None:\n assert self.initialized\n self.logger.info('Sending message to MDB board: %s', message)\n self.serial_writer.write(message)\n if _drain:\n await self.serial_writer.drain()\n self.logger.info('Sent message to MDB board: %s', message)\n\n def _read_internal(self, prefix: str) ->asyncio.Future:\n assert len(prefix) == 1\n if prefix in self.queues or prefix in self.waiters:\n raise RuntimeError(\n f'Tried to wait for message type {prefix} when there was already a queue listening to all messages'\n )\n fut = asyncio.get_running_loop().create_future()\n self.waiters[prefix] = fut\n return fut\n\n async def sendread(self, message: AsciiBytes, prefix: str) ->str:\n await self.send(message, _drain=False)\n fut = self._read_internal(prefix)\n self.logger.info('Waiting for a single message of type: %s', prefix)\n try:\n await self.serial_writer.drain()\n self.logger.info('Sent message to MDB board: %s', message)\n await fut\n except asyncio.CancelledError as e:\n self.logger.warning(\n 'Got cancelled while sending message %r or waiting on prefix %s'\n , message, prefix, exc_info=e)\n del self.waiters[prefix]\n raise\n self.logger.info('Got message: %s', fut.result())\n return fut.result()\n\n async def read(self, prefix: str) ->str:\n fut = self._read_internal(prefix)\n self.logger.info('Waiting for a single message of type: %s', prefix)\n try:\n await fut\n except asyncio.CancelledError as e:\n self.logger.warning('Got cancelled while waiting for message on %s'\n , prefix, exc_info=e)\n del self.waiters[prefix]\n raise\n self.logger.info('Got message: %s', fut.result())\n return fut.result()\n\n def listen(self, prefix: str) ->asyncio.Queue:\n assert len(prefix) == 1\n if prefix in self.waiters or prefix in self.queues:\n raise RuntimeError(\n f'Tried to get a queue for message type {prefix} when there was already someonewaiting on it.'\n )\n self.queues[prefix] = asyncio.Queue()\n self.logger.info('Polling for messages of type: %s', prefix)\n return self.queues[prefix]\n\n def unlisten(self, prefix: str) ->None:\n \"\"\"Stops pushing messages with this prefix character to a Queue.\"\"\"\n assert len(prefix) == 1\n del self.queues[prefix]\n self.logger.info('No longer polling for message type: %s', prefix)\n\n async def shutdown(self):\n if not self.initialized:\n return\n self.logger.info('Shutting down.')\n if self.run_task:\n self.run_task.cancel()\n self.run_task = None\n for fut in self.waiters.values():\n fut.cancel()\n self.serial_writer.close()\n await self.serial_writer.wait_closed()\n self.logger.info('Shutdown complete.')\n self.initialized = False\n\n\n__all__ = USBHandler, to_ascii\n",
"step-4": "import asyncio\nimport logging\nimport os.path\nfrom serial_asyncio import open_serial_connection\nfrom typing import NewType, cast\nAsciiBytes = NewType('AsciiBytes', bytes)\n\n\ndef to_ascii(s: str) ->AsciiBytes:\n if s[-1] != '\\n':\n s += '\\n'\n return cast(AsciiBytes, s.encode(encoding='ascii'))\n\n\nclass USBHandler:\n \"\"\"Reads from and writes to the underlying MDB USB board.\n\n Users can either obtain an asyncio.Queue that the handler will push\n messages to using listen(), or it can ask for a one-time read using read().\n For sending messages, if no reply is expected or there is a poller waiting\n for any response, send() can be used, otherwise sendread() will send the\n message and wait for a one-time reply. Having a listener and waiting for a\n single message at the same time is an error. See the Sniffer class for an\n example of both usages.\"\"\"\n\n def __init__(self):\n self.initialized = False\n self.run_task = None\n self.waiters = {}\n self.queues = {}\n self.logger = logging.getLogger('.'.join((__name__, self.__class__.\n __name__)))\n\n async def initialize(self, device_path: str) ->None:\n assert os.path.exists(device_path)\n self.logger.info('Initializing USBReader.')\n self.logger.debug('Opening serial connection to device at %s',\n device_path)\n self.serial_reader, self.serial_writer = await open_serial_connection(\n url=device_path, baudrate=115200)\n self.initialized = True\n self.logger.debug('Connected to serial device at %s.', device_path)\n\n async def _run(self) ->None:\n while True:\n message = await self.serial_reader.readuntil(separator=b'\\r\\n')\n stripped_message = message.decode(encoding='ascii').rstrip('\\n\\r')\n self.logger.debug(\"Read '%s' from MDB board.\", stripped_message)\n message_type = stripped_message[0]\n if message_type in self.waiters:\n self.waiters[message_type].set_result(stripped_message)\n del self.waiters[message_type]\n await asyncio.sleep(0)\n elif message_type in self.queues:\n try:\n self.queues[message_type].put_nowait(stripped_message)\n except asyncio.QueueFull:\n self.logger.warning(\n 'Queue for message type %s is full. 
Scheduling the put in another task.'\n , message_type)\n asyncio.create_task(self.queues[message_type].put(\n stripped_message))\n else:\n self.logger.error('Unhandled message: %s', stripped_message)\n\n async def run(self) ->None:\n assert self.initialized\n self.logger.info('Starting runner.')\n self.run_task = asyncio.create_task(self._run())\n try:\n await self.run_task\n except asyncio.CancelledError:\n self.logger.info('Runner cancelled.')\n\n async def send(self, message: AsciiBytes, _drain=True) ->None:\n assert self.initialized\n self.logger.info('Sending message to MDB board: %s', message)\n self.serial_writer.write(message)\n if _drain:\n await self.serial_writer.drain()\n self.logger.info('Sent message to MDB board: %s', message)\n\n def _read_internal(self, prefix: str) ->asyncio.Future:\n assert len(prefix) == 1\n if prefix in self.queues or prefix in self.waiters:\n raise RuntimeError(\n f'Tried to wait for message type {prefix} when there was already a queue listening to all messages'\n )\n fut = asyncio.get_running_loop().create_future()\n self.waiters[prefix] = fut\n return fut\n\n async def sendread(self, message: AsciiBytes, prefix: str) ->str:\n await self.send(message, _drain=False)\n fut = self._read_internal(prefix)\n self.logger.info('Waiting for a single message of type: %s', prefix)\n try:\n await self.serial_writer.drain()\n self.logger.info('Sent message to MDB board: %s', message)\n await fut\n except asyncio.CancelledError as e:\n self.logger.warning(\n 'Got cancelled while sending message %r or waiting on prefix %s'\n , message, prefix, exc_info=e)\n del self.waiters[prefix]\n raise\n self.logger.info('Got message: %s', fut.result())\n return fut.result()\n\n async def read(self, prefix: str) ->str:\n fut = self._read_internal(prefix)\n self.logger.info('Waiting for a single message of type: %s', prefix)\n try:\n await fut\n except asyncio.CancelledError as e:\n self.logger.warning('Got cancelled while waiting for message on %s'\n , prefix, exc_info=e)\n del self.waiters[prefix]\n raise\n self.logger.info('Got message: %s', fut.result())\n return fut.result()\n\n def listen(self, prefix: str) ->asyncio.Queue:\n assert len(prefix) == 1\n if prefix in self.waiters or prefix in self.queues:\n raise RuntimeError(\n f'Tried to get a queue for message type {prefix} when there was already someonewaiting on it.'\n )\n self.queues[prefix] = asyncio.Queue()\n self.logger.info('Polling for messages of type: %s', prefix)\n return self.queues[prefix]\n\n def unlisten(self, prefix: str) ->None:\n \"\"\"Stops pushing messages with this prefix character to a Queue.\"\"\"\n assert len(prefix) == 1\n del self.queues[prefix]\n self.logger.info('No longer polling for message type: %s', prefix)\n\n async def shutdown(self):\n if not self.initialized:\n return\n self.logger.info('Shutting down.')\n if self.run_task:\n self.run_task.cancel()\n self.run_task = None\n for fut in self.waiters.values():\n fut.cancel()\n self.serial_writer.close()\n await self.serial_writer.wait_closed()\n self.logger.info('Shutdown complete.')\n self.initialized = False\n\n\n__all__ = USBHandler, to_ascii\n",
"step-5": "import asyncio\nimport logging\nimport os.path\nfrom serial_asyncio import open_serial_connection\nfrom typing import NewType, cast\n\n# Type annotations and converters\nAsciiBytes = NewType('AsciiBytes', bytes)\n\n\ndef to_ascii(s: str) -> AsciiBytes:\n if s[-1] != '\\n':\n s += '\\n'\n return cast(AsciiBytes, s.encode(encoding='ascii'))\n\n\nclass USBHandler:\n \"\"\"Reads from and writes to the underlying MDB USB board.\n\n Users can either obtain an asyncio.Queue that the handler will push\n messages to using listen(), or it can ask for a one-time read using read().\n For sending messages, if no reply is expected or there is a poller waiting\n for any response, send() can be used, otherwise sendread() will send the\n message and wait for a one-time reply. Having a listener and waiting for a\n single message at the same time is an error. See the Sniffer class for an\n example of both usages.\"\"\"\n\n def __init__(self):\n self.initialized = False\n self.run_task = None\n self.waiters = {}\n self.queues = {}\n self.logger = logging.getLogger('.'.join((__name__,\n self.__class__.__name__)))\n\n async def initialize(self, device_path: str) -> None:\n assert os.path.exists(device_path)\n self.logger.info(\"Initializing USBReader.\")\n self.logger.debug(\"Opening serial connection to device at %s\",\n device_path)\n self.serial_reader, self.serial_writer = \\\n await open_serial_connection(url=device_path, baudrate=115200)\n self.initialized = True\n self.logger.debug(\"Connected to serial device at %s.\", device_path)\n\n async def _run(self) -> None:\n while True:\n message = await self.serial_reader.readuntil(separator=b'\\r\\n')\n stripped_message = message.decode(encoding='ascii').rstrip('\\n\\r')\n self.logger.debug(\"Read '%s' from MDB board.\", stripped_message)\n message_type = stripped_message[0]\n if message_type in self.waiters:\n self.waiters[message_type].set_result(stripped_message)\n del self.waiters[message_type]\n # Lets the waiter run.\n await asyncio.sleep(0)\n elif message_type in self.queues:\n try:\n self.queues[message_type].put_nowait(stripped_message)\n except asyncio.QueueFull:\n self.logger.warning('Queue for message type %s is full. 
'\n 'Scheduling the put in another task.',\n message_type)\n asyncio.create_task(\n self.queues[message_type].put(stripped_message))\n else:\n self.logger.error(\"Unhandled message: %s\", stripped_message)\n\n async def run(self) -> None:\n assert self.initialized\n self.logger.info('Starting runner.')\n self.run_task = asyncio.create_task(self._run())\n try:\n await self.run_task\n except asyncio.CancelledError:\n self.logger.info('Runner cancelled.')\n\n async def send(self, message: AsciiBytes, _drain=True) -> None:\n assert self.initialized\n self.logger.info(\"Sending message to MDB board: %s\", message)\n self.serial_writer.write(message)\n if _drain:\n await self.serial_writer.drain()\n self.logger.info(\"Sent message to MDB board: %s\", message)\n\n def _read_internal(self, prefix: str) -> asyncio.Future:\n assert len(prefix) == 1\n if prefix in self.queues or prefix in self.waiters:\n raise RuntimeError(f\"Tried to wait for message type {prefix}\"\n \" when there was already a queue listening to \"\n \"all messages\")\n fut = asyncio.get_running_loop().create_future()\n self.waiters[prefix] = fut\n return fut\n\n async def sendread(self, message: AsciiBytes, prefix: str) -> str:\n await self.send(message, _drain=False)\n fut = self._read_internal(prefix)\n self.logger.info(\"Waiting for a single message of type: %s\", prefix)\n try:\n await self.serial_writer.drain()\n self.logger.info(\"Sent message to MDB board: %s\", message)\n await fut\n except asyncio.CancelledError as e:\n self.logger.warning(\"Got cancelled while sending message %r or \"\n \"waiting on prefix %s\", message, prefix,\n exc_info=e)\n del self.waiters[prefix]\n raise\n self.logger.info(\"Got message: %s\", fut.result())\n return fut.result()\n\n async def read(self, prefix: str) -> str:\n fut = self._read_internal(prefix)\n self.logger.info(\"Waiting for a single message of type: %s\", prefix)\n try:\n await fut\n except asyncio.CancelledError as e:\n self.logger.warning(\"Got cancelled while waiting for message on \"\n \"%s\", prefix, exc_info=e)\n del self.waiters[prefix]\n raise\n self.logger.info(\"Got message: %s\", fut.result())\n return fut.result()\n\n def listen(self, prefix: str) -> asyncio.Queue:\n assert len(prefix) == 1\n if prefix in self.waiters or prefix in self.queues:\n raise RuntimeError(\"Tried to get a queue for message type \"\n f\"{prefix} when there was already someone\"\n \"waiting on it.\")\n self.queues[prefix] = asyncio.Queue()\n self.logger.info(\"Polling for messages of type: %s\", prefix)\n return self.queues[prefix]\n\n def unlisten(self, prefix: str) -> None:\n \"\"\"Stops pushing messages with this prefix character to a Queue.\"\"\"\n assert len(prefix) == 1\n del self.queues[prefix]\n self.logger.info(\"No longer polling for message type: %s\", prefix)\n\n async def shutdown(self):\n if not self.initialized:\n return\n self.logger.info(\"Shutting down.\")\n if self.run_task:\n self.run_task.cancel()\n self.run_task = None\n for fut in self.waiters.values():\n fut.cancel()\n self.serial_writer.close()\n await self.serial_writer.wait_closed()\n self.logger.info(\"Shutdown complete.\")\n self.initialized = False\n\n\n__all__ = (USBHandler, to_ascii)\n",
"step-ids": [
3,
7,
8,
9,
10
]
}
|
[
3,
7,
8,
9,
10
] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# sphinx_gallery_thumbnail_number = 3
import matplotlib.pyplot as plt
import numpy as np
from matplotlib.ticker import NullFormatter # useful for `logit` scale
import matplotlib.ticker as ticker
import matplotlib as mpl
mpl.style.use('classic')
# Data for plotting
chi2=np.loadtxt(r'Lam0/buffer/chi2.dat')
chi4=np.loadtxt(r'Lam0/buffer/chi4.dat')
# Create figure
fig=plt.figure(figsize=(9, 3.5))
ax1=fig.add_subplot(121)
ax1.plot(chi2,color='r',linestyle='--',linewidth=2,markersize=5,label=r'$\chi^B_2$')
ax1.axis([0,300,-0.05,0.2])
ax1.set_xlabel(r'$T\,[\mathrm{MeV}]$', fontsize=15, color='black')
ax1.set_ylabel(r'$\chi_2$', fontsize=15, color='black')
for label in ax1.xaxis.get_ticklabels():
label.set_fontsize(10)
for label in ax1.yaxis.get_ticklabels():
label.set_fontsize(10)
# Plot two
ax2=fig.add_subplot(122)
ax2.plot(chi4,color='k',linestyle='-',linewidth=2,markersize=5,label=r'$\chi^B_4$')
ax2.axis([0,300,-0.15,0.2])
ax2.set_xlabel(r'$T\,[\mathrm{MeV}]$', fontsize=15, color='black')
ax2.set_ylabel(r'$\chi_4$', fontsize=15, color='black')
ax2.legend(loc=0,fontsize=7.3,frameon=False,shadow=True,handlelength=3.,borderpad=0.5,borderaxespad=1)
for label in ax2.xaxis.get_ticklabels():
label.set_fontsize(10)
for label in ax2.yaxis.get_ticklabels():
label.set_fontsize(10)
fig.subplots_adjust(top=0.9, bottom=0.15, left=0.1, right=0.95, hspace=0.35,
wspace=0.2)
fig.savefig("chi.pdf")
#plt.show()
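
# Note: chi2.dat and chi4.dat are assumed here to be single-column series, so
# they are plotted against their row index, which the axis labels interpret as
# temperature in MeV (hence the 0-300 x-range).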
|
normal
|
{
"blob_id": "66904cbe3e57d9cc1ee385cd8a4c1ba3767626bd",
"index": 923,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nmpl.style.use('classic')\n<mask token>\nax1.plot(chi2, color='r', linestyle='--', linewidth=2, markersize=5, label=\n '$\\\\chi^B_2$')\nax1.axis([0, 300, -0.05, 0.2])\nax1.set_xlabel('$T\\\\,[\\\\mathrm{MeV}]$', fontsize=15, color='black')\nax1.set_ylabel('$\\\\chi_2$', fontsize=15, color='black')\nfor label in ax1.xaxis.get_ticklabels():\n label.set_fontsize(10)\nfor label in ax1.yaxis.get_ticklabels():\n label.set_fontsize(10)\n<mask token>\nax2.plot(chi4, color='k', linestyle='-', linewidth=2, markersize=5, label=\n '$\\\\chi^B_4$')\nax2.axis([0, 300, -0.15, 0.2])\nax2.set_xlabel('$T\\\\,[\\\\mathrm{MeV}]$', fontsize=15, color='black')\nax2.set_ylabel('$\\\\chi_4$', fontsize=15, color='black')\nax2.legend(loc=0, fontsize=7.3, frameon=False, shadow=True, handlelength=\n 3.0, borderpad=0.5, borderaxespad=1)\nfor label in ax2.xaxis.get_ticklabels():\n label.set_fontsize(10)\nfor label in ax2.yaxis.get_ticklabels():\n label.set_fontsize(10)\nfig.subplots_adjust(top=0.9, bottom=0.15, left=0.1, right=0.95, hspace=0.35,\n wspace=0.2)\nfig.savefig('chi.pdf')\n",
"step-3": "<mask token>\nmpl.style.use('classic')\nchi2 = np.loadtxt('Lam0/buffer/chi2.dat')\nchi4 = np.loadtxt('Lam0/buffer/chi4.dat')\nfig = plt.figure(figsize=(9, 3.5))\nax1 = fig.add_subplot(121)\nax1.plot(chi2, color='r', linestyle='--', linewidth=2, markersize=5, label=\n '$\\\\chi^B_2$')\nax1.axis([0, 300, -0.05, 0.2])\nax1.set_xlabel('$T\\\\,[\\\\mathrm{MeV}]$', fontsize=15, color='black')\nax1.set_ylabel('$\\\\chi_2$', fontsize=15, color='black')\nfor label in ax1.xaxis.get_ticklabels():\n label.set_fontsize(10)\nfor label in ax1.yaxis.get_ticklabels():\n label.set_fontsize(10)\nax2 = fig.add_subplot(122)\nax2.plot(chi4, color='k', linestyle='-', linewidth=2, markersize=5, label=\n '$\\\\chi^B_4$')\nax2.axis([0, 300, -0.15, 0.2])\nax2.set_xlabel('$T\\\\,[\\\\mathrm{MeV}]$', fontsize=15, color='black')\nax2.set_ylabel('$\\\\chi_4$', fontsize=15, color='black')\nax2.legend(loc=0, fontsize=7.3, frameon=False, shadow=True, handlelength=\n 3.0, borderpad=0.5, borderaxespad=1)\nfor label in ax2.xaxis.get_ticklabels():\n label.set_fontsize(10)\nfor label in ax2.yaxis.get_ticklabels():\n label.set_fontsize(10)\nfig.subplots_adjust(top=0.9, bottom=0.15, left=0.1, right=0.95, hspace=0.35,\n wspace=0.2)\nfig.savefig('chi.pdf')\n",
"step-4": "import matplotlib.pyplot as plt\nimport numpy as np\nfrom matplotlib.ticker import NullFormatter\nimport matplotlib.ticker as ticker\nimport matplotlib as mpl\nmpl.style.use('classic')\nchi2 = np.loadtxt('Lam0/buffer/chi2.dat')\nchi4 = np.loadtxt('Lam0/buffer/chi4.dat')\nfig = plt.figure(figsize=(9, 3.5))\nax1 = fig.add_subplot(121)\nax1.plot(chi2, color='r', linestyle='--', linewidth=2, markersize=5, label=\n '$\\\\chi^B_2$')\nax1.axis([0, 300, -0.05, 0.2])\nax1.set_xlabel('$T\\\\,[\\\\mathrm{MeV}]$', fontsize=15, color='black')\nax1.set_ylabel('$\\\\chi_2$', fontsize=15, color='black')\nfor label in ax1.xaxis.get_ticklabels():\n label.set_fontsize(10)\nfor label in ax1.yaxis.get_ticklabels():\n label.set_fontsize(10)\nax2 = fig.add_subplot(122)\nax2.plot(chi4, color='k', linestyle='-', linewidth=2, markersize=5, label=\n '$\\\\chi^B_4$')\nax2.axis([0, 300, -0.15, 0.2])\nax2.set_xlabel('$T\\\\,[\\\\mathrm{MeV}]$', fontsize=15, color='black')\nax2.set_ylabel('$\\\\chi_4$', fontsize=15, color='black')\nax2.legend(loc=0, fontsize=7.3, frameon=False, shadow=True, handlelength=\n 3.0, borderpad=0.5, borderaxespad=1)\nfor label in ax2.xaxis.get_ticklabels():\n label.set_fontsize(10)\nfor label in ax2.yaxis.get_ticklabels():\n label.set_fontsize(10)\nfig.subplots_adjust(top=0.9, bottom=0.15, left=0.1, right=0.95, hspace=0.35,\n wspace=0.2)\nfig.savefig('chi.pdf')\n",
"step-5": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# sphinx_gallery_thumbnail_number = 3\n\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom matplotlib.ticker import NullFormatter # useful for `logit` scale\nimport matplotlib.ticker as ticker\nimport matplotlib as mpl\n\nmpl.style.use('classic')\n\n\n# Data for plotting\n\n\nchi2=np.loadtxt(r'Lam0/buffer/chi2.dat')\nchi4=np.loadtxt(r'Lam0/buffer/chi4.dat')\n\n\n# Create figure\nfig=plt.figure(figsize=(9, 3.5))\nax1=fig.add_subplot(121)\n\nax1.plot(chi2,color='r',linestyle='--',linewidth=2,markersize=5,label=r'$\\chi^B_2$')\n\n\nax1.axis([0,300,-0.05,0.2])\n\nax1.set_xlabel('$T\\,[\\mathrm{MeV}]$', fontsize=15, color='black')\nax1.set_ylabel(r'$\\chi_2$', fontsize=15, color='black')\n\n\n\nfor label in ax1.xaxis.get_ticklabels():\n label.set_fontsize(10)\nfor label in ax1.yaxis.get_ticklabels():\n label.set_fontsize(10)\n\n\n# Plot two\nax2=fig.add_subplot(122)\n\nax2.plot(chi4,color='k',linestyle='-',linewidth=2,markersize=5,label=r'$\\chi^B_4$')\n\nax2.axis([0,300,-0.15,0.2])\n\nax2.set_xlabel('$T\\,[\\mathrm{MeV}]$', fontsize=15, color='black')\nax2.set_ylabel(r'$\\chi_4$', fontsize=15, color='black')\nax2.legend(loc=0,fontsize=7.3,frameon=False,shadow=True,handlelength=3.,borderpad=0.5,borderaxespad=1)\n\nfor label in ax2.xaxis.get_ticklabels():\n label.set_fontsize(10)\nfor label in ax2.yaxis.get_ticklabels():\n label.set_fontsize(10)\n\n\n\nfig.subplots_adjust(top=0.9, bottom=0.15, left=0.1, right=0.95, hspace=0.35,\n wspace=0.2)\n \n\nfig.savefig(\"chi.pdf\")\n\n#plt.show()\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
from base.SpellingDictionary import SpellingDictionary
from datastructure.trie import NeedMore
class WordFinder:
def __init__(self):
self.spellingDictionary = SpellingDictionary()
#self.dictionary.add(["toad", "to", "do", "dot"])
self.spellingDictionary.populateDictionary()
print"Done building dictionary"
def findWords(self, stem, unusedLetters):
words = []
# children = self.spellingDictionary.dictionary.children(stem)
# nextLetters = children.keys();
print "stem = ", stem, " unused letters = ", unusedLetters
        #apparently children/keys is worthless and only gets immediate children with results, i.e. dict.children('to')
        #doesn't give us the 'a' because 'toa' isn't a word
for nextLetter in unusedLetters:
#is stem+nextLetter a word?
try:
newWord = self.spellingDictionary.dictionary[stem + nextLetter]
words.append(newWord)
except KeyError:
#nop
print()
except NeedMore:
#nop
print()
#brute force!
#create new stem, remove letter from unused letter, infinite recursion!
newUnusedLetters = list(unusedLetters)
newUnusedLetters.remove(nextLetter)
newStem = stem + nextLetter
if newUnusedLetters != None:
#recursion!
print "recursion"
words.extend(self.findWords(newStem, newUnusedLetters))
        return words
def execute(self, tiles):
wordList = []
print "tiles = ", tiles
for letter in tiles:
unusedLetters = list(tiles)
unusedLetters.remove(letter)
print"calling findWords with ", letter, " ", unusedLetters
foundWords = self.findWords(letter, unusedLetters)
wordList.extend(foundWords)
return wordList
def testToad():
wordFinder = WordFinder()
words = wordFinder.execute(["t", "o", "a", "d"])
print "words: "
for word in words:
print word
def testLettersT():
wordFinder = WordFinder()
words = wordFinder.findWords('t', ['o','a','d'])
print "words: "
for word in words:
print word
def printTOAD():
wordFinder = WordFinder()
print wordFinder.spellingDictionary.dictionary['t']
print wordFinder.spellingDictionary.dictionary['to']
print wordFinder.spellingDictionary.dictionary['toa']
print wordFinder.spellingDictionary.dictionary['toad']
def printTOADChildren():
wordFinder = WordFinder()
print wordFinder.spellingDictionary.dictionary.children('t')
print wordFinder.spellingDictionary.dictionary.children('to')
print wordFinder.spellingDictionary.dictionary.children('toa')
print wordFinder.spellingDictionary.dictionary.children('toad')
#testToad()
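
# Hedged example (assumes the loaded dictionary contains the usual small
# English words): WordFinder().execute(['t', 'o', 'a', 'd']) explores every
# ordering of the tiles and would return words such as 'to', 'do', 'dot'
# and 'toad'.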
|
normal
|
{
"blob_id": "3be3edbecfbb602d4c4a853f006a3a6f4b992fd3",
"index": 1728,
"step-1": "from base.SpellingDictionary import SpellingDictionary\r\nfrom datastructure.trie import NeedMore\r\nclass WordFinder:\r\n def __init__(self):\r\n self.spellingDictionary = SpellingDictionary()\r\n #self.dictionary.add([\"toad\", \"to\", \"do\", \"dot\"])\r\n self.spellingDictionary.populateDictionary()\r\n print\"Done building dictionary\"\r\n\r\n def findWords(self, stem, unusedLetters):\r\n words = []\r\n# children = self.spellingDictionary.dictionary.children(stem)\r\n# nextLetters = children.keys();\r\n print \"stem = \", stem, \" unused letters = \", unusedLetters\r\n #apparent children/keys is worthless and only gets immediate children with results ie dict.children('to') \r\n #doesn't give us the 'a' becuase 'toa' isn't a word\r\n for nextLetter in unusedLetters:\r\n #is stem+nextLetter a word?\r\n try:\r\n newWord = self.spellingDictionary.dictionary[stem + nextLetter]\r\n words.append(newWord)\r\n except KeyError:\r\n #nop\r\n print()\r\n except NeedMore:\r\n #nop\r\n print()\r\n \r\n #brute force!\r\n #create new stem, remove letter from unused letter, infinite recursion!\r\n newUnusedLetters = list(unusedLetters)\r\n newUnusedLetters.remove(nextLetter)\r\n newStem = stem + nextLetter\r\n if newUnusedLetters != None:\r\n #recursion!\r\n print \"recursion\"\r\n words.extend(self.findWords(newStem, newUnusedLetters)) \r\n return words;\r\n\r\n def execute(self, tiles):\r\n wordList = []\r\n print \"tiles = \", tiles\r\n for letter in tiles:\r\n unusedLetters = list(tiles)\r\n unusedLetters.remove(letter)\r\n print\"calling findWords with \", letter, \" \", unusedLetters\r\n foundWords = self.findWords(letter, unusedLetters)\r\n wordList.extend(foundWords)\r\n return wordList\r\n \r\ndef testToad():\r\n wordFinder = WordFinder()\r\n words = wordFinder.execute([\"t\", \"o\", \"a\", \"d\"])\r\n print \"words: \"\r\n for word in words:\r\n print word\r\n \r\ndef testLettersT():\r\n wordFinder = WordFinder()\r\n words = wordFinder.findWords('t', ['o','a','d'])\r\n print \"words: \"\r\n for word in words:\r\n print word\r\n\r\ndef printTOAD():\r\n wordFinder = WordFinder()\r\n print wordFinder.spellingDictionary.dictionary['t']\r\n print wordFinder.spellingDictionary.dictionary['to']\r\n print wordFinder.spellingDictionary.dictionary['toa'] \r\n print wordFinder.spellingDictionary.dictionary['toad']\r\n\r\ndef printTOADChildren():\r\n wordFinder = WordFinder()\r\n print wordFinder.spellingDictionary.dictionary.children('t')\r\n print wordFinder.spellingDictionary.dictionary.children('to')\r\n print wordFinder.spellingDictionary.dictionary.children('toa') \r\n print wordFinder.spellingDictionary.dictionary.children('toad')\r\n \r\n \r\n#testToad()\r\n\r\n\r\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
from enum import Enum
# Genie
from genie.decorator import managedattribute
from genie.conf.base import Base, \
DeviceFeature, \
LinkFeature, \
Interface
import genie.conf.base.attributes
from genie.libs.conf.base.feature import consolidate_feature_args
from genie.conf.base.attributes import SubAttributes, \
SubAttributesDict, \
AttributesHelper, \
KeyedSubAttributes
from genie.conf.base.attributes import InterfaceSubAttributes
from genie.libs import parser
from genie.abstract import Lookup
from genie.ops.base import Base as ops_Base
from genie.ops.base import Context
__all__ = ('Keychains', )
# Structure Hierarchy:
# Keychains
# +--DeviceAttributes
# +-- KeyChainAttributes
# | +-- KeyIdAttributes
# +-- KeyChainMacSecAttributes
# | +-- KeyIdAttributes
# +-- KeyChainTunEncAttributes
# +-- KeyIdAttributes
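# (A usage sketch of this hierarchy appears at the end of this module.)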
class Keychains(DeviceFeature):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
# =============================================
# Device attributes
# =============================================
class DeviceAttributes(genie.conf.base.attributes.DeviceSubAttributes):
# KeyChainAttributes
class KeyChainAttributes(KeyedSubAttributes):
def __init__(self, parent, key):
self.key_chain = key
super().__init__(parent)
# KeyIdAttributes
class KeyIdAttributes(KeyedSubAttributes):
def __init__(self, parent, key):
self.key_id = key
super().__init__(parent)
key_id_attr = managedattribute(name='key_id_attr',
read_only=True,
doc=KeyIdAttributes.__doc__)
@key_id_attr.initter
def key_id_attr(self):
return SubAttributesDict(self.KeyIdAttributes, parent=self)
keychain_attr = managedattribute(name='keychain_attr',
read_only=True,
doc=KeyChainAttributes.__doc__)
@keychain_attr.initter
def keychain_attr(self):
return SubAttributesDict(self.KeyChainAttributes, parent=self)
# KeyChainMacSecAttributes
class KeyChainMacSecAttributes(KeyedSubAttributes):
def __init__(self, parent, key):
self.ms_key_chain = key
super().__init__(parent)
# KeyIdAttributes
class KeyIdAttributes(KeyedSubAttributes):
def __init__(self, parent, key):
self.key_id = key
super().__init__(parent)
key_id_attr = managedattribute(name='key_id_attr',
read_only=True,
doc=KeyIdAttributes.__doc__)
@key_id_attr.initter
def key_id_attr(self):
return SubAttributesDict(self.KeyIdAttributes, parent=self)
ms_keychain_attr = managedattribute(
name='ms_keychain_attr',
read_only=True,
doc=KeyChainMacSecAttributes.__doc__)
@ms_keychain_attr.initter
def ms_keychain_attr(self):
return SubAttributesDict(self.KeyChainMacSecAttributes,
parent=self)
# KeyChainTunEncAttributes
class KeyChainTunEncAttributes(KeyedSubAttributes):
def __init__(self, parent, key):
self.te_key_chain = key
super().__init__(parent)
# KeyIdAttributes
class KeyIdAttributes(KeyedSubAttributes):
def __init__(self, parent, key):
self.key_id = key
super().__init__(parent)
key_id_attr = managedattribute(name='key_id_attr',
read_only=True,
doc=KeyIdAttributes.__doc__)
@key_id_attr.initter
def key_id_attr(self):
return SubAttributesDict(self.KeyIdAttributes, parent=self)
te_keychain_attr = managedattribute(
name='te_keychain_attr',
read_only=True,
doc=KeyChainTunEncAttributes.__doc__)
@te_keychain_attr.initter
def te_keychain_attr(self):
return SubAttributesDict(self.KeyChainTunEncAttributes,
parent=self)
device_attr = managedattribute(name='device_attr',
read_only=True,
doc=DeviceAttributes.__doc__)
@device_attr.initter
def device_attr(self):
return SubAttributesDict(self.DeviceAttributes, parent=self)
# ============ managedattributes ============#
key_id = managedattribute(name='key_id',
default=None,
type=(None, managedattribute.test_istype(str)),
doc='Configure a key')
key_enc_type = managedattribute(name='key_enc_type',
default=None,
type=managedattribute.test_istype(int),
doc='Set key encode type')
key_string = managedattribute(name='key_string',
default=None,
type=(None,
managedattribute.test_istype(str)),
doc='Set key string')
class CRYPTO_ALGO(Enum):
aes_128_cmac = 'aes-128-cmac'
aes_256_cmac = 'aes-256-cmac'
crypto_algo = managedattribute(
name='crypto_algo',
default=None,
type=(None, CRYPTO_ALGO),
doc='Set cryptographic authentication algorithm')
lifetime_start = managedattribute(
name='lifetime_start',
default=None,
type=(None, managedattribute.test_istype(str)),
doc='Set start time for sending lifetime of encryption key')
lifetime_duration = managedattribute(
name='lifetime_duration',
default=None,
type=(None, managedattribute.test_istype(int)),
doc='Set key lifetime duration')
# =========================================================
# build_config
# =========================================================
def build_config(self,
devices=None,
interfaces=None,
links=None,
apply=True,
attributes=None,
**kwargs):
attributes = AttributesHelper(self, attributes)
cfgs = {}
devices, interfaces, links = \
consolidate_feature_args(self, devices, interfaces, links)
for key, sub, attributes2 in attributes.mapping_items('device_attr',
keys=devices,
sort=True):
cfgs[key] = sub.build_config(apply=False, attributes=attributes2)
if apply:
for device_name, cfg in sorted(cfgs.items()):
self.testbed.config_on_devices(cfg, fail_invalid=True)
else:
return cfgs
def build_unconfig(self,
devices=None,
interfaces=None,
links=None,
apply=True,
attributes=None,
**kwargs):
attributes = AttributesHelper(self, attributes)
cfgs = {}
devices, interfaces, links = \
consolidate_feature_args(self, devices, interfaces, links)
for key, sub, attributes2 in attributes.mapping_items('device_attr',
keys=devices,
sort=True):
cfgs[key] = sub.build_unconfig(apply=False, attributes=attributes2)
if apply:
for device_name, cfg in sorted(cfgs.items()):
self.testbed.config_on_devices(cfg, fail_invalid=True)
else:
return cfgs
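

# A short usage sketch of the attribute hierarchy documented at the top of this
# module (illustration only, not part of the original file). The key-chain and
# key names below are made up, `device` is assumed to be a genie Device object
# from a testbed, and the rendered CLI depends on the platform implementation.
def _example_keychains_usage(device):
    keychains = Keychains()
    device.add_feature(keychains)

    # Keychains -> DeviceAttributes -> KeyChainAttributes -> KeyIdAttributes
    key = keychains.device_attr[device].keychain_attr['bgp-keys'].key_id_attr['1']
    key.key_string = 'my-secret'
    key.crypto_algo = Keychains.CRYPTO_ALGO.aes_128_cmac
    key.lifetime_start = '00:00:00 Jan 1 2021'
    key.lifetime_duration = 3600

    # Render the configuration without pushing it to the device
    return keychains.build_config(apply=False)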
|
normal
|
{
"blob_id": "6d2581b83a2839dcbc644ca572b05b158d80b58d",
"index": 2479,
"step-1": "<mask token>\n\n\nclass Keychains(DeviceFeature):\n <mask token>\n\n\n class DeviceAttributes(genie.conf.base.attributes.DeviceSubAttributes):\n\n\n class KeyChainAttributes(KeyedSubAttributes):\n\n def __init__(self, parent, key):\n self.key_chain = key\n super().__init__(parent)\n\n\n class KeyIdAttributes(KeyedSubAttributes):\n\n def __init__(self, parent, key):\n self.key_id = key\n super().__init__(parent)\n key_id_attr = managedattribute(name='key_id_attr', read_only=\n True, doc=KeyIdAttributes.__doc__)\n\n @key_id_attr.initter\n def key_id_attr(self):\n return SubAttributesDict(self.KeyIdAttributes, parent=self)\n keychain_attr = managedattribute(name='keychain_attr', read_only=\n True, doc=KeyChainAttributes.__doc__)\n\n @keychain_attr.initter\n def keychain_attr(self):\n return SubAttributesDict(self.KeyChainAttributes, parent=self)\n\n\n class KeyChainMacSecAttributes(KeyedSubAttributes):\n\n def __init__(self, parent, key):\n self.ms_key_chain = key\n super().__init__(parent)\n\n\n class KeyIdAttributes(KeyedSubAttributes):\n\n def __init__(self, parent, key):\n self.key_id = key\n super().__init__(parent)\n key_id_attr = managedattribute(name='key_id_attr', read_only=\n True, doc=KeyIdAttributes.__doc__)\n\n @key_id_attr.initter\n def key_id_attr(self):\n return SubAttributesDict(self.KeyIdAttributes, parent=self)\n ms_keychain_attr = managedattribute(name='ms_keychain_attr',\n read_only=True, doc=KeyChainMacSecAttributes.__doc__)\n\n @ms_keychain_attr.initter\n def ms_keychain_attr(self):\n return SubAttributesDict(self.KeyChainMacSecAttributes, parent=self\n )\n\n\n class KeyChainTunEncAttributes(KeyedSubAttributes):\n\n def __init__(self, parent, key):\n self.te_key_chain = key\n super().__init__(parent)\n\n\n class KeyIdAttributes(KeyedSubAttributes):\n\n def __init__(self, parent, key):\n self.key_id = key\n super().__init__(parent)\n key_id_attr = managedattribute(name='key_id_attr', read_only=\n True, doc=KeyIdAttributes.__doc__)\n\n @key_id_attr.initter\n def key_id_attr(self):\n return SubAttributesDict(self.KeyIdAttributes, parent=self)\n te_keychain_attr = managedattribute(name='te_keychain_attr',\n read_only=True, doc=KeyChainTunEncAttributes.__doc__)\n\n @te_keychain_attr.initter\n def te_keychain_attr(self):\n return SubAttributesDict(self.KeyChainTunEncAttributes, parent=self\n )\n <mask token>\n\n @device_attr.initter\n def device_attr(self):\n return SubAttributesDict(self.DeviceAttributes, parent=self)\n <mask token>\n <mask token>\n <mask token>\n\n\n class CRYPTO_ALGO(Enum):\n aes_128_cmac = 'aes-128-cmac'\n aes_256_cmac = 'aes-256-cmac'\n <mask token>\n <mask token>\n <mask token>\n\n def build_config(self, devices=None, interfaces=None, links=None, apply\n =True, attributes=None, **kwargs):\n attributes = AttributesHelper(self, attributes)\n cfgs = {}\n devices, interfaces, links = consolidate_feature_args(self, devices,\n interfaces, links)\n for key, sub, attributes2 in attributes.mapping_items('device_attr',\n keys=devices, sort=True):\n cfgs[key] = sub.build_config(apply=False, attributes=attributes2)\n if apply:\n for device_name, cfg in sorted(cfgs.items()):\n self.testbed.config_on_devices(cfg, fail_invalid=True)\n else:\n return cfgs\n\n def build_unconfig(self, devices=None, interfaces=None, links=None,\n apply=True, attributes=None, **kwargs):\n attributes = AttributesHelper(self, attributes)\n cfgs = {}\n devices, interfaces, links = consolidate_feature_args(self, devices,\n interfaces, links)\n for key, sub, attributes2 in 
attributes.mapping_items('device_attr',\n keys=devices, sort=True):\n cfgs[key] = sub.build_unconfig(apply=False, attributes=attributes2)\n if apply:\n for device_name, cfg in sorted(cfgs.items()):\n self.testbed.config_on_devices(cfg, fail_invalid=True)\n else:\n return cfgs\n",
"step-2": "<mask token>\n\n\nclass Keychains(DeviceFeature):\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n\n\n class DeviceAttributes(genie.conf.base.attributes.DeviceSubAttributes):\n\n\n class KeyChainAttributes(KeyedSubAttributes):\n\n def __init__(self, parent, key):\n self.key_chain = key\n super().__init__(parent)\n\n\n class KeyIdAttributes(KeyedSubAttributes):\n\n def __init__(self, parent, key):\n self.key_id = key\n super().__init__(parent)\n key_id_attr = managedattribute(name='key_id_attr', read_only=\n True, doc=KeyIdAttributes.__doc__)\n\n @key_id_attr.initter\n def key_id_attr(self):\n return SubAttributesDict(self.KeyIdAttributes, parent=self)\n keychain_attr = managedattribute(name='keychain_attr', read_only=\n True, doc=KeyChainAttributes.__doc__)\n\n @keychain_attr.initter\n def keychain_attr(self):\n return SubAttributesDict(self.KeyChainAttributes, parent=self)\n\n\n class KeyChainMacSecAttributes(KeyedSubAttributes):\n\n def __init__(self, parent, key):\n self.ms_key_chain = key\n super().__init__(parent)\n\n\n class KeyIdAttributes(KeyedSubAttributes):\n\n def __init__(self, parent, key):\n self.key_id = key\n super().__init__(parent)\n key_id_attr = managedattribute(name='key_id_attr', read_only=\n True, doc=KeyIdAttributes.__doc__)\n\n @key_id_attr.initter\n def key_id_attr(self):\n return SubAttributesDict(self.KeyIdAttributes, parent=self)\n ms_keychain_attr = managedattribute(name='ms_keychain_attr',\n read_only=True, doc=KeyChainMacSecAttributes.__doc__)\n\n @ms_keychain_attr.initter\n def ms_keychain_attr(self):\n return SubAttributesDict(self.KeyChainMacSecAttributes, parent=self\n )\n\n\n class KeyChainTunEncAttributes(KeyedSubAttributes):\n\n def __init__(self, parent, key):\n self.te_key_chain = key\n super().__init__(parent)\n\n\n class KeyIdAttributes(KeyedSubAttributes):\n\n def __init__(self, parent, key):\n self.key_id = key\n super().__init__(parent)\n key_id_attr = managedattribute(name='key_id_attr', read_only=\n True, doc=KeyIdAttributes.__doc__)\n\n @key_id_attr.initter\n def key_id_attr(self):\n return SubAttributesDict(self.KeyIdAttributes, parent=self)\n te_keychain_attr = managedattribute(name='te_keychain_attr',\n read_only=True, doc=KeyChainTunEncAttributes.__doc__)\n\n @te_keychain_attr.initter\n def te_keychain_attr(self):\n return SubAttributesDict(self.KeyChainTunEncAttributes, parent=self\n )\n device_attr = managedattribute(name='device_attr', read_only=True, doc=\n DeviceAttributes.__doc__)\n\n @device_attr.initter\n def device_attr(self):\n return SubAttributesDict(self.DeviceAttributes, parent=self)\n key_id = managedattribute(name='key_id', default=None, type=(None,\n managedattribute.test_istype(str)), doc='Configure a key')\n key_enc_type = managedattribute(name='key_enc_type', default=None, type\n =managedattribute.test_istype(int), doc='Set key encode type')\n key_string = managedattribute(name='key_string', default=None, type=(\n None, managedattribute.test_istype(str)), doc='Set key string')\n\n\n class CRYPTO_ALGO(Enum):\n aes_128_cmac = 'aes-128-cmac'\n aes_256_cmac = 'aes-256-cmac'\n crypto_algo = managedattribute(name='crypto_algo', default=None, type=(\n None, CRYPTO_ALGO), doc='Set cryptographic authentication algorithm')\n lifetime_start = managedattribute(name='lifetime_start', default=None,\n type=(None, managedattribute.test_istype(str)), doc=\n 'Set start time for sending lifetime of encryption key')\n lifetime_duration = managedattribute(name='lifetime_duration', default=\n None, 
type=(None, managedattribute.test_istype(int)), doc=\n 'Set key lifetime duration')\n\n def build_config(self, devices=None, interfaces=None, links=None, apply\n =True, attributes=None, **kwargs):\n attributes = AttributesHelper(self, attributes)\n cfgs = {}\n devices, interfaces, links = consolidate_feature_args(self, devices,\n interfaces, links)\n for key, sub, attributes2 in attributes.mapping_items('device_attr',\n keys=devices, sort=True):\n cfgs[key] = sub.build_config(apply=False, attributes=attributes2)\n if apply:\n for device_name, cfg in sorted(cfgs.items()):\n self.testbed.config_on_devices(cfg, fail_invalid=True)\n else:\n return cfgs\n\n def build_unconfig(self, devices=None, interfaces=None, links=None,\n apply=True, attributes=None, **kwargs):\n attributes = AttributesHelper(self, attributes)\n cfgs = {}\n devices, interfaces, links = consolidate_feature_args(self, devices,\n interfaces, links)\n for key, sub, attributes2 in attributes.mapping_items('device_attr',\n keys=devices, sort=True):\n cfgs[key] = sub.build_unconfig(apply=False, attributes=attributes2)\n if apply:\n for device_name, cfg in sorted(cfgs.items()):\n self.testbed.config_on_devices(cfg, fail_invalid=True)\n else:\n return cfgs\n",
"step-3": "<mask token>\n__all__ = 'Keychains',\n\n\nclass Keychains(DeviceFeature):\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n\n\n class DeviceAttributes(genie.conf.base.attributes.DeviceSubAttributes):\n\n\n class KeyChainAttributes(KeyedSubAttributes):\n\n def __init__(self, parent, key):\n self.key_chain = key\n super().__init__(parent)\n\n\n class KeyIdAttributes(KeyedSubAttributes):\n\n def __init__(self, parent, key):\n self.key_id = key\n super().__init__(parent)\n key_id_attr = managedattribute(name='key_id_attr', read_only=\n True, doc=KeyIdAttributes.__doc__)\n\n @key_id_attr.initter\n def key_id_attr(self):\n return SubAttributesDict(self.KeyIdAttributes, parent=self)\n keychain_attr = managedattribute(name='keychain_attr', read_only=\n True, doc=KeyChainAttributes.__doc__)\n\n @keychain_attr.initter\n def keychain_attr(self):\n return SubAttributesDict(self.KeyChainAttributes, parent=self)\n\n\n class KeyChainMacSecAttributes(KeyedSubAttributes):\n\n def __init__(self, parent, key):\n self.ms_key_chain = key\n super().__init__(parent)\n\n\n class KeyIdAttributes(KeyedSubAttributes):\n\n def __init__(self, parent, key):\n self.key_id = key\n super().__init__(parent)\n key_id_attr = managedattribute(name='key_id_attr', read_only=\n True, doc=KeyIdAttributes.__doc__)\n\n @key_id_attr.initter\n def key_id_attr(self):\n return SubAttributesDict(self.KeyIdAttributes, parent=self)\n ms_keychain_attr = managedattribute(name='ms_keychain_attr',\n read_only=True, doc=KeyChainMacSecAttributes.__doc__)\n\n @ms_keychain_attr.initter\n def ms_keychain_attr(self):\n return SubAttributesDict(self.KeyChainMacSecAttributes, parent=self\n )\n\n\n class KeyChainTunEncAttributes(KeyedSubAttributes):\n\n def __init__(self, parent, key):\n self.te_key_chain = key\n super().__init__(parent)\n\n\n class KeyIdAttributes(KeyedSubAttributes):\n\n def __init__(self, parent, key):\n self.key_id = key\n super().__init__(parent)\n key_id_attr = managedattribute(name='key_id_attr', read_only=\n True, doc=KeyIdAttributes.__doc__)\n\n @key_id_attr.initter\n def key_id_attr(self):\n return SubAttributesDict(self.KeyIdAttributes, parent=self)\n te_keychain_attr = managedattribute(name='te_keychain_attr',\n read_only=True, doc=KeyChainTunEncAttributes.__doc__)\n\n @te_keychain_attr.initter\n def te_keychain_attr(self):\n return SubAttributesDict(self.KeyChainTunEncAttributes, parent=self\n )\n device_attr = managedattribute(name='device_attr', read_only=True, doc=\n DeviceAttributes.__doc__)\n\n @device_attr.initter\n def device_attr(self):\n return SubAttributesDict(self.DeviceAttributes, parent=self)\n key_id = managedattribute(name='key_id', default=None, type=(None,\n managedattribute.test_istype(str)), doc='Configure a key')\n key_enc_type = managedattribute(name='key_enc_type', default=None, type\n =managedattribute.test_istype(int), doc='Set key encode type')\n key_string = managedattribute(name='key_string', default=None, type=(\n None, managedattribute.test_istype(str)), doc='Set key string')\n\n\n class CRYPTO_ALGO(Enum):\n aes_128_cmac = 'aes-128-cmac'\n aes_256_cmac = 'aes-256-cmac'\n crypto_algo = managedattribute(name='crypto_algo', default=None, type=(\n None, CRYPTO_ALGO), doc='Set cryptographic authentication algorithm')\n lifetime_start = managedattribute(name='lifetime_start', default=None,\n type=(None, managedattribute.test_istype(str)), doc=\n 'Set start time for sending lifetime of encryption key')\n lifetime_duration = 
managedattribute(name='lifetime_duration', default=\n None, type=(None, managedattribute.test_istype(int)), doc=\n 'Set key lifetime duration')\n\n def build_config(self, devices=None, interfaces=None, links=None, apply\n =True, attributes=None, **kwargs):\n attributes = AttributesHelper(self, attributes)\n cfgs = {}\n devices, interfaces, links = consolidate_feature_args(self, devices,\n interfaces, links)\n for key, sub, attributes2 in attributes.mapping_items('device_attr',\n keys=devices, sort=True):\n cfgs[key] = sub.build_config(apply=False, attributes=attributes2)\n if apply:\n for device_name, cfg in sorted(cfgs.items()):\n self.testbed.config_on_devices(cfg, fail_invalid=True)\n else:\n return cfgs\n\n def build_unconfig(self, devices=None, interfaces=None, links=None,\n apply=True, attributes=None, **kwargs):\n attributes = AttributesHelper(self, attributes)\n cfgs = {}\n devices, interfaces, links = consolidate_feature_args(self, devices,\n interfaces, links)\n for key, sub, attributes2 in attributes.mapping_items('device_attr',\n keys=devices, sort=True):\n cfgs[key] = sub.build_unconfig(apply=False, attributes=attributes2)\n if apply:\n for device_name, cfg in sorted(cfgs.items()):\n self.testbed.config_on_devices(cfg, fail_invalid=True)\n else:\n return cfgs\n",
"step-4": "from enum import Enum\nfrom genie.decorator import managedattribute\nfrom genie.conf.base import Base, DeviceFeature, LinkFeature, Interface\nimport genie.conf.base.attributes\nfrom genie.libs.conf.base.feature import consolidate_feature_args\nfrom genie.conf.base.attributes import SubAttributes, SubAttributesDict, AttributesHelper, KeyedSubAttributes\nfrom genie.conf.base.attributes import InterfaceSubAttributes\nfrom genie.libs import parser\nfrom genie.abstract import Lookup\nfrom genie.ops.base import Base as ops_Base\nfrom genie.ops.base import Context\n__all__ = 'Keychains',\n\n\nclass Keychains(DeviceFeature):\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n\n\n class DeviceAttributes(genie.conf.base.attributes.DeviceSubAttributes):\n\n\n class KeyChainAttributes(KeyedSubAttributes):\n\n def __init__(self, parent, key):\n self.key_chain = key\n super().__init__(parent)\n\n\n class KeyIdAttributes(KeyedSubAttributes):\n\n def __init__(self, parent, key):\n self.key_id = key\n super().__init__(parent)\n key_id_attr = managedattribute(name='key_id_attr', read_only=\n True, doc=KeyIdAttributes.__doc__)\n\n @key_id_attr.initter\n def key_id_attr(self):\n return SubAttributesDict(self.KeyIdAttributes, parent=self)\n keychain_attr = managedattribute(name='keychain_attr', read_only=\n True, doc=KeyChainAttributes.__doc__)\n\n @keychain_attr.initter\n def keychain_attr(self):\n return SubAttributesDict(self.KeyChainAttributes, parent=self)\n\n\n class KeyChainMacSecAttributes(KeyedSubAttributes):\n\n def __init__(self, parent, key):\n self.ms_key_chain = key\n super().__init__(parent)\n\n\n class KeyIdAttributes(KeyedSubAttributes):\n\n def __init__(self, parent, key):\n self.key_id = key\n super().__init__(parent)\n key_id_attr = managedattribute(name='key_id_attr', read_only=\n True, doc=KeyIdAttributes.__doc__)\n\n @key_id_attr.initter\n def key_id_attr(self):\n return SubAttributesDict(self.KeyIdAttributes, parent=self)\n ms_keychain_attr = managedattribute(name='ms_keychain_attr',\n read_only=True, doc=KeyChainMacSecAttributes.__doc__)\n\n @ms_keychain_attr.initter\n def ms_keychain_attr(self):\n return SubAttributesDict(self.KeyChainMacSecAttributes, parent=self\n )\n\n\n class KeyChainTunEncAttributes(KeyedSubAttributes):\n\n def __init__(self, parent, key):\n self.te_key_chain = key\n super().__init__(parent)\n\n\n class KeyIdAttributes(KeyedSubAttributes):\n\n def __init__(self, parent, key):\n self.key_id = key\n super().__init__(parent)\n key_id_attr = managedattribute(name='key_id_attr', read_only=\n True, doc=KeyIdAttributes.__doc__)\n\n @key_id_attr.initter\n def key_id_attr(self):\n return SubAttributesDict(self.KeyIdAttributes, parent=self)\n te_keychain_attr = managedattribute(name='te_keychain_attr',\n read_only=True, doc=KeyChainTunEncAttributes.__doc__)\n\n @te_keychain_attr.initter\n def te_keychain_attr(self):\n return SubAttributesDict(self.KeyChainTunEncAttributes, parent=self\n )\n device_attr = managedattribute(name='device_attr', read_only=True, doc=\n DeviceAttributes.__doc__)\n\n @device_attr.initter\n def device_attr(self):\n return SubAttributesDict(self.DeviceAttributes, parent=self)\n key_id = managedattribute(name='key_id', default=None, type=(None,\n managedattribute.test_istype(str)), doc='Configure a key')\n key_enc_type = managedattribute(name='key_enc_type', default=None, type\n =managedattribute.test_istype(int), doc='Set key encode type')\n key_string = managedattribute(name='key_string', default=None, type=(\n 
None, managedattribute.test_istype(str)), doc='Set key string')\n\n\n class CRYPTO_ALGO(Enum):\n aes_128_cmac = 'aes-128-cmac'\n aes_256_cmac = 'aes-256-cmac'\n crypto_algo = managedattribute(name='crypto_algo', default=None, type=(\n None, CRYPTO_ALGO), doc='Set cryptographic authentication algorithm')\n lifetime_start = managedattribute(name='lifetime_start', default=None,\n type=(None, managedattribute.test_istype(str)), doc=\n 'Set start time for sending lifetime of encryption key')\n lifetime_duration = managedattribute(name='lifetime_duration', default=\n None, type=(None, managedattribute.test_istype(int)), doc=\n 'Set key lifetime duration')\n\n def build_config(self, devices=None, interfaces=None, links=None, apply\n =True, attributes=None, **kwargs):\n attributes = AttributesHelper(self, attributes)\n cfgs = {}\n devices, interfaces, links = consolidate_feature_args(self, devices,\n interfaces, links)\n for key, sub, attributes2 in attributes.mapping_items('device_attr',\n keys=devices, sort=True):\n cfgs[key] = sub.build_config(apply=False, attributes=attributes2)\n if apply:\n for device_name, cfg in sorted(cfgs.items()):\n self.testbed.config_on_devices(cfg, fail_invalid=True)\n else:\n return cfgs\n\n def build_unconfig(self, devices=None, interfaces=None, links=None,\n apply=True, attributes=None, **kwargs):\n attributes = AttributesHelper(self, attributes)\n cfgs = {}\n devices, interfaces, links = consolidate_feature_args(self, devices,\n interfaces, links)\n for key, sub, attributes2 in attributes.mapping_items('device_attr',\n keys=devices, sort=True):\n cfgs[key] = sub.build_unconfig(apply=False, attributes=attributes2)\n if apply:\n for device_name, cfg in sorted(cfgs.items()):\n self.testbed.config_on_devices(cfg, fail_invalid=True)\n else:\n return cfgs\n",
"step-5": "from enum import Enum\n\n# Genie\nfrom genie.decorator import managedattribute\nfrom genie.conf.base import Base, \\\n DeviceFeature, \\\n LinkFeature, \\\n Interface\nimport genie.conf.base.attributes\nfrom genie.libs.conf.base.feature import consolidate_feature_args\nfrom genie.conf.base.attributes import SubAttributes, \\\n SubAttributesDict, \\\n AttributesHelper, \\\n KeyedSubAttributes\nfrom genie.conf.base.attributes import InterfaceSubAttributes\nfrom genie.libs import parser\nfrom genie.abstract import Lookup\nfrom genie.ops.base import Base as ops_Base\nfrom genie.ops.base import Context\n\n__all__ = ('Keychains', )\n# Structure Hierarchy:\n# Keychains\n# +--DeviceAttributes\n# +-- KeyChainAttributes\n# | +-- KeyIdAttributes\n# +-- KeyChainMacSecAttributes\n# | +-- KeyIdAttributes\n# +-- KeyChainTunEncAttributes\n# +-- KeyIdAttributes\n\n\nclass Keychains(DeviceFeature):\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n\n # =============================================\n # Device attributes\n # =============================================\n class DeviceAttributes(genie.conf.base.attributes.DeviceSubAttributes):\n\n # KeyChainAttributes\n class KeyChainAttributes(KeyedSubAttributes):\n def __init__(self, parent, key):\n self.key_chain = key\n super().__init__(parent)\n\n # KeyIdAttributes\n class KeyIdAttributes(KeyedSubAttributes):\n def __init__(self, parent, key):\n self.key_id = key\n super().__init__(parent)\n\n key_id_attr = managedattribute(name='key_id_attr',\n read_only=True,\n doc=KeyIdAttributes.__doc__)\n\n @key_id_attr.initter\n def key_id_attr(self):\n return SubAttributesDict(self.KeyIdAttributes, parent=self)\n\n keychain_attr = managedattribute(name='keychain_attr',\n read_only=True,\n doc=KeyChainAttributes.__doc__)\n\n @keychain_attr.initter\n def keychain_attr(self):\n return SubAttributesDict(self.KeyChainAttributes, parent=self)\n\n # KeyChainMacSecAttributes\n class KeyChainMacSecAttributes(KeyedSubAttributes):\n def __init__(self, parent, key):\n self.ms_key_chain = key\n super().__init__(parent)\n\n # KeyIdAttributes\n class KeyIdAttributes(KeyedSubAttributes):\n def __init__(self, parent, key):\n self.key_id = key\n super().__init__(parent)\n\n key_id_attr = managedattribute(name='key_id_attr',\n read_only=True,\n doc=KeyIdAttributes.__doc__)\n\n @key_id_attr.initter\n def key_id_attr(self):\n return SubAttributesDict(self.KeyIdAttributes, parent=self)\n\n ms_keychain_attr = managedattribute(\n name='ms_keychain_attr',\n read_only=True,\n doc=KeyChainMacSecAttributes.__doc__)\n\n @ms_keychain_attr.initter\n def ms_keychain_attr(self):\n return SubAttributesDict(self.KeyChainMacSecAttributes,\n parent=self)\n\n # KeyChainTunEncAttributes\n class KeyChainTunEncAttributes(KeyedSubAttributes):\n def __init__(self, parent, key):\n self.te_key_chain = key\n super().__init__(parent)\n\n # KeyIdAttributes\n class KeyIdAttributes(KeyedSubAttributes):\n def __init__(self, parent, key):\n self.key_id = key\n super().__init__(parent)\n\n key_id_attr = managedattribute(name='key_id_attr',\n read_only=True,\n doc=KeyIdAttributes.__doc__)\n\n @key_id_attr.initter\n def key_id_attr(self):\n return SubAttributesDict(self.KeyIdAttributes, parent=self)\n\n te_keychain_attr = managedattribute(\n name='te_keychain_attr',\n read_only=True,\n doc=KeyChainTunEncAttributes.__doc__)\n\n @te_keychain_attr.initter\n def te_keychain_attr(self):\n return SubAttributesDict(self.KeyChainTunEncAttributes,\n parent=self)\n\n device_attr = 
managedattribute(name='device_attr',\n read_only=True,\n doc=DeviceAttributes.__doc__)\n\n @device_attr.initter\n def device_attr(self):\n return SubAttributesDict(self.DeviceAttributes, parent=self)\n\n # ============ managedattributes ============#\n key_id = managedattribute(name='key_id',\n default=None,\n type=(None, managedattribute.test_istype(str)),\n doc='Configure a key')\n\n key_enc_type = managedattribute(name='key_enc_type',\n default=None,\n type=managedattribute.test_istype(int),\n doc='Set key encode type')\n\n key_string = managedattribute(name='key_string',\n default=None,\n type=(None,\n managedattribute.test_istype(str)),\n doc='Set key string')\n\n class CRYPTO_ALGO(Enum):\n aes_128_cmac = 'aes-128-cmac'\n aes_256_cmac = 'aes-256-cmac'\n\n crypto_algo = managedattribute(\n name='crypto_algo',\n default=None,\n type=(None, CRYPTO_ALGO),\n doc='Set cryptographic authentication algorithm')\n\n lifetime_start = managedattribute(\n name='lifetime_start',\n default=None,\n type=(None, managedattribute.test_istype(str)),\n doc='Set start time for sending lifetime of encryption key')\n\n lifetime_duration = managedattribute(\n name='lifetime_duration',\n default=None,\n type=(None, managedattribute.test_istype(int)),\n doc='Set key lifetime duration')\n\n # =========================================================\n # build_config\n # =========================================================\n def build_config(self,\n devices=None,\n interfaces=None,\n links=None,\n apply=True,\n attributes=None,\n **kwargs):\n attributes = AttributesHelper(self, attributes)\n cfgs = {}\n\n devices, interfaces, links = \\\n consolidate_feature_args(self, devices, interfaces, links)\n\n for key, sub, attributes2 in attributes.mapping_items('device_attr',\n keys=devices,\n sort=True):\n cfgs[key] = sub.build_config(apply=False, attributes=attributes2)\n if apply:\n for device_name, cfg in sorted(cfgs.items()):\n self.testbed.config_on_devices(cfg, fail_invalid=True)\n else:\n return cfgs\n\n def build_unconfig(self,\n devices=None,\n interfaces=None,\n links=None,\n apply=True,\n attributes=None,\n **kwargs):\n attributes = AttributesHelper(self, attributes)\n cfgs = {}\n\n devices, interfaces, links = \\\n consolidate_feature_args(self, devices, interfaces, links)\n for key, sub, attributes2 in attributes.mapping_items('device_attr',\n keys=devices,\n sort=True):\n cfgs[key] = sub.build_unconfig(apply=False, attributes=attributes2)\n\n if apply:\n for device_name, cfg in sorted(cfgs.items()):\n self.testbed.config_on_devices(cfg, fail_invalid=True)\n else:\n return cfgs\n",
"step-ids": [
4,
6,
7,
8,
9
]
}
|
[
4,
6,
7,
8,
9
] |
#!/usr/bin/env python3
"""Shows how to call C code from python"""
__appname__ = "myccalc.py"
__author__ = "Joseph Palmer <[email protected]>"
__version__ = "0.0.1"
__license__ = "License for this code/"
__date__ = "Dec-2018"
## imports ##
import os
import ctypes
# Load the C library into Python. A path with a directory component (here an
# absolute path) is needed: a bare "libmycalc.so" would only be searched for in
# the standard system library locations, not in the current working directory.
so_filepath = "{}/libmycalc.so".format(os.getcwd())
ctypes.cdll.LoadLibrary(so_filepath)
myccalc = ctypes.CDLL(so_filepath)
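# note: ctypes.CDLL() loads the library on its own, so the LoadLibrary() call
# above is redundant (though harmless)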
# make a simpler name for the mycalc.add_floats
add_floats = myccalc.add_floats
# declare the argument and return types; without these, ctypes cannot convert
# the Python float arguments and would assume an int return value
add_floats.argtypes = [ctypes.c_float, ctypes.c_float]
add_floats.restype = ctypes.c_float
# the function can now be used
x = 1.2
y = 3.3
a = add_floats(x, y)
print("The sum of %.1f and %.1f is %.1f" % (x, y, a))
# we can do the same for others
sf = myccalc.subtract_floats
sf.argtypes = [ctypes.c_float, ctypes.c_float]
sf.restype = ctypes.c_float
b = sf(y, x)
print("Subtracting %.1f from %.1f is %.1f" % (x, y, b))
|
normal
|
{
"blob_id": "12ecfd2750f79fd19355665b6e57c2103a3cac3e",
"index": 4257,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nctypes.cdll.LoadLibrary(so_filepath)\n<mask token>\nprint('The sum of %.1f and %.1f is %.1f' % (x, y, a))\n<mask token>\nprint('Subtracting %.1f from %.1f is %.1f' % (x, y, b))\n",
"step-3": "<mask token>\n__appname__ = 'myccalc.py'\n__author__ = 'Joseph Palmer <[email protected]>'\n__version__ = '0.0.1'\n__license__ = 'License for this code/'\n__date__ = 'Dec-2018'\n<mask token>\nso_filepath = '{}/libmycalc.so'.format(os.getcwd())\nctypes.cdll.LoadLibrary(so_filepath)\nmyccalc = ctypes.CDLL(so_filepath)\nadd_floats = myccalc.add_floats\nadd_floats.argtypes = [ctypes.c_float, ctypes.c_float]\nadd_floats.restype = ctypes.c_float\nx = 1.2\ny = 3.3\na = add_floats(x, y)\nprint('The sum of %.1f and %.1f is %.1f' % (x, y, a))\nsf = myccalc.subtract_floats\nsf.argtypes = [ctypes.c_float, ctypes.c_float]\nsf.restype = ctypes.c_float\nb = sf(y, x)\nprint('Subtracting %.1f from %.1f is %.1f' % (x, y, b))\n",
"step-4": "<mask token>\n__appname__ = 'myccalc.py'\n__author__ = 'Joseph Palmer <[email protected]>'\n__version__ = '0.0.1'\n__license__ = 'License for this code/'\n__date__ = 'Dec-2018'\nimport os\nimport ctypes\nso_filepath = '{}/libmycalc.so'.format(os.getcwd())\nctypes.cdll.LoadLibrary(so_filepath)\nmyccalc = ctypes.CDLL(so_filepath)\nadd_floats = myccalc.add_floats\nadd_floats.argtypes = [ctypes.c_float, ctypes.c_float]\nadd_floats.restype = ctypes.c_float\nx = 1.2\ny = 3.3\na = add_floats(x, y)\nprint('The sum of %.1f and %.1f is %.1f' % (x, y, a))\nsf = myccalc.subtract_floats\nsf.argtypes = [ctypes.c_float, ctypes.c_float]\nsf.restype = ctypes.c_float\nb = sf(y, x)\nprint('Subtracting %.1f from %.1f is %.1f' % (x, y, b))\n",
"step-5": "#!/usr/bin/env python3\n\"\"\"Shows how to call C code from python\"\"\"\n__appname__ = \"myccalc.py\"\n__author__ = \"Joseph Palmer <[email protected]>\"\n__version__ = \"0.0.1\"\n__license__ = \"License for this code/\"\n__date__ = \"Dec-2018\"\n\n## imports ##\nimport os\nimport ctypes\n\n# Load the C library into python - needs the full path for some reason!\nso_filepath = \"{}/libmycalc.so\".format(os.getcwd())\nctypes.cdll.LoadLibrary(so_filepath)\nmyccalc = ctypes.CDLL(so_filepath)\n\n# make a simpler name for the mycalc.add_floats\nadd_floats = myccalc.add_floats\n\n# tell python what variables this function takes & returns\nadd_floats.argtypes = [ctypes.c_float, ctypes.c_float]\nadd_floats.restype = ctypes.c_float\n\n# the function can now be used\nx = 1.2\ny = 3.3\na = add_floats(x, y)\nprint(\"The sum of %.1f and %.1f is %.1f\" % (x, y, a))\n\n# we can do the same for others\nsf = myccalc.subtract_floats\nsf.argtypes = [ctypes.c_float, ctypes.c_float]\nsf.restype = ctypes.c_float\nb = sf(y, x)\nprint(\"Subtracting %.1f from %.1f is %.1f\" % (x, y, b))\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
import datetime
import logging
from typing import TYPE_CHECKING, Any, Dict, List, NamedTuple, Optional
from dagster import check
from dagster.core.utils import coerce_valid_log_level, make_new_run_id
if TYPE_CHECKING:
from dagster.core.events import DagsterEvent
DAGSTER_META_KEY = "dagster_meta"
class DagsterMessageProps(
NamedTuple(
"_DagsterMessageProps",
[
("orig_message", Optional[str]),
("log_message_id", Optional[str]),
("log_timestamp", Optional[str]),
("dagster_event", Optional[Any]),
],
)
):
"""Internal class used to represent specific attributes about a logged message"""
def __new__(
cls,
orig_message: str,
log_message_id: Optional[str] = None,
log_timestamp: Optional[str] = None,
dagster_event: Optional["DagsterEvent"] = None,
):
return super().__new__(
cls,
orig_message=check.str_param(orig_message, "orig_message"),
log_message_id=check.opt_str_param(
log_message_id, "log_message_id", default=make_new_run_id()
),
log_timestamp=check.opt_str_param(
log_timestamp, "log_timestamp", default=datetime.datetime.utcnow().isoformat()
),
dagster_event=dagster_event,
)
@property
def error_str(self) -> Optional[str]:
if self.dagster_event is None:
return None
event_specific_data = self.dagster_event.event_specific_data
if not event_specific_data:
return None
error = getattr(event_specific_data, "error", None)
if error:
return "\n\n" + getattr(event_specific_data, "error_display_string", error.to_string())
return None
@property
def pid(self) -> Optional[str]:
if self.dagster_event is None or self.dagster_event.pid is None:
return None
return str(self.dagster_event.pid)
@property
def step_key(self) -> Optional[str]:
if self.dagster_event is None:
return None
return self.dagster_event.step_key
@property
def event_type_value(self) -> Optional[str]:
if self.dagster_event is None:
return None
return self.dagster_event.event_type_value
class DagsterLoggingMetadata(
NamedTuple(
"_DagsterLoggingMetadata",
[
("run_id", Optional[str]),
("pipeline_name", Optional[str]),
("pipeline_tags", Dict[str, str]),
("step_key", Optional[str]),
("solid_name", Optional[str]),
("resource_name", Optional[str]),
("resource_fn_name", Optional[str]),
],
)
):
"""Internal class used to represent the context in which a given message was logged (i.e. the
step, pipeline run, resource, etc.)
"""
def __new__(
cls,
run_id: str = None,
pipeline_name: str = None,
pipeline_tags: Dict[str, str] = None,
step_key: str = None,
solid_name: str = None,
resource_name: str = None,
resource_fn_name: str = None,
):
return super().__new__(
cls,
run_id=run_id,
pipeline_name=pipeline_name,
pipeline_tags=pipeline_tags or {},
step_key=step_key,
solid_name=solid_name,
resource_name=resource_name,
resource_fn_name=resource_fn_name,
)
@property
def log_source(self):
if self.resource_name is None:
return self.pipeline_name or "system"
return f"resource:{self.resource_name}"
def to_tags(self) -> Dict[str, str]:
# converts all values into strings
return {k: str(v) for k, v in self._asdict().items()}
def construct_log_string(
logging_metadata: DagsterLoggingMetadata, message_props: DagsterMessageProps
) -> str:
return (
" - ".join(
filter(
None,
(
logging_metadata.log_source,
logging_metadata.run_id,
message_props.pid,
logging_metadata.step_key,
message_props.event_type_value,
message_props.orig_message,
),
)
)
+ (message_props.error_str or "")
)
class DagsterLogManager(logging.Logger):
def __init__(
self,
logging_metadata: DagsterLoggingMetadata,
loggers: List[logging.Logger],
handlers: Optional[List[logging.Handler]] = None,
):
self._logging_metadata = check.inst_param(
logging_metadata, "logging_metadata", DagsterLoggingMetadata
)
self._loggers = check.list_param(loggers, "loggers", of_type=logging.Logger)
super().__init__(name="dagster", level=logging.DEBUG)
handlers = check.opt_list_param(handlers, "handlers", of_type=logging.Handler)
for handler in handlers:
self.addHandler(handler)
@property
def logging_metadata(self) -> DagsterLoggingMetadata:
return self._logging_metadata
@property
def loggers(self) -> List[logging.Logger]:
return self._loggers
def log_dagster_event(self, level: int, msg: str, dagster_event: "DagsterEvent"):
self.log(level=level, msg=msg, extra={DAGSTER_META_KEY: dagster_event})
def log(self, level, msg, *args, **kwargs):
# allow for string level names
super().log(coerce_valid_log_level(level), msg, *args, **kwargs)
def _log(
self, level, msg, args, exc_info=None, extra=None, stack_info=False
): # pylint: disable=arguments-differ
# we stash dagster meta information in the extra field
extra = extra or {}
dagster_message_props = DagsterMessageProps(
orig_message=msg, dagster_event=extra.get(DAGSTER_META_KEY)
)
# convert the message to our preferred format
msg = construct_log_string(self.logging_metadata, dagster_message_props)
# combine all dagster meta information into a single dictionary
meta_dict = {
**self.logging_metadata._asdict(),
**dagster_message_props._asdict(),
}
# step-level events can be logged from a pipeline context. for these cases, pull the step
# key from the underlying DagsterEvent
if meta_dict["step_key"] is None:
meta_dict["step_key"] = dagster_message_props.step_key
extra[DAGSTER_META_KEY] = meta_dict
for logger in self._loggers:
logger.log(level, msg, *args, extra=extra)
super()._log(level, msg, args, exc_info=exc_info, extra=extra, stack_info=stack_info)
def with_tags(self, **new_tags):
"""Add new tags in "new_tags" to the set of tags attached to this log manager instance, and
return a new DagsterLogManager with the merged set of tags.
Args:
            new_tags (Dict[str,str]): Dictionary of tags to merge into the existing set
Returns:
DagsterLogManager: a new DagsterLogManager namedtuple with updated tags for the same
run ID and loggers.
"""
return DagsterLogManager(
logging_metadata=self.logging_metadata._replace(**new_tags),
loggers=self._loggers,
handlers=self.handlers,
)
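

# A minimal usage sketch (illustration only, not part of the original module).
# In real Dagster code the framework builds the DagsterLogManager for you; the
# run id, pipeline name, step key and logger name below are made up.
def _example_log_manager_usage():
    import sys

    console = logging.getLogger("example_console")
    console.setLevel(logging.DEBUG)
    console.addHandler(logging.StreamHandler(sys.stdout))

    metadata = DagsterLoggingMetadata(
        run_id="1234-abcd", pipeline_name="my_pipeline", step_key="my_solid"
    )
    log_manager = DagsterLogManager(logging_metadata=metadata, loggers=[console])

    # Routed through _log(), so construct_log_string() rewrites the message into
    # roughly: "my_pipeline - 1234-abcd - my_solid - hello world"
    log_manager.info("hello world")

    # with_tags() returns a new manager whose metadata has the replaced fields
    return log_manager.with_tags(step_key="another_solid")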
|
normal
|
{
"blob_id": "f900e08c06ae736f5e32ac748e282700f9d0a969",
"index": 7922,
"step-1": "<mask token>\n\n\nclass DagsterMessageProps(NamedTuple('_DagsterMessageProps', [(\n 'orig_message', Optional[str]), ('log_message_id', Optional[str]), (\n 'log_timestamp', Optional[str]), ('dagster_event', Optional[Any])])):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n @property\n def step_key(self) ->Optional[str]:\n if self.dagster_event is None:\n return None\n return self.dagster_event.step_key\n\n @property\n def event_type_value(self) ->Optional[str]:\n if self.dagster_event is None:\n return None\n return self.dagster_event.event_type_value\n\n\nclass DagsterLoggingMetadata(NamedTuple('_DagsterLoggingMetadata', [(\n 'run_id', Optional[str]), ('pipeline_name', Optional[str]), (\n 'pipeline_tags', Dict[str, str]), ('step_key', Optional[str]), (\n 'solid_name', Optional[str]), ('resource_name', Optional[str]), (\n 'resource_fn_name', Optional[str])])):\n \"\"\"Internal class used to represent the context in which a given message was logged (i.e. the\n step, pipeline run, resource, etc.)\n \"\"\"\n\n def __new__(cls, run_id: str=None, pipeline_name: str=None,\n pipeline_tags: Dict[str, str]=None, step_key: str=None, solid_name:\n str=None, resource_name: str=None, resource_fn_name: str=None):\n return super().__new__(cls, run_id=run_id, pipeline_name=\n pipeline_name, pipeline_tags=pipeline_tags or {}, step_key=\n step_key, solid_name=solid_name, resource_name=resource_name,\n resource_fn_name=resource_fn_name)\n\n @property\n def log_source(self):\n if self.resource_name is None:\n return self.pipeline_name or 'system'\n return f'resource:{self.resource_name}'\n\n def to_tags(self) ->Dict[str, str]:\n return {k: str(v) for k, v in self._asdict().items()}\n\n\n<mask token>\n\n\nclass DagsterLogManager(logging.Logger):\n\n def __init__(self, logging_metadata: DagsterLoggingMetadata, loggers:\n List[logging.Logger], handlers: Optional[List[logging.Handler]]=None):\n self._logging_metadata = check.inst_param(logging_metadata,\n 'logging_metadata', DagsterLoggingMetadata)\n self._loggers = check.list_param(loggers, 'loggers', of_type=\n logging.Logger)\n super().__init__(name='dagster', level=logging.DEBUG)\n handlers = check.opt_list_param(handlers, 'handlers', of_type=\n logging.Handler)\n for handler in handlers:\n self.addHandler(handler)\n\n @property\n def logging_metadata(self) ->DagsterLoggingMetadata:\n return self._logging_metadata\n\n @property\n def loggers(self) ->List[logging.Logger]:\n return self._loggers\n\n def log_dagster_event(self, level: int, msg: str, dagster_event:\n 'DagsterEvent'):\n self.log(level=level, msg=msg, extra={DAGSTER_META_KEY: dagster_event})\n\n def log(self, level, msg, *args, **kwargs):\n super().log(coerce_valid_log_level(level), msg, *args, **kwargs)\n\n def _log(self, level, msg, args, exc_info=None, extra=None, stack_info=\n False):\n extra = extra or {}\n dagster_message_props = DagsterMessageProps(orig_message=msg,\n dagster_event=extra.get(DAGSTER_META_KEY))\n msg = construct_log_string(self.logging_metadata, dagster_message_props\n )\n meta_dict = {**self.logging_metadata._asdict(), **\n dagster_message_props._asdict()}\n if meta_dict['step_key'] is None:\n meta_dict['step_key'] = dagster_message_props.step_key\n extra[DAGSTER_META_KEY] = meta_dict\n for logger in self._loggers:\n logger.log(level, msg, *args, extra=extra)\n super()._log(level, msg, args, exc_info=exc_info, extra=extra,\n stack_info=stack_info)\n\n def with_tags(self, **new_tags):\n \"\"\"Add new tags in \"new_tags\" to the set of tags attached 
to this log manager instance, and\n return a new DagsterLogManager with the merged set of tags.\n\n Args:\n tags (Dict[str,str]): Dictionary of tags\n\n Returns:\n DagsterLogManager: a new DagsterLogManager namedtuple with updated tags for the same\n run ID and loggers.\n \"\"\"\n return DagsterLogManager(logging_metadata=self.logging_metadata.\n _replace(**new_tags), loggers=self._loggers, handlers=self.handlers\n )\n",
"step-2": "<mask token>\n\n\nclass DagsterMessageProps(NamedTuple('_DagsterMessageProps', [(\n 'orig_message', Optional[str]), ('log_message_id', Optional[str]), (\n 'log_timestamp', Optional[str]), ('dagster_event', Optional[Any])])):\n <mask token>\n\n def __new__(cls, orig_message: str, log_message_id: Optional[str]=None,\n log_timestamp: Optional[str]=None, dagster_event: Optional[\n 'DagsterEvent']=None):\n return super().__new__(cls, orig_message=check.str_param(\n orig_message, 'orig_message'), log_message_id=check.\n opt_str_param(log_message_id, 'log_message_id', default=\n make_new_run_id()), log_timestamp=check.opt_str_param(\n log_timestamp, 'log_timestamp', default=datetime.datetime.\n utcnow().isoformat()), dagster_event=dagster_event)\n <mask token>\n\n @property\n def pid(self) ->Optional[str]:\n if self.dagster_event is None or self.dagster_event.pid is None:\n return None\n return str(self.dagster_event.pid)\n\n @property\n def step_key(self) ->Optional[str]:\n if self.dagster_event is None:\n return None\n return self.dagster_event.step_key\n\n @property\n def event_type_value(self) ->Optional[str]:\n if self.dagster_event is None:\n return None\n return self.dagster_event.event_type_value\n\n\nclass DagsterLoggingMetadata(NamedTuple('_DagsterLoggingMetadata', [(\n 'run_id', Optional[str]), ('pipeline_name', Optional[str]), (\n 'pipeline_tags', Dict[str, str]), ('step_key', Optional[str]), (\n 'solid_name', Optional[str]), ('resource_name', Optional[str]), (\n 'resource_fn_name', Optional[str])])):\n \"\"\"Internal class used to represent the context in which a given message was logged (i.e. the\n step, pipeline run, resource, etc.)\n \"\"\"\n\n def __new__(cls, run_id: str=None, pipeline_name: str=None,\n pipeline_tags: Dict[str, str]=None, step_key: str=None, solid_name:\n str=None, resource_name: str=None, resource_fn_name: str=None):\n return super().__new__(cls, run_id=run_id, pipeline_name=\n pipeline_name, pipeline_tags=pipeline_tags or {}, step_key=\n step_key, solid_name=solid_name, resource_name=resource_name,\n resource_fn_name=resource_fn_name)\n\n @property\n def log_source(self):\n if self.resource_name is None:\n return self.pipeline_name or 'system'\n return f'resource:{self.resource_name}'\n\n def to_tags(self) ->Dict[str, str]:\n return {k: str(v) for k, v in self._asdict().items()}\n\n\n<mask token>\n\n\nclass DagsterLogManager(logging.Logger):\n\n def __init__(self, logging_metadata: DagsterLoggingMetadata, loggers:\n List[logging.Logger], handlers: Optional[List[logging.Handler]]=None):\n self._logging_metadata = check.inst_param(logging_metadata,\n 'logging_metadata', DagsterLoggingMetadata)\n self._loggers = check.list_param(loggers, 'loggers', of_type=\n logging.Logger)\n super().__init__(name='dagster', level=logging.DEBUG)\n handlers = check.opt_list_param(handlers, 'handlers', of_type=\n logging.Handler)\n for handler in handlers:\n self.addHandler(handler)\n\n @property\n def logging_metadata(self) ->DagsterLoggingMetadata:\n return self._logging_metadata\n\n @property\n def loggers(self) ->List[logging.Logger]:\n return self._loggers\n\n def log_dagster_event(self, level: int, msg: str, dagster_event:\n 'DagsterEvent'):\n self.log(level=level, msg=msg, extra={DAGSTER_META_KEY: dagster_event})\n\n def log(self, level, msg, *args, **kwargs):\n super().log(coerce_valid_log_level(level), msg, *args, **kwargs)\n\n def _log(self, level, msg, args, exc_info=None, extra=None, stack_info=\n False):\n extra = extra or {}\n dagster_message_props = 
DagsterMessageProps(orig_message=msg,\n dagster_event=extra.get(DAGSTER_META_KEY))\n msg = construct_log_string(self.logging_metadata, dagster_message_props\n )\n meta_dict = {**self.logging_metadata._asdict(), **\n dagster_message_props._asdict()}\n if meta_dict['step_key'] is None:\n meta_dict['step_key'] = dagster_message_props.step_key\n extra[DAGSTER_META_KEY] = meta_dict\n for logger in self._loggers:\n logger.log(level, msg, *args, extra=extra)\n super()._log(level, msg, args, exc_info=exc_info, extra=extra,\n stack_info=stack_info)\n\n def with_tags(self, **new_tags):\n \"\"\"Add new tags in \"new_tags\" to the set of tags attached to this log manager instance, and\n return a new DagsterLogManager with the merged set of tags.\n\n Args:\n tags (Dict[str,str]): Dictionary of tags\n\n Returns:\n DagsterLogManager: a new DagsterLogManager namedtuple with updated tags for the same\n run ID and loggers.\n \"\"\"\n return DagsterLogManager(logging_metadata=self.logging_metadata.\n _replace(**new_tags), loggers=self._loggers, handlers=self.handlers\n )\n",
"step-3": "<mask token>\n\n\nclass DagsterMessageProps(NamedTuple('_DagsterMessageProps', [(\n 'orig_message', Optional[str]), ('log_message_id', Optional[str]), (\n 'log_timestamp', Optional[str]), ('dagster_event', Optional[Any])])):\n \"\"\"Internal class used to represent specific attributes about a logged message\"\"\"\n\n def __new__(cls, orig_message: str, log_message_id: Optional[str]=None,\n log_timestamp: Optional[str]=None, dagster_event: Optional[\n 'DagsterEvent']=None):\n return super().__new__(cls, orig_message=check.str_param(\n orig_message, 'orig_message'), log_message_id=check.\n opt_str_param(log_message_id, 'log_message_id', default=\n make_new_run_id()), log_timestamp=check.opt_str_param(\n log_timestamp, 'log_timestamp', default=datetime.datetime.\n utcnow().isoformat()), dagster_event=dagster_event)\n\n @property\n def error_str(self) ->Optional[str]:\n if self.dagster_event is None:\n return None\n event_specific_data = self.dagster_event.event_specific_data\n if not event_specific_data:\n return None\n error = getattr(event_specific_data, 'error', None)\n if error:\n return '\\n\\n' + getattr(event_specific_data,\n 'error_display_string', error.to_string())\n return None\n\n @property\n def pid(self) ->Optional[str]:\n if self.dagster_event is None or self.dagster_event.pid is None:\n return None\n return str(self.dagster_event.pid)\n\n @property\n def step_key(self) ->Optional[str]:\n if self.dagster_event is None:\n return None\n return self.dagster_event.step_key\n\n @property\n def event_type_value(self) ->Optional[str]:\n if self.dagster_event is None:\n return None\n return self.dagster_event.event_type_value\n\n\nclass DagsterLoggingMetadata(NamedTuple('_DagsterLoggingMetadata', [(\n 'run_id', Optional[str]), ('pipeline_name', Optional[str]), (\n 'pipeline_tags', Dict[str, str]), ('step_key', Optional[str]), (\n 'solid_name', Optional[str]), ('resource_name', Optional[str]), (\n 'resource_fn_name', Optional[str])])):\n \"\"\"Internal class used to represent the context in which a given message was logged (i.e. 
the\n step, pipeline run, resource, etc.)\n \"\"\"\n\n def __new__(cls, run_id: str=None, pipeline_name: str=None,\n pipeline_tags: Dict[str, str]=None, step_key: str=None, solid_name:\n str=None, resource_name: str=None, resource_fn_name: str=None):\n return super().__new__(cls, run_id=run_id, pipeline_name=\n pipeline_name, pipeline_tags=pipeline_tags or {}, step_key=\n step_key, solid_name=solid_name, resource_name=resource_name,\n resource_fn_name=resource_fn_name)\n\n @property\n def log_source(self):\n if self.resource_name is None:\n return self.pipeline_name or 'system'\n return f'resource:{self.resource_name}'\n\n def to_tags(self) ->Dict[str, str]:\n return {k: str(v) for k, v in self._asdict().items()}\n\n\ndef construct_log_string(logging_metadata: DagsterLoggingMetadata,\n message_props: DagsterMessageProps) ->str:\n return ' - '.join(filter(None, (logging_metadata.log_source,\n logging_metadata.run_id, message_props.pid, logging_metadata.\n step_key, message_props.event_type_value, message_props.orig_message))\n ) + (message_props.error_str or '')\n\n\nclass DagsterLogManager(logging.Logger):\n\n def __init__(self, logging_metadata: DagsterLoggingMetadata, loggers:\n List[logging.Logger], handlers: Optional[List[logging.Handler]]=None):\n self._logging_metadata = check.inst_param(logging_metadata,\n 'logging_metadata', DagsterLoggingMetadata)\n self._loggers = check.list_param(loggers, 'loggers', of_type=\n logging.Logger)\n super().__init__(name='dagster', level=logging.DEBUG)\n handlers = check.opt_list_param(handlers, 'handlers', of_type=\n logging.Handler)\n for handler in handlers:\n self.addHandler(handler)\n\n @property\n def logging_metadata(self) ->DagsterLoggingMetadata:\n return self._logging_metadata\n\n @property\n def loggers(self) ->List[logging.Logger]:\n return self._loggers\n\n def log_dagster_event(self, level: int, msg: str, dagster_event:\n 'DagsterEvent'):\n self.log(level=level, msg=msg, extra={DAGSTER_META_KEY: dagster_event})\n\n def log(self, level, msg, *args, **kwargs):\n super().log(coerce_valid_log_level(level), msg, *args, **kwargs)\n\n def _log(self, level, msg, args, exc_info=None, extra=None, stack_info=\n False):\n extra = extra or {}\n dagster_message_props = DagsterMessageProps(orig_message=msg,\n dagster_event=extra.get(DAGSTER_META_KEY))\n msg = construct_log_string(self.logging_metadata, dagster_message_props\n )\n meta_dict = {**self.logging_metadata._asdict(), **\n dagster_message_props._asdict()}\n if meta_dict['step_key'] is None:\n meta_dict['step_key'] = dagster_message_props.step_key\n extra[DAGSTER_META_KEY] = meta_dict\n for logger in self._loggers:\n logger.log(level, msg, *args, extra=extra)\n super()._log(level, msg, args, exc_info=exc_info, extra=extra,\n stack_info=stack_info)\n\n def with_tags(self, **new_tags):\n \"\"\"Add new tags in \"new_tags\" to the set of tags attached to this log manager instance, and\n return a new DagsterLogManager with the merged set of tags.\n\n Args:\n tags (Dict[str,str]): Dictionary of tags\n\n Returns:\n DagsterLogManager: a new DagsterLogManager namedtuple with updated tags for the same\n run ID and loggers.\n \"\"\"\n return DagsterLogManager(logging_metadata=self.logging_metadata.\n _replace(**new_tags), loggers=self._loggers, handlers=self.handlers\n )\n",
"step-4": "<mask token>\nif TYPE_CHECKING:\n from dagster.core.events import DagsterEvent\n<mask token>\n\n\nclass DagsterMessageProps(NamedTuple('_DagsterMessageProps', [(\n 'orig_message', Optional[str]), ('log_message_id', Optional[str]), (\n 'log_timestamp', Optional[str]), ('dagster_event', Optional[Any])])):\n \"\"\"Internal class used to represent specific attributes about a logged message\"\"\"\n\n def __new__(cls, orig_message: str, log_message_id: Optional[str]=None,\n log_timestamp: Optional[str]=None, dagster_event: Optional[\n 'DagsterEvent']=None):\n return super().__new__(cls, orig_message=check.str_param(\n orig_message, 'orig_message'), log_message_id=check.\n opt_str_param(log_message_id, 'log_message_id', default=\n make_new_run_id()), log_timestamp=check.opt_str_param(\n log_timestamp, 'log_timestamp', default=datetime.datetime.\n utcnow().isoformat()), dagster_event=dagster_event)\n\n @property\n def error_str(self) ->Optional[str]:\n if self.dagster_event is None:\n return None\n event_specific_data = self.dagster_event.event_specific_data\n if not event_specific_data:\n return None\n error = getattr(event_specific_data, 'error', None)\n if error:\n return '\\n\\n' + getattr(event_specific_data,\n 'error_display_string', error.to_string())\n return None\n\n @property\n def pid(self) ->Optional[str]:\n if self.dagster_event is None or self.dagster_event.pid is None:\n return None\n return str(self.dagster_event.pid)\n\n @property\n def step_key(self) ->Optional[str]:\n if self.dagster_event is None:\n return None\n return self.dagster_event.step_key\n\n @property\n def event_type_value(self) ->Optional[str]:\n if self.dagster_event is None:\n return None\n return self.dagster_event.event_type_value\n\n\nclass DagsterLoggingMetadata(NamedTuple('_DagsterLoggingMetadata', [(\n 'run_id', Optional[str]), ('pipeline_name', Optional[str]), (\n 'pipeline_tags', Dict[str, str]), ('step_key', Optional[str]), (\n 'solid_name', Optional[str]), ('resource_name', Optional[str]), (\n 'resource_fn_name', Optional[str])])):\n \"\"\"Internal class used to represent the context in which a given message was logged (i.e. 
the\n step, pipeline run, resource, etc.)\n \"\"\"\n\n def __new__(cls, run_id: str=None, pipeline_name: str=None,\n pipeline_tags: Dict[str, str]=None, step_key: str=None, solid_name:\n str=None, resource_name: str=None, resource_fn_name: str=None):\n return super().__new__(cls, run_id=run_id, pipeline_name=\n pipeline_name, pipeline_tags=pipeline_tags or {}, step_key=\n step_key, solid_name=solid_name, resource_name=resource_name,\n resource_fn_name=resource_fn_name)\n\n @property\n def log_source(self):\n if self.resource_name is None:\n return self.pipeline_name or 'system'\n return f'resource:{self.resource_name}'\n\n def to_tags(self) ->Dict[str, str]:\n return {k: str(v) for k, v in self._asdict().items()}\n\n\ndef construct_log_string(logging_metadata: DagsterLoggingMetadata,\n message_props: DagsterMessageProps) ->str:\n return ' - '.join(filter(None, (logging_metadata.log_source,\n logging_metadata.run_id, message_props.pid, logging_metadata.\n step_key, message_props.event_type_value, message_props.orig_message))\n ) + (message_props.error_str or '')\n\n\nclass DagsterLogManager(logging.Logger):\n\n def __init__(self, logging_metadata: DagsterLoggingMetadata, loggers:\n List[logging.Logger], handlers: Optional[List[logging.Handler]]=None):\n self._logging_metadata = check.inst_param(logging_metadata,\n 'logging_metadata', DagsterLoggingMetadata)\n self._loggers = check.list_param(loggers, 'loggers', of_type=\n logging.Logger)\n super().__init__(name='dagster', level=logging.DEBUG)\n handlers = check.opt_list_param(handlers, 'handlers', of_type=\n logging.Handler)\n for handler in handlers:\n self.addHandler(handler)\n\n @property\n def logging_metadata(self) ->DagsterLoggingMetadata:\n return self._logging_metadata\n\n @property\n def loggers(self) ->List[logging.Logger]:\n return self._loggers\n\n def log_dagster_event(self, level: int, msg: str, dagster_event:\n 'DagsterEvent'):\n self.log(level=level, msg=msg, extra={DAGSTER_META_KEY: dagster_event})\n\n def log(self, level, msg, *args, **kwargs):\n super().log(coerce_valid_log_level(level), msg, *args, **kwargs)\n\n def _log(self, level, msg, args, exc_info=None, extra=None, stack_info=\n False):\n extra = extra or {}\n dagster_message_props = DagsterMessageProps(orig_message=msg,\n dagster_event=extra.get(DAGSTER_META_KEY))\n msg = construct_log_string(self.logging_metadata, dagster_message_props\n )\n meta_dict = {**self.logging_metadata._asdict(), **\n dagster_message_props._asdict()}\n if meta_dict['step_key'] is None:\n meta_dict['step_key'] = dagster_message_props.step_key\n extra[DAGSTER_META_KEY] = meta_dict\n for logger in self._loggers:\n logger.log(level, msg, *args, extra=extra)\n super()._log(level, msg, args, exc_info=exc_info, extra=extra,\n stack_info=stack_info)\n\n def with_tags(self, **new_tags):\n \"\"\"Add new tags in \"new_tags\" to the set of tags attached to this log manager instance, and\n return a new DagsterLogManager with the merged set of tags.\n\n Args:\n tags (Dict[str,str]): Dictionary of tags\n\n Returns:\n DagsterLogManager: a new DagsterLogManager namedtuple with updated tags for the same\n run ID and loggers.\n \"\"\"\n return DagsterLogManager(logging_metadata=self.logging_metadata.\n _replace(**new_tags), loggers=self._loggers, handlers=self.handlers\n )\n",
"step-5": "import datetime\nimport logging\nfrom typing import TYPE_CHECKING, Any, Dict, List, NamedTuple, Optional\n\nfrom dagster import check\nfrom dagster.core.utils import coerce_valid_log_level, make_new_run_id\n\nif TYPE_CHECKING:\n from dagster.core.events import DagsterEvent\n\nDAGSTER_META_KEY = \"dagster_meta\"\n\n\nclass DagsterMessageProps(\n NamedTuple(\n \"_DagsterMessageProps\",\n [\n (\"orig_message\", Optional[str]),\n (\"log_message_id\", Optional[str]),\n (\"log_timestamp\", Optional[str]),\n (\"dagster_event\", Optional[Any]),\n ],\n )\n):\n \"\"\"Internal class used to represent specific attributes about a logged message\"\"\"\n\n def __new__(\n cls,\n orig_message: str,\n log_message_id: Optional[str] = None,\n log_timestamp: Optional[str] = None,\n dagster_event: Optional[\"DagsterEvent\"] = None,\n ):\n return super().__new__(\n cls,\n orig_message=check.str_param(orig_message, \"orig_message\"),\n log_message_id=check.opt_str_param(\n log_message_id, \"log_message_id\", default=make_new_run_id()\n ),\n log_timestamp=check.opt_str_param(\n log_timestamp, \"log_timestamp\", default=datetime.datetime.utcnow().isoformat()\n ),\n dagster_event=dagster_event,\n )\n\n @property\n def error_str(self) -> Optional[str]:\n if self.dagster_event is None:\n return None\n\n event_specific_data = self.dagster_event.event_specific_data\n if not event_specific_data:\n return None\n\n error = getattr(event_specific_data, \"error\", None)\n if error:\n return \"\\n\\n\" + getattr(event_specific_data, \"error_display_string\", error.to_string())\n return None\n\n @property\n def pid(self) -> Optional[str]:\n if self.dagster_event is None or self.dagster_event.pid is None:\n return None\n return str(self.dagster_event.pid)\n\n @property\n def step_key(self) -> Optional[str]:\n if self.dagster_event is None:\n return None\n return self.dagster_event.step_key\n\n @property\n def event_type_value(self) -> Optional[str]:\n if self.dagster_event is None:\n return None\n return self.dagster_event.event_type_value\n\n\nclass DagsterLoggingMetadata(\n NamedTuple(\n \"_DagsterLoggingMetadata\",\n [\n (\"run_id\", Optional[str]),\n (\"pipeline_name\", Optional[str]),\n (\"pipeline_tags\", Dict[str, str]),\n (\"step_key\", Optional[str]),\n (\"solid_name\", Optional[str]),\n (\"resource_name\", Optional[str]),\n (\"resource_fn_name\", Optional[str]),\n ],\n )\n):\n \"\"\"Internal class used to represent the context in which a given message was logged (i.e. 
the\n step, pipeline run, resource, etc.)\n \"\"\"\n\n def __new__(\n cls,\n run_id: str = None,\n pipeline_name: str = None,\n pipeline_tags: Dict[str, str] = None,\n step_key: str = None,\n solid_name: str = None,\n resource_name: str = None,\n resource_fn_name: str = None,\n ):\n return super().__new__(\n cls,\n run_id=run_id,\n pipeline_name=pipeline_name,\n pipeline_tags=pipeline_tags or {},\n step_key=step_key,\n solid_name=solid_name,\n resource_name=resource_name,\n resource_fn_name=resource_fn_name,\n )\n\n @property\n def log_source(self):\n if self.resource_name is None:\n return self.pipeline_name or \"system\"\n return f\"resource:{self.resource_name}\"\n\n def to_tags(self) -> Dict[str, str]:\n # converts all values into strings\n return {k: str(v) for k, v in self._asdict().items()}\n\n\ndef construct_log_string(\n logging_metadata: DagsterLoggingMetadata, message_props: DagsterMessageProps\n) -> str:\n\n return (\n \" - \".join(\n filter(\n None,\n (\n logging_metadata.log_source,\n logging_metadata.run_id,\n message_props.pid,\n logging_metadata.step_key,\n message_props.event_type_value,\n message_props.orig_message,\n ),\n )\n )\n + (message_props.error_str or \"\")\n )\n\n\nclass DagsterLogManager(logging.Logger):\n def __init__(\n self,\n logging_metadata: DagsterLoggingMetadata,\n loggers: List[logging.Logger],\n handlers: Optional[List[logging.Handler]] = None,\n ):\n self._logging_metadata = check.inst_param(\n logging_metadata, \"logging_metadata\", DagsterLoggingMetadata\n )\n self._loggers = check.list_param(loggers, \"loggers\", of_type=logging.Logger)\n\n super().__init__(name=\"dagster\", level=logging.DEBUG)\n\n handlers = check.opt_list_param(handlers, \"handlers\", of_type=logging.Handler)\n for handler in handlers:\n self.addHandler(handler)\n\n @property\n def logging_metadata(self) -> DagsterLoggingMetadata:\n return self._logging_metadata\n\n @property\n def loggers(self) -> List[logging.Logger]:\n return self._loggers\n\n def log_dagster_event(self, level: int, msg: str, dagster_event: \"DagsterEvent\"):\n self.log(level=level, msg=msg, extra={DAGSTER_META_KEY: dagster_event})\n\n def log(self, level, msg, *args, **kwargs):\n # allow for string level names\n super().log(coerce_valid_log_level(level), msg, *args, **kwargs)\n\n def _log(\n self, level, msg, args, exc_info=None, extra=None, stack_info=False\n ): # pylint: disable=arguments-differ\n\n # we stash dagster meta information in the extra field\n extra = extra or {}\n\n dagster_message_props = DagsterMessageProps(\n orig_message=msg, dagster_event=extra.get(DAGSTER_META_KEY)\n )\n\n # convert the message to our preferred format\n msg = construct_log_string(self.logging_metadata, dagster_message_props)\n\n # combine all dagster meta information into a single dictionary\n meta_dict = {\n **self.logging_metadata._asdict(),\n **dagster_message_props._asdict(),\n }\n # step-level events can be logged from a pipeline context. 
for these cases, pull the step\n # key from the underlying DagsterEvent\n if meta_dict[\"step_key\"] is None:\n meta_dict[\"step_key\"] = dagster_message_props.step_key\n\n extra[DAGSTER_META_KEY] = meta_dict\n\n for logger in self._loggers:\n logger.log(level, msg, *args, extra=extra)\n\n super()._log(level, msg, args, exc_info=exc_info, extra=extra, stack_info=stack_info)\n\n def with_tags(self, **new_tags):\n \"\"\"Add new tags in \"new_tags\" to the set of tags attached to this log manager instance, and\n return a new DagsterLogManager with the merged set of tags.\n\n Args:\n tags (Dict[str,str]): Dictionary of tags\n\n Returns:\n DagsterLogManager: a new DagsterLogManager namedtuple with updated tags for the same\n run ID and loggers.\n \"\"\"\n return DagsterLogManager(\n logging_metadata=self.logging_metadata._replace(**new_tags),\n loggers=self._loggers,\n handlers=self.handlers,\n )\n",
"step-ids": [
16,
18,
21,
22,
25
]
}
|
[
16,
18,
21,
22,
25
] |
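Editor's aside: the record above defines construct_log_string, which joins the non-empty logging-metadata and message fields with " - " and appends any error text. Below is a minimal standalone sketch of that join behaviour; it deliberately does not import dagster, and the sample field values are invented for illustration.

# Editor's illustrative sketch: not part of the dataset record above.
from typing import Optional, Sequence


def join_log_parts(parts: Sequence[Optional[str]], error_str: Optional[str] = None) -> str:
    # Mirror the record's construct_log_string: drop None/empty parts,
    # join the rest with " - ", then append the error string (if any) verbatim.
    return " - ".join(filter(None, parts)) + (error_str or "")


if __name__ == "__main__":
    line = join_log_parts([
        "my_pipeline",     # log_source (pipeline name, since no resource is set)
        "run-1234",        # run_id
        None,              # pid not available
        "my_step",         # step_key
        "STEP_SUCCESS",    # event_type_value
        "Finished step.",  # orig_message
    ])
    print(line)
    # -> my_pipeline - run-1234 - my_step - STEP_SUCCESS - Finished step.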
n, k = raw_input().split()
n = int(n)
k = int(k)
div = 0
for i in range(n):
new = int(raw_input())
if (new % k) == 0:
div += 1
print div
|
normal
|
{
"blob_id": "68e1e39f193537367d899c5fd01c1361ed93ef29",
"index": 7668,
"step-1": "n, k = raw_input().split()\nn = int(n)\nk = int(k)\n\ndiv = 0\n\nfor i in range(n):\n new = int(raw_input())\n if (new % k) == 0:\n div += 1\n\nprint div",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
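Editor's aside: the snippet in the record above is Python 2 (raw_input and the print statement). A rough Python 3 rendering of the same divisibility count, for reference only:

# Editor's illustrative sketch: not part of the dataset record above.
def main() -> None:
    # First line: n (how many numbers follow) and k (the divisor).
    n, k = map(int, input().split())
    # Count how many of the next n integers are divisible by k.
    div = sum(1 for _ in range(n) if int(input()) % k == 0)
    print(div)


if __name__ == "__main__":
    main()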
r"""
Definition
----------
Calculates the scattering from a **body-centered cubic lattice** with
paracrystalline distortion. Thermal vibrations are considered to be negligible,
and the size of the paracrystal is infinitely large. Paracrystalline distortion
is assumed to be isotropic and characterized by a Gaussian distribution.
The scattering intensity $I(q)$ is calculated as
.. math::
I(q) = \frac{\text{scale}}{V_p} V_\text{lattice} P(q) Z(q) + \text{background}
where *scale* is the volume fraction of crystal in the sample volume,
$V_\text{lattice}$ is the volume fraction of spheres in the crystal, $V_p$ is
the volume of the primary particle, $P(q)$ is the form factor of the sphere
(normalized), and $Z(q)$ is the paracrystalline structure factor for a
body-centered cubic structure.
.. note::
At this point the GUI does not return $V_\text{lattice}$ separately so that
the user will need to calculate it from the equation given and the
appropriate returned parameters.
.. warning::
As per the equations below, this model will return I(q)=0 for all q if the
distortion factor is equal to 0. The model is not meant to support perfect
crystals.
.. figure:: img/bcc_geometry.jpg
Body-centered cubic (BCC) lattice taken from reference [#Matsuoka1987]_.
Following the derivation from reference [#Matsuoka1987]_, as corrected in
reference [#Matsuoka1990]_, and based on the above figure, the
primitive unit cell vectors $\vec{a_1},\vec{a_2}$, and $\vec{a_3}$, which
enclose the smallest possible unit cell for the bcc lattice, are defined below:
.. math::
\vec{a_1} &= \frac{1}{2}(-\vec{b_1} + \vec{b_2} + \vec{b_3}) \\
\vec{a_2} &= \frac{1}{2} (\vec{b_1} - \vec{b_2} + \vec{b_3}) \\
\vec{a_3} &= \frac{1}{2}(\vec{b_1} + \vec{b_2} -\vec{b_3}).
where $\vec{b_1},\vec{b_2}$, and $\vec{b_3}$ are the unit cell vectors of the
conventional unit cell, which is a unit cell that includes the full symmetry
of the lattice. As defined by reference [#Matsuoka1987]_, the constant $a$ is the
lattice parameter of the conventional unit cell with
$|\vec{b_1}|=|\vec{b_2}|=|\vec{b_3}|=a$. Using this definition, the
nearest-neighbor distance ($D$) is given by
$D=|\vec{a_1}|=|\vec{a_2}|=|\vec{a_3}|=\sqrt{(a/2)^2+(a/2)^2+(a/2)^2}=\sqrt{\frac{3a^2}{4}}=\frac{\sqrt{3}a}{2}$.
The volume of the primitive unit cell $V_u$ is then given by:
.. math::
V_u &= |(\vec{a_1}\times \vec{a_2})\cdot\vec{a_3}|\\
&= (\frac{a^2}{2},\frac{a^2}{2},0)\cdot(\frac{a}{2},\frac{a}{2},-\frac{a}{2})\\
&= a^3/2
In this case, the volume fraction ($V_{lattice}$) of spherical particles with
radius $R$ sitting on the bcc lattice is given by:
.. math::
V_{lattice} &= \frac{4/3 \pi R^3}{a^3/2}\\
&= \frac{8\pi R^3}{3a^3}\\
&= \frac{\sqrt{3} \pi R^3}{D^3}
Now, continuing to follow [#Matsuoka1987]_, the structure (lattice)
factor $Z(\vec{q})$ for a 3D paracrystal can be written as:
.. math::
Z(\vec{q}) = \prod_{k=1}^{3}Z_k(\vec{q})
with
.. math::
Z_k(\vec{q}) = \frac{1-|F_k|^2}{1-2|F_k|\cos(\vec{a_k}\cdot\vec{q})+|F_k|^2}
and where $F_k(\vec{q})$ is the structure factor of the primitive unit cell
defined as:
.. math::
F_k(\vec{q}) = e^{-\frac{1}{2} \Delta a^2_k q^2} \times e^{-i\vec{q}\cdot\vec{a_k}}.
Here, $\vec{a_k}$ are the primitive unit cell vectors $\vec{a_1}$, $\vec{a_2}$,
and $\vec{a_3}$. Furthermore, $\Delta a_k$ is the isotropic distortion of the
lattice point from its ideal position and can be defined by a constant factor
$g=\Delta a / |\vec{a_1}| = \Delta a / |\vec{a_2}| = \Delta a / |\vec{a_3}|=\Delta a/D$.
Finally, assuming the definitions presented in this document, the authors of
reference [#Matsuoka1987]_ have derived the lattice factors which are given by:
.. math::
Z_1(q,\theta,\phi)&=[1-e^{-q^2\Delta a^2}]/\{1-2e^{-\frac{1}{2}q^2\Delta a^2}\cos[\frac{qa}{2}(\sin\theta \cos\phi + \sin\theta \sin\phi + \cos\theta)] + e^{-q^2\Delta a^2}\}\\
Z_2(q,\theta,\phi)&=[1-e^{-q^2\Delta a^2}]/\{1-2e^{-\frac{1}{2}q^2\Delta a^2}\cos[\frac{qa}{2}(-\sin\theta \cos\phi - \sin\theta \sin\phi + \cos\theta)] + e^{-q^2\Delta a^2}\}\\
Z_3(q,\theta,\phi)&=[1-e^{-q^2\Delta a^2}]/\{1-2e^{-\frac{1}{2}q^2\Delta a^2}\cos[\frac{qa}{2}(-\sin\theta \cos\phi + \sin\theta \sin\phi - \cos\theta)] + e^{-q^2\Delta a^2}\}\\
Note that Sasview is using the nearest-neighbor parameter ($D$) as an input
instead of the conventional unit cell parameter $a$. In this case, using
$a=\frac{2D}{\sqrt{3}}$, we rewrite $Z_1(q)$, $Z_2(q)$, and $Z_3(q)$ in terms
of $D$ instead of $a$, which leads to:
.. math::
Z_1(q,\theta,\phi)&=[1-e^{-q^2\Delta a^2}]/\{1-2e^{-\frac{1}{2}q^2\Delta a^2}\cos[\frac{qD}{\sqrt{3}}(\sin\theta \cos\phi + \sin\theta \sin\phi + \cos\theta)] + e^{-q^2\Delta a^2}\}\\
Z_2(q,\theta,\phi)&=[1-e^{-q^2\Delta a^2}]/\{1-2e^{-\frac{1}{2}q^2\Delta a^2}\cos[\frac{qD}{\sqrt{3}}(-\sin\theta \cos\phi - \sin\theta \sin\phi + \cos\theta)] + e^{-q^2\Delta a^2}\}\\
Z_3(q,\theta,\phi)&=[1-e^{-q^2\Delta a^2}]/\{1-2e^{-\frac{1}{2}q^2\Delta a^2}\cos[\frac{qD}{\sqrt{3}}(-\sin\theta \cos\phi + \sin\theta \sin\phi - \cos\theta)] + e^{-q^2\Delta a^2}\}\\
Finally note that the position of the Bragg peaks for the bcc lattice are
indexed by (reduced q-values):
.. math::
\frac{qa}{2\pi}=\frac{qD}{\sqrt{3}\pi}=\sqrt{h^2+k^2+l^2}.
In the above equation, we used the conventional unit cell so not all
permutations of h,k, and l will produce Bragg peaks. The Bragg scattering
condition for bcc imposes that h+k+l = even. Thus the peak positions
correspond to (just the first 5)
.. math::
\begin{array}{lccccc}
q/q_o & 1 & \sqrt{2} & \sqrt{3} & \sqrt{4} & \sqrt{5} \\
\text{Indices} & (110) & (200) & (211) & (220) & (310) \\
\end{array}
.. note::
The calculation of $Z(q)$ is a double numerical integral that must be
carried out with a high density of points to properly capture the sharp
peaks of the paracrystalline scattering. So be warned that the calculation
is slow. Fitting of any experimental data must be resolution smeared for
any meaningful fit. This makes a triple integral which may be very slow.
If a double-precision GPU with OpenCL support is available this may improve
the speed of the calculation.
This example dataset is produced using 200 data points,
*qmin* = 0.001 |Ang^-1|, *qmax* = 0.1 |Ang^-1| and the above default values.
The 2D (Anisotropic model) is based on the reference below where $I(q)$ is
approximated for 1d scattering. Thus the scattering pattern for 2D may not be
accurate, particularly at low $q$. For general details of the calculation and
angular dispersions for oriented particles see :ref:`orientation`. Note that
we are not responsible for any incorrectness of the 2D model computation.
.. figure:: img/parallelepiped_angle_definition.png
Orientation of the crystal with respect to the scattering plane, when
$\theta = \phi = 0$ the $c$ axis is along the beam direction (the $z$ axis).
References
----------
.. [#Matsuoka1987] Hideki Matsuoka et. al. *Physical Review B*, 36 (1987)
1754-1765 (Original Paper)
.. [#Matsuoka1990] Hideki Matsuoka et. al. *Physical Review B*, 41 (1990)
3854-3856 (Corrections to FCC and BCC lattice structure calculation)
Authorship and Verification
---------------------------
* **Author:** NIST IGOR/DANSE **Date:** pre 2010
* **Last Modified by:** Jonathan Gaudet **Date:** September 26, 2022
* **Last Reviewed by:** Paul Butler **Date:** November 2, 2022
"""
import numpy as np
from numpy import inf, pi
name = "bcc_paracrystal"
title = "Body-centred cubic lattic with paracrystalline distortion"
description = """
Calculates the scattering from a **body-centered cubic lattice** with
paracrystalline distortion. Thermal vibrations are considered to be
negligible, and the size of the paracrystal is infinitely large.
Paracrystalline distortion is assumed to be isotropic and characterized
by a Gaussian distribution.
"""
category = "shape:paracrystal"
#note - calculation requires double precision
single = False
# pylint: disable=bad-whitespace, line-too-long
# ["name", "units", default, [lower, upper], "type","description" ],
parameters = [["dnn", "Ang", 220, [-inf, inf], "", "Nearest neighbour distance"],
["d_factor", "", 0.06, [-inf, inf], "", "Paracrystal distortion factor"],
["radius", "Ang", 40, [0, inf], "volume", "Particle radius"],
["sld", "1e-6/Ang^2", 4, [-inf, inf], "sld", "Particle scattering length density"],
["sld_solvent", "1e-6/Ang^2", 1, [-inf, inf], "sld", "Solvent scattering length density"],
["theta", "degrees", 60, [-360, 360], "orientation", "c axis to beam angle"],
["phi", "degrees", 60, [-360, 360], "orientation", "rotation about beam"],
["psi", "degrees", 60, [-360, 360], "orientation", "rotation about c axis"]
]
# pylint: enable=bad-whitespace, line-too-long
source = ["lib/sas_3j1x_x.c", "lib/gauss150.c", "lib/sphere_form.c", "bcc_paracrystal.c"]
def random():
"""Return a random parameter set for the model."""
# Define lattice spacing as a multiple of the particle radius
# using the formula a = 4 r/sqrt(3). Systems which are ordered
# are probably mostly filled, so use a distribution which goes from
# zero to one, but leaving 90% of them within 80% of the
# maximum bcc packing. Lattice distortion values are empirically
# useful between 0.01 and 0.7. Use an exponential distribution
# in this range 'cuz its easy.
radius = 10**np.random.uniform(1.3, 4)
d_factor = 10**np.random.uniform(-2, -0.7) # sigma_d in 0.01-0.7
dnn_fraction = np.random.beta(a=10, b=1)
dnn = radius*4/np.sqrt(3)/dnn_fraction
pars = dict(
#sld=1, sld_solvent=0, scale=1, background=1e-32,
dnn=dnn,
d_factor=d_factor,
radius=radius,
)
return pars
# april 6 2017, rkh add unit tests, NOT compared with any other calc method, assume correct!
# add 2d test later
# October 26, 2022 PDB updated the 1D unit test after fixing the math. The values are again
# assumed correct. It would be good to have an independent assessment. 2D tests remain
# on the todo list
# TODO: fix the 2d tests
q = 4.*pi/220.
tests = [
[{}, [0.001, q, 0.25], [0.6945817843046642, 1.6885157981411993, 0.005367008206852725]],
#[{'theta': 20.0, 'phi': 30, 'psi': 40.0}, (-0.017, 0.035), 2082.20264399],
#[{'theta': 20.0, 'phi': 30, 'psi': 40.0}, (-0.081, 0.011), 0.436323144781],
]
|
normal
|
{
"blob_id": "7ccaa15f025b2c1ba560d07c1a30b06c9ebf9ad1",
"index": 1927,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef random():\n \"\"\"Return a random parameter set for the model.\"\"\"\n radius = 10 ** np.random.uniform(1.3, 4)\n d_factor = 10 ** np.random.uniform(-2, -0.7)\n dnn_fraction = np.random.beta(a=10, b=1)\n dnn = radius * 4 / np.sqrt(3) / dnn_fraction\n pars = dict(dnn=dnn, d_factor=d_factor, radius=radius)\n return pars\n\n\n<mask token>\n",
"step-3": "<mask token>\nname = 'bcc_paracrystal'\ntitle = 'Body-centred cubic lattic with paracrystalline distortion'\ndescription = \"\"\"\n Calculates the scattering from a **body-centered cubic lattice** with\n paracrystalline distortion. Thermal vibrations are considered to be\n negligible, and the size of the paracrystal is infinitely large.\n Paracrystalline distortion is assumed to be isotropic and characterized\n by a Gaussian distribution.\n \"\"\"\ncategory = 'shape:paracrystal'\nsingle = False\nparameters = [['dnn', 'Ang', 220, [-inf, inf], '',\n 'Nearest neighbour distance'], ['d_factor', '', 0.06, [-inf, inf], '',\n 'Paracrystal distortion factor'], ['radius', 'Ang', 40, [0, inf],\n 'volume', 'Particle radius'], ['sld', '1e-6/Ang^2', 4, [-inf, inf],\n 'sld', 'Particle scattering length density'], ['sld_solvent',\n '1e-6/Ang^2', 1, [-inf, inf], 'sld',\n 'Solvent scattering length density'], ['theta', 'degrees', 60, [-360, \n 360], 'orientation', 'c axis to beam angle'], ['phi', 'degrees', 60, [-\n 360, 360], 'orientation', 'rotation about beam'], ['psi', 'degrees', 60,\n [-360, 360], 'orientation', 'rotation about c axis']]\nsource = ['lib/sas_3j1x_x.c', 'lib/gauss150.c', 'lib/sphere_form.c',\n 'bcc_paracrystal.c']\n\n\ndef random():\n \"\"\"Return a random parameter set for the model.\"\"\"\n radius = 10 ** np.random.uniform(1.3, 4)\n d_factor = 10 ** np.random.uniform(-2, -0.7)\n dnn_fraction = np.random.beta(a=10, b=1)\n dnn = radius * 4 / np.sqrt(3) / dnn_fraction\n pars = dict(dnn=dnn, d_factor=d_factor, radius=radius)\n return pars\n\n\nq = 4.0 * pi / 220.0\ntests = [[{}, [0.001, q, 0.25], [0.6945817843046642, 1.6885157981411993, \n 0.005367008206852725]]]\n",
"step-4": "<mask token>\nimport numpy as np\nfrom numpy import inf, pi\nname = 'bcc_paracrystal'\ntitle = 'Body-centred cubic lattic with paracrystalline distortion'\ndescription = \"\"\"\n Calculates the scattering from a **body-centered cubic lattice** with\n paracrystalline distortion. Thermal vibrations are considered to be\n negligible, and the size of the paracrystal is infinitely large.\n Paracrystalline distortion is assumed to be isotropic and characterized\n by a Gaussian distribution.\n \"\"\"\ncategory = 'shape:paracrystal'\nsingle = False\nparameters = [['dnn', 'Ang', 220, [-inf, inf], '',\n 'Nearest neighbour distance'], ['d_factor', '', 0.06, [-inf, inf], '',\n 'Paracrystal distortion factor'], ['radius', 'Ang', 40, [0, inf],\n 'volume', 'Particle radius'], ['sld', '1e-6/Ang^2', 4, [-inf, inf],\n 'sld', 'Particle scattering length density'], ['sld_solvent',\n '1e-6/Ang^2', 1, [-inf, inf], 'sld',\n 'Solvent scattering length density'], ['theta', 'degrees', 60, [-360, \n 360], 'orientation', 'c axis to beam angle'], ['phi', 'degrees', 60, [-\n 360, 360], 'orientation', 'rotation about beam'], ['psi', 'degrees', 60,\n [-360, 360], 'orientation', 'rotation about c axis']]\nsource = ['lib/sas_3j1x_x.c', 'lib/gauss150.c', 'lib/sphere_form.c',\n 'bcc_paracrystal.c']\n\n\ndef random():\n \"\"\"Return a random parameter set for the model.\"\"\"\n radius = 10 ** np.random.uniform(1.3, 4)\n d_factor = 10 ** np.random.uniform(-2, -0.7)\n dnn_fraction = np.random.beta(a=10, b=1)\n dnn = radius * 4 / np.sqrt(3) / dnn_fraction\n pars = dict(dnn=dnn, d_factor=d_factor, radius=radius)\n return pars\n\n\nq = 4.0 * pi / 220.0\ntests = [[{}, [0.001, q, 0.25], [0.6945817843046642, 1.6885157981411993, \n 0.005367008206852725]]]\n",
"step-5": "r\"\"\"\nDefinition\n----------\n\nCalculates the scattering from a **body-centered cubic lattice** with\nparacrystalline distortion. Thermal vibrations are considered to be negligible,\nand the size of the paracrystal is infinitely large. Paracrystalline distortion\nis assumed to be isotropic and characterized by a Gaussian distribution.\n\nThe scattering intensity $I(q)$ is calculated as\n\n.. math::\n\n I(q) = \\frac{\\text{scale}}{V_p} V_\\text{lattice} P(q) Z(q) + \\text{background}\n\nwhere *scale* is the volume fraction of crystal in the sample volume,\n$V_\\text{lattice}$ is the volume fraction of spheres in the crystal, $V_p$ is\nthe volume of the primary particle, $P(q)$ is the form factor of the sphere\n(normalized), and $Z(q)$ is the paracrystalline structure factor for a\nbody-centered cubic structure.\n\n.. note::\n At this point the GUI does not return $V_\\text{lattice}$ separately so that\n the user will need to calculate it from the equation given and the\n appropriate returned parameters.\n\n.. warning::\n As per the equations below, this model will return I(q)=0 for all q if the\n distortion factor is equal to 0. The model is not meant to support perfect\n crystals.\n\n.. figure:: img/bcc_geometry.jpg\n\n Body-centered cubic (BCC) lattice taken from reference [#Matsuoka1987]_.\n\nFollowing the derivation from reference [#Matsuoka1987]_, as corrected in\nreference [#Matsuoka1990]_, and based on the above figure, the\nprimitive unit cell vectors $\\vec{a_1},\\vec{a_2}$, and $\\vec{a_3}$, which\nenclose the smallest possible unit cell for the bcc lattice, are defined below:\n\n.. math::\n \\vec{a_1} &= \\frac{1}{2}(-\\vec{b_1} + \\vec{b_2} + \\vec{b_3}) \\\\\n \\vec{a_2} &= \\frac{1}{2} (\\vec{b_1} - \\vec{b_2} + \\vec{b_3}) \\\\\n \\vec{a_3} &= \\frac{1}{2}(\\vec{b_1} + \\vec{b_2} -\\vec{b_3}).\n\nwhere $\\vec{b_1},\\vec{b_2}$, and $\\vec{b_3}$ are the unit cell vectors of the\nconventional unit cell, which is a unit cell that includes the full symmetry\nof the lattice. As defined by reference [#Matsuoka1987]_, the constant $a$ is the\nlattice parameter of the conventional unit cell with\n$|\\vec{b_1}|=|\\vec{b_2}|=|\\vec{b_3}|=a$. Using this definition, the\nnearest-neighbor distance ($D$) is given by\n$D=|\\vec{a_1}|=|\\vec{a_2}|=|\\vec{a_3}|=\\sqrt{(a/2)^2+(a/2)^2+(a/2)^2}=\\sqrt{\\frac{3a^2}{4}}=\\frac{\\sqrt{3}a}{2}$.\n\nThe volume of the primitive unit cell $V_u$ is then given by:\n\n.. math::\n V_u &= |(\\vec{a_1}\\times \\vec{a_2})\\cdot\\vec{a_3}|\\\\\n &= (\\frac{a^2}{2},\\frac{a^2}{2},0)\\cdot(\\frac{a}{2},\\frac{a}{2},-\\frac{a}{2})\\\\\n &= a^3/2\n\nIn this case, the volume fraction ($V_{lattice}$) of spherical particles with\nradius $R$ sitting on the bcc lattice is given by:\n\n.. math::\n V_{lattice} &= \\frac{4/3 \\pi R^3}{a^3/2}\\\\\n &= \\frac{8\\pi R^3}{3a^3}\\\\\n &= \\frac{\\sqrt{3} \\pi R^3}{D^3}\n\nNow, continuing to follow [#Matsuoka1987]_, the structure (lattice)\nfactor $Z(\\vec{q})$ for a 3D paracrystal can be written as:\n\n.. math::\n Z(\\vec{q}) = \\prod_{k=1}^{3}Z_k(\\vec{q})\n\nwith\n\n.. math::\n Z_k(\\vec{q}) = \\frac{1-|F_k|^2}{1-2|F_k|\\cos(\\vec{a_k}\\cdot\\vec{q})+|F_k|^2}\n\nand where $F_k(\\vec{q})$ is the structure factor of the primitive unit cell\ndefined as:\n\n.. math::\n F_k(\\vec{q}) = e^{-\\frac{1}{2} \\Delta a^2_k q^2} \\times e^{-i\\vec{q}\\cdot\\vec{a_k}}.\n\nHere, $\\vec{a_k}$ are the primitive unit cell vectors $\\vec{a_1}$, $\\vec{a_2}$,\nand $\\vec{a_3}$. 
Furthermore, $\\Delta a_k$ is the isotropic distortion of the\nlattice point from its ideal position and can be defined by a constant factor\n$g=\\Delta a / |\\vec{a_1}| = \\Delta a / |\\vec{a_2}| = \\Delta a / |\\vec{a_3}|=\\Delta a/D$.\n\nFinally, assuming the definitions presented in this document, the authors of\nreference [#Matsuoka1987]_ have derived the lattice factors which are given by:\n\n.. math::\n Z_1(q,\\theta,\\phi)&=[1-e^{-q^2\\Delta a^2}]/\\{1-2e^{-\\frac{1}{2}q^2\\Delta a^2}\\cos[\\frac{qa}{2}(\\sin\\theta \\cos\\phi + \\sin\\theta \\sin\\phi + \\cos\\theta)] + e^{-q^2\\Delta a^2}\\}\\\\\n Z_2(q,\\theta,\\phi)&=[1-e^{-q^2\\Delta a^2}]/\\{1-2e^{-\\frac{1}{2}q^2\\Delta a^2}\\cos[\\frac{qa}{2}(-\\sin\\theta \\cos\\phi - \\sin\\theta \\sin\\phi + \\cos\\theta)] + e^{-q^2\\Delta a^2}\\}\\\\\n Z_3(q,\\theta,\\phi)&=[1-e^{-q^2\\Delta a^2}]/\\{1-2e^{-\\frac{1}{2}q^2\\Delta a^2}\\cos[\\frac{qa}{2}(-\\sin\\theta \\cos\\phi + \\sin\\theta \\sin\\phi - \\cos\\theta)] + e^{-q^2\\Delta a^2}\\}\\\\\n\nNote that Sasview is using the nearest-neighbor parameter ($D$) as an input\ninstead of the conventional unit cell parameter $a$. In this case, using\n$a=\\frac{2D}{\\sqrt{3}}$, we rewrite $Z_1(q)$, $Z_2(q)$, and $Z_3(q)$ in terms\nof $D$ instead of $a$, which leads to:\n\n.. math::\n Z_1(q,\\theta,\\phi)&=[1-e^{-q^2\\Delta a^2}]/\\{1-2e^{-\\frac{1}{2}q^2\\Delta a^2}\\cos[\\frac{qD}{\\sqrt{3}}(\\sin\\theta \\cos\\phi + \\sin\\theta \\sin\\phi + \\cos\\theta)] + e^{-q^2\\Delta a^2}\\}\\\\\n Z_2(q,\\theta,\\phi)&=[1-e^{-q^2\\Delta a^2}]/\\{1-2e^{-\\frac{1}{2}q^2\\Delta a^2}\\cos[\\frac{qD}{\\sqrt{3}}(-\\sin\\theta \\cos\\phi - \\sin\\theta \\sin\\phi + \\cos\\theta)] + e^{-q^2\\Delta a^2}\\}\\\\\n Z_3(q,\\theta,\\phi)&=[1-e^{-q^2\\Delta a^2}]/\\{1-2e^{-\\frac{1}{2}q^2\\Delta a^2}\\cos[\\frac{qD}{\\sqrt{3}}(-\\sin\\theta \\cos\\phi + \\sin\\theta \\sin\\phi - \\cos\\theta)] + e^{-q^2\\Delta a^2}\\}\\\\\n\nFinally note that the position of the Bragg peaks for the bcc lattice are\nindexed by (reduced q-values):\n\n.. math::\n \\frac{qa}{2\\pi}=\\frac{qD}{\\sqrt{3}\\pi}=\\sqrt{h^2+k^2+l^2}.\n\nIn the above equation, we used the conventional unit cell so not all\npermutations of h,k, and l will produce Bragg peaks. The Bragg scattering\ncondition for bcc imposes that h+k+l = even. Thus the peak positions\ncorrespond to (just the first 5)\n\n.. math::\n\n \\begin{array}{lccccc}\n q/q_o & 1 & \\sqrt{2} & \\sqrt{3} & \\sqrt{4} & \\sqrt{5} \\\\\n \\text{Indices} & (110) & (200) & (211) & (220) & (310) \\\\\n \\end{array}\n\n.. note::\n\n The calculation of $Z(q)$ is a double numerical integral that must be\n carried out with a high density of points to properly capture the sharp\n peaks of the paracrystalline scattering. So be warned that the calculation\n is slow. Fitting of any experimental data must be resolution smeared for\n any meaningful fit. This makes a triple integral which may be very slow.\n If a double-precision GPU with OpenCL support is available this may improve\n the speed of the calculation.\n\nThis example dataset is produced using 200 data points,\n*qmin* = 0.001 |Ang^-1|, *qmax* = 0.1 |Ang^-1| and the above default values.\n\nThe 2D (Anisotropic model) is based on the reference below where $I(q)$ is\napproximated for 1d scattering. Thus the scattering pattern for 2D may not be\naccurate, particularly at low $q$. For general details of the calculation and\nangular dispersions for oriented particles see :ref:`orientation`. 
Note that\nwe are not responsible for any incorrectness of the 2D model computation.\n\n.. figure:: img/parallelepiped_angle_definition.png\n\n Orientation of the crystal with respect to the scattering plane, when\n $\\theta = \\phi = 0$ the $c$ axis is along the beam direction (the $z$ axis).\n\nReferences\n----------\n\n.. [#Matsuoka1987] Hideki Matsuoka et. al. *Physical Review B*, 36 (1987)\n 1754-1765 (Original Paper)\n.. [#Matsuoka1990] Hideki Matsuoka et. al. *Physical Review B*, 41 (1990)\n 3854-3856 (Corrections to FCC and BCC lattice structure calculation)\n\nAuthorship and Verification\n---------------------------\n\n* **Author:** NIST IGOR/DANSE **Date:** pre 2010\n* **Last Modified by:** Jonathan Gaudet **Date:** September 26, 2022\n* **Last Reviewed by:** Paul Butler **Date:** November 2, 2022\n\"\"\"\n\nimport numpy as np\nfrom numpy import inf, pi\n\nname = \"bcc_paracrystal\"\ntitle = \"Body-centred cubic lattic with paracrystalline distortion\"\ndescription = \"\"\"\n Calculates the scattering from a **body-centered cubic lattice** with\n paracrystalline distortion. Thermal vibrations are considered to be\n negligible, and the size of the paracrystal is infinitely large.\n Paracrystalline distortion is assumed to be isotropic and characterized\n by a Gaussian distribution.\n \"\"\"\ncategory = \"shape:paracrystal\"\n\n#note - calculation requires double precision\nsingle = False\n\n# pylint: disable=bad-whitespace, line-too-long\n# [\"name\", \"units\", default, [lower, upper], \"type\",\"description\" ],\nparameters = [[\"dnn\", \"Ang\", 220, [-inf, inf], \"\", \"Nearest neighbour distance\"],\n [\"d_factor\", \"\", 0.06, [-inf, inf], \"\", \"Paracrystal distortion factor\"],\n [\"radius\", \"Ang\", 40, [0, inf], \"volume\", \"Particle radius\"],\n [\"sld\", \"1e-6/Ang^2\", 4, [-inf, inf], \"sld\", \"Particle scattering length density\"],\n [\"sld_solvent\", \"1e-6/Ang^2\", 1, [-inf, inf], \"sld\", \"Solvent scattering length density\"],\n [\"theta\", \"degrees\", 60, [-360, 360], \"orientation\", \"c axis to beam angle\"],\n [\"phi\", \"degrees\", 60, [-360, 360], \"orientation\", \"rotation about beam\"],\n [\"psi\", \"degrees\", 60, [-360, 360], \"orientation\", \"rotation about c axis\"]\n ]\n# pylint: enable=bad-whitespace, line-too-long\n\nsource = [\"lib/sas_3j1x_x.c\", \"lib/gauss150.c\", \"lib/sphere_form.c\", \"bcc_paracrystal.c\"]\n\ndef random():\n \"\"\"Return a random parameter set for the model.\"\"\"\n # Define lattice spacing as a multiple of the particle radius\n # using the formula a = 4 r/sqrt(3). Systems which are ordered\n # are probably mostly filled, so use a distribution which goes from\n # zero to one, but leaving 90% of them within 80% of the\n # maximum bcc packing. Lattice distortion values are empirically\n # useful between 0.01 and 0.7. Use an exponential distribution\n # in this range 'cuz its easy.\n radius = 10**np.random.uniform(1.3, 4)\n d_factor = 10**np.random.uniform(-2, -0.7) # sigma_d in 0.01-0.7\n dnn_fraction = np.random.beta(a=10, b=1)\n dnn = radius*4/np.sqrt(3)/dnn_fraction\n pars = dict(\n #sld=1, sld_solvent=0, scale=1, background=1e-32,\n dnn=dnn,\n d_factor=d_factor,\n radius=radius,\n )\n return pars\n\n# april 6 2017, rkh add unit tests, NOT compared with any other calc method, assume correct!\n# add 2d test later\n\n# October 26, 2022 PDB updated the 1D unit test after fixing the math. The values are again\n# assumed correct. It would be good to have an independent assessment. 
2D tests remain\n# on the todo list\n# TODO: fix the 2d tests\nq = 4.*pi/220.\ntests = [\n [{}, [0.001, q, 0.25], [0.6945817843046642, 1.6885157981411993, 0.005367008206852725]],\n #[{'theta': 20.0, 'phi': 30, 'psi': 40.0}, (-0.017, 0.035), 2082.20264399],\n #[{'theta': 20.0, 'phi': 30, 'psi': 40.0}, (-0.081, 0.011), 0.436323144781],\n ]\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
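Editor's aside: the docstring in the record above gives two closed-form relations that are easy to check numerically: the sphere volume fraction on the bcc lattice, V_lattice = sqrt(3)*pi*R^3/D^3, and the allowed Bragg peaks at qD/(sqrt(3)*pi) = sqrt(h^2 + k^2 + l^2) with h + k + l even. The small sketch below uses the record's default parameters (radius = 40 Ang, dnn = 220 Ang); the printed numbers are illustrative only.

# Editor's illustrative sketch: not part of the dataset record above.
import itertools
import math

radius = 40.0  # Ang, default "radius" parameter in the record
dnn = 220.0    # Ang, default "dnn" (nearest-neighbour distance) parameter

# Sphere volume fraction on the bcc lattice: sqrt(3) * pi * R^3 / D^3
v_lattice = math.sqrt(3) * math.pi * radius**3 / dnn**3
print(f"V_lattice ~ {v_lattice:.4f}")  # about 0.033 for the defaults

# Allowed bcc reflections have h + k + l even; peak positions scale with
# sqrt(h^2 + k^2 + l^2), normalised here to the first (110) peak.
sums = sorted({h*h + k*k + l*l
               for h, k, l in itertools.product(range(4), repeat=3)
               if (h + k + l) % 2 == 0 and (h, k, l) != (0, 0, 0)})
first = math.sqrt(sums[0])
print([round(math.sqrt(s) / first, 3) for s in sums[:5]])
# -> [1.0, 1.414, 1.732, 2.0, 2.236], i.e. 1, sqrt(2), sqrt(3), sqrt(4), sqrt(5)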