code (string, lengths 13–1.2M) | order_type (string, 1 value) | original_example (dict) | step_ids (list, lengths 1–5)
---|---|---|---|
class Thing3:
    def __init__(self):
        self.letters = 'xyz'

# print(Thing3.letters)
th = Thing3()
print(th.letters)
|
normal
|
{
"blob_id": "22bf65a20f7398b82f528112d2ba50f1dccd465c",
"index": 6487,
"step-1": "class Thing3:\n <mask token>\n\n\n<mask token>\n",
"step-2": "class Thing3:\n\n def __init__(self):\n self.letters = 'xyz'\n\n\n<mask token>\n",
"step-3": "class Thing3:\n\n def __init__(self):\n self.letters = 'xyz'\n\n\n<mask token>\nprint(th.letters)\n",
"step-4": "class Thing3:\n\n def __init__(self):\n self.letters = 'xyz'\n\n\nth = Thing3()\nprint(th.letters)\n",
"step-5": "\nclass Thing3:\n def __init__(self):\n self.letters = 'xyz'\n\n# print(Thing3.letters)\nth = Thing3()\nprint(th.letters)",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
# Copyright (c) 2019 Uber Technologies, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import random as pyrandom
import warnings
import numpy as np
from hypothesis import given
from hypothesis.strategies import dictionaries, floats, lists, text, tuples
import bayesmark.signatures as ss
from bayesmark.experiment import OBJECTIVE_NAMES
from util import space_configs
N_SIG = ss.N_SUGGESTIONS
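
# Hypothesis strategies below: bsigs() draws a single signature (exactly N_SIG
# finite floats), sigs() draws a non-empty batch of them, and sig_pair() builds a
# pair of dicts in which every key has a reference signature and some keys also
# carry a test signature.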
def bsigs():
    S = lists(floats(allow_infinity=False, allow_nan=False), min_size=N_SIG, max_size=N_SIG)
    return S


def sigs():
    S = lists(bsigs(), min_size=1)
    return S


def sig_pair():
    def separate(D):
        signatures, signatures_ref = {}, {}
        for kk in D:
            if len(D[kk]) == 1:
                v_ref, = D[kk]
                signatures_ref[kk] = np.asarray(v_ref)
            elif len(D[kk]) == 2:
                v, v_ref = D[kk]
                signatures[kk] = np.asarray(v)
                signatures_ref[kk] = np.asarray(v_ref)
            else:
                assert False
        return signatures, signatures_ref

    sig_dict = dictionaries(text(), tuples(bsigs()) | tuples(bsigs(), bsigs()))
    S = sig_dict.map(separate)
    return S


def some_mock_f(x):
    """Some arbitrary deterministic test function.
    """
    random_stream = pyrandom.Random(json.dumps(x, sort_keys=True))
    y = [random_stream.gauss(0, 1) for _ in OBJECTIVE_NAMES]
    return y


@given(space_configs())
def test_get_func_signature(api_config):
    api_config, _, _, _ = api_config

    signature_x, signature_y = ss.get_func_signature(some_mock_f, api_config)


@given(dictionaries(text(), sigs()))
def test_analyze_signatures(signatures):
    with warnings.catch_warnings():
        warnings.filterwarnings("ignore", category=RuntimeWarning)
        sig_errs, signatures_median = ss.analyze_signatures(signatures)


@given(sig_pair())
def test_analyze_signature_pair(args):
    signatures, signatures_ref = args
    with warnings.catch_warnings():
        warnings.filterwarnings("ignore", category=RuntimeWarning)
        sig_errs, signatures_pair = ss.analyze_signature_pair(signatures, signatures_ref)
|
normal
|
{
"blob_id": "64b254db6d8f352b2689385e70f2ea7d972c9191",
"index": 4797,
"step-1": "<mask token>\n\n\ndef bsigs():\n S = lists(floats(allow_infinity=False, allow_nan=False), min_size=N_SIG,\n max_size=N_SIG)\n return S\n\n\ndef sigs():\n S = lists(bsigs(), min_size=1)\n return S\n\n\n<mask token>\n\n\n@given(space_configs())\ndef test_get_func_signature(api_config):\n api_config, _, _, _ = api_config\n signature_x, signature_y = ss.get_func_signature(some_mock_f, api_config)\n\n\n<mask token>\n\n\n@given(sig_pair())\ndef test_analyze_signature_pair(args):\n signatures, signatures_ref = args\n with warnings.catch_warnings():\n warnings.filterwarnings('ignore', category=RuntimeWarning)\n sig_errs, signatures_pair = ss.analyze_signature_pair(signatures,\n signatures_ref)\n",
"step-2": "<mask token>\n\n\ndef bsigs():\n S = lists(floats(allow_infinity=False, allow_nan=False), min_size=N_SIG,\n max_size=N_SIG)\n return S\n\n\ndef sigs():\n S = lists(bsigs(), min_size=1)\n return S\n\n\ndef sig_pair():\n\n def separate(D):\n signatures, signatures_ref = {}, {}\n for kk in D:\n if len(D[kk]) == 1:\n v_ref, = D[kk]\n signatures_ref[kk] = np.asarray(v_ref)\n elif len(D[kk]) == 2:\n v, v_ref = D[kk]\n signatures[kk] = np.asarray(v)\n signatures_ref[kk] = np.asarray(v_ref)\n else:\n assert False\n return signatures, signatures_ref\n sig_dict = dictionaries(text(), tuples(bsigs()) | tuples(bsigs(), bsigs()))\n S = sig_dict.map(separate)\n return S\n\n\n<mask token>\n\n\n@given(space_configs())\ndef test_get_func_signature(api_config):\n api_config, _, _, _ = api_config\n signature_x, signature_y = ss.get_func_signature(some_mock_f, api_config)\n\n\n@given(dictionaries(text(), sigs()))\ndef test_analyze_signatures(signatures):\n with warnings.catch_warnings():\n warnings.filterwarnings('ignore', category=RuntimeWarning)\n sig_errs, signatures_median = ss.analyze_signatures(signatures)\n\n\n@given(sig_pair())\ndef test_analyze_signature_pair(args):\n signatures, signatures_ref = args\n with warnings.catch_warnings():\n warnings.filterwarnings('ignore', category=RuntimeWarning)\n sig_errs, signatures_pair = ss.analyze_signature_pair(signatures,\n signatures_ref)\n",
"step-3": "<mask token>\n\n\ndef bsigs():\n S = lists(floats(allow_infinity=False, allow_nan=False), min_size=N_SIG,\n max_size=N_SIG)\n return S\n\n\ndef sigs():\n S = lists(bsigs(), min_size=1)\n return S\n\n\ndef sig_pair():\n\n def separate(D):\n signatures, signatures_ref = {}, {}\n for kk in D:\n if len(D[kk]) == 1:\n v_ref, = D[kk]\n signatures_ref[kk] = np.asarray(v_ref)\n elif len(D[kk]) == 2:\n v, v_ref = D[kk]\n signatures[kk] = np.asarray(v)\n signatures_ref[kk] = np.asarray(v_ref)\n else:\n assert False\n return signatures, signatures_ref\n sig_dict = dictionaries(text(), tuples(bsigs()) | tuples(bsigs(), bsigs()))\n S = sig_dict.map(separate)\n return S\n\n\ndef some_mock_f(x):\n \"\"\"Some arbitrary deterministic test function.\n \"\"\"\n random_stream = pyrandom.Random(json.dumps(x, sort_keys=True))\n y = [random_stream.gauss(0, 1) for _ in OBJECTIVE_NAMES]\n return y\n\n\n@given(space_configs())\ndef test_get_func_signature(api_config):\n api_config, _, _, _ = api_config\n signature_x, signature_y = ss.get_func_signature(some_mock_f, api_config)\n\n\n@given(dictionaries(text(), sigs()))\ndef test_analyze_signatures(signatures):\n with warnings.catch_warnings():\n warnings.filterwarnings('ignore', category=RuntimeWarning)\n sig_errs, signatures_median = ss.analyze_signatures(signatures)\n\n\n@given(sig_pair())\ndef test_analyze_signature_pair(args):\n signatures, signatures_ref = args\n with warnings.catch_warnings():\n warnings.filterwarnings('ignore', category=RuntimeWarning)\n sig_errs, signatures_pair = ss.analyze_signature_pair(signatures,\n signatures_ref)\n",
"step-4": "import json\nimport random as pyrandom\nimport warnings\nimport numpy as np\nfrom hypothesis import given\nfrom hypothesis.strategies import dictionaries, floats, lists, text, tuples\nimport bayesmark.signatures as ss\nfrom bayesmark.experiment import OBJECTIVE_NAMES\nfrom util import space_configs\nN_SIG = ss.N_SUGGESTIONS\n\n\ndef bsigs():\n S = lists(floats(allow_infinity=False, allow_nan=False), min_size=N_SIG,\n max_size=N_SIG)\n return S\n\n\ndef sigs():\n S = lists(bsigs(), min_size=1)\n return S\n\n\ndef sig_pair():\n\n def separate(D):\n signatures, signatures_ref = {}, {}\n for kk in D:\n if len(D[kk]) == 1:\n v_ref, = D[kk]\n signatures_ref[kk] = np.asarray(v_ref)\n elif len(D[kk]) == 2:\n v, v_ref = D[kk]\n signatures[kk] = np.asarray(v)\n signatures_ref[kk] = np.asarray(v_ref)\n else:\n assert False\n return signatures, signatures_ref\n sig_dict = dictionaries(text(), tuples(bsigs()) | tuples(bsigs(), bsigs()))\n S = sig_dict.map(separate)\n return S\n\n\ndef some_mock_f(x):\n \"\"\"Some arbitrary deterministic test function.\n \"\"\"\n random_stream = pyrandom.Random(json.dumps(x, sort_keys=True))\n y = [random_stream.gauss(0, 1) for _ in OBJECTIVE_NAMES]\n return y\n\n\n@given(space_configs())\ndef test_get_func_signature(api_config):\n api_config, _, _, _ = api_config\n signature_x, signature_y = ss.get_func_signature(some_mock_f, api_config)\n\n\n@given(dictionaries(text(), sigs()))\ndef test_analyze_signatures(signatures):\n with warnings.catch_warnings():\n warnings.filterwarnings('ignore', category=RuntimeWarning)\n sig_errs, signatures_median = ss.analyze_signatures(signatures)\n\n\n@given(sig_pair())\ndef test_analyze_signature_pair(args):\n signatures, signatures_ref = args\n with warnings.catch_warnings():\n warnings.filterwarnings('ignore', category=RuntimeWarning)\n sig_errs, signatures_pair = ss.analyze_signature_pair(signatures,\n signatures_ref)\n",
"step-5": "# Copyright (c) 2019 Uber Technologies, Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nimport json\nimport random as pyrandom\nimport warnings\n\nimport numpy as np\nfrom hypothesis import given\nfrom hypothesis.strategies import dictionaries, floats, lists, text, tuples\n\nimport bayesmark.signatures as ss\nfrom bayesmark.experiment import OBJECTIVE_NAMES\nfrom util import space_configs\n\nN_SIG = ss.N_SUGGESTIONS\n\n\ndef bsigs():\n S = lists(floats(allow_infinity=False, allow_nan=False), min_size=N_SIG, max_size=N_SIG)\n return S\n\n\ndef sigs():\n S = lists(bsigs(), min_size=1)\n return S\n\n\ndef sig_pair():\n def separate(D):\n signatures, signatures_ref = {}, {}\n for kk in D:\n if len(D[kk]) == 1:\n v_ref, = D[kk]\n signatures_ref[kk] = np.asarray(v_ref)\n elif len(D[kk]) == 2:\n v, v_ref = D[kk]\n signatures[kk] = np.asarray(v)\n signatures_ref[kk] = np.asarray(v_ref)\n else:\n assert False\n return signatures, signatures_ref\n\n sig_dict = dictionaries(text(), tuples(bsigs()) | tuples(bsigs(), bsigs()))\n S = sig_dict.map(separate)\n return S\n\n\ndef some_mock_f(x):\n \"\"\"Some arbitrary deterministic test function.\n \"\"\"\n random_stream = pyrandom.Random(json.dumps(x, sort_keys=True))\n y = [random_stream.gauss(0, 1) for _ in OBJECTIVE_NAMES]\n return y\n\n\n@given(space_configs())\ndef test_get_func_signature(api_config):\n api_config, _, _, _ = api_config\n\n signature_x, signature_y = ss.get_func_signature(some_mock_f, api_config)\n\n\n@given(dictionaries(text(), sigs()))\ndef test_analyze_signatures(signatures):\n with warnings.catch_warnings():\n warnings.filterwarnings(\"ignore\", category=RuntimeWarning)\n sig_errs, signatures_median = ss.analyze_signatures(signatures)\n\n\n@given(sig_pair())\ndef test_analyze_signature_pair(args):\n signatures, signatures_ref = args\n with warnings.catch_warnings():\n warnings.filterwarnings(\"ignore\", category=RuntimeWarning)\n sig_errs, signatures_pair = ss.analyze_signature_pair(signatures, signatures_ref)\n",
"step-ids": [
4,
6,
7,
9,
10
]
}
|
[
4,
6,
7,
9,
10
] |
from time import sleep
import RPi.GPIO as gpio
#GPIO.setmode(GPIO.BCM)
gpio.setwarnings(False)
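
# BCM pins 26/19 and 13/6 are switched in complementary pairs below; they are
# assumed to be the direction inputs of a two-motor H-bridge driver, so each
# output pattern selects one drive direction.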
def init():
    gpio.setmode(gpio.BCM)
    gpio.setup(26, gpio.OUT)
    gpio.setup(19, gpio.OUT)
    gpio.setup(13, gpio.OUT)
    gpio.setup(6, gpio.OUT)

def turn_left(tf):
    gpio.output(26, False)
    gpio.output(19, True)
    gpio.output(13, False)
    gpio.output(6, True)
    sleep(tf)

def turn_right(tf):
    gpio.output(26, True)
    gpio.output(19, False)
    gpio.output(13, True)
    gpio.output(6, False)
    sleep(tf)

def forward(tf):
    gpio.output(26, True)
    gpio.output(19, False)
    gpio.output(13, False)
    gpio.output(6, True)
    sleep(tf)

def reverse(tf):
    gpio.output(26, False)
    gpio.output(19, True)
    gpio.output(13, True)
    gpio.output(6, False)
    sleep(tf)

def stop(tf):
    gpio.output(26, False)
    gpio.output(19, False)
    gpio.output(13, False)
    gpio.output(6, False)
    sleep(tf)
    gpio.cleanup()

def drive(direction, tym):
    init()
    if direction == "forward":
        forward(tym)
        stop(tym)
    elif direction == "reverse":
        reverse(tym)
        stop(tym)
    elif direction == "left":
        turn_left(tym)
        stop(tym)
    elif direction == "right":
        turn_right(tym)
        stop(tym)
    elif direction == "stop":
        stop(tym)
    else:
        stop(tym)

if __name__ == '__main__':
    import sys
    drive(sys.argv[1], float(sys.argv[2]))
    gpio.cleanup()

##
##init()
##forward(0.6)
##sleep(1)
##reverse(0.6)
##sleep(1)
##turn_right(0.6)
##sleep(1)
##turn_left(0.6)
##stop(1)
|
normal
|
{
"blob_id": "a7cbd595b86908fb399bf11e1522588e0b0475c3",
"index": 9226,
"step-1": "<mask token>\n\n\ndef init():\n gpio.setmode(gpio.BCM)\n gpio.setup(26, gpio.OUT)\n gpio.setup(19, gpio.OUT)\n gpio.setup(13, gpio.OUT)\n gpio.setup(6, gpio.OUT)\n\n\ndef turn_left(tf):\n gpio.output(26, False)\n gpio.output(19, True)\n gpio.output(13, False)\n gpio.output(6, True)\n sleep(tf)\n\n\n<mask token>\n\n\ndef forward(tf):\n gpio.output(26, True)\n gpio.output(19, False)\n gpio.output(13, False)\n gpio.output(6, True)\n sleep(tf)\n\n\n<mask token>\n\n\ndef stop(tf):\n gpio.output(26, False)\n gpio.output(19, False)\n gpio.output(13, False)\n gpio.output(6, False)\n sleep(tf)\n gpio.cleanup()\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef init():\n gpio.setmode(gpio.BCM)\n gpio.setup(26, gpio.OUT)\n gpio.setup(19, gpio.OUT)\n gpio.setup(13, gpio.OUT)\n gpio.setup(6, gpio.OUT)\n\n\ndef turn_left(tf):\n gpio.output(26, False)\n gpio.output(19, True)\n gpio.output(13, False)\n gpio.output(6, True)\n sleep(tf)\n\n\ndef turn_right(tf):\n gpio.output(26, True)\n gpio.output(19, False)\n gpio.output(13, True)\n gpio.output(6, False)\n sleep(tf)\n\n\ndef forward(tf):\n gpio.output(26, True)\n gpio.output(19, False)\n gpio.output(13, False)\n gpio.output(6, True)\n sleep(tf)\n\n\n<mask token>\n\n\ndef stop(tf):\n gpio.output(26, False)\n gpio.output(19, False)\n gpio.output(13, False)\n gpio.output(6, False)\n sleep(tf)\n gpio.cleanup()\n\n\ndef drive(direction, tym):\n init()\n if direction == 'forward':\n forward(tym)\n stop(tym)\n elif direction == 'reverse':\n reverse(tym)\n stop(tym)\n elif direction == 'left':\n turn_left(tym)\n stop(tym)\n elif direction == 'right':\n turn_right(tym)\n stop(tym)\n elif direction == 'stop':\n stop(tym)\n else:\n stop(tym)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef init():\n gpio.setmode(gpio.BCM)\n gpio.setup(26, gpio.OUT)\n gpio.setup(19, gpio.OUT)\n gpio.setup(13, gpio.OUT)\n gpio.setup(6, gpio.OUT)\n\n\ndef turn_left(tf):\n gpio.output(26, False)\n gpio.output(19, True)\n gpio.output(13, False)\n gpio.output(6, True)\n sleep(tf)\n\n\ndef turn_right(tf):\n gpio.output(26, True)\n gpio.output(19, False)\n gpio.output(13, True)\n gpio.output(6, False)\n sleep(tf)\n\n\ndef forward(tf):\n gpio.output(26, True)\n gpio.output(19, False)\n gpio.output(13, False)\n gpio.output(6, True)\n sleep(tf)\n\n\ndef reverse(tf):\n gpio.output(26, False)\n gpio.output(19, True)\n gpio.output(13, True)\n gpio.output(6, False)\n sleep(tf)\n\n\ndef stop(tf):\n gpio.output(26, False)\n gpio.output(19, False)\n gpio.output(13, False)\n gpio.output(6, False)\n sleep(tf)\n gpio.cleanup()\n\n\ndef drive(direction, tym):\n init()\n if direction == 'forward':\n forward(tym)\n stop(tym)\n elif direction == 'reverse':\n reverse(tym)\n stop(tym)\n elif direction == 'left':\n turn_left(tym)\n stop(tym)\n elif direction == 'right':\n turn_right(tym)\n stop(tym)\n elif direction == 'stop':\n stop(tym)\n else:\n stop(tym)\n\n\n<mask token>\n",
"step-4": "<mask token>\ngpio.setwarnings(False)\n\n\ndef init():\n gpio.setmode(gpio.BCM)\n gpio.setup(26, gpio.OUT)\n gpio.setup(19, gpio.OUT)\n gpio.setup(13, gpio.OUT)\n gpio.setup(6, gpio.OUT)\n\n\ndef turn_left(tf):\n gpio.output(26, False)\n gpio.output(19, True)\n gpio.output(13, False)\n gpio.output(6, True)\n sleep(tf)\n\n\ndef turn_right(tf):\n gpio.output(26, True)\n gpio.output(19, False)\n gpio.output(13, True)\n gpio.output(6, False)\n sleep(tf)\n\n\ndef forward(tf):\n gpio.output(26, True)\n gpio.output(19, False)\n gpio.output(13, False)\n gpio.output(6, True)\n sleep(tf)\n\n\ndef reverse(tf):\n gpio.output(26, False)\n gpio.output(19, True)\n gpio.output(13, True)\n gpio.output(6, False)\n sleep(tf)\n\n\ndef stop(tf):\n gpio.output(26, False)\n gpio.output(19, False)\n gpio.output(13, False)\n gpio.output(6, False)\n sleep(tf)\n gpio.cleanup()\n\n\ndef drive(direction, tym):\n init()\n if direction == 'forward':\n forward(tym)\n stop(tym)\n elif direction == 'reverse':\n reverse(tym)\n stop(tym)\n elif direction == 'left':\n turn_left(tym)\n stop(tym)\n elif direction == 'right':\n turn_right(tym)\n stop(tym)\n elif direction == 'stop':\n stop(tym)\n else:\n stop(tym)\n\n\nif __name__ == '__main__':\n import sys\n drive(sys.argv[1], float(sys.argv[2]))\n gpio.cleanup()\n",
"step-5": "from time import sleep\nimport RPi.GPIO as gpio\n#GPIO.setmode(GPIO.BCM)\ngpio.setwarnings(False)\n\ndef init():\n gpio.setmode(gpio.BCM)\n gpio.setup(26, gpio.OUT)\n gpio.setup(19, gpio.OUT)\n gpio.setup(13, gpio.OUT)\n gpio.setup(6, gpio.OUT)\n\ndef turn_left(tf):\n gpio.output(26, False)\n gpio.output(19, True)\n gpio.output(13, False)\n gpio.output(6, True)\n sleep(tf)\n \ndef turn_right(tf):\n gpio.output(26, True)\n gpio.output(19, False)\n gpio.output(13, True)\n gpio.output(6, False)\n sleep(tf)\n \ndef forward(tf):\n gpio.output(26, True)\n gpio.output(19, False)\n gpio.output(13, False)\n gpio.output(6, True)\n sleep(tf)\n \ndef reverse(tf):\n gpio.output(26, False)\n gpio.output(19, True)\n gpio.output(13, True)\n gpio.output(6, False)\n sleep(tf)\n\ndef stop(tf):\n gpio.output(26, False)\n gpio.output(19, False)\n gpio.output(13, False)\n gpio.output(6, False)\n sleep(tf)\n gpio.cleanup()\n \ndef drive(direction, tym):\n init()\n \n if direction == \"forward\":\n forward(tym)\n stop(tym)\n \n elif direction == \"reverse\":\n reverse(tym)\n stop(tym)\n\n elif direction == \"left\":\n turn_left(tym)\n stop(tym)\n\n elif direction == \"right\":\n turn_right(tym)\n stop(tym)\n\n elif direction == \"stop\":\n stop(tym)\n\n else :\n stop(tym)\n\n\n\nif __name__ == '__main__':\n\timport sys\n\tdrive((sys.argv[1]), float(sys.argv[2]))\n\tgpio.cleanup()\n\n##\n##init()\n##forward(0.6)\n##sleep(1)\n##reverse(0.6)\n##sleep(1)\n##turn_right(0.6)\n##sleep(1)\n##turn_left(0.6)\n##stop(1)\n",
"step-ids": [
4,
6,
7,
8,
10
]
}
|
[
4,
6,
7,
8,
10
] |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os.path
import re
import sys
import tarfile
import numpy as np
from six.moves import urllib
import tensorflow as tf
from sklearn.model_selection import train_test_split
from sklearn import datasets
from sklearn import svm
from sklearn.svm import SVC
from sklearn.neural_network import MLPClassifier
from sklearn.linear_model import LogisticRegression
from numpy import genfromtxt
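
# Pipeline: the pre-trained Inception graph's 'pool_3' layer serves as a fixed
# 2048-dimensional feature extractor, and an SVM is then trained on those
# features to produce the final predictions.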
def create_graph():
    """Creates a graph from saved GraphDef file and returns a saver."""
    # Creates graph from saved graph_def.pb.
    with tf.gfile.FastGFile(os.path.join(
            '/home/ubuntu/hdd/tensorFlowDic/', 'classify_image_graph_def.pb'), 'rb') as f:
        graph_def = tf.GraphDef()
        graph_def.ParseFromString(f.read())
        _ = tf.import_graph_def(graph_def, name='')


def feature_extraction(image):
    image_data = tf.gfile.FastGFile(image, 'rb').read()
    with tf.Session() as sess:
        softmax_tensor = sess.graph.get_tensor_by_name('pool_3:0')
        predictions = sess.run(softmax_tensor, {'DecodeJpeg/contents:0': image_data})
        predictions = np.squeeze(predictions)
        return predictions


# create_graph()
# input_x = np.zeros((0, 2048))
# for i in range(1, 7001):
#     imageName = str(i).zfill(5)
#     image = '/home/ubuntu/caffe/examples/images/joey/' + imageName + ".jpg"
#     pre = feature_extraction(image)
#     print("Finish extracting features of training image " + image)
#     input_x = np.vstack((input_x, pre))
# print(input_x.shape)

test_x = np.zeros((0, 2048))
for i in range(1, 971):
    imageName = str(i).zfill(5)
    image = '/home/ubuntu/caffe/examples/images/val/' + imageName + ".jpg"
    pre = feature_extraction(image)
    print("Finish extracting features of test image " + image)
    test_x = np.vstack((test_x, pre))
print(test_x.shape)

input_label = genfromtxt('/home/ubuntu/caffe/examples/images/Files/train.csv', delimiter=',')
input_label = input_label[1:7001, 1].reshape(-1)
# np.load() on an .npz archive returns a keyed NpzFile rather than an array; the
# array saved by np.savez_compressed("tensorFlow_train", input_x) is stored under
# the default key 'arr_0', so it must be indexed to recover the feature matrix.
input_x = np.load("tensorFlow_train.npz")["arr_0"]
# np.load("tensorFlow_test.npz")

print('input_x shape ', input_x.shape)
print('input_label shape ', input_label.shape)

# np.savez_compressed("tensorFlow_train", input_x)
np.savez_compressed("tensorFlow_test", test_x)

X_train, X_test, y_train, y_test = train_test_split(input_x, input_label, test_size=0.1, random_state=42)

clf = SVC(C=500.0, decision_function_shape='ovr', max_iter=-1, probability=False)
clf.fit(X_train, y_train)
print('training accuracy is', clf.score(X_train, y_train))
print('validation accuracy is', clf.score(X_test, y_test))

clf = SVC(C=500.0, decision_function_shape='ovr', max_iter=-1, probability=False)
clf.fit(input_x, input_label)

y_pred = clf.predict(test_x)
filename = "predict_inception_v3.csv"
f = open(filename, "w")
f.write('Id,Prediction\n')

if len(y_pred) < 1000:
    zeros = np.zeros(2000)
    y_pred = np.append(y_pred, zeros).reshape(-1)

for i in range(0, len(y_pred)):
    d = '{0},{1}\n'.format(i + 1, int(y_pred[i]))
    f.write(d)
|
normal
|
{
"blob_id": "8ef20a7a93d6affabe88dad4e5d19613fe47dd0f",
"index": 5399,
"step-1": "<mask token>\n\n\ndef create_graph():\n \"\"\"Creates a graph from saved GraphDef file and returns a saver.\"\"\"\n with tf.gfile.FastGFile(os.path.join('/home/ubuntu/hdd/tensorFlowDic/',\n 'classify_image_graph_def.pb'), 'rb') as f:\n graph_def = tf.GraphDef()\n graph_def.ParseFromString(f.read())\n _ = tf.import_graph_def(graph_def, name='')\n\n\ndef feature_extraction(image):\n image_data = tf.gfile.FastGFile(image, 'rb').read()\n with tf.Session() as sess:\n softmax_tensor = sess.graph.get_tensor_by_name('pool_3:0')\n predictions = sess.run(softmax_tensor, {'DecodeJpeg/contents:0':\n image_data})\n predictions = np.squeeze(predictions)\n return predictions\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef create_graph():\n \"\"\"Creates a graph from saved GraphDef file and returns a saver.\"\"\"\n with tf.gfile.FastGFile(os.path.join('/home/ubuntu/hdd/tensorFlowDic/',\n 'classify_image_graph_def.pb'), 'rb') as f:\n graph_def = tf.GraphDef()\n graph_def.ParseFromString(f.read())\n _ = tf.import_graph_def(graph_def, name='')\n\n\ndef feature_extraction(image):\n image_data = tf.gfile.FastGFile(image, 'rb').read()\n with tf.Session() as sess:\n softmax_tensor = sess.graph.get_tensor_by_name('pool_3:0')\n predictions = sess.run(softmax_tensor, {'DecodeJpeg/contents:0':\n image_data})\n predictions = np.squeeze(predictions)\n return predictions\n\n\n<mask token>\nfor i in range(1, 971):\n imageName = str(i).zfill(5)\n image = '/home/ubuntu/caffe/examples/images/val/' + imageName + '.jpg'\n pre = feature_extraction(image)\n print('Finish extracting features of test image ' + image)\n test_x = np.vstack((test_x, pre))\nprint(test_x.shape)\n<mask token>\nprint('input_x shape ', input_x.shape)\nprint('input_label shape ', input_label.shape)\nnp.savez_compressed('tensorFlow_test', test_x)\n<mask token>\nclf.fit(X_train, y_train)\nprint('training accuracy is', clf.score(X_train, y_train))\nprint('validation accuracy is', clf.score(X_test, y_test))\n<mask token>\nclf.fit(input_x, input_label)\n<mask token>\nf.write('Id,Prediction\\n')\nif len(y_pred) < 1000:\n zeros = np.zeros(2000)\n y_pred = np.append(y_pred, zeros).reshape(-1)\nfor i in range(0, len(y_pred)):\n d = '{0},{1}\\n'.format(i + 1, int(y_pred[i]))\n f.write(d)\n",
"step-3": "<mask token>\n\n\ndef create_graph():\n \"\"\"Creates a graph from saved GraphDef file and returns a saver.\"\"\"\n with tf.gfile.FastGFile(os.path.join('/home/ubuntu/hdd/tensorFlowDic/',\n 'classify_image_graph_def.pb'), 'rb') as f:\n graph_def = tf.GraphDef()\n graph_def.ParseFromString(f.read())\n _ = tf.import_graph_def(graph_def, name='')\n\n\ndef feature_extraction(image):\n image_data = tf.gfile.FastGFile(image, 'rb').read()\n with tf.Session() as sess:\n softmax_tensor = sess.graph.get_tensor_by_name('pool_3:0')\n predictions = sess.run(softmax_tensor, {'DecodeJpeg/contents:0':\n image_data})\n predictions = np.squeeze(predictions)\n return predictions\n\n\ntest_x = np.zeros((0, 2048))\nfor i in range(1, 971):\n imageName = str(i).zfill(5)\n image = '/home/ubuntu/caffe/examples/images/val/' + imageName + '.jpg'\n pre = feature_extraction(image)\n print('Finish extracting features of test image ' + image)\n test_x = np.vstack((test_x, pre))\nprint(test_x.shape)\ninput_label = genfromtxt('/home/ubuntu/caffe/examples/images/Files/train.csv',\n delimiter=',')\ninput_label = input_label[1:7001, 1].reshape(-1)\ninput_x = np.load('tensorFlow_train.npz')\nprint('input_x shape ', input_x.shape)\nprint('input_label shape ', input_label.shape)\nnp.savez_compressed('tensorFlow_test', test_x)\nX_train, X_test, y_train, y_test = train_test_split(input_x, input_label,\n test_size=0.1, random_state=42)\nclf = SVC(C=500.0, decision_function_shape='ovr', max_iter=-1, probability=\n False)\nclf.fit(X_train, y_train)\nprint('training accuracy is', clf.score(X_train, y_train))\nprint('validation accuracy is', clf.score(X_test, y_test))\nclf = SVC(C=500.0, decision_function_shape='ovr', max_iter=-1, probability=\n False)\nclf.fit(input_x, input_label)\ny_pred = clf.predict(test_x)\nfilename = 'predict_inception_v3.csv'\nf = open(filename, 'w')\nf.write('Id,Prediction\\n')\nif len(y_pred) < 1000:\n zeros = np.zeros(2000)\n y_pred = np.append(y_pred, zeros).reshape(-1)\nfor i in range(0, len(y_pred)):\n d = '{0},{1}\\n'.format(i + 1, int(y_pred[i]))\n f.write(d)\n",
"step-4": "from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nimport os.path\nimport re\nimport sys\nimport tarfile\nimport numpy as np\nfrom six.moves import urllib\nimport tensorflow as tf\nfrom sklearn.model_selection import train_test_split\nfrom sklearn import datasets\nfrom sklearn import svm\nfrom sklearn.svm import SVC\nfrom sklearn.neural_network import MLPClassifier\nfrom sklearn.linear_model import LogisticRegression\nfrom numpy import genfromtxt\n\n\ndef create_graph():\n \"\"\"Creates a graph from saved GraphDef file and returns a saver.\"\"\"\n with tf.gfile.FastGFile(os.path.join('/home/ubuntu/hdd/tensorFlowDic/',\n 'classify_image_graph_def.pb'), 'rb') as f:\n graph_def = tf.GraphDef()\n graph_def.ParseFromString(f.read())\n _ = tf.import_graph_def(graph_def, name='')\n\n\ndef feature_extraction(image):\n image_data = tf.gfile.FastGFile(image, 'rb').read()\n with tf.Session() as sess:\n softmax_tensor = sess.graph.get_tensor_by_name('pool_3:0')\n predictions = sess.run(softmax_tensor, {'DecodeJpeg/contents:0':\n image_data})\n predictions = np.squeeze(predictions)\n return predictions\n\n\ntest_x = np.zeros((0, 2048))\nfor i in range(1, 971):\n imageName = str(i).zfill(5)\n image = '/home/ubuntu/caffe/examples/images/val/' + imageName + '.jpg'\n pre = feature_extraction(image)\n print('Finish extracting features of test image ' + image)\n test_x = np.vstack((test_x, pre))\nprint(test_x.shape)\ninput_label = genfromtxt('/home/ubuntu/caffe/examples/images/Files/train.csv',\n delimiter=',')\ninput_label = input_label[1:7001, 1].reshape(-1)\ninput_x = np.load('tensorFlow_train.npz')\nprint('input_x shape ', input_x.shape)\nprint('input_label shape ', input_label.shape)\nnp.savez_compressed('tensorFlow_test', test_x)\nX_train, X_test, y_train, y_test = train_test_split(input_x, input_label,\n test_size=0.1, random_state=42)\nclf = SVC(C=500.0, decision_function_shape='ovr', max_iter=-1, probability=\n False)\nclf.fit(X_train, y_train)\nprint('training accuracy is', clf.score(X_train, y_train))\nprint('validation accuracy is', clf.score(X_test, y_test))\nclf = SVC(C=500.0, decision_function_shape='ovr', max_iter=-1, probability=\n False)\nclf.fit(input_x, input_label)\ny_pred = clf.predict(test_x)\nfilename = 'predict_inception_v3.csv'\nf = open(filename, 'w')\nf.write('Id,Prediction\\n')\nif len(y_pred) < 1000:\n zeros = np.zeros(2000)\n y_pred = np.append(y_pred, zeros).reshape(-1)\nfor i in range(0, len(y_pred)):\n d = '{0},{1}\\n'.format(i + 1, int(y_pred[i]))\n f.write(d)\n",
"step-5": "from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport os.path\nimport re\nimport sys\nimport tarfile\n\nimport numpy as np\nfrom six.moves import urllib\nimport tensorflow as tf\n\nfrom sklearn.model_selection import train_test_split\nfrom sklearn import datasets\nfrom sklearn import svm\nfrom sklearn.svm import SVC\nfrom sklearn.neural_network import MLPClassifier\nfrom sklearn.linear_model import LogisticRegression\nfrom numpy import genfromtxt\n\ndef create_graph():\n \"\"\"Creates a graph from saved GraphDef file and returns a saver.\"\"\"\n # Creates graph from saved graph_def.pb.\n with tf.gfile.FastGFile(os.path.join(\n '/home/ubuntu/hdd/tensorFlowDic/', 'classify_image_graph_def.pb'), 'rb') as f:\n graph_def = tf.GraphDef()\n graph_def.ParseFromString(f.read())\n _ = tf.import_graph_def(graph_def, name='')\n\ndef feature_extraction(image):\n\n\timage_data = tf.gfile.FastGFile(image, 'rb').read()\n\twith tf.Session() as sess:\n\t\tsoftmax_tensor = sess.graph.get_tensor_by_name('pool_3:0')\n\t\tpredictions = sess.run(softmax_tensor,{'DecodeJpeg/contents:0': image_data})\n\t\tpredictions = np.squeeze(predictions)\n\t\treturn predictions\n\n\n# create_graph()\n# input_x = np.zeros((0,2048))\n# for i in range(1,7001):\n# imageName = str(i).zfill(5)\n# image = '/home/ubuntu/caffe/examples/images/joey/'+imageName+\".jpg\"\n# pre = feature_extraction(image)\n# print (\"Finish extracting features of training image \"+image)\n# input_x = np.vstack((input_x,pre))\n\n# print(input_x.shape)\n\ntest_x = np.zeros((0,2048))\nfor i in range(1,971):\n imageName = str(i).zfill(5)\n image = '/home/ubuntu/caffe/examples/images/val/'+imageName+\".jpg\"\n pre = feature_extraction(image)\n print (\"Finish extracting features of test image \"+image)\n test_x = np.vstack((test_x,pre))\n\nprint(test_x.shape)\n\ninput_label = genfromtxt('/home/ubuntu/caffe/examples/images/Files/train.csv', delimiter=',')\ninput_label = input_label[1:7001,1].reshape(-1)\ninput_x = np.load(\"tensorFlow_train.npz\")\n#np.load(\"tensorFlow_test.npz\")\n\nprint ('input_x shape ',input_x.shape)\nprint ('input_label shape ',input_label.shape)\n\n\n# np.savez_compressed(\"tensorFlow_train\", input_x)\nnp.savez_compressed(\"tensorFlow_test\", test_x)\n\nX_train, X_test, y_train, y_test = train_test_split(input_x, input_label, test_size=0.1, random_state=42)\n\nclf = SVC(C=500.0,decision_function_shape='ovr',max_iter=-1,probability=False)\nclf.fit(X_train, y_train)\nprint('training accuracy is', clf.score(X_train,y_train))\nprint('validation accuracy is', clf.score(X_test,y_test))\n\nclf = SVC(C=500.0,decision_function_shape='ovr',max_iter=-1,probability=False)\nclf.fit(input_x, input_label)\n\ny_pred = clf.predict(test_x)\nfilename = \"predict_inception_v3.csv\"\nf = open(filename, \"w\")\nf.write('Id,Prediction\\n')\n\nif ((len(y_pred))<1000):\n zeros = np.zeros(2000)\n y_pred = np.append(y_pred, zeros).reshape(-1)\n \nfor i in range(0,len(y_pred)):\n d = '{0},{1}\\n'.format(i+1,int(y_pred[i]))\n f.write(d)\n\n\n\n\n",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
__doc__ = """\
A MiniFrame is a Frame with a small title bar. It is suitable for floating
toolbars that must not take up too much screen area. In other respects, it's the
same as a wx.Frame.
"""
__wxPyOnlineDocs__ = 'https://wxpython.org/Phoenix/docs/html/wx.MiniFrame.html'
__wxPyDemoPanel__ = 'TestPanel'
#-Imports-----------------------------------------------------------------------
#--Python Imports.
import os
import sys
#--wxPython Imports.
import wx
#-Globals-----------------------------------------------------------------------
try:
gFileDir = os.path.dirname(os.path.abspath(__file__))
except:
gFileDir = os.path.dirname(os.path.abspath(sys.argv[0]))
gBmpDir = gFileDir + os.sep + 'bitmaps'
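
# Demo layout: MyMiniFrame is the floating window under test, TestPanel hosts the
# launch buttons plus the show-effect picker, and TestFrame/TestApp form the
# standalone __main__ harness.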
class MyMiniFrame(wx.MiniFrame):
    def __init__(self, parent, id, title, pos=wx.DefaultPosition,
                 size=wx.DefaultSize, style=wx.DEFAULT_FRAME_STYLE,
                 name='frame'):
        wx.MiniFrame.__init__(self, parent, id, title, pos, size, style, name)
        panel = wx.Panel(self, -1)

        button = wx.Button(panel, 1003, "Close Me")
        button.SetPosition((15, 15))

        button2 = wx.Button(panel, -1, "ToggleWindowStyle(wx.STAY_ON_TOP)")
        button2.SetPosition((30, 50))

        self.Bind(wx.EVT_BUTTON, self.OnCloseMe, button)
        self.Bind(wx.EVT_BUTTON, self.OnToggleWindowStyle, button2)
        self.Bind(wx.EVT_CLOSE, self.OnCloseWindow)

    def OnToggleWindowStyle(self, event):
        self.ToggleWindowStyle(wx.STAY_ON_TOP)

    def OnCloseMe(self, event):
        self.Close(True)

    def OnCloseWindow(self, event):
        self.Destroy()

#---------------------------------------------------------------------------

class TestPanel(wx.Panel):
    def __init__(self, parent, log):
        self.log = log
        wx.Panel.__init__(self, parent, -1)

        b1 = wx.Button(self, -1, "Create and Show a MiniFrame", (50, 50))
        self.Bind(wx.EVT_BUTTON, self.OnButton1, b1)

        b2 = wx.Button(self, -1, "Create and Show a MiniFrame With Effect", (50, 100))
        self.Bind(wx.EVT_BUTTON, self.OnButton2, b2)

        self.list = wx.ListBox(self, choices=['wx.SHOW_EFFECT_NONE',
                                              'wx.SHOW_EFFECT_ROLL_TO_LEFT',
                                              'wx.SHOW_EFFECT_ROLL_TO_RIGHT',
                                              'wx.SHOW_EFFECT_ROLL_TO_TOP',
                                              'wx.SHOW_EFFECT_ROLL_TO_BOTTOM',
                                              'wx.SHOW_EFFECT_SLIDE_TO_LEFT',
                                              'wx.SHOW_EFFECT_SLIDE_TO_RIGHT',
                                              'wx.SHOW_EFFECT_SLIDE_TO_TOP',
                                              'wx.SHOW_EFFECT_SLIDE_TO_BOTTOM',
                                              'wx.SHOW_EFFECT_BLEND',
                                              'wx.SHOW_EFFECT_EXPAND'
                                              # 'wx.SHOW_EFFECT_MAX'
                                              ],
                               pos=(50, 155), size=(220, 160),
                               style=wx.LB_SINGLE)
        self.list.Select(0)

        tt = "Timeout in milliseconds\n0 is system default"
        self.spin = wx.SpinCtrl(self, -1, tt,
                                pos=(50, 130), style=wx.ALIGN_LEFT)
        self.spin.SetToolTip(wx.ToolTip(tt))
        self.spin.SetRange(0, 5000)
        self.spin.SetValue(0)

    def OnButton1(self, evt):
        win = MyMiniFrame(self, -1, "This is a wx.MiniFrame", size=(350, 200),
                          style=wx.DEFAULT_FRAME_STYLE)
        win.Centre()
        win.Show(True)

    def OnButton2(self, evt):
        win = MyMiniFrame(self, -1, "This is a wx.MiniFrame", size=(350, 200),
                          style=wx.DEFAULT_FRAME_STYLE)
        win.Centre()
        win.ShowWithEffect(effect=eval(self.list.GetString(self.list.GetSelection())),
                           timeout=self.spin.GetValue())

#- __main__ Demo ---------------------------------------------------------------

class printLog:
    def __init__(self):
        pass

    def write(self, txt):
        print('%s' % txt)

    def WriteText(self, txt):
        print('%s' % txt)


class TestFrame(wx.Frame):
    def __init__(self, parent, id=wx.ID_ANY, title=wx.EmptyString,
                 pos=wx.DefaultPosition, size=wx.DefaultSize,
                 style=wx.DEFAULT_FRAME_STYLE, name='frame'):
        wx.Frame.__init__(self, parent, id, title, pos, size, style, name)

        log = printLog()
        panel = TestPanel(self, log)
        self.Bind(wx.EVT_CLOSE, self.OnDestroy)

        try:
            self.SetIcon(wx.IconFromLocation(wx.IconLocation(sys.executable)))
        except Exception as exc:
            raise exc

    def OnDestroy(self, event):
        self.Destroy()


class TestApp(wx.App):
    def OnInit(self):
        gMainWin = TestFrame(None)
        gMainWin.SetTitle('Extended Frame Demo')
        gMainWin.Show()
        return True

#---------------------------------------------------------------------------

if __name__ == '__main__':
    import sys
    print('Python %s.%s.%s %s' % sys.version_info[0:4])
    print('wxPython %s' % wx.version())
    gApp = TestApp(redirect=False,
                   filename=None,
                   useBestVisual=False,
                   clearSigInt=True)
    gApp.MainLoop()
|
normal
|
{
"blob_id": "b041e9577af72d2bcee3dda0cc78fa12800d53bd",
"index": 2286,
"step-1": "<mask token>\n\n\nclass TestPanel(wx.Panel):\n\n def __init__(self, parent, log):\n self.log = log\n wx.Panel.__init__(self, parent, -1)\n b1 = wx.Button(self, -1, 'Create and Show a MiniFrame', (50, 50))\n self.Bind(wx.EVT_BUTTON, self.OnButton1, b1)\n b2 = wx.Button(self, -1, 'Create and Show a MiniFrame With Effect',\n (50, 100))\n self.Bind(wx.EVT_BUTTON, self.OnButton2, b2)\n self.list = wx.ListBox(self, choices=['wx.SHOW_EFFECT_NONE',\n 'wx.SHOW_EFFECT_ROLL_TO_LEFT', 'wx.SHOW_EFFECT_ROLL_TO_RIGHT',\n 'wx.SHOW_EFFECT_ROLL_TO_TOP', 'wx.SHOW_EFFECT_ROLL_TO_BOTTOM',\n 'wx.SHOW_EFFECT_SLIDE_TO_LEFT', 'wx.SHOW_EFFECT_SLIDE_TO_RIGHT',\n 'wx.SHOW_EFFECT_SLIDE_TO_TOP', 'wx.SHOW_EFFECT_SLIDE_TO_BOTTOM',\n 'wx.SHOW_EFFECT_BLEND', 'wx.SHOW_EFFECT_EXPAND'], pos=(50, 155),\n size=(220, 160), style=wx.LB_SINGLE)\n self.list.Select(0)\n tt = 'Timeout in milliseconds\\n0 is system default'\n self.spin = wx.SpinCtrl(self, -1, tt, pos=(50, 130), style=wx.\n ALIGN_LEFT)\n self.spin.SetToolTip(wx.ToolTip(tt))\n self.spin.SetRange(0, 5000)\n self.spin.SetValue(0)\n\n def OnButton1(self, evt):\n win = MyMiniFrame(self, -1, 'This is a wx.MiniFrame', size=(350, \n 200), style=wx.DEFAULT_FRAME_STYLE)\n win.Centre()\n win.Show(True)\n <mask token>\n\n\nclass printLog:\n\n def __init__(self):\n pass\n\n def write(self, txt):\n print('%s' % txt)\n\n def WriteText(self, txt):\n print('%s' % txt)\n\n\nclass TestFrame(wx.Frame):\n\n def __init__(self, parent, id=wx.ID_ANY, title=wx.EmptyString, pos=wx.\n DefaultPosition, size=wx.DefaultSize, style=wx.DEFAULT_FRAME_STYLE,\n name='frame'):\n wx.Frame.__init__(self, parent, id, title, pos, size, style, name)\n log = printLog()\n panel = TestPanel(self, log)\n self.Bind(wx.EVT_CLOSE, self.OnDestroy)\n try:\n self.SetIcon(wx.IconFromLocation(wx.IconLocation(sys.executable)))\n except Exception as exc:\n raise exc\n\n def OnDestroy(self, event):\n self.Destroy()\n\n\nclass TestApp(wx.App):\n\n def OnInit(self):\n gMainWin = TestFrame(None)\n gMainWin.SetTitle('Extended Frame Demo')\n gMainWin.Show()\n return True\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass MyMiniFrame(wx.MiniFrame):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n\nclass TestPanel(wx.Panel):\n\n def __init__(self, parent, log):\n self.log = log\n wx.Panel.__init__(self, parent, -1)\n b1 = wx.Button(self, -1, 'Create and Show a MiniFrame', (50, 50))\n self.Bind(wx.EVT_BUTTON, self.OnButton1, b1)\n b2 = wx.Button(self, -1, 'Create and Show a MiniFrame With Effect',\n (50, 100))\n self.Bind(wx.EVT_BUTTON, self.OnButton2, b2)\n self.list = wx.ListBox(self, choices=['wx.SHOW_EFFECT_NONE',\n 'wx.SHOW_EFFECT_ROLL_TO_LEFT', 'wx.SHOW_EFFECT_ROLL_TO_RIGHT',\n 'wx.SHOW_EFFECT_ROLL_TO_TOP', 'wx.SHOW_EFFECT_ROLL_TO_BOTTOM',\n 'wx.SHOW_EFFECT_SLIDE_TO_LEFT', 'wx.SHOW_EFFECT_SLIDE_TO_RIGHT',\n 'wx.SHOW_EFFECT_SLIDE_TO_TOP', 'wx.SHOW_EFFECT_SLIDE_TO_BOTTOM',\n 'wx.SHOW_EFFECT_BLEND', 'wx.SHOW_EFFECT_EXPAND'], pos=(50, 155),\n size=(220, 160), style=wx.LB_SINGLE)\n self.list.Select(0)\n tt = 'Timeout in milliseconds\\n0 is system default'\n self.spin = wx.SpinCtrl(self, -1, tt, pos=(50, 130), style=wx.\n ALIGN_LEFT)\n self.spin.SetToolTip(wx.ToolTip(tt))\n self.spin.SetRange(0, 5000)\n self.spin.SetValue(0)\n\n def OnButton1(self, evt):\n win = MyMiniFrame(self, -1, 'This is a wx.MiniFrame', size=(350, \n 200), style=wx.DEFAULT_FRAME_STYLE)\n win.Centre()\n win.Show(True)\n\n def OnButton2(self, evt):\n win = MyMiniFrame(self, -1, 'This is a wx.MiniFrame', size=(350, \n 200), style=wx.DEFAULT_FRAME_STYLE)\n win.Centre()\n win.ShowWithEffect(effect=eval(self.list.GetString(self.list.\n GetSelection())), timeout=self.spin.GetValue())\n\n\nclass printLog:\n\n def __init__(self):\n pass\n\n def write(self, txt):\n print('%s' % txt)\n\n def WriteText(self, txt):\n print('%s' % txt)\n\n\nclass TestFrame(wx.Frame):\n\n def __init__(self, parent, id=wx.ID_ANY, title=wx.EmptyString, pos=wx.\n DefaultPosition, size=wx.DefaultSize, style=wx.DEFAULT_FRAME_STYLE,\n name='frame'):\n wx.Frame.__init__(self, parent, id, title, pos, size, style, name)\n log = printLog()\n panel = TestPanel(self, log)\n self.Bind(wx.EVT_CLOSE, self.OnDestroy)\n try:\n self.SetIcon(wx.IconFromLocation(wx.IconLocation(sys.executable)))\n except Exception as exc:\n raise exc\n\n def OnDestroy(self, event):\n self.Destroy()\n\n\nclass TestApp(wx.App):\n\n def OnInit(self):\n gMainWin = TestFrame(None)\n gMainWin.SetTitle('Extended Frame Demo')\n gMainWin.Show()\n return True\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass MyMiniFrame(wx.MiniFrame):\n\n def __init__(self, parent, id, title, pos=wx.DefaultPosition, size=wx.\n DefaultSize, style=wx.DEFAULT_FRAME_STYLE, name='frame'):\n wx.MiniFrame.__init__(self, parent, id, title, pos, size, style, name)\n panel = wx.Panel(self, -1)\n button = wx.Button(panel, 1003, 'Close Me')\n button.SetPosition((15, 15))\n button2 = wx.Button(panel, -1, 'ToggleWindowStyle(wx.STAY_ON_TOP)')\n button2.SetPosition((30, 50))\n self.Bind(wx.EVT_BUTTON, self.OnCloseMe, button)\n self.Bind(wx.EVT_BUTTON, self.OnToggleWindowStyle, button2)\n self.Bind(wx.EVT_CLOSE, self.OnCloseWindow)\n\n def OnToggleWindowStyle(self, event):\n self.ToggleWindowStyle(wx.STAY_ON_TOP)\n <mask token>\n <mask token>\n\n\nclass TestPanel(wx.Panel):\n\n def __init__(self, parent, log):\n self.log = log\n wx.Panel.__init__(self, parent, -1)\n b1 = wx.Button(self, -1, 'Create and Show a MiniFrame', (50, 50))\n self.Bind(wx.EVT_BUTTON, self.OnButton1, b1)\n b2 = wx.Button(self, -1, 'Create and Show a MiniFrame With Effect',\n (50, 100))\n self.Bind(wx.EVT_BUTTON, self.OnButton2, b2)\n self.list = wx.ListBox(self, choices=['wx.SHOW_EFFECT_NONE',\n 'wx.SHOW_EFFECT_ROLL_TO_LEFT', 'wx.SHOW_EFFECT_ROLL_TO_RIGHT',\n 'wx.SHOW_EFFECT_ROLL_TO_TOP', 'wx.SHOW_EFFECT_ROLL_TO_BOTTOM',\n 'wx.SHOW_EFFECT_SLIDE_TO_LEFT', 'wx.SHOW_EFFECT_SLIDE_TO_RIGHT',\n 'wx.SHOW_EFFECT_SLIDE_TO_TOP', 'wx.SHOW_EFFECT_SLIDE_TO_BOTTOM',\n 'wx.SHOW_EFFECT_BLEND', 'wx.SHOW_EFFECT_EXPAND'], pos=(50, 155),\n size=(220, 160), style=wx.LB_SINGLE)\n self.list.Select(0)\n tt = 'Timeout in milliseconds\\n0 is system default'\n self.spin = wx.SpinCtrl(self, -1, tt, pos=(50, 130), style=wx.\n ALIGN_LEFT)\n self.spin.SetToolTip(wx.ToolTip(tt))\n self.spin.SetRange(0, 5000)\n self.spin.SetValue(0)\n\n def OnButton1(self, evt):\n win = MyMiniFrame(self, -1, 'This is a wx.MiniFrame', size=(350, \n 200), style=wx.DEFAULT_FRAME_STYLE)\n win.Centre()\n win.Show(True)\n\n def OnButton2(self, evt):\n win = MyMiniFrame(self, -1, 'This is a wx.MiniFrame', size=(350, \n 200), style=wx.DEFAULT_FRAME_STYLE)\n win.Centre()\n win.ShowWithEffect(effect=eval(self.list.GetString(self.list.\n GetSelection())), timeout=self.spin.GetValue())\n\n\nclass printLog:\n\n def __init__(self):\n pass\n\n def write(self, txt):\n print('%s' % txt)\n\n def WriteText(self, txt):\n print('%s' % txt)\n\n\nclass TestFrame(wx.Frame):\n\n def __init__(self, parent, id=wx.ID_ANY, title=wx.EmptyString, pos=wx.\n DefaultPosition, size=wx.DefaultSize, style=wx.DEFAULT_FRAME_STYLE,\n name='frame'):\n wx.Frame.__init__(self, parent, id, title, pos, size, style, name)\n log = printLog()\n panel = TestPanel(self, log)\n self.Bind(wx.EVT_CLOSE, self.OnDestroy)\n try:\n self.SetIcon(wx.IconFromLocation(wx.IconLocation(sys.executable)))\n except Exception as exc:\n raise exc\n\n def OnDestroy(self, event):\n self.Destroy()\n\n\nclass TestApp(wx.App):\n\n def OnInit(self):\n gMainWin = TestFrame(None)\n gMainWin.SetTitle('Extended Frame Demo')\n gMainWin.Show()\n return True\n\n\n<mask token>\n",
"step-4": "<mask token>\ntry:\n gFileDir = os.path.dirname(os.path.abspath(__file__))\nexcept:\n gFileDir = os.path.dirname(os.path.abspath(sys.argv[0]))\n<mask token>\n\n\nclass MyMiniFrame(wx.MiniFrame):\n\n def __init__(self, parent, id, title, pos=wx.DefaultPosition, size=wx.\n DefaultSize, style=wx.DEFAULT_FRAME_STYLE, name='frame'):\n wx.MiniFrame.__init__(self, parent, id, title, pos, size, style, name)\n panel = wx.Panel(self, -1)\n button = wx.Button(panel, 1003, 'Close Me')\n button.SetPosition((15, 15))\n button2 = wx.Button(panel, -1, 'ToggleWindowStyle(wx.STAY_ON_TOP)')\n button2.SetPosition((30, 50))\n self.Bind(wx.EVT_BUTTON, self.OnCloseMe, button)\n self.Bind(wx.EVT_BUTTON, self.OnToggleWindowStyle, button2)\n self.Bind(wx.EVT_CLOSE, self.OnCloseWindow)\n\n def OnToggleWindowStyle(self, event):\n self.ToggleWindowStyle(wx.STAY_ON_TOP)\n\n def OnCloseMe(self, event):\n self.Close(True)\n\n def OnCloseWindow(self, event):\n self.Destroy()\n\n\nclass TestPanel(wx.Panel):\n\n def __init__(self, parent, log):\n self.log = log\n wx.Panel.__init__(self, parent, -1)\n b1 = wx.Button(self, -1, 'Create and Show a MiniFrame', (50, 50))\n self.Bind(wx.EVT_BUTTON, self.OnButton1, b1)\n b2 = wx.Button(self, -1, 'Create and Show a MiniFrame With Effect',\n (50, 100))\n self.Bind(wx.EVT_BUTTON, self.OnButton2, b2)\n self.list = wx.ListBox(self, choices=['wx.SHOW_EFFECT_NONE',\n 'wx.SHOW_EFFECT_ROLL_TO_LEFT', 'wx.SHOW_EFFECT_ROLL_TO_RIGHT',\n 'wx.SHOW_EFFECT_ROLL_TO_TOP', 'wx.SHOW_EFFECT_ROLL_TO_BOTTOM',\n 'wx.SHOW_EFFECT_SLIDE_TO_LEFT', 'wx.SHOW_EFFECT_SLIDE_TO_RIGHT',\n 'wx.SHOW_EFFECT_SLIDE_TO_TOP', 'wx.SHOW_EFFECT_SLIDE_TO_BOTTOM',\n 'wx.SHOW_EFFECT_BLEND', 'wx.SHOW_EFFECT_EXPAND'], pos=(50, 155),\n size=(220, 160), style=wx.LB_SINGLE)\n self.list.Select(0)\n tt = 'Timeout in milliseconds\\n0 is system default'\n self.spin = wx.SpinCtrl(self, -1, tt, pos=(50, 130), style=wx.\n ALIGN_LEFT)\n self.spin.SetToolTip(wx.ToolTip(tt))\n self.spin.SetRange(0, 5000)\n self.spin.SetValue(0)\n\n def OnButton1(self, evt):\n win = MyMiniFrame(self, -1, 'This is a wx.MiniFrame', size=(350, \n 200), style=wx.DEFAULT_FRAME_STYLE)\n win.Centre()\n win.Show(True)\n\n def OnButton2(self, evt):\n win = MyMiniFrame(self, -1, 'This is a wx.MiniFrame', size=(350, \n 200), style=wx.DEFAULT_FRAME_STYLE)\n win.Centre()\n win.ShowWithEffect(effect=eval(self.list.GetString(self.list.\n GetSelection())), timeout=self.spin.GetValue())\n\n\nclass printLog:\n\n def __init__(self):\n pass\n\n def write(self, txt):\n print('%s' % txt)\n\n def WriteText(self, txt):\n print('%s' % txt)\n\n\nclass TestFrame(wx.Frame):\n\n def __init__(self, parent, id=wx.ID_ANY, title=wx.EmptyString, pos=wx.\n DefaultPosition, size=wx.DefaultSize, style=wx.DEFAULT_FRAME_STYLE,\n name='frame'):\n wx.Frame.__init__(self, parent, id, title, pos, size, style, name)\n log = printLog()\n panel = TestPanel(self, log)\n self.Bind(wx.EVT_CLOSE, self.OnDestroy)\n try:\n self.SetIcon(wx.IconFromLocation(wx.IconLocation(sys.executable)))\n except Exception as exc:\n raise exc\n\n def OnDestroy(self, event):\n self.Destroy()\n\n\nclass TestApp(wx.App):\n\n def OnInit(self):\n gMainWin = TestFrame(None)\n gMainWin.SetTitle('Extended Frame Demo')\n gMainWin.Show()\n return True\n\n\nif __name__ == '__main__':\n import sys\n print('Python %s.%s.%s %s' % sys.version_info[0:4])\n print('wxPython %s' % wx.version())\n gApp = TestApp(redirect=False, filename=None, useBestVisual=False,\n clearSigInt=True)\n gApp.MainLoop()\n",
"step-5": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n__doc__ = \"\"\"\\\nA MiniFrame is a Frame with a small title bar. It is suitable for floating\ntoolbars that must not take up too much screen area. In other respects, it's the\nsame as a wx.Frame.\n\"\"\"\n\n__wxPyOnlineDocs__ = 'https://wxpython.org/Phoenix/docs/html/wx.MiniFrame.html'\n__wxPyDemoPanel__ = 'TestPanel'\n\n#-Imports-----------------------------------------------------------------------\n\n#--Python Imports.\nimport os\nimport sys\n\n#--wxPython Imports.\nimport wx\n\n\n#-Globals-----------------------------------------------------------------------\ntry:\n gFileDir = os.path.dirname(os.path.abspath(__file__))\nexcept:\n gFileDir = os.path.dirname(os.path.abspath(sys.argv[0]))\ngBmpDir = gFileDir + os.sep + 'bitmaps'\n\n\nclass MyMiniFrame(wx.MiniFrame):\n def __init__(self, parent, id, title, pos=wx.DefaultPosition,\n size=wx.DefaultSize, style=wx.DEFAULT_FRAME_STYLE,\n name='frame'):\n\n wx.MiniFrame.__init__(self, parent, id, title, pos, size, style, name)\n panel = wx.Panel(self, -1)\n\n button = wx.Button(panel, 1003, \"Close Me\")\n button.SetPosition((15, 15))\n\n button2 = wx.Button(panel, -1, \"ToggleWindowStyle(wx.STAY_ON_TOP)\")\n button2.SetPosition((30, 50))\n\n self.Bind(wx.EVT_BUTTON, self.OnCloseMe, button)\n self.Bind(wx.EVT_BUTTON, self.OnToggleWindowStyle, button2)\n self.Bind(wx.EVT_CLOSE, self.OnCloseWindow)\n\n\n def OnToggleWindowStyle(self, event):\n self.ToggleWindowStyle(wx.STAY_ON_TOP)\n\n def OnCloseMe(self, event):\n self.Close(True)\n\n def OnCloseWindow(self, event):\n self.Destroy()\n\n#---------------------------------------------------------------------------\n\nclass TestPanel(wx.Panel):\n def __init__(self, parent, log):\n self.log = log\n wx.Panel.__init__(self, parent, -1)\n\n b1 = wx.Button(self, -1, \"Create and Show a MiniFrame\", (50, 50))\n self.Bind(wx.EVT_BUTTON, self.OnButton1, b1)\n\n b2 = wx.Button(self, -1, \"Create and Show a MiniFrame With Effect\", (50, 100))\n self.Bind(wx.EVT_BUTTON, self.OnButton2, b2)\n\n self.list = wx.ListBox(self, choices=['wx.SHOW_EFFECT_NONE',\n 'wx.SHOW_EFFECT_ROLL_TO_LEFT',\n 'wx.SHOW_EFFECT_ROLL_TO_RIGHT',\n 'wx.SHOW_EFFECT_ROLL_TO_TOP',\n 'wx.SHOW_EFFECT_ROLL_TO_BOTTOM',\n 'wx.SHOW_EFFECT_SLIDE_TO_LEFT',\n 'wx.SHOW_EFFECT_SLIDE_TO_RIGHT',\n 'wx.SHOW_EFFECT_SLIDE_TO_TOP',\n 'wx.SHOW_EFFECT_SLIDE_TO_BOTTOM',\n 'wx.SHOW_EFFECT_BLEND',\n 'wx.SHOW_EFFECT_EXPAND'\n # 'wx.SHOW_EFFECT_MAX'\n ],\n pos=(50, 155), size=(220, 160),\n style=wx.LB_SINGLE)\n self.list.Select(0)\n\n tt = \"Timeout in milliseconds\\n0 is system default\"\n self.spin = wx.SpinCtrl(self, -1, tt,\n pos=(50, 130), style=wx.ALIGN_LEFT)\n self.spin.SetToolTip(wx.ToolTip(tt))\n self.spin.SetRange(0, 5000)\n self.spin.SetValue(0)\n\n def OnButton1(self, evt):\n win = MyMiniFrame(self, -1, \"This is a wx.MiniFrame\", size=(350, 200),\n style=wx.DEFAULT_FRAME_STYLE)\n win.Centre()\n win.Show(True)\n\n def OnButton2(self, evt):\n win = MyMiniFrame(self, -1, \"This is a wx.MiniFrame\", size=(350, 200),\n style=wx.DEFAULT_FRAME_STYLE)\n win.Centre()\n win.ShowWithEffect(effect=eval(self.list.GetString(self.list.GetSelection())),\n timeout=self.spin.GetValue())\n\n\n#- __main__ Demo ---------------------------------------------------------------\n\nclass printLog:\n def __init__(self):\n pass\n\n def write(self, txt):\n print('%s' % txt)\n\n def WriteText(self, txt):\n print('%s' % txt)\n\n\nclass TestFrame(wx.Frame):\n def __init__(self, parent, id=wx.ID_ANY, 
title=wx.EmptyString,\n pos=wx.DefaultPosition, size=wx.DefaultSize,\n style=wx.DEFAULT_FRAME_STYLE, name='frame'):\n wx.Frame.__init__(self, parent, id, title, pos, size, style, name)\n\n log = printLog()\n\n panel = TestPanel(self, log)\n self.Bind(wx.EVT_CLOSE, self.OnDestroy)\n\n try:\n self.SetIcon(wx.IconFromLocation(wx.IconLocation(sys.executable)))\n except Exception as exc:\n raise exc\n\n def OnDestroy(self, event):\n self.Destroy()\n\n\nclass TestApp(wx.App):\n def OnInit(self):\n gMainWin = TestFrame(None)\n gMainWin.SetTitle('Extended Frame Demo')\n gMainWin.Show()\n\n return True\n\n#---------------------------------------------------------------------------\n\n\nif __name__ == '__main__':\n import sys\n print('Python %s.%s.%s %s' % sys.version_info[0:4])\n print('wxPython %s' % wx.version())\n gApp = TestApp(redirect=False,\n filename=None,\n useBestVisual=False,\n clearSigInt=True)\n\n gApp.MainLoop()\n",
"step-ids": [
12,
14,
16,
19,
22
]
}
|
[
12,
14,
16,
19,
22
] |
import testr
import testg
import time
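
# Flow: load the scene, run navigation planning, then grasp planning, then
# navigate again. testr and testg appear to wrap OpenRAVE's navigation and
# grasp-planning example classes; the actual modules are not shown here.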
def run():
    parser = testg.OptionParser(description='Autonomous grasp and manipulation planning example.')
    parser.add_option('--scene',
                      action="store", type='string', dest='scene', default='/home/user/experiment/data/lab1.env.xml',
                      help='Scene file to load (default=%default)')
    parser.add_option('--nodestinations', action='store_true', dest='nodestinations', default=False,
                      help='If set, will plan without destinations.')
    parser.add_option('--norandomize', action='store_false', dest='randomize', default=True,
                      help='If set, will not randomize the bodies and robot position in the scene.')
    (options, args) = parser.parse_args()
    env = testg.Environment()
    try:
        env.SetViewer('qtcoin')
        env.Load(options.scene)
        robot = env.GetRobots()[0]
        env.UpdatePublishedBodies()
        time.sleep(0.1)  # give time for environment to update
        SNP = testr.SimpleNavigationPlanning(robot)
        SNP.performNavigationPlanning()
        GP = testg.GraspPlanning(robot, randomize=options.randomize, nodestinations=options.nodestinations)
        GP.performGraspPlanning()
        SNP = testr.SimpleNavigationPlanning(robot)
        SNP.performNavigationPlanning()
    finally:
        env.Destroy()


if __name__ == "__main__":
    run()
|
normal
|
{
"blob_id": "62857a015087500fec534ba1297d42a33ae61927",
"index": 7153,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef run():\n parser = testg.OptionParser(description=\n 'Autonomous grasp and manipulation planning example.')\n parser.add_option('--scene', action='store', type='string', dest=\n 'scene', default='/home/user/experiment/data/lab1.env.xml', help=\n 'Scene file to load (default=%default)')\n parser.add_option('--nodestinations', action='store_true', dest=\n 'nodestinations', default=False, help=\n 'If set, will plan without destinations.')\n parser.add_option('--norandomize', action='store_false', dest=\n 'randomize', default=True, help=\n 'If set, will not randomize the bodies and robot position in the scene.'\n )\n options, args = parser.parse_args()\n env = testg.Environment()\n try:\n env.SetViewer('qtcoin')\n env.Load(options.scene)\n robot = env.GetRobots()[0]\n env.UpdatePublishedBodies()\n time.sleep(0.1)\n SNP = testr.SimpleNavigationPlanning(robot)\n SNP.performNavigationPlanning()\n GP = testg.GraspPlanning(robot, randomize=options.randomize,\n nodestinations=options.nodestinations)\n GP.performGraspPlanning()\n SNP = testr.SimpleNavigationPlanning(robot)\n SNP.performNavigationPlanning()\n finally:\n env.Destroy()\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef run():\n parser = testg.OptionParser(description=\n 'Autonomous grasp and manipulation planning example.')\n parser.add_option('--scene', action='store', type='string', dest=\n 'scene', default='/home/user/experiment/data/lab1.env.xml', help=\n 'Scene file to load (default=%default)')\n parser.add_option('--nodestinations', action='store_true', dest=\n 'nodestinations', default=False, help=\n 'If set, will plan without destinations.')\n parser.add_option('--norandomize', action='store_false', dest=\n 'randomize', default=True, help=\n 'If set, will not randomize the bodies and robot position in the scene.'\n )\n options, args = parser.parse_args()\n env = testg.Environment()\n try:\n env.SetViewer('qtcoin')\n env.Load(options.scene)\n robot = env.GetRobots()[0]\n env.UpdatePublishedBodies()\n time.sleep(0.1)\n SNP = testr.SimpleNavigationPlanning(robot)\n SNP.performNavigationPlanning()\n GP = testg.GraspPlanning(robot, randomize=options.randomize,\n nodestinations=options.nodestinations)\n GP.performGraspPlanning()\n SNP = testr.SimpleNavigationPlanning(robot)\n SNP.performNavigationPlanning()\n finally:\n env.Destroy()\n\n\nif __name__ == '__main__':\n run()\n",
"step-4": "import testr\nimport testg\nimport time\n\n\ndef run():\n parser = testg.OptionParser(description=\n 'Autonomous grasp and manipulation planning example.')\n parser.add_option('--scene', action='store', type='string', dest=\n 'scene', default='/home/user/experiment/data/lab1.env.xml', help=\n 'Scene file to load (default=%default)')\n parser.add_option('--nodestinations', action='store_true', dest=\n 'nodestinations', default=False, help=\n 'If set, will plan without destinations.')\n parser.add_option('--norandomize', action='store_false', dest=\n 'randomize', default=True, help=\n 'If set, will not randomize the bodies and robot position in the scene.'\n )\n options, args = parser.parse_args()\n env = testg.Environment()\n try:\n env.SetViewer('qtcoin')\n env.Load(options.scene)\n robot = env.GetRobots()[0]\n env.UpdatePublishedBodies()\n time.sleep(0.1)\n SNP = testr.SimpleNavigationPlanning(robot)\n SNP.performNavigationPlanning()\n GP = testg.GraspPlanning(robot, randomize=options.randomize,\n nodestinations=options.nodestinations)\n GP.performGraspPlanning()\n SNP = testr.SimpleNavigationPlanning(robot)\n SNP.performNavigationPlanning()\n finally:\n env.Destroy()\n\n\nif __name__ == '__main__':\n run()\n",
"step-5": "import testr\nimport testg\nimport time\n\ndef run():\n parser = testg.OptionParser(description='Autonomous grasp and manipulation planning example.')\n parser.add_option('--scene',\n action=\"store\",type='string',dest='scene',default='/home/user/experiment/data/lab1.env.xml',\n help='Scene file to load (default=%default)')\n parser.add_option('--nodestinations', action='store_true',dest='nodestinations',default=False,\n help='If set, will plan without destinations.')\n parser.add_option('--norandomize', action='store_false',dest='randomize',default=True,\n help='If set, will not randomize the bodies and robot position in the scene.')\n (options, args) = parser.parse_args()\n env = testg.Environment()\n try:\n env.SetViewer('qtcoin')\n env.Load(options.scene)\n robot = env.GetRobots()[0]\n env.UpdatePublishedBodies()\n time.sleep(0.1) # give time for environment to update\n SNP = testr.SimpleNavigationPlanning(robot)\n SNP.performNavigationPlanning()\n GP= testg.GraspPlanning(robot,randomize=options.randomize,nodestinations=options.nodestinations)\n GP.performGraspPlanning()\n SNP = testr.SimpleNavigationPlanning(robot)\n SNP.performNavigationPlanning()\n \n finally:\n env.Destroy()\n\nif __name__ == \"__main__\":\n run()\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
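The record above follows openravepy's standard environment lifecycle: create the environment, load a scene, grab the robot, and tear everything down in a finally block. A minimal sketch of that pattern, assuming openravepy is installed and that 'data/lab1.env.xml' is resolvable on the OpenRAVE data path:

from openravepy import Environment

env = Environment()                   # core OpenRAVE environment
try:
    env.Load('data/lab1.env.xml')     # same lab scene as in the record
    robot = env.GetRobots()[0]        # first robot defined in the scene
    print(robot.GetName())
finally:
    env.Destroy()                     # always release the environment, as the example does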
#header
import matplotlib.pyplot as pmf  # imported for plotting, but currently unused in this script
import random
p = 0.5 # Probability of success for original system
n = 18 # Number of trials
Y = [] # Contains binomial RVs
b = [0] * (n+1) # List of n + 1 zeroes
N = 100 # Number of experiments performed
for j in range(N):
# Bernoulli random variable
for i in range(n):
r = random.uniform(0,1)
if r < p:
x = 1
else:
x = 0
Y.append(x)
outcome = sum(Y) # Number of successes from 0 to n
b[outcome] = b[outcome] + 1 # Record of successes for bar plot
Y.clear()
for i in range(n+1):
    b[i] = b[i]/N # Probabilities

p = 0 # reuse p as the accumulator for the tail probability
cv = int(input('Enter a choice for the CV.'))
for i in range(cv, n + 1):
p = p + b[i]
print('For a critical value of', cv, 'the probability of rejecting the old system in favor of a new system that is no better than the old one is', p, '.')
#cv = 13, 1/20 or the 5% rule
|
normal
|
{
"blob_id": "9a1b268386b4652bf50af0365892ef7338329727",
"index": 9631,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nfor j in range(N):\n for i in range(n):\n r = random.uniform(0, 1)\n if r < p:\n x = 1\n else:\n x = 0\n Y.append(x)\n outcome = sum(Y)\n b[outcome] = b[outcome] + 1\n Y.clear()\nfor i in range(n + 1):\n b[i] = b[i] / N\n p = 0\n<mask token>\nfor i in range(cv, 19):\n p = p + b[i]\nprint('For a critical value of', cv,\n 'the probability of rejecting the old system in favor of a new system that is no better than is'\n , p, '.')\n",
"step-3": "<mask token>\np = 0.5\nn = 18\nY = []\nb = [0] * (n + 1)\nN = 100\nfor j in range(N):\n for i in range(n):\n r = random.uniform(0, 1)\n if r < p:\n x = 1\n else:\n x = 0\n Y.append(x)\n outcome = sum(Y)\n b[outcome] = b[outcome] + 1\n Y.clear()\nfor i in range(n + 1):\n b[i] = b[i] / N\n p = 0\ncv = int(input('Enter a choice for the CV.'))\nfor i in range(cv, 19):\n p = p + b[i]\nprint('For a critical value of', cv,\n 'the probability of rejecting the old system in favor of a new system that is no better than is'\n , p, '.')\n",
"step-4": "import matplotlib.pyplot as pmf\nimport random\np = 0.5\nn = 18\nY = []\nb = [0] * (n + 1)\nN = 100\nfor j in range(N):\n for i in range(n):\n r = random.uniform(0, 1)\n if r < p:\n x = 1\n else:\n x = 0\n Y.append(x)\n outcome = sum(Y)\n b[outcome] = b[outcome] + 1\n Y.clear()\nfor i in range(n + 1):\n b[i] = b[i] / N\n p = 0\ncv = int(input('Enter a choice for the CV.'))\nfor i in range(cv, 19):\n p = p + b[i]\nprint('For a critical value of', cv,\n 'the probability of rejecting the old system in favor of a new system that is no better than is'\n , p, '.')\n",
"step-5": "#header\n\nimport matplotlib.pyplot as pmf\nimport random\n\np = 0.5 # Probablility of success for original system\nn = 18 # Number of trials\nY = [] # Contains binomial RVs\nb = [0] * (n+1) # List of n + 1 zeroes\nN = 100 # Number of experiments performed\n\nfor j in range(N):\n \n # Bernoulli random variable\n for i in range(n):\n \n r = random.uniform(0,1)\n if r < p:\n x = 1\n else:\n x = 0\n Y.append(x)\n outcome = sum(Y) # Number of successes from 0 to n\n b[outcome] = b[outcome] + 1 # Record of successes for bar plot\n Y.clear()\n \n \nfor i in range(n+1):\n b[i] = b[i]/N # Probabilities\n p = 0\n\ncv = int(input('Enter a choice for the CV.'))\n\nfor i in range(cv, 19):\n p = p + b[i]\n \nprint('For a critical value of', cv, 'the probability of rejecting the old system in favor of a new system that is no better than is', p,'.')\n#cv = 13, 1/20 or the 5% rule",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
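The Monte Carlo tail estimate above can be cross-checked against the exact binomial tail P(X >= cv) for X ~ Binomial(n, p). A minimal sketch, assuming scipy is available (it is not used in the record itself):

from scipy.stats import binom

n, p, cv = 18, 0.5, 13
exact = binom.sf(cv - 1, n, p)  # survival function: P(X >= cv)
print('Exact tail probability for cv =', cv, 'is', exact)  # ~0.048, matching the 5% rule comment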
import unittest
import shapely.geometry as gm
from alphaBetaLab.abRectangularGridBuilder import abRectangularGridBuilder
class testAbRectangularGridBuilder(unittest.TestCase):
def getMockHiResAlphaMtxAndCstCellDet(self, posCellCentroids = None):
class _mockClass:
def __init__(self, posCellCentroids):
self.posCellCentroids = posCellCentroids
self.cell = None
def getAlphaSubMatrix(self, cell):
sm = _mockClass(self.posCellCentroids)
sm.cell = cell
return sm
def _positive(self, cell):
cntrs = self.posCellCentroids
if cell is None or cntrs is None:
return False
else:
for c in cntrs:
if cell.contains(gm.Point([c[0], c[1]])):
return True
return False
def onLand(self):
cell = self.cell
return self._positive(cell)
def isCoastalCell(self, cell, boundary = None, surface = -1):
return self._positive(cell)
return _mockClass(posCellCentroids)
def testGetSeaGridSerial(self):
minx = 100.
miny = 45.
dx = .5
dy = 1.
nx = 30
ny = 10
maxx = minx + nx*dx
maxy = miny + ny*dy
landCntrs = [[100.25, 45.25], [105.25, 47.25]]
coastCntrs = [[100.75, 45.25], [105.25, 47.25]]
gb = abRectangularGridBuilder(minx, miny, dx, dy, nx, ny,
nParWorker=1, minXYIsCentroid=False)
hiResMtx = self.getMockHiResAlphaMtxAndCstCellDet(landCntrs)
cstClDet = self.getMockHiResAlphaMtxAndCstCellDet(coastCntrs)
grd = gb.buildGrid(hiResMtx, cstClDet)
self.assertFalse(grd.wrapAroundDateline)
self.assertEqual(1, grd.nParWorker)
cells = grd.cells
self.assertEqual(nx*ny - 3, len(cells))
def testGetSeaGridParallel(self):
minx = 100.
miny = 45.
dx = .5
dy = 1.
nx = 30
ny = 10
maxx = minx + nx*dx
maxy = miny + ny*dy
landCntrs = [[100.25, 45.25], [105.25, 47.25]]
coastCntrs = [[100.75, 45.25], [105.25, 47.25]]
gb = abRectangularGridBuilder(minx, miny, dx, dy, nx, ny, nParWorker=4, minXYIsCentroid=False)
hiResMtx = self.getMockHiResAlphaMtxAndCstCellDet(landCntrs)
cstClDet = self.getMockHiResAlphaMtxAndCstCellDet(coastCntrs)
grd = gb.buildGrid(hiResMtx, cstClDet)
self.assertEqual(4, grd.nParWorker)
self.assertFalse(grd.wrapAroundDateline)
cells = grd.cells
self.assertEqual(nx*ny - 3, len(cells))
def testGetNeighborsSerial(self):
minx = 100.
miny = 45.
dx = .5
dy = 1.
nx = 30
ny = 10
maxx = minx + nx*dx
maxy = miny + ny*dy
gb = abRectangularGridBuilder(minx, miny, dx, dy, nx, ny, nParWorker = 1)
hiResMtx = self.getMockHiResAlphaMtxAndCstCellDet()
cstClDet = self.getMockHiResAlphaMtxAndCstCellDet()
grd = gb.buildGrid(hiResMtx, cstClDet)
self.assertEqual(1, grd.nParWorker)
self.assertFalse(grd.wrapAroundDateline)
cells = grd.cells
cell = cells[0]
ncls = grd.getNeighbors(cell)
self.assertEqual(3, len(ncls))
for nc in ncls:
self.assertTrue( cell.distance(nc) < .000000001 )
cell = cells[45]
ncls = grd.getNeighbors(cell)
self.assertEqual(8, len(ncls))
for nc in ncls:
self.assertTrue( cell.distance(nc) < .000000001 )
cell = cells[100]
ncls = grd.getNeighbors(cell)
self.assertEqual(5, len(ncls))
for nc in ncls:
self.assertTrue( cell.distance(nc) < .000000001 )
def testGetNeighborsParallel(self):
minx = 100.
miny = 45.
dx = .5
dy = 1.
nx = 30
ny = 10
maxx = minx + nx*dx
maxy = miny + ny*dy
gb = abRectangularGridBuilder(minx, miny, dx, dy, nx, ny, nParWorker = 4)
hiResMtx = self.getMockHiResAlphaMtxAndCstCellDet()
cstClDet = self.getMockHiResAlphaMtxAndCstCellDet()
grd = gb.buildGrid(hiResMtx, cstClDet)
self.assertEqual(4, grd.nParWorker)
self.assertFalse(grd.wrapAroundDateline)
cells = grd.cells
cell = cells[0]
ncls = grd.getNeighbors(cell)
self.assertEqual(3, len(ncls))
for nc in ncls:
self.assertTrue( cell.distance(nc) < .000000001 )
cell = cells[45]
ncls = grd.getNeighbors(cell)
self.assertEqual(8, len(ncls))
for nc in ncls:
self.assertTrue( cell.distance(nc) < .000000001 )
cell = cells[100]
ncls = grd.getNeighbors(cell)
self.assertEqual(5, len(ncls))
for nc in ncls:
self.assertTrue( cell.distance(nc) < .000000001 )
def testGetNeighborsWrapAroundGlobalParallel(self):
minx = -179.
miny = -89.
dx = 2
dy = 2
nx = 180
ny = 90
maxx = minx + nx*dx
maxy = miny + ny*dy
gb = abRectangularGridBuilder(minx, miny, dx, dy, nx, ny, nParWorker = 4)
hiResMtx = self.getMockHiResAlphaMtxAndCstCellDet()
cstClDet = self.getMockHiResAlphaMtxAndCstCellDet()
grd = gb.buildGrid(hiResMtx, cstClDet)
self.assertTrue(grd.wrapAroundDateline)
cells = grd.cells
cell = cells[0]
ncls = grd.getNeighbors(cell)
self.assertEqual(5, len(ncls))
for nc in ncls:
self.assertTrue( cell.distance(nc) < .000000001 )
self.assertEqual(-182, ncls[3].boundary.coords[0][0])
self.assertEqual(-182, ncls[4].boundary.coords[0][0])
cell = cells[45]
ncls = grd.getNeighbors(cell)
self.assertEqual(8, len(ncls))
for nc in ncls:
self.assertTrue( cell.distance(nc) < .000000001 )
self.assertEqual(-182, ncls[5].boundary.coords[0][0])
self.assertEqual(-182, ncls[6].boundary.coords[0][0])
self.assertEqual(-182, ncls[6].boundary.coords[0][0])
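    # note: this assertion duplicates the previous line; ncls[7] was likely intended
    # (the same duplication appears in the similar checks further down)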
cell = cells[65]
ncls = grd.getNeighbors(cell)
self.assertEqual(8, len(ncls))
for nc in ncls:
self.assertTrue( cell.distance(nc) < .000000001 )
self.assertEqual(-182, ncls[5].boundary.coords[0][0])
self.assertEqual(-182, ncls[6].boundary.coords[0][0])
self.assertEqual(-182, ncls[6].boundary.coords[0][0])
cell = cells[89]
ncls = grd.getNeighbors(cell)
self.assertEqual(5, len(ncls))
for nc in ncls:
self.assertTrue( cell.distance(nc) < .000000001 )
self.assertEqual(-182, ncls[3].boundary.coords[0][0])
self.assertEqual(-182, ncls[4].boundary.coords[0][0])
cell = cells[200]
ncls = grd.getNeighbors(cell)
self.assertEqual(8, len(ncls))
for nc in ncls:
self.assertTrue( cell.distance(nc) < .000000001 )
cell = cells[nx*ny-22]
ncls = grd.getNeighbors(cell)
self.assertEqual(8, len(ncls))
for nc in ncls:
self.assertTrue( cell.distance(nc) < .000000001 )
self.assertEqual(182, ncls[5].boundary.coords[1][0])
self.assertEqual(182, ncls[6].boundary.coords[1][0])
self.assertEqual(182, ncls[6].boundary.coords[1][0])
cell = cells[nx*ny-1]
ncls = grd.getNeighbors(cell)
self.assertEqual(5, len(ncls))
for nc in ncls:
self.assertTrue( cell.distance(nc) < .000000001 )
self.assertEqual(182, ncls[3].boundary.coords[1][0])
self.assertEqual(182, ncls[4].boundary.coords[1][0])
if __name__ == '__main__':
unittest.main()
|
normal
|
{
"blob_id": "6175ce6534d44d703df6cdef94fc2b1285e25f49",
"index": 2202,
"step-1": "<mask token>\n\n\nclass testAbRectangularGridBuilder(unittest.TestCase):\n\n def getMockHiResAlphaMtxAndCstCellDet(self, posCellCentroids=None):\n\n\n class _mockClass:\n\n def __init__(self, posCellCentroids):\n self.posCellCentroids = posCellCentroids\n self.cell = None\n\n def getAlphaSubMatrix(self, cell):\n sm = _mockClass(self.posCellCentroids)\n sm.cell = cell\n return sm\n\n def _positive(self, cell):\n cntrs = self.posCellCentroids\n if cell is None or cntrs is None:\n return False\n else:\n for c in cntrs:\n if cell.contains(gm.Point([c[0], c[1]])):\n return True\n return False\n\n def onLand(self):\n cell = self.cell\n return self._positive(cell)\n\n def isCoastalCell(self, cell, boundary=None, surface=-1):\n return self._positive(cell)\n return _mockClass(posCellCentroids)\n\n def testGetSeaGridSerial(self):\n minx = 100.0\n miny = 45.0\n dx = 0.5\n dy = 1.0\n nx = 30\n ny = 10\n maxx = minx + nx * dx\n maxy = miny + ny * dy\n landCntrs = [[100.25, 45.25], [105.25, 47.25]]\n coastCntrs = [[100.75, 45.25], [105.25, 47.25]]\n gb = abRectangularGridBuilder(minx, miny, dx, dy, nx, ny,\n nParWorker=1, minXYIsCentroid=False)\n hiResMtx = self.getMockHiResAlphaMtxAndCstCellDet(landCntrs)\n cstClDet = self.getMockHiResAlphaMtxAndCstCellDet(coastCntrs)\n grd = gb.buildGrid(hiResMtx, cstClDet)\n self.assertFalse(grd.wrapAroundDateline)\n self.assertEqual(1, grd.nParWorker)\n cells = grd.cells\n self.assertEqual(nx * ny - 3, len(cells))\n <mask token>\n\n def testGetNeighborsSerial(self):\n minx = 100.0\n miny = 45.0\n dx = 0.5\n dy = 1.0\n nx = 30\n ny = 10\n maxx = minx + nx * dx\n maxy = miny + ny * dy\n gb = abRectangularGridBuilder(minx, miny, dx, dy, nx, ny, nParWorker=1)\n hiResMtx = self.getMockHiResAlphaMtxAndCstCellDet()\n cstClDet = self.getMockHiResAlphaMtxAndCstCellDet()\n grd = gb.buildGrid(hiResMtx, cstClDet)\n self.assertEqual(1, grd.nParWorker)\n self.assertFalse(grd.wrapAroundDateline)\n cells = grd.cells\n cell = cells[0]\n ncls = grd.getNeighbors(cell)\n self.assertEqual(3, len(ncls))\n for nc in ncls:\n self.assertTrue(cell.distance(nc) < 1e-09)\n cell = cells[45]\n ncls = grd.getNeighbors(cell)\n self.assertEqual(8, len(ncls))\n for nc in ncls:\n self.assertTrue(cell.distance(nc) < 1e-09)\n cell = cells[100]\n ncls = grd.getNeighbors(cell)\n self.assertEqual(5, len(ncls))\n for nc in ncls:\n self.assertTrue(cell.distance(nc) < 1e-09)\n\n def testGetNeighborsParallel(self):\n minx = 100.0\n miny = 45.0\n dx = 0.5\n dy = 1.0\n nx = 30\n ny = 10\n maxx = minx + nx * dx\n maxy = miny + ny * dy\n gb = abRectangularGridBuilder(minx, miny, dx, dy, nx, ny, nParWorker=4)\n hiResMtx = self.getMockHiResAlphaMtxAndCstCellDet()\n cstClDet = self.getMockHiResAlphaMtxAndCstCellDet()\n grd = gb.buildGrid(hiResMtx, cstClDet)\n self.assertEqual(4, grd.nParWorker)\n self.assertFalse(grd.wrapAroundDateline)\n cells = grd.cells\n cell = cells[0]\n ncls = grd.getNeighbors(cell)\n self.assertEqual(3, len(ncls))\n for nc in ncls:\n self.assertTrue(cell.distance(nc) < 1e-09)\n cell = cells[45]\n ncls = grd.getNeighbors(cell)\n self.assertEqual(8, len(ncls))\n for nc in ncls:\n self.assertTrue(cell.distance(nc) < 1e-09)\n cell = cells[100]\n ncls = grd.getNeighbors(cell)\n self.assertEqual(5, len(ncls))\n for nc in ncls:\n self.assertTrue(cell.distance(nc) < 1e-09)\n\n def testGetNeighborsWrapAroundGlobalParallel(self):\n minx = -179.0\n miny = -89.0\n dx = 2\n dy = 2\n nx = 180\n ny = 90\n maxx = minx + nx * dx\n maxy = miny + ny * dy\n gb = abRectangularGridBuilder(minx, miny, 
dx, dy, nx, ny, nParWorker=4)\n hiResMtx = self.getMockHiResAlphaMtxAndCstCellDet()\n cstClDet = self.getMockHiResAlphaMtxAndCstCellDet()\n grd = gb.buildGrid(hiResMtx, cstClDet)\n self.assertTrue(grd.wrapAroundDateline)\n cells = grd.cells\n cell = cells[0]\n ncls = grd.getNeighbors(cell)\n self.assertEqual(5, len(ncls))\n for nc in ncls:\n self.assertTrue(cell.distance(nc) < 1e-09)\n self.assertEqual(-182, ncls[3].boundary.coords[0][0])\n self.assertEqual(-182, ncls[4].boundary.coords[0][0])\n cell = cells[45]\n ncls = grd.getNeighbors(cell)\n self.assertEqual(8, len(ncls))\n for nc in ncls:\n self.assertTrue(cell.distance(nc) < 1e-09)\n self.assertEqual(-182, ncls[5].boundary.coords[0][0])\n self.assertEqual(-182, ncls[6].boundary.coords[0][0])\n self.assertEqual(-182, ncls[6].boundary.coords[0][0])\n cell = cells[65]\n ncls = grd.getNeighbors(cell)\n self.assertEqual(8, len(ncls))\n for nc in ncls:\n self.assertTrue(cell.distance(nc) < 1e-09)\n self.assertEqual(-182, ncls[5].boundary.coords[0][0])\n self.assertEqual(-182, ncls[6].boundary.coords[0][0])\n self.assertEqual(-182, ncls[6].boundary.coords[0][0])\n cell = cells[89]\n ncls = grd.getNeighbors(cell)\n self.assertEqual(5, len(ncls))\n for nc in ncls:\n self.assertTrue(cell.distance(nc) < 1e-09)\n self.assertEqual(-182, ncls[3].boundary.coords[0][0])\n self.assertEqual(-182, ncls[4].boundary.coords[0][0])\n cell = cells[200]\n ncls = grd.getNeighbors(cell)\n self.assertEqual(8, len(ncls))\n for nc in ncls:\n self.assertTrue(cell.distance(nc) < 1e-09)\n cell = cells[nx * ny - 22]\n ncls = grd.getNeighbors(cell)\n self.assertEqual(8, len(ncls))\n for nc in ncls:\n self.assertTrue(cell.distance(nc) < 1e-09)\n self.assertEqual(182, ncls[5].boundary.coords[1][0])\n self.assertEqual(182, ncls[6].boundary.coords[1][0])\n self.assertEqual(182, ncls[6].boundary.coords[1][0])\n cell = cells[nx * ny - 1]\n ncls = grd.getNeighbors(cell)\n self.assertEqual(5, len(ncls))\n for nc in ncls:\n self.assertTrue(cell.distance(nc) < 1e-09)\n self.assertEqual(182, ncls[3].boundary.coords[1][0])\n self.assertEqual(182, ncls[4].boundary.coords[1][0])\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass testAbRectangularGridBuilder(unittest.TestCase):\n\n def getMockHiResAlphaMtxAndCstCellDet(self, posCellCentroids=None):\n\n\n class _mockClass:\n\n def __init__(self, posCellCentroids):\n self.posCellCentroids = posCellCentroids\n self.cell = None\n\n def getAlphaSubMatrix(self, cell):\n sm = _mockClass(self.posCellCentroids)\n sm.cell = cell\n return sm\n\n def _positive(self, cell):\n cntrs = self.posCellCentroids\n if cell is None or cntrs is None:\n return False\n else:\n for c in cntrs:\n if cell.contains(gm.Point([c[0], c[1]])):\n return True\n return False\n\n def onLand(self):\n cell = self.cell\n return self._positive(cell)\n\n def isCoastalCell(self, cell, boundary=None, surface=-1):\n return self._positive(cell)\n return _mockClass(posCellCentroids)\n\n def testGetSeaGridSerial(self):\n minx = 100.0\n miny = 45.0\n dx = 0.5\n dy = 1.0\n nx = 30\n ny = 10\n maxx = minx + nx * dx\n maxy = miny + ny * dy\n landCntrs = [[100.25, 45.25], [105.25, 47.25]]\n coastCntrs = [[100.75, 45.25], [105.25, 47.25]]\n gb = abRectangularGridBuilder(minx, miny, dx, dy, nx, ny,\n nParWorker=1, minXYIsCentroid=False)\n hiResMtx = self.getMockHiResAlphaMtxAndCstCellDet(landCntrs)\n cstClDet = self.getMockHiResAlphaMtxAndCstCellDet(coastCntrs)\n grd = gb.buildGrid(hiResMtx, cstClDet)\n self.assertFalse(grd.wrapAroundDateline)\n self.assertEqual(1, grd.nParWorker)\n cells = grd.cells\n self.assertEqual(nx * ny - 3, len(cells))\n\n def testGetSeaGridParallel(self):\n minx = 100.0\n miny = 45.0\n dx = 0.5\n dy = 1.0\n nx = 30\n ny = 10\n maxx = minx + nx * dx\n maxy = miny + ny * dy\n landCntrs = [[100.25, 45.25], [105.25, 47.25]]\n coastCntrs = [[100.75, 45.25], [105.25, 47.25]]\n gb = abRectangularGridBuilder(minx, miny, dx, dy, nx, ny,\n nParWorker=4, minXYIsCentroid=False)\n hiResMtx = self.getMockHiResAlphaMtxAndCstCellDet(landCntrs)\n cstClDet = self.getMockHiResAlphaMtxAndCstCellDet(coastCntrs)\n grd = gb.buildGrid(hiResMtx, cstClDet)\n self.assertEqual(4, grd.nParWorker)\n self.assertFalse(grd.wrapAroundDateline)\n cells = grd.cells\n self.assertEqual(nx * ny - 3, len(cells))\n\n def testGetNeighborsSerial(self):\n minx = 100.0\n miny = 45.0\n dx = 0.5\n dy = 1.0\n nx = 30\n ny = 10\n maxx = minx + nx * dx\n maxy = miny + ny * dy\n gb = abRectangularGridBuilder(minx, miny, dx, dy, nx, ny, nParWorker=1)\n hiResMtx = self.getMockHiResAlphaMtxAndCstCellDet()\n cstClDet = self.getMockHiResAlphaMtxAndCstCellDet()\n grd = gb.buildGrid(hiResMtx, cstClDet)\n self.assertEqual(1, grd.nParWorker)\n self.assertFalse(grd.wrapAroundDateline)\n cells = grd.cells\n cell = cells[0]\n ncls = grd.getNeighbors(cell)\n self.assertEqual(3, len(ncls))\n for nc in ncls:\n self.assertTrue(cell.distance(nc) < 1e-09)\n cell = cells[45]\n ncls = grd.getNeighbors(cell)\n self.assertEqual(8, len(ncls))\n for nc in ncls:\n self.assertTrue(cell.distance(nc) < 1e-09)\n cell = cells[100]\n ncls = grd.getNeighbors(cell)\n self.assertEqual(5, len(ncls))\n for nc in ncls:\n self.assertTrue(cell.distance(nc) < 1e-09)\n\n def testGetNeighborsParallel(self):\n minx = 100.0\n miny = 45.0\n dx = 0.5\n dy = 1.0\n nx = 30\n ny = 10\n maxx = minx + nx * dx\n maxy = miny + ny * dy\n gb = abRectangularGridBuilder(minx, miny, dx, dy, nx, ny, nParWorker=4)\n hiResMtx = self.getMockHiResAlphaMtxAndCstCellDet()\n cstClDet = self.getMockHiResAlphaMtxAndCstCellDet()\n grd = gb.buildGrid(hiResMtx, cstClDet)\n self.assertEqual(4, grd.nParWorker)\n self.assertFalse(grd.wrapAroundDateline)\n cells = grd.cells\n cell = 
cells[0]\n ncls = grd.getNeighbors(cell)\n self.assertEqual(3, len(ncls))\n for nc in ncls:\n self.assertTrue(cell.distance(nc) < 1e-09)\n cell = cells[45]\n ncls = grd.getNeighbors(cell)\n self.assertEqual(8, len(ncls))\n for nc in ncls:\n self.assertTrue(cell.distance(nc) < 1e-09)\n cell = cells[100]\n ncls = grd.getNeighbors(cell)\n self.assertEqual(5, len(ncls))\n for nc in ncls:\n self.assertTrue(cell.distance(nc) < 1e-09)\n\n def testGetNeighborsWrapAroundGlobalParallel(self):\n minx = -179.0\n miny = -89.0\n dx = 2\n dy = 2\n nx = 180\n ny = 90\n maxx = minx + nx * dx\n maxy = miny + ny * dy\n gb = abRectangularGridBuilder(minx, miny, dx, dy, nx, ny, nParWorker=4)\n hiResMtx = self.getMockHiResAlphaMtxAndCstCellDet()\n cstClDet = self.getMockHiResAlphaMtxAndCstCellDet()\n grd = gb.buildGrid(hiResMtx, cstClDet)\n self.assertTrue(grd.wrapAroundDateline)\n cells = grd.cells\n cell = cells[0]\n ncls = grd.getNeighbors(cell)\n self.assertEqual(5, len(ncls))\n for nc in ncls:\n self.assertTrue(cell.distance(nc) < 1e-09)\n self.assertEqual(-182, ncls[3].boundary.coords[0][0])\n self.assertEqual(-182, ncls[4].boundary.coords[0][0])\n cell = cells[45]\n ncls = grd.getNeighbors(cell)\n self.assertEqual(8, len(ncls))\n for nc in ncls:\n self.assertTrue(cell.distance(nc) < 1e-09)\n self.assertEqual(-182, ncls[5].boundary.coords[0][0])\n self.assertEqual(-182, ncls[6].boundary.coords[0][0])\n self.assertEqual(-182, ncls[6].boundary.coords[0][0])\n cell = cells[65]\n ncls = grd.getNeighbors(cell)\n self.assertEqual(8, len(ncls))\n for nc in ncls:\n self.assertTrue(cell.distance(nc) < 1e-09)\n self.assertEqual(-182, ncls[5].boundary.coords[0][0])\n self.assertEqual(-182, ncls[6].boundary.coords[0][0])\n self.assertEqual(-182, ncls[6].boundary.coords[0][0])\n cell = cells[89]\n ncls = grd.getNeighbors(cell)\n self.assertEqual(5, len(ncls))\n for nc in ncls:\n self.assertTrue(cell.distance(nc) < 1e-09)\n self.assertEqual(-182, ncls[3].boundary.coords[0][0])\n self.assertEqual(-182, ncls[4].boundary.coords[0][0])\n cell = cells[200]\n ncls = grd.getNeighbors(cell)\n self.assertEqual(8, len(ncls))\n for nc in ncls:\n self.assertTrue(cell.distance(nc) < 1e-09)\n cell = cells[nx * ny - 22]\n ncls = grd.getNeighbors(cell)\n self.assertEqual(8, len(ncls))\n for nc in ncls:\n self.assertTrue(cell.distance(nc) < 1e-09)\n self.assertEqual(182, ncls[5].boundary.coords[1][0])\n self.assertEqual(182, ncls[6].boundary.coords[1][0])\n self.assertEqual(182, ncls[6].boundary.coords[1][0])\n cell = cells[nx * ny - 1]\n ncls = grd.getNeighbors(cell)\n self.assertEqual(5, len(ncls))\n for nc in ncls:\n self.assertTrue(cell.distance(nc) < 1e-09)\n self.assertEqual(182, ncls[3].boundary.coords[1][0])\n self.assertEqual(182, ncls[4].boundary.coords[1][0])\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass testAbRectangularGridBuilder(unittest.TestCase):\n\n def getMockHiResAlphaMtxAndCstCellDet(self, posCellCentroids=None):\n\n\n class _mockClass:\n\n def __init__(self, posCellCentroids):\n self.posCellCentroids = posCellCentroids\n self.cell = None\n\n def getAlphaSubMatrix(self, cell):\n sm = _mockClass(self.posCellCentroids)\n sm.cell = cell\n return sm\n\n def _positive(self, cell):\n cntrs = self.posCellCentroids\n if cell is None or cntrs is None:\n return False\n else:\n for c in cntrs:\n if cell.contains(gm.Point([c[0], c[1]])):\n return True\n return False\n\n def onLand(self):\n cell = self.cell\n return self._positive(cell)\n\n def isCoastalCell(self, cell, boundary=None, surface=-1):\n return self._positive(cell)\n return _mockClass(posCellCentroids)\n\n def testGetSeaGridSerial(self):\n minx = 100.0\n miny = 45.0\n dx = 0.5\n dy = 1.0\n nx = 30\n ny = 10\n maxx = minx + nx * dx\n maxy = miny + ny * dy\n landCntrs = [[100.25, 45.25], [105.25, 47.25]]\n coastCntrs = [[100.75, 45.25], [105.25, 47.25]]\n gb = abRectangularGridBuilder(minx, miny, dx, dy, nx, ny,\n nParWorker=1, minXYIsCentroid=False)\n hiResMtx = self.getMockHiResAlphaMtxAndCstCellDet(landCntrs)\n cstClDet = self.getMockHiResAlphaMtxAndCstCellDet(coastCntrs)\n grd = gb.buildGrid(hiResMtx, cstClDet)\n self.assertFalse(grd.wrapAroundDateline)\n self.assertEqual(1, grd.nParWorker)\n cells = grd.cells\n self.assertEqual(nx * ny - 3, len(cells))\n\n def testGetSeaGridParallel(self):\n minx = 100.0\n miny = 45.0\n dx = 0.5\n dy = 1.0\n nx = 30\n ny = 10\n maxx = minx + nx * dx\n maxy = miny + ny * dy\n landCntrs = [[100.25, 45.25], [105.25, 47.25]]\n coastCntrs = [[100.75, 45.25], [105.25, 47.25]]\n gb = abRectangularGridBuilder(minx, miny, dx, dy, nx, ny,\n nParWorker=4, minXYIsCentroid=False)\n hiResMtx = self.getMockHiResAlphaMtxAndCstCellDet(landCntrs)\n cstClDet = self.getMockHiResAlphaMtxAndCstCellDet(coastCntrs)\n grd = gb.buildGrid(hiResMtx, cstClDet)\n self.assertEqual(4, grd.nParWorker)\n self.assertFalse(grd.wrapAroundDateline)\n cells = grd.cells\n self.assertEqual(nx * ny - 3, len(cells))\n\n def testGetNeighborsSerial(self):\n minx = 100.0\n miny = 45.0\n dx = 0.5\n dy = 1.0\n nx = 30\n ny = 10\n maxx = minx + nx * dx\n maxy = miny + ny * dy\n gb = abRectangularGridBuilder(minx, miny, dx, dy, nx, ny, nParWorker=1)\n hiResMtx = self.getMockHiResAlphaMtxAndCstCellDet()\n cstClDet = self.getMockHiResAlphaMtxAndCstCellDet()\n grd = gb.buildGrid(hiResMtx, cstClDet)\n self.assertEqual(1, grd.nParWorker)\n self.assertFalse(grd.wrapAroundDateline)\n cells = grd.cells\n cell = cells[0]\n ncls = grd.getNeighbors(cell)\n self.assertEqual(3, len(ncls))\n for nc in ncls:\n self.assertTrue(cell.distance(nc) < 1e-09)\n cell = cells[45]\n ncls = grd.getNeighbors(cell)\n self.assertEqual(8, len(ncls))\n for nc in ncls:\n self.assertTrue(cell.distance(nc) < 1e-09)\n cell = cells[100]\n ncls = grd.getNeighbors(cell)\n self.assertEqual(5, len(ncls))\n for nc in ncls:\n self.assertTrue(cell.distance(nc) < 1e-09)\n\n def testGetNeighborsParallel(self):\n minx = 100.0\n miny = 45.0\n dx = 0.5\n dy = 1.0\n nx = 30\n ny = 10\n maxx = minx + nx * dx\n maxy = miny + ny * dy\n gb = abRectangularGridBuilder(minx, miny, dx, dy, nx, ny, nParWorker=4)\n hiResMtx = self.getMockHiResAlphaMtxAndCstCellDet()\n cstClDet = self.getMockHiResAlphaMtxAndCstCellDet()\n grd = gb.buildGrid(hiResMtx, cstClDet)\n self.assertEqual(4, grd.nParWorker)\n self.assertFalse(grd.wrapAroundDateline)\n cells = grd.cells\n cell = 
cells[0]\n ncls = grd.getNeighbors(cell)\n self.assertEqual(3, len(ncls))\n for nc in ncls:\n self.assertTrue(cell.distance(nc) < 1e-09)\n cell = cells[45]\n ncls = grd.getNeighbors(cell)\n self.assertEqual(8, len(ncls))\n for nc in ncls:\n self.assertTrue(cell.distance(nc) < 1e-09)\n cell = cells[100]\n ncls = grd.getNeighbors(cell)\n self.assertEqual(5, len(ncls))\n for nc in ncls:\n self.assertTrue(cell.distance(nc) < 1e-09)\n\n def testGetNeighborsWrapAroundGlobalParallel(self):\n minx = -179.0\n miny = -89.0\n dx = 2\n dy = 2\n nx = 180\n ny = 90\n maxx = minx + nx * dx\n maxy = miny + ny * dy\n gb = abRectangularGridBuilder(minx, miny, dx, dy, nx, ny, nParWorker=4)\n hiResMtx = self.getMockHiResAlphaMtxAndCstCellDet()\n cstClDet = self.getMockHiResAlphaMtxAndCstCellDet()\n grd = gb.buildGrid(hiResMtx, cstClDet)\n self.assertTrue(grd.wrapAroundDateline)\n cells = grd.cells\n cell = cells[0]\n ncls = grd.getNeighbors(cell)\n self.assertEqual(5, len(ncls))\n for nc in ncls:\n self.assertTrue(cell.distance(nc) < 1e-09)\n self.assertEqual(-182, ncls[3].boundary.coords[0][0])\n self.assertEqual(-182, ncls[4].boundary.coords[0][0])\n cell = cells[45]\n ncls = grd.getNeighbors(cell)\n self.assertEqual(8, len(ncls))\n for nc in ncls:\n self.assertTrue(cell.distance(nc) < 1e-09)\n self.assertEqual(-182, ncls[5].boundary.coords[0][0])\n self.assertEqual(-182, ncls[6].boundary.coords[0][0])\n self.assertEqual(-182, ncls[6].boundary.coords[0][0])\n cell = cells[65]\n ncls = grd.getNeighbors(cell)\n self.assertEqual(8, len(ncls))\n for nc in ncls:\n self.assertTrue(cell.distance(nc) < 1e-09)\n self.assertEqual(-182, ncls[5].boundary.coords[0][0])\n self.assertEqual(-182, ncls[6].boundary.coords[0][0])\n self.assertEqual(-182, ncls[6].boundary.coords[0][0])\n cell = cells[89]\n ncls = grd.getNeighbors(cell)\n self.assertEqual(5, len(ncls))\n for nc in ncls:\n self.assertTrue(cell.distance(nc) < 1e-09)\n self.assertEqual(-182, ncls[3].boundary.coords[0][0])\n self.assertEqual(-182, ncls[4].boundary.coords[0][0])\n cell = cells[200]\n ncls = grd.getNeighbors(cell)\n self.assertEqual(8, len(ncls))\n for nc in ncls:\n self.assertTrue(cell.distance(nc) < 1e-09)\n cell = cells[nx * ny - 22]\n ncls = grd.getNeighbors(cell)\n self.assertEqual(8, len(ncls))\n for nc in ncls:\n self.assertTrue(cell.distance(nc) < 1e-09)\n self.assertEqual(182, ncls[5].boundary.coords[1][0])\n self.assertEqual(182, ncls[6].boundary.coords[1][0])\n self.assertEqual(182, ncls[6].boundary.coords[1][0])\n cell = cells[nx * ny - 1]\n ncls = grd.getNeighbors(cell)\n self.assertEqual(5, len(ncls))\n for nc in ncls:\n self.assertTrue(cell.distance(nc) < 1e-09)\n self.assertEqual(182, ncls[3].boundary.coords[1][0])\n self.assertEqual(182, ncls[4].boundary.coords[1][0])\n\n\nif __name__ == '__main__':\n unittest.main()\n",
"step-4": "import unittest\nimport shapely.geometry as gm\nfrom alphaBetaLab.abRectangularGridBuilder import abRectangularGridBuilder\n\n\nclass testAbRectangularGridBuilder(unittest.TestCase):\n\n def getMockHiResAlphaMtxAndCstCellDet(self, posCellCentroids=None):\n\n\n class _mockClass:\n\n def __init__(self, posCellCentroids):\n self.posCellCentroids = posCellCentroids\n self.cell = None\n\n def getAlphaSubMatrix(self, cell):\n sm = _mockClass(self.posCellCentroids)\n sm.cell = cell\n return sm\n\n def _positive(self, cell):\n cntrs = self.posCellCentroids\n if cell is None or cntrs is None:\n return False\n else:\n for c in cntrs:\n if cell.contains(gm.Point([c[0], c[1]])):\n return True\n return False\n\n def onLand(self):\n cell = self.cell\n return self._positive(cell)\n\n def isCoastalCell(self, cell, boundary=None, surface=-1):\n return self._positive(cell)\n return _mockClass(posCellCentroids)\n\n def testGetSeaGridSerial(self):\n minx = 100.0\n miny = 45.0\n dx = 0.5\n dy = 1.0\n nx = 30\n ny = 10\n maxx = minx + nx * dx\n maxy = miny + ny * dy\n landCntrs = [[100.25, 45.25], [105.25, 47.25]]\n coastCntrs = [[100.75, 45.25], [105.25, 47.25]]\n gb = abRectangularGridBuilder(minx, miny, dx, dy, nx, ny,\n nParWorker=1, minXYIsCentroid=False)\n hiResMtx = self.getMockHiResAlphaMtxAndCstCellDet(landCntrs)\n cstClDet = self.getMockHiResAlphaMtxAndCstCellDet(coastCntrs)\n grd = gb.buildGrid(hiResMtx, cstClDet)\n self.assertFalse(grd.wrapAroundDateline)\n self.assertEqual(1, grd.nParWorker)\n cells = grd.cells\n self.assertEqual(nx * ny - 3, len(cells))\n\n def testGetSeaGridParallel(self):\n minx = 100.0\n miny = 45.0\n dx = 0.5\n dy = 1.0\n nx = 30\n ny = 10\n maxx = minx + nx * dx\n maxy = miny + ny * dy\n landCntrs = [[100.25, 45.25], [105.25, 47.25]]\n coastCntrs = [[100.75, 45.25], [105.25, 47.25]]\n gb = abRectangularGridBuilder(minx, miny, dx, dy, nx, ny,\n nParWorker=4, minXYIsCentroid=False)\n hiResMtx = self.getMockHiResAlphaMtxAndCstCellDet(landCntrs)\n cstClDet = self.getMockHiResAlphaMtxAndCstCellDet(coastCntrs)\n grd = gb.buildGrid(hiResMtx, cstClDet)\n self.assertEqual(4, grd.nParWorker)\n self.assertFalse(grd.wrapAroundDateline)\n cells = grd.cells\n self.assertEqual(nx * ny - 3, len(cells))\n\n def testGetNeighborsSerial(self):\n minx = 100.0\n miny = 45.0\n dx = 0.5\n dy = 1.0\n nx = 30\n ny = 10\n maxx = minx + nx * dx\n maxy = miny + ny * dy\n gb = abRectangularGridBuilder(minx, miny, dx, dy, nx, ny, nParWorker=1)\n hiResMtx = self.getMockHiResAlphaMtxAndCstCellDet()\n cstClDet = self.getMockHiResAlphaMtxAndCstCellDet()\n grd = gb.buildGrid(hiResMtx, cstClDet)\n self.assertEqual(1, grd.nParWorker)\n self.assertFalse(grd.wrapAroundDateline)\n cells = grd.cells\n cell = cells[0]\n ncls = grd.getNeighbors(cell)\n self.assertEqual(3, len(ncls))\n for nc in ncls:\n self.assertTrue(cell.distance(nc) < 1e-09)\n cell = cells[45]\n ncls = grd.getNeighbors(cell)\n self.assertEqual(8, len(ncls))\n for nc in ncls:\n self.assertTrue(cell.distance(nc) < 1e-09)\n cell = cells[100]\n ncls = grd.getNeighbors(cell)\n self.assertEqual(5, len(ncls))\n for nc in ncls:\n self.assertTrue(cell.distance(nc) < 1e-09)\n\n def testGetNeighborsParallel(self):\n minx = 100.0\n miny = 45.0\n dx = 0.5\n dy = 1.0\n nx = 30\n ny = 10\n maxx = minx + nx * dx\n maxy = miny + ny * dy\n gb = abRectangularGridBuilder(minx, miny, dx, dy, nx, ny, nParWorker=4)\n hiResMtx = self.getMockHiResAlphaMtxAndCstCellDet()\n cstClDet = self.getMockHiResAlphaMtxAndCstCellDet()\n grd = gb.buildGrid(hiResMtx, 
cstClDet)\n self.assertEqual(4, grd.nParWorker)\n self.assertFalse(grd.wrapAroundDateline)\n cells = grd.cells\n cell = cells[0]\n ncls = grd.getNeighbors(cell)\n self.assertEqual(3, len(ncls))\n for nc in ncls:\n self.assertTrue(cell.distance(nc) < 1e-09)\n cell = cells[45]\n ncls = grd.getNeighbors(cell)\n self.assertEqual(8, len(ncls))\n for nc in ncls:\n self.assertTrue(cell.distance(nc) < 1e-09)\n cell = cells[100]\n ncls = grd.getNeighbors(cell)\n self.assertEqual(5, len(ncls))\n for nc in ncls:\n self.assertTrue(cell.distance(nc) < 1e-09)\n\n def testGetNeighborsWrapAroundGlobalParallel(self):\n minx = -179.0\n miny = -89.0\n dx = 2\n dy = 2\n nx = 180\n ny = 90\n maxx = minx + nx * dx\n maxy = miny + ny * dy\n gb = abRectangularGridBuilder(minx, miny, dx, dy, nx, ny, nParWorker=4)\n hiResMtx = self.getMockHiResAlphaMtxAndCstCellDet()\n cstClDet = self.getMockHiResAlphaMtxAndCstCellDet()\n grd = gb.buildGrid(hiResMtx, cstClDet)\n self.assertTrue(grd.wrapAroundDateline)\n cells = grd.cells\n cell = cells[0]\n ncls = grd.getNeighbors(cell)\n self.assertEqual(5, len(ncls))\n for nc in ncls:\n self.assertTrue(cell.distance(nc) < 1e-09)\n self.assertEqual(-182, ncls[3].boundary.coords[0][0])\n self.assertEqual(-182, ncls[4].boundary.coords[0][0])\n cell = cells[45]\n ncls = grd.getNeighbors(cell)\n self.assertEqual(8, len(ncls))\n for nc in ncls:\n self.assertTrue(cell.distance(nc) < 1e-09)\n self.assertEqual(-182, ncls[5].boundary.coords[0][0])\n self.assertEqual(-182, ncls[6].boundary.coords[0][0])\n self.assertEqual(-182, ncls[6].boundary.coords[0][0])\n cell = cells[65]\n ncls = grd.getNeighbors(cell)\n self.assertEqual(8, len(ncls))\n for nc in ncls:\n self.assertTrue(cell.distance(nc) < 1e-09)\n self.assertEqual(-182, ncls[5].boundary.coords[0][0])\n self.assertEqual(-182, ncls[6].boundary.coords[0][0])\n self.assertEqual(-182, ncls[6].boundary.coords[0][0])\n cell = cells[89]\n ncls = grd.getNeighbors(cell)\n self.assertEqual(5, len(ncls))\n for nc in ncls:\n self.assertTrue(cell.distance(nc) < 1e-09)\n self.assertEqual(-182, ncls[3].boundary.coords[0][0])\n self.assertEqual(-182, ncls[4].boundary.coords[0][0])\n cell = cells[200]\n ncls = grd.getNeighbors(cell)\n self.assertEqual(8, len(ncls))\n for nc in ncls:\n self.assertTrue(cell.distance(nc) < 1e-09)\n cell = cells[nx * ny - 22]\n ncls = grd.getNeighbors(cell)\n self.assertEqual(8, len(ncls))\n for nc in ncls:\n self.assertTrue(cell.distance(nc) < 1e-09)\n self.assertEqual(182, ncls[5].boundary.coords[1][0])\n self.assertEqual(182, ncls[6].boundary.coords[1][0])\n self.assertEqual(182, ncls[6].boundary.coords[1][0])\n cell = cells[nx * ny - 1]\n ncls = grd.getNeighbors(cell)\n self.assertEqual(5, len(ncls))\n for nc in ncls:\n self.assertTrue(cell.distance(nc) < 1e-09)\n self.assertEqual(182, ncls[3].boundary.coords[1][0])\n self.assertEqual(182, ncls[4].boundary.coords[1][0])\n\n\nif __name__ == '__main__':\n unittest.main()\n",
"step-5": "import unittest\nimport shapely.geometry as gm\n\nfrom alphaBetaLab.abRectangularGridBuilder import abRectangularGridBuilder\n\nclass testAbRectangularGridBuilder(unittest.TestCase):\n\n def getMockHiResAlphaMtxAndCstCellDet(self, posCellCentroids = None):\n class _mockClass:\n def __init__(self, posCellCentroids):\n self.posCellCentroids = posCellCentroids\n self.cell = None\n def getAlphaSubMatrix(self, cell):\n sm = _mockClass(self.posCellCentroids)\n sm.cell = cell\n return sm\n def _positive(self, cell):\n cntrs = self.posCellCentroids\n if cell is None or cntrs is None:\n return False\n else:\n for c in cntrs:\n if cell.contains(gm.Point([c[0], c[1]])):\n return True\n return False\n def onLand(self):\n cell = self.cell\n return self._positive(cell)\n def isCoastalCell(self, cell, boundary = None, surface = -1):\n return self._positive(cell)\n return _mockClass(posCellCentroids)\n\n\n def testGetSeaGridSerial(self):\n minx = 100.\n miny = 45.\n dx = .5\n dy = 1.\n nx = 30\n ny = 10\n maxx = minx + nx*dx\n maxy = miny + ny*dy\n landCntrs = [[100.25, 45.25], [105.25, 47.25]]\n coastCntrs = [[100.75, 45.25], [105.25, 47.25]]\n gb = abRectangularGridBuilder(minx, miny, dx, dy, nx, ny, \n nParWorker=1, minXYIsCentroid=False)\n hiResMtx = self.getMockHiResAlphaMtxAndCstCellDet(landCntrs)\n cstClDet = self.getMockHiResAlphaMtxAndCstCellDet(coastCntrs)\n grd = gb.buildGrid(hiResMtx, cstClDet)\n self.assertFalse(grd.wrapAroundDateline)\n self.assertEqual(1, grd.nParWorker)\n cells = grd.cells\n self.assertEqual(nx*ny - 3, len(cells))\n\n\n def testGetSeaGridParallel(self):\n minx = 100.\n miny = 45.\n dx = .5\n dy = 1.\n nx = 30\n ny = 10\n maxx = minx + nx*dx\n maxy = miny + ny*dy\n landCntrs = [[100.25, 45.25], [105.25, 47.25]]\n coastCntrs = [[100.75, 45.25], [105.25, 47.25]]\n gb = abRectangularGridBuilder(minx, miny, dx, dy, nx, ny, nParWorker=4, minXYIsCentroid=False)\n hiResMtx = self.getMockHiResAlphaMtxAndCstCellDet(landCntrs)\n cstClDet = self.getMockHiResAlphaMtxAndCstCellDet(coastCntrs)\n grd = gb.buildGrid(hiResMtx, cstClDet)\n self.assertEqual(4, grd.nParWorker)\n self.assertFalse(grd.wrapAroundDateline)\n cells = grd.cells\n self.assertEqual(nx*ny - 3, len(cells))\n \n\n def testGetNeighborsSerial(self):\n minx = 100.\n miny = 45.\n dx = .5\n dy = 1.\n nx = 30\n ny = 10\n maxx = minx + nx*dx\n maxy = miny + ny*dy\n gb = abRectangularGridBuilder(minx, miny, dx, dy, nx, ny, nParWorker = 1)\n hiResMtx = self.getMockHiResAlphaMtxAndCstCellDet()\n cstClDet = self.getMockHiResAlphaMtxAndCstCellDet()\n grd = gb.buildGrid(hiResMtx, cstClDet)\n self.assertEqual(1, grd.nParWorker)\n self.assertFalse(grd.wrapAroundDateline)\n cells = grd.cells\n\n cell = cells[0]\n ncls = grd.getNeighbors(cell)\n self.assertEqual(3, len(ncls))\n for nc in ncls:\n self.assertTrue( cell.distance(nc) < .000000001 )\n\n cell = cells[45]\n ncls = grd.getNeighbors(cell)\n self.assertEqual(8, len(ncls))\n for nc in ncls:\n self.assertTrue( cell.distance(nc) < .000000001 )\n\n cell = cells[100]\n ncls = grd.getNeighbors(cell)\n self.assertEqual(5, len(ncls))\n for nc in ncls:\n self.assertTrue( cell.distance(nc) < .000000001 )\n\n\n def testGetNeighborsParallel(self):\n minx = 100.\n miny = 45.\n dx = .5\n dy = 1.\n nx = 30\n ny = 10\n maxx = minx + nx*dx\n maxy = miny + ny*dy\n gb = abRectangularGridBuilder(minx, miny, dx, dy, nx, ny, nParWorker = 4)\n hiResMtx = self.getMockHiResAlphaMtxAndCstCellDet()\n cstClDet = self.getMockHiResAlphaMtxAndCstCellDet()\n grd = gb.buildGrid(hiResMtx, cstClDet)\n 
self.assertEqual(4, grd.nParWorker)\n self.assertFalse(grd.wrapAroundDateline)\n cells = grd.cells\n\n cell = cells[0]\n ncls = grd.getNeighbors(cell)\n self.assertEqual(3, len(ncls))\n for nc in ncls:\n self.assertTrue( cell.distance(nc) < .000000001 )\n\n cell = cells[45]\n ncls = grd.getNeighbors(cell)\n self.assertEqual(8, len(ncls))\n for nc in ncls:\n self.assertTrue( cell.distance(nc) < .000000001 )\n\n cell = cells[100]\n ncls = grd.getNeighbors(cell)\n self.assertEqual(5, len(ncls))\n for nc in ncls:\n self.assertTrue( cell.distance(nc) < .000000001 )\n \n\n def testGetNeighborsWrapAroundGlobalParallel(self):\n minx = -179.\n miny = -89.\n dx = 2\n dy = 2\n nx = 180\n ny = 90\n maxx = minx + nx*dx\n maxy = miny + ny*dy\n gb = abRectangularGridBuilder(minx, miny, dx, dy, nx, ny, nParWorker = 4)\n hiResMtx = self.getMockHiResAlphaMtxAndCstCellDet()\n cstClDet = self.getMockHiResAlphaMtxAndCstCellDet()\n grd = gb.buildGrid(hiResMtx, cstClDet)\n self.assertTrue(grd.wrapAroundDateline)\n cells = grd.cells\n\n cell = cells[0]\n ncls = grd.getNeighbors(cell)\n self.assertEqual(5, len(ncls))\n for nc in ncls:\n self.assertTrue( cell.distance(nc) < .000000001 )\n self.assertEqual(-182, ncls[3].boundary.coords[0][0])\n self.assertEqual(-182, ncls[4].boundary.coords[0][0])\n\n cell = cells[45]\n ncls = grd.getNeighbors(cell)\n self.assertEqual(8, len(ncls))\n for nc in ncls:\n self.assertTrue( cell.distance(nc) < .000000001 )\n self.assertEqual(-182, ncls[5].boundary.coords[0][0])\n self.assertEqual(-182, ncls[6].boundary.coords[0][0])\n self.assertEqual(-182, ncls[6].boundary.coords[0][0])\n\n cell = cells[65]\n ncls = grd.getNeighbors(cell)\n self.assertEqual(8, len(ncls))\n for nc in ncls:\n self.assertTrue( cell.distance(nc) < .000000001 )\n self.assertEqual(-182, ncls[5].boundary.coords[0][0])\n self.assertEqual(-182, ncls[6].boundary.coords[0][0])\n self.assertEqual(-182, ncls[6].boundary.coords[0][0])\n\n cell = cells[89]\n ncls = grd.getNeighbors(cell)\n self.assertEqual(5, len(ncls))\n for nc in ncls:\n self.assertTrue( cell.distance(nc) < .000000001 )\n self.assertEqual(-182, ncls[3].boundary.coords[0][0])\n self.assertEqual(-182, ncls[4].boundary.coords[0][0])\n\n cell = cells[200]\n ncls = grd.getNeighbors(cell)\n self.assertEqual(8, len(ncls))\n for nc in ncls:\n self.assertTrue( cell.distance(nc) < .000000001 )\n\n cell = cells[nx*ny-22]\n ncls = grd.getNeighbors(cell)\n self.assertEqual(8, len(ncls))\n for nc in ncls:\n self.assertTrue( cell.distance(nc) < .000000001 )\n self.assertEqual(182, ncls[5].boundary.coords[1][0])\n self.assertEqual(182, ncls[6].boundary.coords[1][0])\n self.assertEqual(182, ncls[6].boundary.coords[1][0])\n\n cell = cells[nx*ny-1]\n ncls = grd.getNeighbors(cell)\n self.assertEqual(5, len(ncls))\n for nc in ncls:\n self.assertTrue( cell.distance(nc) < .000000001 )\n self.assertEqual(182, ncls[3].boundary.coords[1][0])\n self.assertEqual(182, ncls[4].boundary.coords[1][0])\n \n \n \nif __name__ == '__main__':\n unittest.main()\n\n",
"step-ids": [
6,
7,
8,
9,
10
]
}
|
[
6,
7,
8,
9,
10
] |
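The neighbor counts asserted above (3 for a corner cell, 5 for an edge cell, 8 for an interior cell, and 5/8 on the dateline-wrapping global grid) follow directly from 8-connected grid adjacency. A minimal counting sketch, assuming 0-based (ix, iy) indices on an nx-by-ny rectangular grid:

def neighbor_count(ix, iy, nx, ny, wrap_x=False):
    # count the 8-connected neighbors of cell (ix, iy), optionally wrapping in x
    count = 0
    for dx in (-1, 0, 1):
        for dy in (-1, 0, 1):
            if dx == 0 and dy == 0:
                continue
            jx, jy = ix + dx, iy + dy
            if wrap_x:
                jx = jx % nx
            if 0 <= jx < nx and 0 <= jy < ny:
                count += 1
    return count

print(neighbor_count(0, 0, 30, 10))                # 3: corner cell
print(neighbor_count(0, 5, 30, 10))                # 5: edge cell
print(neighbor_count(15, 5, 30, 10))               # 8: interior cell
print(neighbor_count(0, 0, 180, 90, wrap_x=True))  # 5: corner on the wrapped grid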
# coding: utf-8
from __future__ import (
absolute_import,
division,
print_function,
unicode_literals,
)
import time
from urllib import urlencode
from urlparse import parse_qs, urlparse, urlunparse
from flask import current_app as app
from flask import url_for
from jose import jwt
from oauth2client.client import flow_from_clientsecrets
from pathlib2 import Path
from .models import Customer
def create_oauth_flow():
"""Prepare Google OAuth workflow from config file."""
app.flow = flow_from_clientsecrets(
str(Path(app.config['ROOT_DIR'], 'configs/client_secrets.json')),
scope=['email', 'profile'],
redirect_uri=url_for('auth.oauth2callback', _external=True),
)
def create_jwt(user, name=None, renewable=False):
"""Create a JWT."""
session_user = sessionize_user(user, name)
session_customer = sessionize_customer(
Customer.get_by_name(user.customers[0])
)
return format_jwt(session_user, session_customer, renewable)
def sessionize_user(user, name):
document = user.to_dict(include_meta=True)
sessionized = {}
sessionized.update(document['_source'])
sessionized['_id'] = document['_id']
sessionized['google_name'] = name
return sessionized
def sessionize_customer(customer):
document = customer.to_dict(include_meta=True)
sessionized = {}
sessionized.update(document['_source'])
sessionized['_id'] = document['_id']
return sessionized
def format_jwt(user, active_customer, renewable):
"""Format a JWT and MAC it."""
now = int(time.time())
claims = {
# reserved: https://tools.ietf.org/html/rfc7519#section-4.1
'exp': now + app.config['AUTH_TOKEN_LIFETIME'],
'nbf': now, # not before
'iss': app.config['AUTH_TOKEN_ISSUER'],
'iat': now, # issue date
# private: https://tools.ietf.org/html/rfc7519#section-4.3
'user': user,
'active_customer': active_customer,
'renewable': renewable,
}
return jwt.encode(
claims,
key=app.config['AUTH_JWT_SECRET'],
algorithm=app.config['AUTH_JWT_ALGORITHM'],
)
def set_params(url, params):
"""Set GET parameters on a URL."""
components = urlparse(url)
query = parse_qs(components.query)
query.update(params)
components = components._replace(query=urlencode(query, doseq=True))
return urlunparse(components)
|
normal
|
{
"blob_id": "fe73a80b15cad025a33930ddd9abb31524cd0244",
"index": 9404,
"step-1": "<mask token>\n\n\ndef create_oauth_flow():\n \"\"\"Prepare Google OAuth workflow from config file.\"\"\"\n app.flow = flow_from_clientsecrets(str(Path(app.config['ROOT_DIR'],\n 'configs/client_secrets.json')), scope=['email', 'profile'],\n redirect_uri=url_for('auth.oauth2callback', _external=True))\n\n\ndef create_jwt(user, name=None, renewable=False):\n \"\"\"Create a JWT.\"\"\"\n session_user = sessionize_user(user, name)\n session_customer = sessionize_customer(Customer.get_by_name(user.\n customers[0]))\n return format_jwt(session_user, session_customer, renewable)\n\n\ndef sessionize_user(user, name):\n document = user.to_dict(include_meta=True)\n sessionized = {}\n sessionized.update(document['_source'])\n sessionized['_id'] = document['_id']\n sessionized['google_name'] = name\n return sessionized\n\n\ndef sessionize_customer(customer):\n document = customer.to_dict(include_meta=True)\n sessionized = {}\n sessionized.update(document['_source'])\n sessionized['_id'] = document['_id']\n return sessionized\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef create_oauth_flow():\n \"\"\"Prepare Google OAuth workflow from config file.\"\"\"\n app.flow = flow_from_clientsecrets(str(Path(app.config['ROOT_DIR'],\n 'configs/client_secrets.json')), scope=['email', 'profile'],\n redirect_uri=url_for('auth.oauth2callback', _external=True))\n\n\ndef create_jwt(user, name=None, renewable=False):\n \"\"\"Create a JWT.\"\"\"\n session_user = sessionize_user(user, name)\n session_customer = sessionize_customer(Customer.get_by_name(user.\n customers[0]))\n return format_jwt(session_user, session_customer, renewable)\n\n\ndef sessionize_user(user, name):\n document = user.to_dict(include_meta=True)\n sessionized = {}\n sessionized.update(document['_source'])\n sessionized['_id'] = document['_id']\n sessionized['google_name'] = name\n return sessionized\n\n\ndef sessionize_customer(customer):\n document = customer.to_dict(include_meta=True)\n sessionized = {}\n sessionized.update(document['_source'])\n sessionized['_id'] = document['_id']\n return sessionized\n\n\n<mask token>\n\n\ndef set_params(url, params):\n \"\"\"Set GET parameters on a URL.\"\"\"\n components = urlparse(url)\n query = parse_qs(components.query)\n query.update(params)\n components = components._replace(query=urlencode(query, doseq=True))\n return urlunparse(components)\n",
"step-3": "<mask token>\n\n\ndef create_oauth_flow():\n \"\"\"Prepare Google OAuth workflow from config file.\"\"\"\n app.flow = flow_from_clientsecrets(str(Path(app.config['ROOT_DIR'],\n 'configs/client_secrets.json')), scope=['email', 'profile'],\n redirect_uri=url_for('auth.oauth2callback', _external=True))\n\n\ndef create_jwt(user, name=None, renewable=False):\n \"\"\"Create a JWT.\"\"\"\n session_user = sessionize_user(user, name)\n session_customer = sessionize_customer(Customer.get_by_name(user.\n customers[0]))\n return format_jwt(session_user, session_customer, renewable)\n\n\ndef sessionize_user(user, name):\n document = user.to_dict(include_meta=True)\n sessionized = {}\n sessionized.update(document['_source'])\n sessionized['_id'] = document['_id']\n sessionized['google_name'] = name\n return sessionized\n\n\ndef sessionize_customer(customer):\n document = customer.to_dict(include_meta=True)\n sessionized = {}\n sessionized.update(document['_source'])\n sessionized['_id'] = document['_id']\n return sessionized\n\n\ndef format_jwt(user, active_customer, renewable):\n \"\"\"Format a JWT and MAC it.\"\"\"\n now = int(time.time())\n claims = {'exp': now + app.config['AUTH_TOKEN_LIFETIME'], 'nbf': now,\n 'iss': app.config['AUTH_TOKEN_ISSUER'], 'iat': now, 'user': user,\n 'active_customer': active_customer, 'renewable': renewable}\n return jwt.encode(claims, key=app.config['AUTH_JWT_SECRET'], algorithm=\n app.config['AUTH_JWT_ALGORITHM'])\n\n\ndef set_params(url, params):\n \"\"\"Set GET parameters on a URL.\"\"\"\n components = urlparse(url)\n query = parse_qs(components.query)\n query.update(params)\n components = components._replace(query=urlencode(query, doseq=True))\n return urlunparse(components)\n",
"step-4": "from __future__ import absolute_import, division, print_function, unicode_literals\nimport time\nfrom urllib import urlencode\nfrom urlparse import parse_qs, urlparse, urlunparse\nfrom flask import current_app as app\nfrom flask import url_for\nfrom jose import jwt\nfrom oauth2client.client import flow_from_clientsecrets\nfrom pathlib2 import Path\nfrom .models import Customer\n\n\ndef create_oauth_flow():\n \"\"\"Prepare Google OAuth workflow from config file.\"\"\"\n app.flow = flow_from_clientsecrets(str(Path(app.config['ROOT_DIR'],\n 'configs/client_secrets.json')), scope=['email', 'profile'],\n redirect_uri=url_for('auth.oauth2callback', _external=True))\n\n\ndef create_jwt(user, name=None, renewable=False):\n \"\"\"Create a JWT.\"\"\"\n session_user = sessionize_user(user, name)\n session_customer = sessionize_customer(Customer.get_by_name(user.\n customers[0]))\n return format_jwt(session_user, session_customer, renewable)\n\n\ndef sessionize_user(user, name):\n document = user.to_dict(include_meta=True)\n sessionized = {}\n sessionized.update(document['_source'])\n sessionized['_id'] = document['_id']\n sessionized['google_name'] = name\n return sessionized\n\n\ndef sessionize_customer(customer):\n document = customer.to_dict(include_meta=True)\n sessionized = {}\n sessionized.update(document['_source'])\n sessionized['_id'] = document['_id']\n return sessionized\n\n\ndef format_jwt(user, active_customer, renewable):\n \"\"\"Format a JWT and MAC it.\"\"\"\n now = int(time.time())\n claims = {'exp': now + app.config['AUTH_TOKEN_LIFETIME'], 'nbf': now,\n 'iss': app.config['AUTH_TOKEN_ISSUER'], 'iat': now, 'user': user,\n 'active_customer': active_customer, 'renewable': renewable}\n return jwt.encode(claims, key=app.config['AUTH_JWT_SECRET'], algorithm=\n app.config['AUTH_JWT_ALGORITHM'])\n\n\ndef set_params(url, params):\n \"\"\"Set GET parameters on a URL.\"\"\"\n components = urlparse(url)\n query = parse_qs(components.query)\n query.update(params)\n components = components._replace(query=urlencode(query, doseq=True))\n return urlunparse(components)\n",
"step-5": "# coding: utf-8\n\nfrom __future__ import (\n absolute_import,\n division,\n print_function,\n unicode_literals,\n)\n\nimport time\nfrom urllib import urlencode\nfrom urlparse import parse_qs, urlparse, urlunparse\n\nfrom flask import current_app as app\nfrom flask import url_for\nfrom jose import jwt\nfrom oauth2client.client import flow_from_clientsecrets\nfrom pathlib2 import Path\n\nfrom .models import Customer\n\n\ndef create_oauth_flow():\n \"\"\"Prepare Google OAuth workflow from config file.\"\"\"\n app.flow = flow_from_clientsecrets(\n str(Path(app.config['ROOT_DIR'], 'configs/client_secrets.json')),\n scope=['email', 'profile'],\n redirect_uri=url_for('auth.oauth2callback', _external=True),\n )\n\n\ndef create_jwt(user, name=None, renewable=False):\n \"\"\"Create a JWT.\"\"\"\n session_user = sessionize_user(user, name)\n session_customer = sessionize_customer(\n Customer.get_by_name(user.customers[0])\n )\n\n return format_jwt(session_user, session_customer, renewable)\n\n\ndef sessionize_user(user, name):\n document = user.to_dict(include_meta=True)\n\n sessionized = {}\n sessionized.update(document['_source'])\n sessionized['_id'] = document['_id']\n sessionized['google_name'] = name\n\n return sessionized\n\n\ndef sessionize_customer(customer):\n document = customer.to_dict(include_meta=True)\n\n sessionized = {}\n sessionized.update(document['_source'])\n sessionized['_id'] = document['_id']\n\n return sessionized\n\n\ndef format_jwt(user, active_customer, renewable):\n \"\"\"Format a JWT and MAC it.\"\"\"\n now = int(time.time())\n\n claims = {\n # reserved: https://tools.ietf.org/html/rfc7519#section-4.1\n 'exp': now + app.config['AUTH_TOKEN_LIFETIME'],\n 'nbf': now, # not before\n 'iss': app.config['AUTH_TOKEN_ISSUER'],\n 'iat': now, # issue date\n # private: https://tools.ietf.org/html/rfc7519#section-4.3\n 'user': user,\n 'active_customer': active_customer,\n 'renewable': renewable,\n }\n\n return jwt.encode(\n claims,\n key=app.config['AUTH_JWT_SECRET'],\n algorithm=app.config['AUTH_JWT_ALGORITHM'],\n )\n\n\ndef set_params(url, params):\n \"\"\"Set GET parameters on a URL.\"\"\"\n components = urlparse(url)\n\n query = parse_qs(components.query)\n query.update(params)\n\n components = components._replace(query=urlencode(query, doseq=True))\n return urlunparse(components)\n",
"step-ids": [
4,
5,
6,
7,
8
]
}
|
[
4,
5,
6,
7,
8
] |
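format_jwt above MACs the claims with python-jose; the natural counterpart is verifying and decoding the token on the way back in. A minimal sketch, assuming the same config values (AUTH_JWT_SECRET, AUTH_JWT_ALGORITHM, AUTH_TOKEN_ISSUER) as in the record:

from jose import jwt, JWTError

def verify_jwt(token, secret, algorithm, issuer):
    """Return the claims dict if the token verifies, else None."""
    try:
        # jose checks the signature plus the exp/nbf and iss claims in one call
        return jwt.decode(token, key=secret, algorithms=[algorithm], issuer=issuer)
    except JWTError:
        return None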
#!/usr/bin/env python
# coding: utf-8
# MIT Licensed
# http://opensource.org/licenses/MIT
led_dir = "/sys/class/gpio/gpio40/"
led_pin = led_dir + "value"
led_mode = led_dir + "direction"
with open(led_mode, "wb") as f:
f.write("out")
with open(led_pin, "wb") as f:
f.write(__import__("sys").argv[1])
"""
Contributors!
Danilo J. S. Bellini
Estevão U. P. Vieira
Lucas S. Simões
Thiago M. Sanches
Paulo R. O. Castro
AEEEW!!!! =D
"""
|
normal
|
{
"blob_id": "1a9cad6e49e5ed2bb7781f9fec930d48ec048b3b",
"index": 5061,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nwith open(led_mode, 'wb') as f:\n f.write('out')\nwith open(led_pin, 'wb') as f:\n f.write(__import__('sys').argv[1])\n<mask token>\n",
"step-3": "led_dir = '/sys/class/gpio/gpio40/'\nled_pin = led_dir + 'value'\nled_mode = led_dir + 'direction'\nwith open(led_mode, 'wb') as f:\n f.write('out')\nwith open(led_pin, 'wb') as f:\n f.write(__import__('sys').argv[1])\n<mask token>\n",
"step-4": "#!/usr/bin/env python\n# coding: utf-8\n\n# MIT Licensed\n# http://opensource.org/licenses/MIT\n\nled_dir = \"/sys/class/gpio/gpio40/\"\nled_pin = led_dir + \"value\"\nled_mode = led_dir + \"direction\"\n\nwith open(led_mode, \"wb\") as f:\n f.write(\"out\")\n\nwith open(led_pin, \"wb\") as f:\n f.write(__import__(\"sys\").argv[1])\n\n\"\"\"\nContributors!\n\nDanilo J. S. Bellini\nEstevão U. P. Vieira\nLucas S. Simões\nThiago M. Sanches\nPaulo R. O. Castro\n\nAEEEW!!!! =D\n\"\"\"\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
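The sysfs write pattern in the record above has a matching read path. A minimal sketch, assuming gpio40 has already been exported and configured exactly as in the record:

led_pin = "/sys/class/gpio/gpio40/value"

with open(led_pin) as f:
    state = f.read().strip()  # "0" or "1"
print("LED is " + ("on" if state == "1" else "off"))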
#!/usr/bin/env python
# coding: utf-8
import numpy as np
import pandas as pd
class simple_nn():
'''
    This is a simple NN class with a 3-layer NN. An additional layer was added to the original layers
    from the notebook given by Julian Stier and Sahib Julka.
    Moreover, those functions were refactored so that the final class would look more concise
    and be easier to read.
    Additionally, optimizations were done to support multiclass classification tasks (i.e. more than 2 classes)
-----------------------------------------------------------------------------------------------
OUTPUT:
weights that must be used to call predict method of the class
    loss_res - list that consists of the loss values calculated during the training steps
    accuracy_res - list that consists of the accuracy values calculated during the training steps
-----------------------------------------------------------------------------------------------
INPUT:
creating a class examplar:
simple_nn(input_dim, output_dim, lr, num_epochs, decay_rate)
    where: input_dim - input dimension of NN,
           output_dim - output dimension of NN,
           lr - learning rate,
num_epochs - number of epochs to iterate over
decay_rate - decay rate for learning rate
For example:
model = simple_nn(2, 2, 0.01, 2, 0.5)
Once model is initialized, we can call train method
train(x, y, nn_hdim, batch_size)
where: x, y are self-explanatory,
nn_hdim - num of neurons in hidden layer,
           batch_size - size of the batch which will be used to split the data in each epoch
For example:
weights, loss_res, accuracy_res = model.train(X_train, y_train, 10, batch_size=50)
---------------------------------------------------------------------------------------
PREDICT:
    Once the model is trained it will return weights, also called the "model".
    Having the weights and x is sufficient to execute prediction with the simple NN.
    Prediction will return the predicted classes for the given inputs:
y_hat = model.predict(weights, X_test)
'''
def __init__(self, nn_input_dim, nn_output_dim, lr, epochs, decay_rate):
self.nn_input_dim = nn_input_dim # input layer dimensionality
self.nn_output_dim = nn_output_dim # output layer dimensionality
self.lr_init = lr # learning rate for gradient descent
self.epochs = epochs
        self.decay_rate = decay_rate # decay rate for calculating the learning rate decay
self.reg_lambda = 0.01 # regularization strength
def init_weights(self, nn_hdim):
np.random.seed(0)
        # when we initialize weights we normalise them by sqrt(input dimension),
        # which has been empirically shown to improve the rate of convergence
self.W1 = np.random.rand(self.nn_input_dim, nn_hdim)/ np.sqrt(self.nn_input_dim)
self.b1 = np.random.rand(1, nn_hdim)
self.W2 = np.random.rand(nn_hdim, nn_hdim)/ np.sqrt(nn_hdim)
self.b2 = np.random.rand(1, nn_hdim)
# W3 and b3 are added as here we are having +1 layer
self.W3 = np.random.rand(nn_hdim, self.nn_output_dim)/ np.sqrt(nn_hdim)
self.b3 = np.random.rand(1, self.nn_output_dim)
return self.W1, self.b1, self.W2, self.b2, self.W3, self.b3
# sigmoid and sigmoid derivative have been added to this NN
def sigmoid(self, x):
return 1/(1+np.exp(-x))
def sigmoid_deriv(self, x):
f = 1/(1+np.exp(-x))
df = f * (1 - f)
return df
def softmax(self, x):
        # subtract the per-row max before exponentiating for numerical stability
        exp_scores = np.exp(x - np.max(x, axis=1, keepdims=True))
probs = exp_scores / np.sum(exp_scores, axis=1, keepdims=True)
return probs
def tanh_deriv(self, x):
return 1 - np.power(x, 2)
def lr_decay(self, epoch):
lr = self.lr_init/(1+self.decay_rate * epoch)
return lr
def forward_prop(self, W1, b1, W2, b2, W3, b3, x):
# Forward propagation
z1 = x.dot(W1) + b1
a1 = np.tanh(z1)
# layer 2 was added, i.e z2 and a2
z2 = a1.dot(W2) + b2
a2 = self.sigmoid(z2)
z3 = a2.dot(W3) + b3
a3 = self.softmax(z3)
return z1, a1, z2, a2, z3, a3
def backward_prop(self, z1, a1, z2, a2, z3, a3, W1, W2, W3, x, y):
delta4 = a3
        # so delta4 is the error that we want to disseminate back to W3, W2, W1;
        # subtracting 1 at the true-class indices gives the softmax cross-entropy gradient
delta4[range(self.batch_size), y] -= 1
dW3 = (a2.T).dot(delta4)
db3 = np.sum(delta4, axis=0, keepdims=True)
        # delta3 = error propagated through W3, multiplied by the sigmoid derivative
delta3 = delta4.dot(W3.T) * self.sigmoid_deriv(a2)
dW2 = (a1.T).dot(delta3)
db2 = np.sum(delta3, axis=0, keepdims=True)
        # note: tanh_deriv here expects the activation (it computes 1 - x**2), so passing a1 is correct
delta2 = delta3.dot(W2.T) * self.tanh_deriv(a1)
dW1 = np.dot(x.T, delta2)
db1 = np.sum(delta2, axis=0)
return dW1, db1, dW2, db2, dW3, db3
def params_update(self, W1, b1, W2, b2, W3, b3, dW1, db1, dW2, db2, dW3, db3):
dW3 += self.reg_lambda * W3
dW2 += self.reg_lambda * W2
dW1 += self.reg_lambda * W1
W1 += -self.lr * dW1
b1 += -self.lr * db1
W2 += -self.lr * dW2
b2 += -self.lr * db2
W3 += -self.lr * dW3
b3 += -self.lr * db3
return W1, b1, W2, b2, W3, b3
def train(self, X, y, nn_hdim, batch_size):
# Initialize the parameters to random values. We need to learn these.
W1, b1, W2, b2, W3, b3 = self.init_weights(nn_hdim)
self.batch_size = batch_size
loss_res = []
accuracy_res = []
# This is what we return at the end
self.model = {}
# defining number of batches
num_batches = X.shape[0]//self.batch_size
# Gradient descent
for epoch in range(0, self.epochs):
print('epochs', epoch)
if epoch == 0:
self.lr = self.lr_init
else:
self.lr = self.lr_decay(epoch)
for batch_num in range(num_batches):
print('batch_num', batch_num)
# slicing batch data
start = batch_num * self.batch_size
end = (batch_num + 1) * self.batch_size
self.x_batched = X[start:end]
self.y_batched = np.array(y[start:end])
                # train the model by applying forward and backward propagation and updating the weights 
z1, a1, z2, a2, z3, a3 = self.forward_prop(W1, b1, W2, b2, W3, b3, self.x_batched)
dW1, db1, dW2, db2, dW3, db3 = self.backward_prop(z1, a1, z2, a2, z3, a3, W1, W2, W3, self.x_batched, self.y_batched)
W1, b1, W2, b2, W3, b3 = self.params_update(W1, b1, W2, b2, W3, b3, dW1, db1, dW2, db2, dW3, db3)
# Assign new parameters to the model
self.model = {'W1': W1, 'b1': b1, 'W2': W2, 'b2': b2, 'W3': W3, 'b3': b3}
# IMPORTANT
# to compute loss value and accuracy we should use new weights and the same batch of x and y data
loss, acc = self.metrics(W1, W2, W3, b1, b2, b3, self.x_batched, self.y_batched)
loss_res.append(loss)
accuracy_res.append(acc)
return self.model, loss_res, accuracy_res
def metrics(self, W1, W2, W3, b1, b2, b3, X, y):
z1, a1, z2, a2, z3, a3 = self.forward_prop(W1, b1, W2, b2, W3, b3, X)
loss = self.calculate_loss(a3, y, W1, W2, W3)
acc = self.calculate_accuracy(a3, y)
return loss, acc
def calculate_loss(self, a3, y, W1, W2, W3):
        correct_logprobs = -np.log(a3[range(self.batch_size), y])
        data_loss = np.sum(correct_logprobs)
        # Add regularization term to loss (optional)
data_loss += self.reg_lambda/2 * (np.sum(np.square(W1)) + np.sum(np.square(W2))+np.sum(np.square(W3)))
#print('loss a2',1./self.batch_size * data_loss)
return 1./self.batch_size * data_loss
def calculate_accuracy(self, a3, y_true):
y_hat = np.argmax(a3, axis=1)
correct = sum(y_true == y_hat)
incorrect = len(y_true) - correct
return correct/len(y_true)*100
def predict(self, model, x):
W1, b1, W2, b2, W3, b3 = model['W1'], model['b1'], model['W2'], model['b2'], model['W3'], model['b3']
# Forward propagation
z1, a1, z2, a2, z3, a3 = self.forward_prop(W1, b1, W2, b2, W3, b3, x)
return np.argmax(a3, axis=1)
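# A minimal end-to-end run of the class above, added for illustration; the synthetic
# data and hyperparameters below are my own assumptions, not part of the original record.
X_demo = np.random.rand(100, 2)
y_demo = (X_demo[:, 0] + X_demo[:, 1] > 1).astype(int)  # tiny two-class toy problem
model = simple_nn(2, 2, 0.01, 2, 0.5)
weights, loss_res, accuracy_res = model.train(X_demo, y_demo, 10, batch_size=50)
y_hat = model.predict(weights, X_demo)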
|
normal
|
{
"blob_id": "cdc32e7c767097a0eb0def71e55f0276982d6a96",
"index": 5235,
"step-1": "#!/usr/bin/env python\n# coding: utf-8\n\n# In[19]:\n\n\nimport numpy as np\nimport pandas as pd\n\nclass simple_nn():\n '''\n This is simple nn class with 3 layers NN. In this class additional layer was added to the original layers\n from notebook given by Julian Stier and Sahib Julka.\n Moreover those functions were refactored so that final class would look more concise\n and easier to read. \n Additionaly optimization were done to work with multiclassification tasks (i.e > than 2 classes)\n -----------------------------------------------------------------------------------------------\n OUTPUT:\n weights that must be used to call predict method of the class\n loss_res - list that consist of loss value calculated during training steps\n accuracy_res - list that consist of accuracy value calculated during training steps \n -----------------------------------------------------------------------------------------------\n INPUT:\n creating a class examplar:\n \n simple_nn(input_dim, output_dim, lr, num_epochs, decay_rate)\n \n where: input_dim - input dimention of NN , \n output_dim - output dimention of NN, \n lr -learnin rate, \n num_epochs - number of epochs to iterate over \n decay_rate - decay rate for learning rate \n For example: \n model = simple_nn(2, 2, 0.01, 2, 0.5)\n \n Once model is initialized, we can call train method \n train(x, y, nn_hdim, batch_size)\n where: x, y are self-explanatory, \n nn_hdim - num of neurons in hidden layer,\n batch_size - size of batch wich will be used to split the data in each epoch\n \n For example: \n weights, loss_res, accuracy_res = model.train(X_train, y_train, 10, batch_size=50)\n ---------------------------------------------------------------------------------------\n PREDICT:\n Once model is trained it will return weights or also called \"model\".\n Having weights and x is sufficient to execute prediction with simple NN.\n Prediction will return predicted classes for the given inputs:\n \n y_hat = model.predict(weights, X_test) \n '''\n \n def __init__(self, nn_input_dim, nn_output_dim, lr, epochs, decay_rate):\n \n self.nn_input_dim = nn_input_dim # input layer dimensionality\n self.nn_output_dim = nn_output_dim # output layer dimensionality\n \n self.lr_init = lr # learning rate for gradient descent\n self.epochs = epochs\n self.decay_rate = decay_rate # decay rate for calculating learninng rate decay \n self.reg_lambda = 0.01 # regularization strength\n\n def init_weights(self, nn_hdim):\n np.random.seed(0)\n # when we initialize weights we normalise them by sqrt(n of input)\n # that has been empirically proved to improve the rate of convergence \n \n self.W1 = np.random.rand(self.nn_input_dim, nn_hdim)/ np.sqrt(self.nn_input_dim)\n self.b1 = np.random.rand(1, nn_hdim)\n self.W2 = np.random.rand(nn_hdim, nn_hdim)/ np.sqrt(nn_hdim)\n self.b2 = np.random.rand(1, nn_hdim)\n \n # W3 and b3 are added as here we are having +1 layer \n self.W3 = np.random.rand(nn_hdim, self.nn_output_dim)/ np.sqrt(nn_hdim)\n self.b3 = np.random.rand(1, self.nn_output_dim) \n \n return self.W1, self.b1, self.W2, self.b2, self.W3, self.b3\n \n # sigmoid and sigmoid derivative have been added to this NN\n def sigmoid(self, x):\n return 1/(1+np.exp(-x))\n \n def sigmoid_deriv(self, x):\n f = 1/(1+np.exp(-x))\n df = f * (1 - f)\n return df\n \n def softmax(self, x):\n exp_scores = np.exp(x)\n probs = exp_scores / np.sum(exp_scores, axis=1, keepdims=True)\n return probs\n \n def tanh_deriv(self, x):\n return 1 - np.power(x, 2)\n \n def lr_decay(self, 
epoch):\n lr = self.lr_init/(1+self.decay_rate * epoch)\n return lr\n \n def forward_prop(self, W1, b1, W2, b2, W3, b3, x):\n # Forward propagation\n z1 = x.dot(W1) + b1\n a1 = np.tanh(z1)\n \n # layer 2 was added, i.e z2 and a2\n z2 = a1.dot(W2) + b2\n a2 = self.sigmoid(z2) \n \n z3 = a2.dot(W3) + b3\n a3 = self.softmax(z3)\n\n return z1, a1, z2, a2, z3, a3\n \n def backward_prop(self, z1, a1, z2, a2, z3, a3, W1, W2, W3, x, y):\n \n delta4 = a3 \n # so delta 4 is error that we want to dissiminate to W3, W2, W1\n # assigning to errors -1 ?\n delta4[range(self.batch_size), y] -= 1\n \n dW3 = (a2.T).dot(delta4)\n db3 = np.sum(delta4, axis=0, keepdims=True)\n \n # delta3 = error * by W3 * by sigmoid derivative\n delta3 = delta4.dot(W3.T) * self.sigmoid_deriv(a2)\n \n dW2 = (a1.T).dot(delta3)\n db2 = np.sum(delta3, axis=0, keepdims=True)\n \n # shouldn't we pass z1 to tanh_derivative? \n delta2 = delta3.dot(W2.T) * self.tanh_deriv(a1)\n \n dW1 = np.dot(x.T, delta2)\n db1 = np.sum(delta2, axis=0)\n \n return dW1, db1, dW2, db2, dW3, db3\n \n def params_update(self, W1, b1, W2, b2, W3, b3, dW1, db1, dW2, db2, dW3, db3):\n \n dW3 += self.reg_lambda * W3\n dW2 += self.reg_lambda * W2\n dW1 += self.reg_lambda * W1\n \n W1 += -self.lr * dW1\n b1 += -self.lr * db1\n W2 += -self.lr * dW2\n b2 += -self.lr * db2\n W3 += -self.lr * dW3\n b3 += -self.lr * db3\n \n return W1, b1, W2, b2, W3, b3 \n \n def train(self, X, y, nn_hdim, batch_size):\n \n # Initialize the parameters to random values. We need to learn these.\n\n W1, b1, W2, b2, W3, b3 = self.init_weights(nn_hdim) \n self.batch_size = batch_size\n loss_res = []\n accuracy_res = []\n \n # This is what we return at the end\n self.model = {}\n \n # defining number of batches \n num_batches = X.shape[0]//self.batch_size\n \n # Gradient descent\n for epoch in range(0, self.epochs):\n \n print('epochs', epoch)\n if epoch == 0:\n self.lr = self.lr_init\n else:\n self.lr = self.lr_decay(epoch)\n \n for batch_num in range(num_batches):\n print('batch_num', batch_num)\n \n # slicing batch data\n start = batch_num * self.batch_size\n end = (batch_num + 1) * self.batch_size\n self.x_batched = X[start:end]\n self.y_batched = np.array(y[start:end])\n \n # training model by applying forward, backwar propagation and updating weithgs \n z1, a1, z2, a2, z3, a3 = self.forward_prop(W1, b1, W2, b2, W3, b3, self.x_batched)\n dW1, db1, dW2, db2, dW3, db3 = self.backward_prop(z1, a1, z2, a2, z3, a3, W1, W2, W3, self.x_batched, self.y_batched)\n W1, b1, W2, b2, W3, b3 = self.params_update(W1, b1, W2, b2, W3, b3, dW1, db1, dW2, db2, dW3, db3)\n \n # Assign new parameters to the model\n self.model = {'W1': W1, 'b1': b1, 'W2': W2, 'b2': b2, 'W3': W3, 'b3': b3}\n \n # IMPORTANT\n # to compute loss value and accuracy we should use new weights and the same batch of x and y data \n loss, acc = self.metrics(W1, W2, W3, b1, b2, b3, self.x_batched, self.y_batched)\n loss_res.append(loss)\n accuracy_res.append(acc)\n\n return self.model, loss_res, accuracy_res\n\n def metrics(self, W1, W2, W3, b1, b2, b3, X, y):\n \n z1, a1, z2, a2, z3, a3 = self.forward_prop(W1, b1, W2, b2, W3, b3, X)\n loss = self.calculate_loss(a3, y, W1, W2, W3)\n acc = self.calculate_accuracy(a3, y)\n return loss, acc\n \n def calculate_loss(self, a3, y, W1, W2, W3):\n\n corect_logprobs = -np.log(a3[range(self.batch_size), y])\n data_loss = np.sum(corect_logprobs)\n # Add regulatization term to loss (optional)\n data_loss += self.reg_lambda/2 * (np.sum(np.square(W1)) + 
np.sum(np.square(W2))+np.sum(np.square(W3)))\n #print('loss a2',1./self.batch_size * data_loss)\n return 1./self.batch_size * data_loss \n\n def calculate_accuracy(self, a3, y_true):\n\n y_hat = np.argmax(a3, axis=1)\n correct = sum(y_true == y_hat)\n incorrect = len(y_true) - correct\n return correct/len(y_true)*100\n \n def predict(self, model, x):\n W1, b1, W2, b2, W3, b3 = model['W1'], model['b1'], model['W2'], model['b2'], model['W3'], model['b3']\n # Forward propagation\n z1, a1, z2, a2, z3, a3 = self.forward_prop(W1, b1, W2, b2, W3, b3, x)\n return np.argmax(a3, axis=1)\n\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
from datetime import datetime
from iohelpers import lines_to_textfile
from typing import Iterator, List, Sequence
from zhmodules import ZhTopolectSynonyms, MandarinPronunciations, ZhTopolectPronunciations
def missing_philippine_hokkien_words_generator(synonyms: ZhTopolectSynonyms, hokprons: ZhTopolectPronunciations):
all_hokkien = set()
for word, syn_data in synonyms.all_words():
minnan = set(syn_data['Philippine-MN'])
minnan.update(syn_data['Quanzhou'])
minnan.update(syn_data['Xiamen'])
for hokkien in minnan:
banlamoe = hokkien.split(':')
all_hokkien.add(banlamoe[0])
return words_missing_prons(all_hokkien, hokprons)
def words_missing_prons(corpus: Sequence[str], prons: ZhTopolectPronunciations):
return [word for word in corpus if prons.pronunciation(word) is None and all(ord(char) > 255 for char in word)]
if __name__ == '__main__':
synonyms = ZhTopolectSynonyms.from_local_folder('../data/enwiktionary/module-zh-data-json/dial-syn')
mp = MandarinPronunciations.from_local_json_file('../data/enwiktionary/module-zh-data-json/combined-mandarin-pron.json')
missing_mandarin_prons = iter(words_missing_prons(synonyms.mandarin_words(), mp))
h = ZhTopolectPronunciations.from_local_json_folder('../data/enwiktionary/module-zh-data-json/nan-pron')
missing_hokkien_prons = iter(missing_philippine_hokkien_words_generator(synonyms, h))
today = datetime.today().strftime("%Y%m%d")
lines_to_textfile(f'../data/enwiktionary/words-search/missing-hokkien.{today}.txt', missing_hokkien_prons)
lines_to_textfile(f'../data/enwiktionary/words-search/missing-mandarin.{today}.txt', missing_mandarin_prons)
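# A tiny behavioral sketch of words_missing_prons, using a hypothetical stub in place
# of a real ZhTopolectPronunciations object (the stub is my illustration only).
class _StubProns:
    def pronunciation(self, word):
        return None  # pretend no word has a recorded pronunciation
# 'hello' is filtered out (it contains ASCII characters); the CJK word is kept
print(words_missing_prons(['hello', '漢字'], _StubProns()))  # -> ['漢字']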
|
normal
|
{
"blob_id": "18366633489d905c96b0c30d65442bc2e2b188ea",
"index": 4703,
"step-1": "<mask token>\n\n\ndef missing_philippine_hokkien_words_generator(synonyms: ZhTopolectSynonyms,\n hokprons: ZhTopolectPronunciations):\n all_hokkien = set()\n for word, syn_data in synonyms.all_words():\n minnan = set(syn_data['Philippine-MN'])\n minnan.update(syn_data['Quanzhou'])\n minnan.update(syn_data['Xiamen'])\n for hokkien in minnan:\n banlamoe = hokkien.split(':')\n all_hokkien.add(banlamoe[0])\n return words_missing_prons(all_hokkien, hokprons)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef missing_philippine_hokkien_words_generator(synonyms: ZhTopolectSynonyms,\n hokprons: ZhTopolectPronunciations):\n all_hokkien = set()\n for word, syn_data in synonyms.all_words():\n minnan = set(syn_data['Philippine-MN'])\n minnan.update(syn_data['Quanzhou'])\n minnan.update(syn_data['Xiamen'])\n for hokkien in minnan:\n banlamoe = hokkien.split(':')\n all_hokkien.add(banlamoe[0])\n return words_missing_prons(all_hokkien, hokprons)\n\n\ndef words_missing_prons(corpus: Sequence[str], prons: ZhTopolectPronunciations\n ):\n return [word for word in corpus if prons.pronunciation(word) is None and\n all(ord(char) > 255 for char in word)]\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef missing_philippine_hokkien_words_generator(synonyms: ZhTopolectSynonyms,\n hokprons: ZhTopolectPronunciations):\n all_hokkien = set()\n for word, syn_data in synonyms.all_words():\n minnan = set(syn_data['Philippine-MN'])\n minnan.update(syn_data['Quanzhou'])\n minnan.update(syn_data['Xiamen'])\n for hokkien in minnan:\n banlamoe = hokkien.split(':')\n all_hokkien.add(banlamoe[0])\n return words_missing_prons(all_hokkien, hokprons)\n\n\ndef words_missing_prons(corpus: Sequence[str], prons: ZhTopolectPronunciations\n ):\n return [word for word in corpus if prons.pronunciation(word) is None and\n all(ord(char) > 255 for char in word)]\n\n\nif __name__ == '__main__':\n synonyms = ZhTopolectSynonyms.from_local_folder(\n '../data/enwiktionary/module-zh-data-json/dial-syn')\n mp = MandarinPronunciations.from_local_json_file(\n '../data/enwiktionary/module-zh-data-json/combined-mandarin-pron.json')\n missing_mandarin_prons = iter(words_missing_prons(synonyms.\n mandarin_words(), mp))\n h = ZhTopolectPronunciations.from_local_json_folder(\n '../data/enwiktionary/module-zh-data-json/nan-pron')\n missing_hokkien_prons = iter(missing_philippine_hokkien_words_generator\n (synonyms, h))\n today = datetime.today().strftime('%Y%m%d')\n lines_to_textfile(\n f'../data/enwiktionary/words-search/missing-hokkien.{today}.txt',\n missing_hokkien_prons)\n lines_to_textfile(\n f'../data/enwiktionary/words-search/missing-mandarin.{today}.txt',\n missing_mandarin_prons)\n",
"step-4": "from datetime import datetime\nfrom iohelpers import lines_to_textfile\nfrom typing import Iterator, List, Sequence\nfrom zhmodules import ZhTopolectSynonyms, MandarinPronunciations, ZhTopolectPronunciations\n\n\ndef missing_philippine_hokkien_words_generator(synonyms: ZhTopolectSynonyms,\n hokprons: ZhTopolectPronunciations):\n all_hokkien = set()\n for word, syn_data in synonyms.all_words():\n minnan = set(syn_data['Philippine-MN'])\n minnan.update(syn_data['Quanzhou'])\n minnan.update(syn_data['Xiamen'])\n for hokkien in minnan:\n banlamoe = hokkien.split(':')\n all_hokkien.add(banlamoe[0])\n return words_missing_prons(all_hokkien, hokprons)\n\n\ndef words_missing_prons(corpus: Sequence[str], prons: ZhTopolectPronunciations\n ):\n return [word for word in corpus if prons.pronunciation(word) is None and\n all(ord(char) > 255 for char in word)]\n\n\nif __name__ == '__main__':\n synonyms = ZhTopolectSynonyms.from_local_folder(\n '../data/enwiktionary/module-zh-data-json/dial-syn')\n mp = MandarinPronunciations.from_local_json_file(\n '../data/enwiktionary/module-zh-data-json/combined-mandarin-pron.json')\n missing_mandarin_prons = iter(words_missing_prons(synonyms.\n mandarin_words(), mp))\n h = ZhTopolectPronunciations.from_local_json_folder(\n '../data/enwiktionary/module-zh-data-json/nan-pron')\n missing_hokkien_prons = iter(missing_philippine_hokkien_words_generator\n (synonyms, h))\n today = datetime.today().strftime('%Y%m%d')\n lines_to_textfile(\n f'../data/enwiktionary/words-search/missing-hokkien.{today}.txt',\n missing_hokkien_prons)\n lines_to_textfile(\n f'../data/enwiktionary/words-search/missing-mandarin.{today}.txt',\n missing_mandarin_prons)\n",
"step-5": "from datetime import datetime\nfrom iohelpers import lines_to_textfile\nfrom typing import Iterator, List, Sequence\nfrom zhmodules import ZhTopolectSynonyms, MandarinPronunciations, ZhTopolectPronunciations\n\n\ndef missing_philippine_hokkien_words_generator(synonyms: ZhTopolectSynonyms, hokprons: ZhTopolectPronunciations):\n all_hokkien = set()\n for word, syn_data in synonyms.all_words():\n minnan = set(syn_data['Philippine-MN'])\n minnan.update(syn_data['Quanzhou'])\n minnan.update(syn_data['Xiamen'])\n\n for hokkien in minnan:\n banlamoe = hokkien.split(':')\n all_hokkien.add(banlamoe[0])\n\n return words_missing_prons(all_hokkien, hokprons)\n\n\ndef words_missing_prons(corpus: Sequence[str], prons: ZhTopolectPronunciations):\n return [word for word in corpus if prons.pronunciation(word) is None and all(ord(char) > 255 for char in word)]\n\n\nif __name__ == '__main__':\n synonyms = ZhTopolectSynonyms.from_local_folder('../data/enwiktionary/module-zh-data-json/dial-syn')\n\n mp = MandarinPronunciations.from_local_json_file('../data/enwiktionary/module-zh-data-json/combined-mandarin-pron.json')\n missing_mandarin_prons = iter(words_missing_prons(synonyms.mandarin_words(), mp))\n h = ZhTopolectPronunciations.from_local_json_folder('../data/enwiktionary/module-zh-data-json/nan-pron')\n missing_hokkien_prons = iter(missing_philippine_hokkien_words_generator(synonyms, h))\n\n today = datetime.today().strftime(\"%Y%m%d\")\n lines_to_textfile(f'../data/enwiktionary/words-search/missing-hokkien.{today}.txt', missing_hokkien_prons)\n lines_to_textfile(f'../data/enwiktionary/words-search/missing-mandarin.{today}.txt', missing_mandarin_prons)\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
s = 'ejp mysljylc kd kxveddknmc re jsicpdrysirbcpc ypc rtcsra dkh wyfrepkym veddknkmkrkcdde kr kd eoya kw aej tysr re ujdr lkgc jv'
sa = 'our language is impossible to understandthere are twenty six factorial possibilitiesso it is okay if you want to just give up'
ans = {}
for i in range(len(s)):
ans[s[i]] = sa[i];
S = set([])
for k in ans:
S.add(k)
#for w in range(26):
# if chr(w+97) not in S:
# print chr(w+97)
# q and z not in input so they must map to each other
ans['q'] = 'z'
ans['z'] = 'q'
f = open('A-small-attempt0.in', 'r')
L = f.readlines()
tc = 0
for i in range(1, len(L)):
s = L[i]
S = ''
for j in range(len(s)):
if s[j] == '\n':
continue
S += ans[s[j]]
tc += 1
print('Case #',tc,': ',S,sep='')
|
normal
|
{
"blob_id": "77b9b111cfb4d0b54e14b2aab81b7b05fd6bbccd",
"index": 8552,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nfor i in range(len(s)):\n ans[s[i]] = sa[i]\n<mask token>\nfor k in ans:\n S.add(k)\n<mask token>\nfor i in range(1, len(L)):\n s = L[i]\n S = ''\n for j in range(len(s)):\n if s[j] == '\\n':\n continue\n S += ans[s[j]]\n tc += 1\n print('Case #', tc, ': ', S, sep='')\n",
"step-3": "s = (\n 'ejp mysljylc kd kxveddknmc re jsicpdrysirbcpc ypc rtcsra dkh wyfrepkym veddknkmkrkcdde kr kd eoya kw aej tysr re ujdr lkgc jv'\n )\nsa = (\n 'our language is impossible to understandthere are twenty six factorial possibilitiesso it is okay if you want to just give up'\n )\nans = {}\nfor i in range(len(s)):\n ans[s[i]] = sa[i]\nS = set([])\nfor k in ans:\n S.add(k)\nans['q'] = 'z'\nans['z'] = 'q'\nf = open('A-small-attempt0.in', 'r')\nL = f.readlines()\ntc = 0\nfor i in range(1, len(L)):\n s = L[i]\n S = ''\n for j in range(len(s)):\n if s[j] == '\\n':\n continue\n S += ans[s[j]]\n tc += 1\n print('Case #', tc, ': ', S, sep='')\n",
"step-4": "s = 'ejp mysljylc kd kxveddknmc re jsicpdrysirbcpc ypc rtcsra dkh wyfrepkym veddknkmkrkcdde kr kd eoya kw aej tysr re ujdr lkgc jv'\nsa = 'our language is impossible to understandthere are twenty six factorial possibilitiesso it is okay if you want to just give up'\n\nans = {}\nfor i in range(len(s)):\n ans[s[i]] = sa[i];\nS = set([])\nfor k in ans:\n S.add(k)\n#for w in range(26):\n# if chr(w+97) not in S:\n# print chr(w+97)\n\n# q and z not in input so they must map to each other\nans['q'] = 'z'\nans['z'] = 'q'\n\nf = open('A-small-attempt0.in', 'r')\nL = f.readlines()\ntc = 0\nfor i in range(1, len(L)):\n s = L[i]\n S = ''\n for j in range(len(s)):\n if s[j] == '\\n':\n continue\n S += ans[s[j]]\n tc += 1\n print('Case #',tc,': ',S,sep='')\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
from flask import render_template, request, Response
from flask.views import MethodView, View
from repo import ClassifierRepo
from services import PredictDigitService
from settings import CLASSIFIER_STORAGE
class IndexView(View):
def dispatch_request(self):
return render_template('index.html')
class PredictDigitView(MethodView):
def post(self):
repo = ClassifierRepo(CLASSIFIER_STORAGE)
service = PredictDigitService(repo)
image_data_uri = request.json['image']
prediction = service.handle(image_data_uri)
return Response(str(prediction).encode(), status=200)
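# For context, a minimal sketch of wiring these views onto a Flask app; the routes
# and endpoint names here are assumptions for illustration, not taken from the repo.
from flask import Flask
app = Flask(__name__)
app.add_url_rule('/', view_func=IndexView.as_view('index'))
app.add_url_rule('/api/predict', view_func=PredictDigitView.as_view('predict_digit'))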
|
normal
|
{
"blob_id": "3ea42e7ad5301314a39bf522280c084342cd18c5",
"index": 332,
"step-1": "<mask token>\n\n\nclass PredictDigitView(MethodView):\n\n def post(self):\n repo = ClassifierRepo(CLASSIFIER_STORAGE)\n service = PredictDigitService(repo)\n image_data_uri = request.json['image']\n prediction = service.handle(image_data_uri)\n return Response(str(prediction).encode(), status=200)\n",
"step-2": "<mask token>\n\n\nclass IndexView(View):\n <mask token>\n\n\nclass PredictDigitView(MethodView):\n\n def post(self):\n repo = ClassifierRepo(CLASSIFIER_STORAGE)\n service = PredictDigitService(repo)\n image_data_uri = request.json['image']\n prediction = service.handle(image_data_uri)\n return Response(str(prediction).encode(), status=200)\n",
"step-3": "<mask token>\n\n\nclass IndexView(View):\n\n def dispatch_request(self):\n return render_template('index.html')\n\n\nclass PredictDigitView(MethodView):\n\n def post(self):\n repo = ClassifierRepo(CLASSIFIER_STORAGE)\n service = PredictDigitService(repo)\n image_data_uri = request.json['image']\n prediction = service.handle(image_data_uri)\n return Response(str(prediction).encode(), status=200)\n",
"step-4": "from flask import render_template, request, Response\nfrom flask.views import MethodView, View\nfrom flask.views import View\nfrom repo import ClassifierRepo\nfrom services import PredictDigitService\nfrom settings import CLASSIFIER_STORAGE\n\n\nclass IndexView(View):\n\n def dispatch_request(self):\n return render_template('index.html')\n\n\nclass PredictDigitView(MethodView):\n\n def post(self):\n repo = ClassifierRepo(CLASSIFIER_STORAGE)\n service = PredictDigitService(repo)\n image_data_uri = request.json['image']\n prediction = service.handle(image_data_uri)\n return Response(str(prediction).encode(), status=200)\n",
"step-5": null,
"step-ids": [
2,
3,
4,
5
]
}
|
[
2,
3,
4,
5
] |
import os
import sys
try:
from setuptools import setup, find_packages
except ImportError:
from distutils.core import setup, find_packages
setup(
name='stripe-requests',
version='1.9.1-dev',
description='Stripe python bindings using requests',
author='Allan Lei',
author_email='[email protected]',
url='https://github.com/allanlei/stripe-requests',
license=open('LICENSE').read(),
packages=find_packages(),
package_data={'stripe': ['data/ca-certificates.crt']},
install_requires=[
'requests >= 1.2.0, < 1.3.0',
],
test_suite='stripe.tests',
classifiers=(
'Intended Audience :: Developers',
'Natural Language :: English',
'Programming Language :: Python',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.1',
'Programming Language :: Python :: 3.2',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: Implementation :: PyPy',
),
)
|
normal
|
{
"blob_id": "a6ee2be7bed59b419fa66fd6cfe4b5fff3fac260",
"index": 2596,
"step-1": "<mask token>\n",
"step-2": "<mask token>\ntry:\n from setuptools import setup, find_packages\nexcept ImportError:\n from distutils.core import setup, find_packages\nsetup(name='stripe-requests', version='1.9.1-dev', description=\n 'Stripe python bindings using requests', author='Allan Lei',\n author_email='[email protected]', url=\n 'https://github.com/allanlei/stripe-requests', license=open('LICENSE').\n read(), packages=find_packages(), package_data={'stripe': [\n 'data/ca-certificates.crt']}, install_requires=[\n 'requests >= 1.2.0, < 1.3.0'], test_suite='stripe.tests', classifiers=(\n 'Intended Audience :: Developers', 'Natural Language :: English',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 2.6',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.1',\n 'Programming Language :: Python :: 3.2',\n 'Programming Language :: Python :: 3.3',\n 'Programming Language :: Python :: Implementation :: PyPy'))\n",
"step-3": "import os\nimport sys\ntry:\n from setuptools import setup, find_packages\nexcept ImportError:\n from distutils.core import setup, find_packages\nsetup(name='stripe-requests', version='1.9.1-dev', description=\n 'Stripe python bindings using requests', author='Allan Lei',\n author_email='[email protected]', url=\n 'https://github.com/allanlei/stripe-requests', license=open('LICENSE').\n read(), packages=find_packages(), package_data={'stripe': [\n 'data/ca-certificates.crt']}, install_requires=[\n 'requests >= 1.2.0, < 1.3.0'], test_suite='stripe.tests', classifiers=(\n 'Intended Audience :: Developers', 'Natural Language :: English',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 2.6',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.1',\n 'Programming Language :: Python :: 3.2',\n 'Programming Language :: Python :: 3.3',\n 'Programming Language :: Python :: Implementation :: PyPy'))\n",
"step-4": "import os\nimport sys\n\ntry:\n from setuptools import setup, find_packages\nexcept ImportError:\n from distutils.core import setup, find_packages\n\n\nsetup(\n name='stripe-requests',\n version='1.9.1-dev',\n description='Stripe python bindings using requests',\n author='Allan Lei',\n author_email='[email protected]',\n url='https://github.com/allanlei/stripe-requests',\n license=open('LICENSE').read(),\n packages=find_packages(),\n package_data={'stripe': ['data/ca-certificates.crt']},\n install_requires=[\n 'requests >= 1.2.0, < 1.3.0',\n ],\n test_suite='stripe.tests',\n classifiers=(\n 'Intended Audience :: Developers',\n 'Natural Language :: English',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 2.6',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.1',\n 'Programming Language :: Python :: 3.2',\n 'Programming Language :: Python :: 3.3',\n 'Programming Language :: Python :: Implementation :: PyPy',\n ),\n)",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
# Make it 1 (reduce N to 1 using the -1, /2, /3 operations)
import sys
N = int(sys.stdin.readline())
dp_table = [0 for _ in range(10**6 + 1)]
dp_table[2], dp_table[3] = 1, 1
for i in range(4,N+1):
two_per = 10**6
three_per = 10**6
if i % 3 ==0:
three_per = dp_table[i//3] + 1
if i % 2 ==0:
two_per = dp_table[i//2] + 1
minus = dp_table[i-1] + 1
dp_table[i] = min(minus, two_per, three_per)
# print(i, dp_table[i])
print(dp_table[N])
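# Sanity check on the recurrence (my note, not part of the original submission):
# for N = 10 the optimal sequence is 10 -> 9 -> 3 -> 1, so dp_table[10] == 3.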
|
normal
|
{
"blob_id": "34a8fc38ed875e1c564f535348dc0d5d88c76ab1",
"index": 7281,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nfor i in range(4, N + 1):\n two_per = 10 ** 6\n three_per = 10 ** 6\n if i % 3 == 0:\n three_per = dp_table[i // 3] + 1\n if i % 2 == 0:\n two_per = dp_table[i // 2] + 1\n minus = dp_table[i - 1] + 1\n dp_table[i] = min(minus, two_per, three_per)\nprint(dp_table[N])\n",
"step-3": "<mask token>\nN = int(sys.stdin.readline())\ndp_table = [(0) for _ in range(10 ** 6 + 1)]\ndp_table[2], dp_table[3] = 1, 1\nfor i in range(4, N + 1):\n two_per = 10 ** 6\n three_per = 10 ** 6\n if i % 3 == 0:\n three_per = dp_table[i // 3] + 1\n if i % 2 == 0:\n two_per = dp_table[i // 2] + 1\n minus = dp_table[i - 1] + 1\n dp_table[i] = min(minus, two_per, three_per)\nprint(dp_table[N])\n",
"step-4": "import sys\nN = int(sys.stdin.readline())\ndp_table = [(0) for _ in range(10 ** 6 + 1)]\ndp_table[2], dp_table[3] = 1, 1\nfor i in range(4, N + 1):\n two_per = 10 ** 6\n three_per = 10 ** 6\n if i % 3 == 0:\n three_per = dp_table[i // 3] + 1\n if i % 2 == 0:\n two_per = dp_table[i // 2] + 1\n minus = dp_table[i - 1] + 1\n dp_table[i] = min(minus, two_per, three_per)\nprint(dp_table[N])\n",
"step-5": "# 1로 만들기\nimport sys\nN = int(sys.stdin.readline())\ndp_table = [0 for _ in range(10**6 + 1)]\ndp_table[2], dp_table[3] = 1, 1\n\nfor i in range(4,N+1):\n two_per = 10**6\n three_per = 10**6\n if i % 3 ==0:\n three_per = dp_table[i//3] + 1\n if i % 2 ==0:\n two_per = dp_table[i//2] + 1\n minus = dp_table[i-1] + 1\n dp_table[i] = min(minus, two_per, three_per)\n # print(i, dp_table[i])\n\nprint(dp_table[N])",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
# -------------------------------------------------------------------------
# File: mb_trap.py
# Created: Tue Feb 7 20:51:32 2006
# -------------------------------------------------------------------------
import random
import mb_io
import mb_subs
from mb_go import GameObject
class Trap(GameObject):
"""
This class is used to create traps (or blessing objects) that exist
in the arena on their own but that are not subject to attack.
    The only real attributes traps have are the different types of attacks that
they can carry out on combatants in the arena.
"""
def __init__(self, gamedir, filename = None):
self.attacks = list()
self.x = 0
self.y = 0
self.radius = 0
self.is_first_round = True
GameObject.__init__(self, gamedir, filename)
def read_in_config(self, filename):
parser = GameObject.read_in_config(self, filename)
if parser.has_section('attacks'):
self.attacks = mb_subs.actions(parser.items('attacks'))
del parser
def trigger_trap(self, victim):
attac = random.choice(self.attacks)
attack = attac[0]
damage = attac[1]
victim.health = mb_subs.subtract_to_floor(victim.health, damage)
if damage >= 0:
commentary = '(OH NO!) %s' % (attack % victim.name)
else:
commentary = '(WOW!) %s' % (attack % victim.name)
return commentary, damage
|
normal
|
{
"blob_id": "f2a94f6bfe86af439a8248b40732340c45d89b93",
"index": 9925,
"step-1": "<mask token>\n\n\nclass Trap(GameObject):\n <mask token>\n\n def __init__(self, gamedir, filename=None):\n self.attacks = list()\n self.x = 0\n self.y = 0\n self.radius = 0\n self.is_first_round = True\n GameObject.__init__(self, gamedir, filename)\n <mask token>\n\n def trigger_trap(self, victim):\n attac = random.choice(self.attacks)\n attack = attac[0]\n damage = attac[1]\n victim.health = mb_subs.subtract_to_floor(victim.health, damage)\n if damage >= 0:\n commentary = '(OH NO!) %s' % (attack % victim.name)\n else:\n commentary = '(WOW!) %s' % (attack % victim.name)\n return commentary, damage\n",
"step-2": "<mask token>\n\n\nclass Trap(GameObject):\n <mask token>\n\n def __init__(self, gamedir, filename=None):\n self.attacks = list()\n self.x = 0\n self.y = 0\n self.radius = 0\n self.is_first_round = True\n GameObject.__init__(self, gamedir, filename)\n\n def read_in_config(self, filename):\n parser = GameObject.read_in_config(self, filename)\n if parser.has_section('attacks'):\n self.attacks = mb_subs.actions(parser.items('attacks'))\n del parser\n\n def trigger_trap(self, victim):\n attac = random.choice(self.attacks)\n attack = attac[0]\n damage = attac[1]\n victim.health = mb_subs.subtract_to_floor(victim.health, damage)\n if damage >= 0:\n commentary = '(OH NO!) %s' % (attack % victim.name)\n else:\n commentary = '(WOW!) %s' % (attack % victim.name)\n return commentary, damage\n",
"step-3": "<mask token>\n\n\nclass Trap(GameObject):\n \"\"\"\n This class is used to create traps (or blessing objects) that exist\n in the arena on their own but that are not subject to attack.\n The only real attributes traps have is different types of attacks that\n they can carry out on combatants in the arena.\n\n \"\"\"\n\n def __init__(self, gamedir, filename=None):\n self.attacks = list()\n self.x = 0\n self.y = 0\n self.radius = 0\n self.is_first_round = True\n GameObject.__init__(self, gamedir, filename)\n\n def read_in_config(self, filename):\n parser = GameObject.read_in_config(self, filename)\n if parser.has_section('attacks'):\n self.attacks = mb_subs.actions(parser.items('attacks'))\n del parser\n\n def trigger_trap(self, victim):\n attac = random.choice(self.attacks)\n attack = attac[0]\n damage = attac[1]\n victim.health = mb_subs.subtract_to_floor(victim.health, damage)\n if damage >= 0:\n commentary = '(OH NO!) %s' % (attack % victim.name)\n else:\n commentary = '(WOW!) %s' % (attack % victim.name)\n return commentary, damage\n",
"step-4": "import random\nimport mb_io\nimport mb_subs\nfrom mb_go import GameObject\n\n\nclass Trap(GameObject):\n \"\"\"\n This class is used to create traps (or blessing objects) that exist\n in the arena on their own but that are not subject to attack.\n The only real attributes traps have is different types of attacks that\n they can carry out on combatants in the arena.\n\n \"\"\"\n\n def __init__(self, gamedir, filename=None):\n self.attacks = list()\n self.x = 0\n self.y = 0\n self.radius = 0\n self.is_first_round = True\n GameObject.__init__(self, gamedir, filename)\n\n def read_in_config(self, filename):\n parser = GameObject.read_in_config(self, filename)\n if parser.has_section('attacks'):\n self.attacks = mb_subs.actions(parser.items('attacks'))\n del parser\n\n def trigger_trap(self, victim):\n attac = random.choice(self.attacks)\n attack = attac[0]\n damage = attac[1]\n victim.health = mb_subs.subtract_to_floor(victim.health, damage)\n if damage >= 0:\n commentary = '(OH NO!) %s' % (attack % victim.name)\n else:\n commentary = '(WOW!) %s' % (attack % victim.name)\n return commentary, damage\n",
"step-5": "# -------------------------------------------------------------------------\n# File: mb_trap.py\n# Created: Tue Feb 7 20:51:32 2006\n# -------------------------------------------------------------------------\n\nimport random\n\nimport mb_io\nimport mb_subs\nfrom mb_go import GameObject\n\nclass Trap(GameObject):\n \"\"\"\n This class is used to create traps (or blessing objects) that exist\n in the arena on their own but that are not subject to attack.\n The only real attributes traps have is different types of attacks that\n they can carry out on combatants in the arena.\n\n \"\"\"\n def __init__(self, gamedir, filename = None):\n\n self.attacks = list()\n self.x = 0\n self.y = 0\n self.radius = 0\n self.is_first_round = True\n GameObject.__init__(self, gamedir, filename)\n\n def read_in_config(self, filename):\n parser = GameObject.read_in_config(self, filename)\n if parser.has_section('attacks'):\n self.attacks = mb_subs.actions(parser.items('attacks'))\n del parser\n\n def trigger_trap(self, victim):\n\n attac = random.choice(self.attacks)\n attack = attac[0]\n damage = attac[1]\n victim.health = mb_subs.subtract_to_floor(victim.health, damage)\n\n if damage >= 0:\n commentary = '(OH NO!) %s' % (attack % victim.name)\n else:\n commentary = '(WOW!) %s' % (attack % victim.name)\n return commentary, damage\n",
"step-ids": [
3,
4,
5,
6,
7
]
}
|
[
3,
4,
5,
6,
7
] |
from selenium import webdriver
from selenium.common.exceptions import WebDriverException
from selenium.webdriver.support.wait import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.by import By
SELENIUM_TIMEOUT = 12
def get_browser_driver():
"""获取浏览器服务 使用后记得 driver.quit() 否则容易引起状态污染"""
try:
        # PhantomJS: configure to not load images
driver = webdriver.PhantomJS(service_args=['--load-images=no'])
except WebDriverException:
        # Chrome: configure to not load images
chrome_options = webdriver.ChromeOptions()
chrome_profile = {"profile.managed_default_content_settings.images": 2}
chrome_options.add_experimental_option("prefs", chrome_profile)
driver = webdriver.Chrome(chrome_options=chrome_options)
driver.set_page_load_timeout(SELENIUM_TIMEOUT)
driver.implicitly_wait(SELENIUM_TIMEOUT)
return driver
def wait_driver(driver, id, wait_time, watch_step):
locator = (By.ID, id)
try:
WebDriverWait(driver, wait_time, watch_step).until(EC.presence_of_element_located(locator))
print(u"成功访问搜索引擎!")
except Exception as e:
print(e)
print(u"搜索引擎未加载成功,浏览器将被退出!")
driver.quit()
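# A hedged usage sketch of the two helpers above; the URL and element id below are
# placeholders I made up, not values from the original script.
if __name__ == '__main__':
    driver = get_browser_driver()
    driver.get('https://www.example.com')            # placeholder URL
    wait_driver(driver, 'some_element_id', 10, 0.5)  # hypothetical id; quits the driver itself on failure
    driver.quit()                                    # redundant if wait_driver already quit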
|
normal
|
{
"blob_id": "5ab877ef15cdcd52463b1567c28327dc2eeea2de",
"index": 1204,
"step-1": "<mask token>\n\n\ndef get_browser_driver():\n \"\"\"获取浏览器服务 使用后记得 driver.quit() 否则容易引起状态污染\"\"\"\n try:\n driver = webdriver.PhantomJS(service_args=['--load-images=no'])\n except WebDriverException:\n chrome_options = webdriver.ChromeOptions()\n chrome_profile = {'profile.managed_default_content_settings.images': 2}\n chrome_options.add_experimental_option('prefs', chrome_profile)\n driver = webdriver.Chrome(chrome_options=chrome_options)\n driver.set_page_load_timeout(SELENIUM_TIMEOUT)\n driver.implicitly_wait(SELENIUM_TIMEOUT)\n return driver\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef get_browser_driver():\n \"\"\"获取浏览器服务 使用后记得 driver.quit() 否则容易引起状态污染\"\"\"\n try:\n driver = webdriver.PhantomJS(service_args=['--load-images=no'])\n except WebDriverException:\n chrome_options = webdriver.ChromeOptions()\n chrome_profile = {'profile.managed_default_content_settings.images': 2}\n chrome_options.add_experimental_option('prefs', chrome_profile)\n driver = webdriver.Chrome(chrome_options=chrome_options)\n driver.set_page_load_timeout(SELENIUM_TIMEOUT)\n driver.implicitly_wait(SELENIUM_TIMEOUT)\n return driver\n\n\ndef wait_driver(driver, id, wait_time, watch_step):\n locator = By.ID, id\n try:\n WebDriverWait(driver, wait_time, watch_step).until(EC.\n presence_of_element_located(locator))\n print(u'成功访问搜索引擎!')\n except Exception as e:\n print(e)\n print(u'搜索引擎未加载成功,浏览器将被退出!')\n driver.quit()\n",
"step-3": "<mask token>\nSELENIUM_TIMEOUT = 12\n\n\ndef get_browser_driver():\n \"\"\"获取浏览器服务 使用后记得 driver.quit() 否则容易引起状态污染\"\"\"\n try:\n driver = webdriver.PhantomJS(service_args=['--load-images=no'])\n except WebDriverException:\n chrome_options = webdriver.ChromeOptions()\n chrome_profile = {'profile.managed_default_content_settings.images': 2}\n chrome_options.add_experimental_option('prefs', chrome_profile)\n driver = webdriver.Chrome(chrome_options=chrome_options)\n driver.set_page_load_timeout(SELENIUM_TIMEOUT)\n driver.implicitly_wait(SELENIUM_TIMEOUT)\n return driver\n\n\ndef wait_driver(driver, id, wait_time, watch_step):\n locator = By.ID, id\n try:\n WebDriverWait(driver, wait_time, watch_step).until(EC.\n presence_of_element_located(locator))\n print(u'成功访问搜索引擎!')\n except Exception as e:\n print(e)\n print(u'搜索引擎未加载成功,浏览器将被退出!')\n driver.quit()\n",
"step-4": "from selenium import webdriver\nfrom selenium.common.exceptions import WebDriverException\nfrom selenium.webdriver.support.wait import WebDriverWait\nfrom selenium.webdriver.support import expected_conditions as EC\nfrom selenium.webdriver.common.by import By\nSELENIUM_TIMEOUT = 12\n\n\ndef get_browser_driver():\n \"\"\"获取浏览器服务 使用后记得 driver.quit() 否则容易引起状态污染\"\"\"\n try:\n driver = webdriver.PhantomJS(service_args=['--load-images=no'])\n except WebDriverException:\n chrome_options = webdriver.ChromeOptions()\n chrome_profile = {'profile.managed_default_content_settings.images': 2}\n chrome_options.add_experimental_option('prefs', chrome_profile)\n driver = webdriver.Chrome(chrome_options=chrome_options)\n driver.set_page_load_timeout(SELENIUM_TIMEOUT)\n driver.implicitly_wait(SELENIUM_TIMEOUT)\n return driver\n\n\ndef wait_driver(driver, id, wait_time, watch_step):\n locator = By.ID, id\n try:\n WebDriverWait(driver, wait_time, watch_step).until(EC.\n presence_of_element_located(locator))\n print(u'成功访问搜索引擎!')\n except Exception as e:\n print(e)\n print(u'搜索引擎未加载成功,浏览器将被退出!')\n driver.quit()\n",
"step-5": "from selenium import webdriver\nfrom selenium.common.exceptions import WebDriverException\nfrom selenium.webdriver.support.wait import WebDriverWait\nfrom selenium.webdriver.support import expected_conditions as EC\nfrom selenium.webdriver.common.by import By\n\nSELENIUM_TIMEOUT = 12\n\ndef get_browser_driver():\n \"\"\"获取浏览器服务 使用后记得 driver.quit() 否则容易引起状态污染\"\"\"\n try:\n # PhantomJS 设置不加载图片\n driver = webdriver.PhantomJS(service_args=['--load-images=no'])\n except WebDriverException:\n # chrome 设置不加载图片\n chrome_options = webdriver.ChromeOptions()\n chrome_profile = {\"profile.managed_default_content_settings.images\": 2}\n chrome_options.add_experimental_option(\"prefs\", chrome_profile)\n driver = webdriver.Chrome(chrome_options=chrome_options)\n driver.set_page_load_timeout(SELENIUM_TIMEOUT)\n driver.implicitly_wait(SELENIUM_TIMEOUT)\n return driver\n\ndef wait_driver(driver, id, wait_time, watch_step):\n locator = (By.ID, id)\n try:\n WebDriverWait(driver, wait_time, watch_step).until(EC.presence_of_element_located(locator))\n print(u\"成功访问搜索引擎!\")\n except Exception as e:\n print(e)\n print(u\"搜索引擎未加载成功,浏览器将被退出!\")\n driver.quit()",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
#!/usr/bin/python
from sqlalchemy import create_engine
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy import Column, Integer, String, ForeignKey
from sqlalchemy.orm import sessionmaker, relationship
engine = create_engine("sqlite:///banco.db")
Base = declarative_base()
Session = sessionmaker()
Session.configure(bind=engine)
session = Session()
class Funcionario(Base):
__tablename__ = 'funcionario'
id = Column(Integer,primary_key=True)
nome = Column(String)
dependente = relationship("Dependente")
class Dependente(Base):
__tablename__ = "dependente"
id = Column(Integer,primary_key=True)
nome = Column(String)
funcionario_id = Column(Integer,ForeignKey("funcionario.id"))
if __name__ == "__main__":
Base.metadata.create_all(engine)
    # Looking up the employee
result = session.query(Funcionario,Dependente) \
.join(Dependente) \
.filter(Funcionario.id==1).first()
dependente = session.query(Dependente).filter_by(id=1).first()
session.delete(dependente)
session.commit()
print "Funcionario: ",result.Funcionario.nome
for d in result.Funcionario.dependente:
print "Dependente: ",d.nome
|
normal
|
{
"blob_id": "6d5257158a7d2eef63faf2fea27f36721d4349ae",
"index": 4273,
"step-1": "#!/usr/bin/python\n\nfrom sqlalchemy import create_engine\nfrom sqlalchemy.ext.declarative import declarative_base\nfrom sqlalchemy import Column, Integer, String, ForeignKey\nfrom sqlalchemy.orm import sessionmaker, relationship\n\nengine = create_engine(\"sqlite:///banco.db\")\nBase = declarative_base()\nSession = sessionmaker()\nSession.configure(bind=engine)\nsession = Session()\n\nclass Funcionario(Base):\n __tablename__ = 'funcionario'\n id = Column(Integer,primary_key=True)\n nome = Column(String)\n dependente = relationship(\"Dependente\")\n\nclass Dependente(Base):\n __tablename__ = \"dependente\"\n id = Column(Integer,primary_key=True)\n nome = Column(String)\n funcionario_id = Column(Integer,ForeignKey(\"funcionario.id\"))\n\nif __name__ == \"__main__\":\n Base.metadata.create_all(engine)\n # Buscando funcionario\n result = session.query(Funcionario,Dependente) \\\n .join(Dependente) \\\n .filter(Funcionario.id==1).first()\n dependente = session.query(Dependente).filter_by(id=1).first()\n session.delete(dependente)\n session.commit()\n print \"Funcionario: \",result.Funcionario.nome\n for d in result.Funcionario.dependente:\n print \"Dependente: \",d.nome\n \n\n\n\n\n\n\n\n\n\n\n\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
#processes are described by generator functions
#during the lifetime of a process, the process function(generator function)
#creates events and yields them
#when a process yields an event, it gets suspended
#Simpy resumes the process when the event is triggered
#multiple processes waiting on the same event are resumed in the same order
#they yielded the event
import simpy
def car(env):
# i = 0
# while i<=10:
while True:
print("The car will start parking at: ",env.now)
parking_timeout = 5
yield env.timeout(parking_timeout)
print("The car will start driving at: ",env.now)
driving_timeout = 2
yield env.timeout(driving_timeout)
# if i == 10:
# print("the car is done moving")
# yield env.timeout(1)
# i += 1
env = simpy.Environment()
env.process(car(env)) # env.process wraps the car generator into a simulation process
#env.run()
env.run(until=20)
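# Both timeouts are fixed, so the run above is deterministic; derived from the code,
# the trace up to until=20 is:
#   The car will start parking at:  0
#   The car will start driving at:  5
#   The car will start parking at:  7
#   The car will start driving at:  12
#   The car will start parking at:  14
#   The car will start driving at:  19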
|
normal
|
{
"blob_id": "892eb8d1802b01c035993232cc80c710211ab102",
"index": 802,
"step-1": "<mask token>\n\n\ndef car(env):\n while True:\n print('The car will start parking at: ', env.now)\n parking_timeout = 5\n yield env.timeout(parking_timeout)\n print('The car will start driving at: ', env.now)\n driving_timeout = 2\n yield env.timeout(driving_timeout)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef car(env):\n while True:\n print('The car will start parking at: ', env.now)\n parking_timeout = 5\n yield env.timeout(parking_timeout)\n print('The car will start driving at: ', env.now)\n driving_timeout = 2\n yield env.timeout(driving_timeout)\n\n\n<mask token>\nenv.process(car(env))\nenv.run(until=20)\n",
"step-3": "<mask token>\n\n\ndef car(env):\n while True:\n print('The car will start parking at: ', env.now)\n parking_timeout = 5\n yield env.timeout(parking_timeout)\n print('The car will start driving at: ', env.now)\n driving_timeout = 2\n yield env.timeout(driving_timeout)\n\n\nenv = simpy.Environment()\nenv.process(car(env))\nenv.run(until=20)\n",
"step-4": "import simpy\n\n\ndef car(env):\n while True:\n print('The car will start parking at: ', env.now)\n parking_timeout = 5\n yield env.timeout(parking_timeout)\n print('The car will start driving at: ', env.now)\n driving_timeout = 2\n yield env.timeout(driving_timeout)\n\n\nenv = simpy.Environment()\nenv.process(car(env))\nenv.run(until=20)\n",
"step-5": "#processes are described by generator functions\n#during the lifetime of a process, the process function(generator function) \n#creates events and yields them\n\n#when a process yields an event, it gets suspended\n#Simpy resumes the process when the event is triggered\n#multiple processes waiting on the same event is resumed in the same order\n#it yielded the event\n\nimport simpy\n\ndef car(env):\n # i = 0\n # while i<=10:\n while True:\n print(\"The car will start parking at: \",env.now)\n parking_timeout = 5\n yield env.timeout(parking_timeout)\n\n print(\"The car will start driving at: \",env.now)\n driving_timeout = 2\n yield env.timeout(driving_timeout)\n\n # if i == 10:\n # print(\"the car is done moving\")\n # yield env.timeout(1)\n # i += 1\n\n\nenv = simpy.Environment()\nenv.process(car(env)) #the generator function creates the process called car\n#env.run()\nenv.run(until=20)\n\n\n ",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
def main():
num = int(input('dia: '))
dia(num)
def dia(a):
if a == 1:
print('Domingo !')
elif a == 2:
print('Segunda !')
else:
print('valor invalido !')
main()
|
normal
|
{
"blob_id": "07332e2da5458fda2112de2507037a759d3c62db",
"index": 3382,
"step-1": "<mask token>\n",
"step-2": "def main():\n num = int(input('dia: '))\n dia(num)\n\n\n<mask token>\n",
"step-3": "def main():\n num = int(input('dia: '))\n dia(num)\n\n\ndef dia(a):\n if a == 1:\n print('Domingo !')\n elif a == 2:\n print('Segunda !')\n else:\n print('valor invalido !')\n\n\n<mask token>\n",
"step-4": "def main():\n num = int(input('dia: '))\n dia(num)\n\n\ndef dia(a):\n if a == 1:\n print('Domingo !')\n elif a == 2:\n print('Segunda !')\n else:\n print('valor invalido !')\n\n\nmain()\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
import math
import time
t1 = time.time()
# n(3n-1)/2
def isPentagon(item):
num = math.floor(math.sqrt(item*2//3))+1
if num*(3*num-1)//2 == item:
return True
return False
# n(2n-1)
def isHexagon(item):
num = math.floor(math.sqrt(item//2))+1
if num*(2*num-1) == item:
return True
return False
i = 285
t = 0
while t == 0:
i += 1
n = i*(i+1)//2
if isPentagon(n) and isHexagon(n):
t = 1
print (n)
print("time:",time.time()-t1)
|
normal
|
{
"blob_id": "0aec3fbc9f4b9f33aee021fa417c43f0feb0e3d1",
"index": 3296,
"step-1": "<mask token>\n\n\ndef isPentagon(item):\n num = math.floor(math.sqrt(item * 2 // 3)) + 1\n if num * (3 * num - 1) // 2 == item:\n return True\n return False\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef isPentagon(item):\n num = math.floor(math.sqrt(item * 2 // 3)) + 1\n if num * (3 * num - 1) // 2 == item:\n return True\n return False\n\n\ndef isHexagon(item):\n num = math.floor(math.sqrt(item // 2)) + 1\n if num * (2 * num - 1) == item:\n return True\n return False\n\n\n<mask token>\nwhile t == 0:\n i += 1\n n = i * (i + 1) // 2\n if isPentagon(n) and isHexagon(n):\n t = 1\n print(n)\nprint('time:', time.time() - t1)\n",
"step-3": "<mask token>\nt1 = time.time()\n\n\ndef isPentagon(item):\n num = math.floor(math.sqrt(item * 2 // 3)) + 1\n if num * (3 * num - 1) // 2 == item:\n return True\n return False\n\n\ndef isHexagon(item):\n num = math.floor(math.sqrt(item // 2)) + 1\n if num * (2 * num - 1) == item:\n return True\n return False\n\n\ni = 285\nt = 0\nwhile t == 0:\n i += 1\n n = i * (i + 1) // 2\n if isPentagon(n) and isHexagon(n):\n t = 1\n print(n)\nprint('time:', time.time() - t1)\n",
"step-4": "import math\nimport time\nt1 = time.time()\n\n\ndef isPentagon(item):\n num = math.floor(math.sqrt(item * 2 // 3)) + 1\n if num * (3 * num - 1) // 2 == item:\n return True\n return False\n\n\ndef isHexagon(item):\n num = math.floor(math.sqrt(item // 2)) + 1\n if num * (2 * num - 1) == item:\n return True\n return False\n\n\ni = 285\nt = 0\nwhile t == 0:\n i += 1\n n = i * (i + 1) // 2\n if isPentagon(n) and isHexagon(n):\n t = 1\n print(n)\nprint('time:', time.time() - t1)\n",
"step-5": "import math\nimport time\n\nt1 = time.time()\n\n# n(3n-1)/2\ndef isPentagon(item):\n num = math.floor(math.sqrt(item*2//3))+1\n if num*(3*num-1)//2 == item:\n return True\n return False\n\n# n(2n-1)\ndef isHexagon(item):\n num = math.floor(math.sqrt(item//2))+1\n if num*(2*num-1) == item:\n return True\n return False\n\n\ni = 285\nt = 0\nwhile t == 0:\n i += 1\n n = i*(i+1)//2\n if isPentagon(n) and isHexagon(n):\n t = 1\n print (n)\n\nprint(\"time:\",time.time()-t1)\n",
"step-ids": [
1,
3,
4,
5,
6
]
}
|
[
1,
3,
4,
5,
6
] |
g7 = int(input())
h7 = g7 / 2
i = g7 - 1
print(int(h7 * i))
|
normal
|
{
"blob_id": "abb08956f55fd1e8af27ce12fa94a4137d7d908e",
"index": 7251,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nprint(int(h7 * i))\n",
"step-3": "g7 = int(input())\nh7 = g7 / 2\ni = g7 - 1\nprint(int(h7 * i))\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
import nltk
from nltk import bigrams
from lm import *
# Task 1:
# create an LM class to compute perplexity for news and adventure
m = LM()
# Fetch news and adventure for further use
news=nltk.corpus.brown.sents(categories='news')
adventure=nltk.corpus.brown.sents(categories='adventure')
# initial parameters
perpNews = 0.0
perpAdventure = 0.0
# compute perplexity:
perpNews = m.perplexity(news)
perpAdventure = m.perplexity(adventure)
# print out the perplexity.
print("Perpleksitet til news: %.2f" %perpNews)
print("Perpleksitet til adventure: %.2f" %perpAdventure)
""" Oppgave 1 - evaluering av spraakmodeller
$ python oblig2b_steinrr.py
Perpleksitet til news: 72.69
Perpleksitet til adventure: 117.41
Perpleksiteten tiil adventure er hoeyeere fordi klassifikatoren vi benytter i LM er ikke trent paa dette korpuset.
Perpleksiteten til news ville ha veart lavere hvis klassifikatoren vi benytter hadde bare veart trent paa news.
Men dette er ikke bra pga da ville perpleksiteten til adventure veare enda hoyere enn den er naa.
"""
zippy = m.zipfity(news)
for sekvens in zippy:
print("Ord: %4s Antall: %4d Sekvens: %.4f " %(sekvens[0], sekvens[1], sekvens[2]))
""" Oppgave 2 - Zipfianske distribusjon
Ord: the Antall: 6386 Sekvens: 6386.0000
Ord: , Antall: 5188 Sekvens: 2594.0000
Ord: . Antall: 4030 Sekvens: 1343.3333
Ord: of Antall: 2861 Sekvens: 715.2500
Ord: and Antall: 2186 Sekvens: 437.2000
Ord: to Antall: 2144 Sekvens: 357.3333
Ord: a Antall: 2130 Sekvens: 304.2857
Ord: in Antall: 2020 Sekvens: 252.5000
Ord: for Antall: 969 Sekvens: 107.6667
Ord: that Antall: 829 Sekvens: 82.9000
"""
brown_tagged_sents = nltk.corpus.brown.tagged_sents(categories='adventure')
adventure = [[w.lower() for w in line] for line in nltk.corpus.brown.sents(categories='adventure')]
#m.regularTagger(adventure)
checkTaggStandardAdv = m.analyseRegularTagger('adventure')
checkTaggStandardFic = m.analyseRegularTagger('fiction')
checkTaggModifiedAdv = m.analyseRegularTagger('adventure', 'modified')
checkTaggModifiedFic = m.analyseRegularTagger('fiction', 'modified')
print("Standard vs modifisert tagging ved hjelp av reguleart uttrykk")
print("Med corpus: 'adventure'")
print(" Standard: %4.2f Modifisert: %4.2f " %(checkTaggStandardFic, checkTaggModifiedAdv))
print("Med corpus: 'fiction'")
print(" Standard: %4.2f Modifisert: %4.2f " %(checkTaggStandardFic, checkTaggModifiedFic))
infile = open("test_setninger.txt")
tekst = []
for line in infile:
words = line.split(" ")
tekst.append(words)
infile.close()
# make sure every word is lowercase:
tekst = [[w.lower() for w in line] for line in tekst]
taggerTekst = m.regularTagger(tekst, 'modified')
for sentence in taggerTekst:
for taggs in sentence:
print(taggs)
""" Oppgave 3 - Ordklassetagging med regulære uttrykk
Standard vs modifisert tagging ved hjelp av reguleart uttrykk
Med corpus: 'adventure'
Standard: 0.18 Modifisert: 0.41
Med corpus: 'fiction'
Standard: 0.18 Modifisert: 0.40
...
..
... prints out the tags, which are copied into test_setninger_m_taggs.txt
..
Comments for further improvements:
1. said should have been categorized as a verb: VBD
2. he should be a pronoun
3. had should be a verb form of have

updated regular expressions:
1 and 3: (r'(.*ed|.*id|had)$', 'VBD')

2. I have not added rules for pronouns at all, so this is something that
could be added
"""
|
normal
|
{
"blob_id": "d268f8d563aac28852457f6f130b2fb4ea6269a2",
"index": 907,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nprint('Perpleksitet til news: %.2f' % perpNews)\nprint('Perpleksitet til adventure: %.2f' % perpAdventure)\n<mask token>\nfor sekvens in zippy:\n print('Ord: %4s Antall: %4d Sekvens: %.4f ' % (sekvens[0], sekvens[1],\n sekvens[2]))\n<mask token>\nprint('Standard vs modifisert tagging ved hjelp av reguleart uttrykk')\nprint(\"Med corpus: 'adventure'\")\nprint(' Standard: %4.2f Modifisert: %4.2f ' % (checkTaggStandardFic,\n checkTaggModifiedAdv))\nprint(\"Med corpus: 'fiction'\")\nprint(' Standard: %4.2f Modifisert: %4.2f ' % (checkTaggStandardFic,\n checkTaggModifiedFic))\n<mask token>\nfor line in infile:\n words = line.split(' ')\n tekst.append(words)\ninfile.close()\n<mask token>\nfor sentence in taggerTekst:\n for taggs in sentence:\n print(taggs)\n<mask token>\n",
"step-3": "<mask token>\nm = LM()\nnews = nltk.corpus.brown.sents(categories='news')\nadventure = nltk.corpus.brown.sents(categories='adventure')\nperpNews = 0.0\nperpAdventure = 0.0\nperpNews = m.perplexity(news)\nperpAdventure = m.perplexity(adventure)\nprint('Perpleksitet til news: %.2f' % perpNews)\nprint('Perpleksitet til adventure: %.2f' % perpAdventure)\n<mask token>\nzippy = m.zipfity(news)\nfor sekvens in zippy:\n print('Ord: %4s Antall: %4d Sekvens: %.4f ' % (sekvens[0], sekvens[1],\n sekvens[2]))\n<mask token>\nbrown_tagged_sents = nltk.corpus.brown.tagged_sents(categories='adventure')\nadventure = [[w.lower() for w in line] for line in nltk.corpus.brown.sents(\n categories='adventure')]\ncheckTaggStandardAdv = m.analyseRegularTagger('adventure')\ncheckTaggStandardFic = m.analyseRegularTagger('fiction')\ncheckTaggModifiedAdv = m.analyseRegularTagger('adventure', 'modified')\ncheckTaggModifiedFic = m.analyseRegularTagger('fiction', 'modified')\nprint('Standard vs modifisert tagging ved hjelp av reguleart uttrykk')\nprint(\"Med corpus: 'adventure'\")\nprint(' Standard: %4.2f Modifisert: %4.2f ' % (checkTaggStandardFic,\n checkTaggModifiedAdv))\nprint(\"Med corpus: 'fiction'\")\nprint(' Standard: %4.2f Modifisert: %4.2f ' % (checkTaggStandardFic,\n checkTaggModifiedFic))\ninfile = open('test_setninger.txt')\ntekst = []\nfor line in infile:\n words = line.split(' ')\n tekst.append(words)\ninfile.close()\ntekst = [[w.lower() for w in line] for line in tekst]\ntaggerTekst = m.regularTagger(tekst, 'modified')\nfor sentence in taggerTekst:\n for taggs in sentence:\n print(taggs)\n<mask token>\n",
"step-4": "import nltk\nfrom nltk import bigrams\nfrom lm import *\nm = LM()\nnews = nltk.corpus.brown.sents(categories='news')\nadventure = nltk.corpus.brown.sents(categories='adventure')\nperpNews = 0.0\nperpAdventure = 0.0\nperpNews = m.perplexity(news)\nperpAdventure = m.perplexity(adventure)\nprint('Perpleksitet til news: %.2f' % perpNews)\nprint('Perpleksitet til adventure: %.2f' % perpAdventure)\n<mask token>\nzippy = m.zipfity(news)\nfor sekvens in zippy:\n print('Ord: %4s Antall: %4d Sekvens: %.4f ' % (sekvens[0], sekvens[1],\n sekvens[2]))\n<mask token>\nbrown_tagged_sents = nltk.corpus.brown.tagged_sents(categories='adventure')\nadventure = [[w.lower() for w in line] for line in nltk.corpus.brown.sents(\n categories='adventure')]\ncheckTaggStandardAdv = m.analyseRegularTagger('adventure')\ncheckTaggStandardFic = m.analyseRegularTagger('fiction')\ncheckTaggModifiedAdv = m.analyseRegularTagger('adventure', 'modified')\ncheckTaggModifiedFic = m.analyseRegularTagger('fiction', 'modified')\nprint('Standard vs modifisert tagging ved hjelp av reguleart uttrykk')\nprint(\"Med corpus: 'adventure'\")\nprint(' Standard: %4.2f Modifisert: %4.2f ' % (checkTaggStandardFic,\n checkTaggModifiedAdv))\nprint(\"Med corpus: 'fiction'\")\nprint(' Standard: %4.2f Modifisert: %4.2f ' % (checkTaggStandardFic,\n checkTaggModifiedFic))\ninfile = open('test_setninger.txt')\ntekst = []\nfor line in infile:\n words = line.split(' ')\n tekst.append(words)\ninfile.close()\ntekst = [[w.lower() for w in line] for line in tekst]\ntaggerTekst = m.regularTagger(tekst, 'modified')\nfor sentence in taggerTekst:\n for taggs in sentence:\n print(taggs)\n<mask token>\n",
"step-5": "import nltk\nfrom nltk import bigrams\nfrom lm import *\n\n# Oppgave 1:\n# opretter LM klasse til aa perpleksitere news og adventure\nm = LM()\n\n# Henter news og adventure for videre bruk\nnews=nltk.corpus.brown.sents(categories='news')\nadventure=nltk.corpus.brown.sents(categories='adventure')\n\n# initial parametre\nperpNews = 0.0\nperpAdventure = 0.0\n\n# beregner perplexitet:\nperpNews = m.perplexity(news)\nperpAdventure = m.perplexity(adventure)\n\n# printer ut perplexitet.\nprint(\"Perpleksitet til news: %.2f\" %perpNews)\nprint(\"Perpleksitet til adventure: %.2f\" %perpAdventure)\n\n\n\"\"\" Oppgave 1 - evaluering av spraakmodeller\n\n$ python oblig2b_steinrr.py\nPerpleksitet til news: 72.69\nPerpleksitet til adventure: 117.41\n\n\nPerpleksiteten tiil adventure er hoeyeere fordi klassifikatoren vi benytter i LM er ikke trent paa dette korpuset.\nPerpleksiteten til news ville ha veart lavere hvis klassifikatoren vi benytter hadde bare veart trent paa news.\nMen dette er ikke bra pga da ville perpleksiteten til adventure veare enda hoyere enn den er naa.\n\n\"\"\"\n\nzippy = m.zipfity(news)\n\nfor sekvens in zippy:\n print(\"Ord: %4s Antall: %4d Sekvens: %.4f \" %(sekvens[0], sekvens[1], sekvens[2]))\n\n\"\"\" Oppgave 2 - Zipfianske distribusjon\n \nOrd: the Antall: 6386 Sekvens: 6386.0000 \nOrd: , Antall: 5188 Sekvens: 2594.0000 \nOrd: . Antall: 4030 Sekvens: 1343.3333 \nOrd: of Antall: 2861 Sekvens: 715.2500 \nOrd: and Antall: 2186 Sekvens: 437.2000 \nOrd: to Antall: 2144 Sekvens: 357.3333 \nOrd: a Antall: 2130 Sekvens: 304.2857 \nOrd: in Antall: 2020 Sekvens: 252.5000 \nOrd: for Antall: 969 Sekvens: 107.6667 \nOrd: that Antall: 829 Sekvens: 82.9000 \n\n\"\"\"\n\nbrown_tagged_sents = nltk.corpus.brown.tagged_sents(categories='adventure')\nadventure = [[w.lower() for w in line] for line in nltk.corpus.brown.sents(categories='adventure')]\n\n#m.regularTagger(adventure)\ncheckTaggStandardAdv = m.analyseRegularTagger('adventure')\ncheckTaggStandardFic = m.analyseRegularTagger('fiction')\ncheckTaggModifiedAdv = m.analyseRegularTagger('adventure', 'modified')\ncheckTaggModifiedFic = m.analyseRegularTagger('fiction', 'modified')\n\nprint(\"Standard vs modifisert tagging ved hjelp av reguleart uttrykk\")\nprint(\"Med corpus: 'adventure'\")\nprint(\" Standard: %4.2f Modifisert: %4.2f \" %(checkTaggStandardFic, checkTaggModifiedAdv))\nprint(\"Med corpus: 'fiction'\")\nprint(\" Standard: %4.2f Modifisert: %4.2f \" %(checkTaggStandardFic, checkTaggModifiedFic))\n\ninfile = open(\"test_setninger.txt\")\ntekst = []\n\nfor line in infile:\n words = line.split(\" \")\n tekst.append(words)\ninfile.close()\n\n# fikser at alle ord har smaa bokstaver:\ntekst = [[w.lower() for w in line] for line in tekst]\n\ntaggerTekst = m.regularTagger(tekst, 'modified')\n\nfor sentence in taggerTekst:\n for taggs in sentence:\n print(taggs)\n\n\"\"\" Oppgave 3 - Ordklassetagging med regulære uttrykk\nStandard vs modifisert tagging ved hjelp av reguleart uttrykk\nMed corpus: 'adventure'\n Standard: 0.18 Modifisert: 0.41 \nMed corpus: 'fiction'\n Standard: 0.18 Modifisert: 0.40 \n...\n..\n... skriver ut tagger som blir kopiert inn til test_setninger_m_taggs.txt\n..\n\nKommentarer for ytterligere forbedrelser:\n1. said skulle ha veart kattegorisert som verb: VBD\n2. he burde veare et pronom\n3. had burde veare et verb til have\n\noppdatere reguleare utrykk:\n1 og 3: (r'(.*ed|.*id|had)$', 'VBD')\n\n2. regler for pronoum har jeg ikke lagt inn i det hele tatt saa dette er noe som\nkan tilfoeres\n\"\"\"\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
IS_ZERO = lambda x: x == 0
ONE = 1
SUB1 = lambda x: x - 1
MULT = lambda x: lambda y: x * y
IF = lambda cond: lambda t_func: lambda f_func: t_func(None) if cond else f_func(None)
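# Factorial by explicit self-application: each outer lambda receives itself as
# `myself` and recurses via myself(myself)(SUB1(n)). Both IF branches are wrapped
# in thunks (lambda _: ...) so the recursive call is only evaluated when the
# condition is false; with the argument 6 below this prints 720.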
print(
(
lambda myself: (
lambda n: (
IF(
IS_ZERO(n)
)(
lambda _: ONE
)(
lambda _: MULT(n)( myself(myself)(SUB1(n)) )
)
)
)
)(
lambda myself: (
lambda n: (
IF(
IS_ZERO(n)
)(
lambda _: ONE
)(
lambda _: MULT(n)( myself(myself)(SUB1(n)) )
)
)
)
)
(6)
)
|
normal
|
{
"blob_id": "f8601ed7ba7c2b8d2dd8d5f74f7b5ae8e99dad78",
"index": 186,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nprint((lambda myself: lambda n: IF(IS_ZERO(n))(lambda _: ONE)(lambda _:\n MULT(n)(myself(myself)(SUB1(n)))))(lambda myself: lambda n: IF(IS_ZERO(\n n))(lambda _: ONE)(lambda _: MULT(n)(myself(myself)(SUB1(n)))))(6))\n",
"step-3": "IS_ZERO = lambda x: x == 0\nONE = 1\nSUB1 = lambda x: x - 1\nMULT = lambda x: lambda y: x * y\nIF = lambda cond: lambda t_func: lambda f_func: t_func(None\n ) if cond else f_func(None)\nprint((lambda myself: lambda n: IF(IS_ZERO(n))(lambda _: ONE)(lambda _:\n MULT(n)(myself(myself)(SUB1(n)))))(lambda myself: lambda n: IF(IS_ZERO(\n n))(lambda _: ONE)(lambda _: MULT(n)(myself(myself)(SUB1(n)))))(6))\n",
"step-4": "IS_ZERO = lambda x: x == 0\nONE = 1\nSUB1 = lambda x: x - 1\nMULT = lambda x: lambda y: x * y\nIF = lambda cond: lambda t_func: lambda f_func: t_func(None) if cond else f_func(None)\n\nprint(\n (\n lambda myself: (\n lambda n: (\n IF(\n IS_ZERO(n)\n )(\n lambda _: ONE\n )(\n lambda _: MULT(n)( myself(myself)(SUB1(n)) )\n )\n )\n )\n )(\n lambda myself: (\n lambda n: (\n IF(\n IS_ZERO(n)\n )(\n lambda _: ONE\n )(\n lambda _: MULT(n)( myself(myself)(SUB1(n)) )\n )\n )\n )\n )\n (6)\n)",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
from __future__ import annotations
from VersionControl.Branch import Branch
from Branches.Actions.Actions import Actions
from VersionControl.Git.Branches.Develop.Init import Init
class Develop(Branch):
def process(self):
if self.action is Actions.INIT:
self.start_message('Develop Init')
Init(self.state_handler, self.config_handler).process()
else:
raise NotImplementedError
|
normal
|
{
"blob_id": "338bf2406c233d857e1a688391161d58e1dab23c",
"index": 8910,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass Develop(Branch):\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass Develop(Branch):\n\n def process(self):\n if self.action is Actions.INIT:\n self.start_message('Develop Init')\n Init(self.state_handler, self.config_handler).process()\n else:\n raise NotImplementedError\n",
"step-4": "from __future__ import annotations\nfrom VersionControl.Branch import Branch\nfrom Branches.Actions.Actions import Actions\nfrom VersionControl.Git.Branches.Develop.Init import Init\n\n\nclass Develop(Branch):\n\n def process(self):\n if self.action is Actions.INIT:\n self.start_message('Develop Init')\n Init(self.state_handler, self.config_handler).process()\n else:\n raise NotImplementedError\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
import cv2 as cv
import numpy as np
img = np.zeros((512, 512, 3), np.uint8)
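# Draw on the black 512x512 BGR canvas: a white diagonal line, a blue
# rectangle, an orange circle, a magenta half-ellipse (arc 0-180 degrees),
# and magenta text; the window then stays open until a key is pressed.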
cv.line(img, (0, 0), (511, 511), (255, 255, 255), 10)
cv.rectangle(img, (384, 0), (510, 128), (255, 0, 0), 3)
cv.circle(img, (200, 60), 20, (0, 100, 255), 3)
cv.ellipse(img, (250, 250), (100, 50), 90, 0, 180, (255, 0, 255), 3)
font = cv.FONT_HERSHEY_SIMPLEX
cv.putText(img, 'OpenCV', (10, 500), font, 4, (255, 0, 255), 3)
cv.imshow('d1', img)
cv.waitKey(0)
|
normal
|
{
"blob_id": "08c5f5ac568b7575d8082976336a5893951b53c2",
"index": 9269,
"step-1": "<mask token>\n",
"step-2": "<mask token>\ncv.line(img, (0, 0), (511, 511), (255, 255, 255), 10)\ncv.rectangle(img, (384, 0), (510, 128), (255, 0, 0), 3)\ncv.circle(img, (200, 60), 20, (0, 100, 255), 3)\ncv.ellipse(img, (250, 250), (100, 50), 90, 0, 180, (255, 0, 255), 3)\n<mask token>\ncv.putText(img, 'OpenCV', (10, 500), font, 4, (255, 0, 255), 3)\ncv.imshow('d1', img)\ncv.waitKey(0)\n",
"step-3": "<mask token>\nimg = np.zeros((512, 512, 3), np.uint8)\ncv.line(img, (0, 0), (511, 511), (255, 255, 255), 10)\ncv.rectangle(img, (384, 0), (510, 128), (255, 0, 0), 3)\ncv.circle(img, (200, 60), 20, (0, 100, 255), 3)\ncv.ellipse(img, (250, 250), (100, 50), 90, 0, 180, (255, 0, 255), 3)\nfont = cv.FONT_HERSHEY_SIMPLEX\ncv.putText(img, 'OpenCV', (10, 500), font, 4, (255, 0, 255), 3)\ncv.imshow('d1', img)\ncv.waitKey(0)\n",
"step-4": "import cv2 as cv\nimport numpy as np\nimg = np.zeros((512, 512, 3), np.uint8)\ncv.line(img, (0, 0), (511, 511), (255, 255, 255), 10)\ncv.rectangle(img, (384, 0), (510, 128), (255, 0, 0), 3)\ncv.circle(img, (200, 60), 20, (0, 100, 255), 3)\ncv.ellipse(img, (250, 250), (100, 50), 90, 0, 180, (255, 0, 255), 3)\nfont = cv.FONT_HERSHEY_SIMPLEX\ncv.putText(img, 'OpenCV', (10, 500), font, 4, (255, 0, 255), 3)\ncv.imshow('d1', img)\ncv.waitKey(0)\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
#!/usr/bin/python
# Developed by Hector Cobos
import sys
import csv
import datetime
def mapper():
# Using a reader in order to read the whole file
reader = csv.reader(sys.stdin, delimiter='\t')
# Jump to the next line. We want to avoid the line with the name of the fields
reader.next()
# loop
for line in reader:
# Checking no. of fields are correct
		if len(line) == 19:
			author_id = line[3]
			date = line[8]
			time = date.strip().split(" ")
			hour = time[1].strip().split(":")
			print "{0}\t{1}".format(author_id, hour[0])
mapper()
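# The mapper emits one "author_id<TAB>hour" pair per well-formed record; a
# downstream reducer (not shown in this file) can then count posts per
# author per hour of day.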
|
normal
|
{
"blob_id": "d959ed49a83fb63e0bce31b5c81c013f0986706b",
"index": 4314,
"step-1": "#!/usr/bin/python\n\n# Developed by Hector Cobos\n\nimport sys\nimport csv\nimport datetime\n\ndef mapper():\n\t# Using a reader in order to read the whole file\n\treader = csv.reader(sys.stdin, delimiter='\\t')\n\t# Jump to the next line. We want to avoid the line with the name of the fields\n\treader.next()\n\t# loop\n\tfor line in reader:\n\t\t# Checking no. of fields are correct\n \t\tif len(line) == 19:\n\t\t\tauthor_id=line[3]\n\t\t\tdate=line[8]\n\t\t\ttime = date.strip().split(\" \")\n\t\t\thour = time[1].strip().split(\":\")\n \t\tprint \"{0}\\t{1}\".format(author_id, hour[0])\n\n\nmapper()\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
'''
Created on Nov 16, 2013
@author: mo
'''
import unittest
from Board import TicTacToe_Board
from ComputerPlayer import ComputerPlayer
from utils import debug_print as d_pr
from main import StartNewGame
class Test(unittest.TestCase):
def setUp(self):
self.the_board = TicTacToe_Board()
def tearDown(self):
pass
#these may be impossible boards, but still it tests the win detector
def test_these_should_win_for_x(self):
self.assertEqual(TicTacToe_Board.IsWinningBoard_static( [ ['x', 'x', 'x'],
['o', 'x', 'o'],
['o', 'x', 'o']]), 'x', "should return x")
self.assertEqual(TicTacToe_Board.IsWinningBoard_static([
['x', 'o', 'o'],
['o', 'x', 'o'],
['x', 'o', 'x']
]) , 'x', 'should return x')
self.assertEqual(TicTacToe_Board.IsWinningBoard_static([
['o','x', 'o'],
['x', 'x', 'x'],
['-', '-', '-']
]), 'x', 'should return x'
)
def test_these_should_win_for_o(self):
self.assertEqual(TicTacToe_Board.IsWinningBoard_static( [ ['o', 'x', 'o'],
['o', 'x', 'x'],
['o', 'o', 'x']]), 'o', "should return o")
self.assertEqual(TicTacToe_Board.IsWinningBoard_static([
['x', 'o', '-'],
['o', 'o', 'o'],
['o', 'x', 'x']
]) , 'o', 'should return o')
self.assertEqual(TicTacToe_Board.IsWinningBoard_static([
['o','x', 'o'],
['x', 'o', 'x'],
['-', '-', 'o']
]), 'o', 'should return o'
)
def test_these_should_win_for_nobody(self):
self.assertEqual(TicTacToe_Board.IsWinningBoard_static( [ ['x', 'x', '-'],
['o', '-', 'o'],
['o', '-', 'o']]), None, "should return None")
self.assertEqual(TicTacToe_Board.IsWinningBoard_static([
['-', '-', '-'],
['-', '-', '-'],
['x', 'o', 'x']
]) , None, 'should return None')
self.assertEqual(TicTacToe_Board.IsWinningBoard_static([
['o','x', 'o'],
['-', '-', 'x'],
['-', 'o', 'o']
]), None, 'should return None'
)
def test_make_move(self):
self.the_board.board_array=[ ['x', '-', 'x'],
['o', '-', 'o'],
['o', 'x', '-']
]
self.the_board.whose_turn='o'
self.the_board.MakeMove([1,1])
self.assertEqual(self.the_board.board_array[1][1], 'o', "should be an o")
self.assertEqual(self.the_board.whose_turn, 'x', 'turn should change')
def test_computer_player_get_outcome(self):
comp_player = ComputerPlayer('x', self.the_board)
self.the_board.human_player_x_or_o = 'o'
self.the_board.c_player_x_or_o = 'x'
self.the_board.board_array = [ ['-', '-', 'x'],
['-', 'o', '-'],
['-', '-', '-']
]
self.the_board.whose_turn = 'x'
move_seq_1 = [ {'player': 'x', 'move' : [0,1] }, {'player': 'o', 'move' : [2,1]}, {'player': 'x', 'move': [0,0]} ]
out=self.the_board.GetOutcomeOfMoveSequence(move_seq_1)
self.assertEqual(out, 'x', 'x should win: outcome should be x')
move_seq_2 = [{'player': 'x', 'move' : [0,1] }, {'player': 'o', 'move' : [2,1]}]
out = self.the_board.GetOutcomeOfMoveSequence(move_seq_2)
self.assertEqual(out, None, 'no one should win: outcome will be None')
move_seq_3 = [ {'player': 'x', 'move' : [0,1] }, {'player': 'o', 'move' : [0,0] }, {'player': 'x', 'move' : [2,1]},
{'player': 'o', 'move' : [2,2] }
]
out = self.the_board.GetOutcomeOfMoveSequence(move_seq_3)
self.assertEqual(out, 'o', 'o should win')
def test_get_winning_moves_for_opponent(self):
comp_player = ComputerPlayer('x', self.the_board)
self.the_board.human_player_x_or_o = 'o'
self.the_board.c_player_x_or_o = 'x'
self.the_board.board_array = [ ['x', '-', 'x'],
['-', 'o', '-'],
['o', 'o', '-']
]
self.the_board.whose_turn = 'x'
winning_moves=self.the_board.GetWinningMovesFor( 'human')
d_pr(winning_moves)
self.assertIn([0,1], winning_moves)
self.assertIn([2,2], winning_moves)
comp_player = ComputerPlayer('o', self.the_board)
self.the_board.human_player_x_or_o = 'x'
self.the_board.c_player_x_or_o = 'o'
self.the_board.board_array = [ ['x', '-', 'x'],
['-', 'o', '-'],
['o', 'o', '-']
]
self.the_board.whose_turn = 'o'
winning_moves=self.the_board.GetWinningMovesFor( 'human')
d_pr(winning_moves)
self.assertIn([0,1], winning_moves)
def test_get_threatening_moves(self):
comp_player = ComputerPlayer('x', self.the_board)
self.the_board.human_player_x_or_o = 'o'
self.the_board.c_player_x_or_o = 'x'
self.the_board.board_array = [ ['-', '-', 'x'],
['-', 'o', '-'],
['o', '-', '-']
]
self.the_board.whose_turn = 'x'
threatening_moves=comp_player.GetThreateningMovesWithoutTraps(self.the_board.GetEmptySquares())
self.assertIn([0,0], threatening_moves)
self.assertIn([2,2], threatening_moves)
d_pr('threats without traps: ' + str(threatening_moves))
self.assertEqual(len(threatening_moves), 2)
self.the_board.human_player_x_or_o = 'o'
self.the_board.c_player_x_or_o = 'x'
self.the_board.board_array = [ ['-', '-', 'o'],
['-', 'x', '-'],
['o', '-', '-']
]
self.the_board.whose_turn = 'x'
threatening_moves=comp_player.GetThreateningMovesWithoutTraps(self.the_board.GetEmptySquares())
self.assertIn([0,1], threatening_moves)
self.assertIn([2,1], threatening_moves)
self.assertIn([1,0], threatening_moves)
self.assertIn([1,2], threatening_moves)
d_pr('threats without traps: ' + str(threatening_moves))
self.assertEqual(len(threatening_moves), 4)
def test_algorithm_by_playing_large_num_of_random_games(self):
        NUM_GAMES = 10
        # NUM_GAMES = 100000  # this works but takes a long time
for i in range(0, NUM_GAMES + 1):
win_result = StartNewGame(UseRandom=True)
self.assertTrue(win_result == 'Computer' or win_result == 'Tie')
def test_print(self):
self.the_board.board_array = [ ['-', '-', 'x'],
['-', 'o', '-'],
['x', 'o', '-']]
self.the_board.PrintBoardToConsole()
def test_empty_squares(self):
pass
if __name__ == "__main__":
#import sys;sys.argv = ['', 'Test.testName']
unittest.main()
|
normal
|
{
"blob_id": "1968923cd923e68dc5ff2148802f18e40a5e6c33",
"index": 939,
"step-1": "<mask token>\n\n\nclass Test(unittest.TestCase):\n <mask token>\n\n def tearDown(self):\n pass\n\n def test_these_should_win_for_x(self):\n self.assertEqual(TicTacToe_Board.IsWinningBoard_static([['x', 'x',\n 'x'], ['o', 'x', 'o'], ['o', 'x', 'o']]), 'x', 'should return x')\n self.assertEqual(TicTacToe_Board.IsWinningBoard_static([['x', 'o',\n 'o'], ['o', 'x', 'o'], ['x', 'o', 'x']]), 'x', 'should return x')\n self.assertEqual(TicTacToe_Board.IsWinningBoard_static([['o', 'x',\n 'o'], ['x', 'x', 'x'], ['-', '-', '-']]), 'x', 'should return x')\n\n def test_these_should_win_for_o(self):\n self.assertEqual(TicTacToe_Board.IsWinningBoard_static([['o', 'x',\n 'o'], ['o', 'x', 'x'], ['o', 'o', 'x']]), 'o', 'should return o')\n self.assertEqual(TicTacToe_Board.IsWinningBoard_static([['x', 'o',\n '-'], ['o', 'o', 'o'], ['o', 'x', 'x']]), 'o', 'should return o')\n self.assertEqual(TicTacToe_Board.IsWinningBoard_static([['o', 'x',\n 'o'], ['x', 'o', 'x'], ['-', '-', 'o']]), 'o', 'should return o')\n\n def test_these_should_win_for_nobody(self):\n self.assertEqual(TicTacToe_Board.IsWinningBoard_static([['x', 'x',\n '-'], ['o', '-', 'o'], ['o', '-', 'o']]), None,\n 'should return None')\n self.assertEqual(TicTacToe_Board.IsWinningBoard_static([['-', '-',\n '-'], ['-', '-', '-'], ['x', 'o', 'x']]), None,\n 'should return None')\n self.assertEqual(TicTacToe_Board.IsWinningBoard_static([['o', 'x',\n 'o'], ['-', '-', 'x'], ['-', 'o', 'o']]), None,\n 'should return None')\n\n def test_make_move(self):\n self.the_board.board_array = [['x', '-', 'x'], ['o', '-', 'o'], [\n 'o', 'x', '-']]\n self.the_board.whose_turn = 'o'\n self.the_board.MakeMove([1, 1])\n self.assertEqual(self.the_board.board_array[1][1], 'o',\n 'should be an o')\n self.assertEqual(self.the_board.whose_turn, 'x', 'turn should change')\n <mask token>\n <mask token>\n <mask token>\n\n def test_algorithm_by_playing_large_num_of_random_games(self):\n NUM_GAMES = 10\n NUM_GAMES = 10\n for i in range(0, NUM_GAMES + 1):\n win_result = StartNewGame(UseRandom=True)\n self.assertTrue(win_result == 'Computer' or win_result == 'Tie')\n\n def test_print(self):\n self.the_board.board_array = [['-', '-', 'x'], ['-', 'o', '-'], [\n 'x', 'o', '-']]\n self.the_board.PrintBoardToConsole()\n\n def test_empty_squares(self):\n pass\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass Test(unittest.TestCase):\n\n def setUp(self):\n self.the_board = TicTacToe_Board()\n\n def tearDown(self):\n pass\n\n def test_these_should_win_for_x(self):\n self.assertEqual(TicTacToe_Board.IsWinningBoard_static([['x', 'x',\n 'x'], ['o', 'x', 'o'], ['o', 'x', 'o']]), 'x', 'should return x')\n self.assertEqual(TicTacToe_Board.IsWinningBoard_static([['x', 'o',\n 'o'], ['o', 'x', 'o'], ['x', 'o', 'x']]), 'x', 'should return x')\n self.assertEqual(TicTacToe_Board.IsWinningBoard_static([['o', 'x',\n 'o'], ['x', 'x', 'x'], ['-', '-', '-']]), 'x', 'should return x')\n\n def test_these_should_win_for_o(self):\n self.assertEqual(TicTacToe_Board.IsWinningBoard_static([['o', 'x',\n 'o'], ['o', 'x', 'x'], ['o', 'o', 'x']]), 'o', 'should return o')\n self.assertEqual(TicTacToe_Board.IsWinningBoard_static([['x', 'o',\n '-'], ['o', 'o', 'o'], ['o', 'x', 'x']]), 'o', 'should return o')\n self.assertEqual(TicTacToe_Board.IsWinningBoard_static([['o', 'x',\n 'o'], ['x', 'o', 'x'], ['-', '-', 'o']]), 'o', 'should return o')\n\n def test_these_should_win_for_nobody(self):\n self.assertEqual(TicTacToe_Board.IsWinningBoard_static([['x', 'x',\n '-'], ['o', '-', 'o'], ['o', '-', 'o']]), None,\n 'should return None')\n self.assertEqual(TicTacToe_Board.IsWinningBoard_static([['-', '-',\n '-'], ['-', '-', '-'], ['x', 'o', 'x']]), None,\n 'should return None')\n self.assertEqual(TicTacToe_Board.IsWinningBoard_static([['o', 'x',\n 'o'], ['-', '-', 'x'], ['-', 'o', 'o']]), None,\n 'should return None')\n\n def test_make_move(self):\n self.the_board.board_array = [['x', '-', 'x'], ['o', '-', 'o'], [\n 'o', 'x', '-']]\n self.the_board.whose_turn = 'o'\n self.the_board.MakeMove([1, 1])\n self.assertEqual(self.the_board.board_array[1][1], 'o',\n 'should be an o')\n self.assertEqual(self.the_board.whose_turn, 'x', 'turn should change')\n <mask token>\n\n def test_get_winning_moves_for_opponent(self):\n comp_player = ComputerPlayer('x', self.the_board)\n self.the_board.human_player_x_or_o = 'o'\n self.the_board.c_player_x_or_o = 'x'\n self.the_board.board_array = [['x', '-', 'x'], ['-', 'o', '-'], [\n 'o', 'o', '-']]\n self.the_board.whose_turn = 'x'\n winning_moves = self.the_board.GetWinningMovesFor('human')\n d_pr(winning_moves)\n self.assertIn([0, 1], winning_moves)\n self.assertIn([2, 2], winning_moves)\n comp_player = ComputerPlayer('o', self.the_board)\n self.the_board.human_player_x_or_o = 'x'\n self.the_board.c_player_x_or_o = 'o'\n self.the_board.board_array = [['x', '-', 'x'], ['-', 'o', '-'], [\n 'o', 'o', '-']]\n self.the_board.whose_turn = 'o'\n winning_moves = self.the_board.GetWinningMovesFor('human')\n d_pr(winning_moves)\n self.assertIn([0, 1], winning_moves)\n\n def test_get_threatening_moves(self):\n comp_player = ComputerPlayer('x', self.the_board)\n self.the_board.human_player_x_or_o = 'o'\n self.the_board.c_player_x_or_o = 'x'\n self.the_board.board_array = [['-', '-', 'x'], ['-', 'o', '-'], [\n 'o', '-', '-']]\n self.the_board.whose_turn = 'x'\n threatening_moves = comp_player.GetThreateningMovesWithoutTraps(self\n .the_board.GetEmptySquares())\n self.assertIn([0, 0], threatening_moves)\n self.assertIn([2, 2], threatening_moves)\n d_pr('threats without traps: ' + str(threatening_moves))\n self.assertEqual(len(threatening_moves), 2)\n self.the_board.human_player_x_or_o = 'o'\n self.the_board.c_player_x_or_o = 'x'\n self.the_board.board_array = [['-', '-', 'o'], ['-', 'x', '-'], [\n 'o', '-', '-']]\n self.the_board.whose_turn = 'x'\n threatening_moves = 
comp_player.GetThreateningMovesWithoutTraps(self\n .the_board.GetEmptySquares())\n self.assertIn([0, 1], threatening_moves)\n self.assertIn([2, 1], threatening_moves)\n self.assertIn([1, 0], threatening_moves)\n self.assertIn([1, 2], threatening_moves)\n d_pr('threats without traps: ' + str(threatening_moves))\n self.assertEqual(len(threatening_moves), 4)\n\n def test_algorithm_by_playing_large_num_of_random_games(self):\n NUM_GAMES = 10\n NUM_GAMES = 10\n for i in range(0, NUM_GAMES + 1):\n win_result = StartNewGame(UseRandom=True)\n self.assertTrue(win_result == 'Computer' or win_result == 'Tie')\n\n def test_print(self):\n self.the_board.board_array = [['-', '-', 'x'], ['-', 'o', '-'], [\n 'x', 'o', '-']]\n self.the_board.PrintBoardToConsole()\n\n def test_empty_squares(self):\n pass\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass Test(unittest.TestCase):\n\n def setUp(self):\n self.the_board = TicTacToe_Board()\n\n def tearDown(self):\n pass\n\n def test_these_should_win_for_x(self):\n self.assertEqual(TicTacToe_Board.IsWinningBoard_static([['x', 'x',\n 'x'], ['o', 'x', 'o'], ['o', 'x', 'o']]), 'x', 'should return x')\n self.assertEqual(TicTacToe_Board.IsWinningBoard_static([['x', 'o',\n 'o'], ['o', 'x', 'o'], ['x', 'o', 'x']]), 'x', 'should return x')\n self.assertEqual(TicTacToe_Board.IsWinningBoard_static([['o', 'x',\n 'o'], ['x', 'x', 'x'], ['-', '-', '-']]), 'x', 'should return x')\n\n def test_these_should_win_for_o(self):\n self.assertEqual(TicTacToe_Board.IsWinningBoard_static([['o', 'x',\n 'o'], ['o', 'x', 'x'], ['o', 'o', 'x']]), 'o', 'should return o')\n self.assertEqual(TicTacToe_Board.IsWinningBoard_static([['x', 'o',\n '-'], ['o', 'o', 'o'], ['o', 'x', 'x']]), 'o', 'should return o')\n self.assertEqual(TicTacToe_Board.IsWinningBoard_static([['o', 'x',\n 'o'], ['x', 'o', 'x'], ['-', '-', 'o']]), 'o', 'should return o')\n\n def test_these_should_win_for_nobody(self):\n self.assertEqual(TicTacToe_Board.IsWinningBoard_static([['x', 'x',\n '-'], ['o', '-', 'o'], ['o', '-', 'o']]), None,\n 'should return None')\n self.assertEqual(TicTacToe_Board.IsWinningBoard_static([['-', '-',\n '-'], ['-', '-', '-'], ['x', 'o', 'x']]), None,\n 'should return None')\n self.assertEqual(TicTacToe_Board.IsWinningBoard_static([['o', 'x',\n 'o'], ['-', '-', 'x'], ['-', 'o', 'o']]), None,\n 'should return None')\n\n def test_make_move(self):\n self.the_board.board_array = [['x', '-', 'x'], ['o', '-', 'o'], [\n 'o', 'x', '-']]\n self.the_board.whose_turn = 'o'\n self.the_board.MakeMove([1, 1])\n self.assertEqual(self.the_board.board_array[1][1], 'o',\n 'should be an o')\n self.assertEqual(self.the_board.whose_turn, 'x', 'turn should change')\n\n def test_computer_player_get_outcome(self):\n comp_player = ComputerPlayer('x', self.the_board)\n self.the_board.human_player_x_or_o = 'o'\n self.the_board.c_player_x_or_o = 'x'\n self.the_board.board_array = [['-', '-', 'x'], ['-', 'o', '-'], [\n '-', '-', '-']]\n self.the_board.whose_turn = 'x'\n move_seq_1 = [{'player': 'x', 'move': [0, 1]}, {'player': 'o',\n 'move': [2, 1]}, {'player': 'x', 'move': [0, 0]}]\n out = self.the_board.GetOutcomeOfMoveSequence(move_seq_1)\n self.assertEqual(out, 'x', 'x should win: outcome should be x')\n move_seq_2 = [{'player': 'x', 'move': [0, 1]}, {'player': 'o',\n 'move': [2, 1]}]\n out = self.the_board.GetOutcomeOfMoveSequence(move_seq_2)\n self.assertEqual(out, None, 'no one should win: outcome will be None')\n move_seq_3 = [{'player': 'x', 'move': [0, 1]}, {'player': 'o',\n 'move': [0, 0]}, {'player': 'x', 'move': [2, 1]}, {'player':\n 'o', 'move': [2, 2]}]\n out = self.the_board.GetOutcomeOfMoveSequence(move_seq_3)\n self.assertEqual(out, 'o', 'o should win')\n\n def test_get_winning_moves_for_opponent(self):\n comp_player = ComputerPlayer('x', self.the_board)\n self.the_board.human_player_x_or_o = 'o'\n self.the_board.c_player_x_or_o = 'x'\n self.the_board.board_array = [['x', '-', 'x'], ['-', 'o', '-'], [\n 'o', 'o', '-']]\n self.the_board.whose_turn = 'x'\n winning_moves = self.the_board.GetWinningMovesFor('human')\n d_pr(winning_moves)\n self.assertIn([0, 1], winning_moves)\n self.assertIn([2, 2], winning_moves)\n comp_player = ComputerPlayer('o', self.the_board)\n self.the_board.human_player_x_or_o = 'x'\n self.the_board.c_player_x_or_o = 'o'\n self.the_board.board_array = [['x', '-', 'x'], ['-', 'o', '-'], 
[\n 'o', 'o', '-']]\n self.the_board.whose_turn = 'o'\n winning_moves = self.the_board.GetWinningMovesFor('human')\n d_pr(winning_moves)\n self.assertIn([0, 1], winning_moves)\n\n def test_get_threatening_moves(self):\n comp_player = ComputerPlayer('x', self.the_board)\n self.the_board.human_player_x_or_o = 'o'\n self.the_board.c_player_x_or_o = 'x'\n self.the_board.board_array = [['-', '-', 'x'], ['-', 'o', '-'], [\n 'o', '-', '-']]\n self.the_board.whose_turn = 'x'\n threatening_moves = comp_player.GetThreateningMovesWithoutTraps(self\n .the_board.GetEmptySquares())\n self.assertIn([0, 0], threatening_moves)\n self.assertIn([2, 2], threatening_moves)\n d_pr('threats without traps: ' + str(threatening_moves))\n self.assertEqual(len(threatening_moves), 2)\n self.the_board.human_player_x_or_o = 'o'\n self.the_board.c_player_x_or_o = 'x'\n self.the_board.board_array = [['-', '-', 'o'], ['-', 'x', '-'], [\n 'o', '-', '-']]\n self.the_board.whose_turn = 'x'\n threatening_moves = comp_player.GetThreateningMovesWithoutTraps(self\n .the_board.GetEmptySquares())\n self.assertIn([0, 1], threatening_moves)\n self.assertIn([2, 1], threatening_moves)\n self.assertIn([1, 0], threatening_moves)\n self.assertIn([1, 2], threatening_moves)\n d_pr('threats without traps: ' + str(threatening_moves))\n self.assertEqual(len(threatening_moves), 4)\n\n def test_algorithm_by_playing_large_num_of_random_games(self):\n NUM_GAMES = 10\n NUM_GAMES = 10\n for i in range(0, NUM_GAMES + 1):\n win_result = StartNewGame(UseRandom=True)\n self.assertTrue(win_result == 'Computer' or win_result == 'Tie')\n\n def test_print(self):\n self.the_board.board_array = [['-', '-', 'x'], ['-', 'o', '-'], [\n 'x', 'o', '-']]\n self.the_board.PrintBoardToConsole()\n\n def test_empty_squares(self):\n pass\n\n\n<mask token>\n",
"step-4": "<mask token>\n\n\nclass Test(unittest.TestCase):\n\n def setUp(self):\n self.the_board = TicTacToe_Board()\n\n def tearDown(self):\n pass\n\n def test_these_should_win_for_x(self):\n self.assertEqual(TicTacToe_Board.IsWinningBoard_static([['x', 'x',\n 'x'], ['o', 'x', 'o'], ['o', 'x', 'o']]), 'x', 'should return x')\n self.assertEqual(TicTacToe_Board.IsWinningBoard_static([['x', 'o',\n 'o'], ['o', 'x', 'o'], ['x', 'o', 'x']]), 'x', 'should return x')\n self.assertEqual(TicTacToe_Board.IsWinningBoard_static([['o', 'x',\n 'o'], ['x', 'x', 'x'], ['-', '-', '-']]), 'x', 'should return x')\n\n def test_these_should_win_for_o(self):\n self.assertEqual(TicTacToe_Board.IsWinningBoard_static([['o', 'x',\n 'o'], ['o', 'x', 'x'], ['o', 'o', 'x']]), 'o', 'should return o')\n self.assertEqual(TicTacToe_Board.IsWinningBoard_static([['x', 'o',\n '-'], ['o', 'o', 'o'], ['o', 'x', 'x']]), 'o', 'should return o')\n self.assertEqual(TicTacToe_Board.IsWinningBoard_static([['o', 'x',\n 'o'], ['x', 'o', 'x'], ['-', '-', 'o']]), 'o', 'should return o')\n\n def test_these_should_win_for_nobody(self):\n self.assertEqual(TicTacToe_Board.IsWinningBoard_static([['x', 'x',\n '-'], ['o', '-', 'o'], ['o', '-', 'o']]), None,\n 'should return None')\n self.assertEqual(TicTacToe_Board.IsWinningBoard_static([['-', '-',\n '-'], ['-', '-', '-'], ['x', 'o', 'x']]), None,\n 'should return None')\n self.assertEqual(TicTacToe_Board.IsWinningBoard_static([['o', 'x',\n 'o'], ['-', '-', 'x'], ['-', 'o', 'o']]), None,\n 'should return None')\n\n def test_make_move(self):\n self.the_board.board_array = [['x', '-', 'x'], ['o', '-', 'o'], [\n 'o', 'x', '-']]\n self.the_board.whose_turn = 'o'\n self.the_board.MakeMove([1, 1])\n self.assertEqual(self.the_board.board_array[1][1], 'o',\n 'should be an o')\n self.assertEqual(self.the_board.whose_turn, 'x', 'turn should change')\n\n def test_computer_player_get_outcome(self):\n comp_player = ComputerPlayer('x', self.the_board)\n self.the_board.human_player_x_or_o = 'o'\n self.the_board.c_player_x_or_o = 'x'\n self.the_board.board_array = [['-', '-', 'x'], ['-', 'o', '-'], [\n '-', '-', '-']]\n self.the_board.whose_turn = 'x'\n move_seq_1 = [{'player': 'x', 'move': [0, 1]}, {'player': 'o',\n 'move': [2, 1]}, {'player': 'x', 'move': [0, 0]}]\n out = self.the_board.GetOutcomeOfMoveSequence(move_seq_1)\n self.assertEqual(out, 'x', 'x should win: outcome should be x')\n move_seq_2 = [{'player': 'x', 'move': [0, 1]}, {'player': 'o',\n 'move': [2, 1]}]\n out = self.the_board.GetOutcomeOfMoveSequence(move_seq_2)\n self.assertEqual(out, None, 'no one should win: outcome will be None')\n move_seq_3 = [{'player': 'x', 'move': [0, 1]}, {'player': 'o',\n 'move': [0, 0]}, {'player': 'x', 'move': [2, 1]}, {'player':\n 'o', 'move': [2, 2]}]\n out = self.the_board.GetOutcomeOfMoveSequence(move_seq_3)\n self.assertEqual(out, 'o', 'o should win')\n\n def test_get_winning_moves_for_opponent(self):\n comp_player = ComputerPlayer('x', self.the_board)\n self.the_board.human_player_x_or_o = 'o'\n self.the_board.c_player_x_or_o = 'x'\n self.the_board.board_array = [['x', '-', 'x'], ['-', 'o', '-'], [\n 'o', 'o', '-']]\n self.the_board.whose_turn = 'x'\n winning_moves = self.the_board.GetWinningMovesFor('human')\n d_pr(winning_moves)\n self.assertIn([0, 1], winning_moves)\n self.assertIn([2, 2], winning_moves)\n comp_player = ComputerPlayer('o', self.the_board)\n self.the_board.human_player_x_or_o = 'x'\n self.the_board.c_player_x_or_o = 'o'\n self.the_board.board_array = [['x', '-', 'x'], ['-', 'o', '-'], 
[\n 'o', 'o', '-']]\n self.the_board.whose_turn = 'o'\n winning_moves = self.the_board.GetWinningMovesFor('human')\n d_pr(winning_moves)\n self.assertIn([0, 1], winning_moves)\n\n def test_get_threatening_moves(self):\n comp_player = ComputerPlayer('x', self.the_board)\n self.the_board.human_player_x_or_o = 'o'\n self.the_board.c_player_x_or_o = 'x'\n self.the_board.board_array = [['-', '-', 'x'], ['-', 'o', '-'], [\n 'o', '-', '-']]\n self.the_board.whose_turn = 'x'\n threatening_moves = comp_player.GetThreateningMovesWithoutTraps(self\n .the_board.GetEmptySquares())\n self.assertIn([0, 0], threatening_moves)\n self.assertIn([2, 2], threatening_moves)\n d_pr('threats without traps: ' + str(threatening_moves))\n self.assertEqual(len(threatening_moves), 2)\n self.the_board.human_player_x_or_o = 'o'\n self.the_board.c_player_x_or_o = 'x'\n self.the_board.board_array = [['-', '-', 'o'], ['-', 'x', '-'], [\n 'o', '-', '-']]\n self.the_board.whose_turn = 'x'\n threatening_moves = comp_player.GetThreateningMovesWithoutTraps(self\n .the_board.GetEmptySquares())\n self.assertIn([0, 1], threatening_moves)\n self.assertIn([2, 1], threatening_moves)\n self.assertIn([1, 0], threatening_moves)\n self.assertIn([1, 2], threatening_moves)\n d_pr('threats without traps: ' + str(threatening_moves))\n self.assertEqual(len(threatening_moves), 4)\n\n def test_algorithm_by_playing_large_num_of_random_games(self):\n NUM_GAMES = 10\n NUM_GAMES = 10\n for i in range(0, NUM_GAMES + 1):\n win_result = StartNewGame(UseRandom=True)\n self.assertTrue(win_result == 'Computer' or win_result == 'Tie')\n\n def test_print(self):\n self.the_board.board_array = [['-', '-', 'x'], ['-', 'o', '-'], [\n 'x', 'o', '-']]\n self.the_board.PrintBoardToConsole()\n\n def test_empty_squares(self):\n pass\n\n\nif __name__ == '__main__':\n unittest.main()\n",
"step-5": "'''\nCreated on Nov 16, 2013\n\n@author: mo\n'''\nimport unittest\nfrom Board import TicTacToe_Board\nfrom ComputerPlayer import ComputerPlayer\nfrom utils import debug_print as d_pr\n\nfrom main import StartNewGame\n\nclass Test(unittest.TestCase):\n\n\n def setUp(self):\n self.the_board = TicTacToe_Board()\n\n \n def tearDown(self):\n pass\n\n #these may be impossible boards, but still it tests the win detector\n \n def test_these_should_win_for_x(self):\n \n self.assertEqual(TicTacToe_Board.IsWinningBoard_static( [ ['x', 'x', 'x'], \n ['o', 'x', 'o'], \n ['o', 'x', 'o']]), 'x', \"should return x\")\n \n self.assertEqual(TicTacToe_Board.IsWinningBoard_static([\n ['x', 'o', 'o'],\n ['o', 'x', 'o'],\n ['x', 'o', 'x']\n \n \n ]) , 'x', 'should return x')\n \n self.assertEqual(TicTacToe_Board.IsWinningBoard_static([\n ['o','x', 'o'],\n ['x', 'x', 'x'],\n ['-', '-', '-']\n ]), 'x', 'should return x'\n )\n \n \n \n def test_these_should_win_for_o(self):\n \n \n self.assertEqual(TicTacToe_Board.IsWinningBoard_static( [ ['o', 'x', 'o'], \n ['o', 'x', 'x'], \n ['o', 'o', 'x']]), 'o', \"should return o\")\n \n self.assertEqual(TicTacToe_Board.IsWinningBoard_static([\n ['x', 'o', '-'],\n ['o', 'o', 'o'],\n ['o', 'x', 'x']\n \n \n ]) , 'o', 'should return o')\n \n self.assertEqual(TicTacToe_Board.IsWinningBoard_static([\n ['o','x', 'o'],\n ['x', 'o', 'x'],\n ['-', '-', 'o']\n ]), 'o', 'should return o'\n )\n \n\n\n def test_these_should_win_for_nobody(self):\n \n \n self.assertEqual(TicTacToe_Board.IsWinningBoard_static( [ ['x', 'x', '-'], \n ['o', '-', 'o'], \n ['o', '-', 'o']]), None, \"should return None\")\n \n self.assertEqual(TicTacToe_Board.IsWinningBoard_static([\n ['-', '-', '-'],\n ['-', '-', '-'],\n ['x', 'o', 'x']\n \n \n ]) , None, 'should return None')\n \n self.assertEqual(TicTacToe_Board.IsWinningBoard_static([\n ['o','x', 'o'],\n ['-', '-', 'x'],\n ['-', 'o', 'o']\n ]), None, 'should return None'\n )\n \n def test_make_move(self):\n \n self.the_board.board_array=[ ['x', '-', 'x'],\n ['o', '-', 'o'],\n ['o', 'x', '-']\n ]\n \n self.the_board.whose_turn='o'\n \n self.the_board.MakeMove([1,1])\n \n self.assertEqual(self.the_board.board_array[1][1], 'o', \"should be an o\")\n \n self.assertEqual(self.the_board.whose_turn, 'x', 'turn should change')\n \n \n\n def test_computer_player_get_outcome(self):\n \n comp_player = ComputerPlayer('x', self.the_board)\n \n self.the_board.human_player_x_or_o = 'o'\n self.the_board.c_player_x_or_o = 'x'\n \n \n self.the_board.board_array = [ ['-', '-', 'x'],\n ['-', 'o', '-'],\n ['-', '-', '-']\n ]\n self.the_board.whose_turn = 'x'\n \n move_seq_1 = [ {'player': 'x', 'move' : [0,1] }, {'player': 'o', 'move' : [2,1]}, {'player': 'x', 'move': [0,0]} ]\n \n out=self.the_board.GetOutcomeOfMoveSequence(move_seq_1)\n \n self.assertEqual(out, 'x', 'x should win: outcome should be x')\n \n \n move_seq_2 = [{'player': 'x', 'move' : [0,1] }, {'player': 'o', 'move' : [2,1]}]\n \n out = self.the_board.GetOutcomeOfMoveSequence(move_seq_2)\n self.assertEqual(out, None, 'no one should win: outcome will be None')\n\n move_seq_3 = [ {'player': 'x', 'move' : [0,1] }, {'player': 'o', 'move' : [0,0] }, {'player': 'x', 'move' : [2,1]},\n {'player': 'o', 'move' : [2,2] }\n ]\n \n out = self.the_board.GetOutcomeOfMoveSequence(move_seq_3)\n \n self.assertEqual(out, 'o', 'o should win')\n \n \n def test_get_winning_moves_for_opponent(self):\n \n comp_player = ComputerPlayer('x', self.the_board)\n \n self.the_board.human_player_x_or_o = 'o'\n 
self.the_board.c_player_x_or_o = 'x'\n \n \n self.the_board.board_array = [ ['x', '-', 'x'],\n ['-', 'o', '-'],\n ['o', 'o', '-']\n ]\n self.the_board.whose_turn = 'x'\n \n winning_moves=self.the_board.GetWinningMovesFor( 'human')\n \n d_pr(winning_moves)\n self.assertIn([0,1], winning_moves)\n self.assertIn([2,2], winning_moves)\n \n comp_player = ComputerPlayer('o', self.the_board)\n \n self.the_board.human_player_x_or_o = 'x'\n self.the_board.c_player_x_or_o = 'o'\n \n \n self.the_board.board_array = [ ['x', '-', 'x'],\n ['-', 'o', '-'],\n ['o', 'o', '-']\n ]\n self.the_board.whose_turn = 'o'\n \n winning_moves=self.the_board.GetWinningMovesFor( 'human')\n \n d_pr(winning_moves)\n self.assertIn([0,1], winning_moves)\n \n \n \n def test_get_threatening_moves(self):\n \n comp_player = ComputerPlayer('x', self.the_board)\n \n self.the_board.human_player_x_or_o = 'o'\n self.the_board.c_player_x_or_o = 'x'\n \n \n self.the_board.board_array = [ ['-', '-', 'x'],\n ['-', 'o', '-'],\n ['o', '-', '-']\n ]\n self.the_board.whose_turn = 'x'\n \n threatening_moves=comp_player.GetThreateningMovesWithoutTraps(self.the_board.GetEmptySquares())\n \n \n self.assertIn([0,0], threatening_moves)\n self.assertIn([2,2], threatening_moves)\n \n d_pr('threats without traps: ' + str(threatening_moves))\n \n self.assertEqual(len(threatening_moves), 2)\n \n \n \n \n \n self.the_board.human_player_x_or_o = 'o'\n self.the_board.c_player_x_or_o = 'x'\n \n \n self.the_board.board_array = [ ['-', '-', 'o'],\n ['-', 'x', '-'],\n ['o', '-', '-']\n ]\n self.the_board.whose_turn = 'x'\n \n threatening_moves=comp_player.GetThreateningMovesWithoutTraps(self.the_board.GetEmptySquares())\n \n \n self.assertIn([0,1], threatening_moves)\n self.assertIn([2,1], threatening_moves)\n self.assertIn([1,0], threatening_moves)\n self.assertIn([1,2], threatening_moves)\n \n \n \n d_pr('threats without traps: ' + str(threatening_moves))\n \n self.assertEqual(len(threatening_moves), 4)\n \n \n \n \n def test_algorithm_by_playing_large_num_of_random_games(self):\n \n NUM_GAMES = 10\n #NUM_GAMES=100000 # this works but takes a long time\n NUM_GAMES=10\n \n for i in range(0, NUM_GAMES + 1):\n win_result = StartNewGame(UseRandom=True)\n \n self.assertTrue(win_result == 'Computer' or win_result == 'Tie')\n \n \n def test_print(self):\n \n \n self.the_board.board_array = [ ['-', '-', 'x'],\n ['-', 'o', '-'],\n ['x', 'o', '-']]\n \n self.the_board.PrintBoardToConsole()\n \n \n def test_empty_squares(self):\n pass\n \n\nif __name__ == \"__main__\":\n #import sys;sys.argv = ['', 'Test.testName']\n unittest.main()\n",
"step-ids": [
9,
12,
13,
14,
16
]
}
|
[
9,
12,
13,
14,
16
] |
import collections
import copy
import threading
import typing as tp
from ..decorators.decorators import wraps
from ..typing import K, V, T
class Monitor:
"""
Base utility class for creating monitors (the synchronization thingies!)
These are NOT re-entrant!
Use it like that:
>>> class MyProtectedObject(Monitor):
>>> def __init__(self, *args, **kwargs):
>>> Monitor.__init__(self)
>>> ... do your job ..
>>> @Monitor.synchronized
>>> def function_that_needs_mutual_exclusion(self):
>>> .. do your threadsafe jobs ..
>>> def function_that_partially_needs_protection(self):
>>> .. do your jobs ..
>>> with Monitor.acquire(self):
>>> .. do your threadsafe jobs ..
>>> .. do your jobs ..
>>> with self:
>>> .. do your threadsafe jobs ..
"""
def __enter__(self) -> 'Monitor':
self._monitor_lock.acquire()
return self
def __exit__(self, exc_type, exc_val, exc_tb) -> bool:
self._monitor_lock.release()
return False
def __init__(self):
"""You need to invoke this at your constructor
You can also use it to release locks of other objects."""
self._monitor_lock = threading.Lock() # type: threading.Lock
@staticmethod
def synchronize_on_attribute(attr_name: str):
"""
        When a Monitor is an attribute of a class and you have a method instance
        that you would like to secure by acquiring that monitor, use this.
The first argument taken by that method instance must be self.
:param attr_name: name of the attribute that is the monitor
"""
def outer(fun):
@wraps(fun)
def method(self, *args, **kwargs):
# noinspection PyProtectedMember
with getattr(self, attr_name)._monitor_lock:
return fun(self, *args, **kwargs)
return method
return outer
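    # Illustrative usage (not from the original file): given a class whose
    # `lock` attribute is a Monitor, decorate one of its methods with
    # @Monitor.synchronize_on_attribute('lock') to guard it with that monitor.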
@staticmethod
def synchronized(fun: tp.Callable) -> tp.Callable:
"""
        This is a decorator. A class method decorated with it will lock the
        global lock of the given instance, making it threadsafe. Depending on
        the usage pattern of your class and its data semantics, your performance
        may vary.
"""
@wraps(fun)
def monitored(*args, **kwargs):
# noinspection PyProtectedMember
with args[0]._monitor_lock:
return fun(*args, **kwargs)
return monitored
class release:
"""
Returns a context manager object that can release another object
as long as that object is a monitor.
Consider foo, which is a monitor. You have a protected function,
but you feel that you can release it for a while as it would
improve parallelism. You can use it as such:
>>> @Monitor.synchronized
>>> def protected_function(self):
>>> .. do some stuff that needs mutual exclusion ..
>>> with Monitor.release(self):
>>> .. do some I/O that does not need mutual exclusion ..
>>> .. back to protected stuff ..
"""
__slots__ = ('foo',)
def __init__(self, foo: 'Monitor'):
self.foo = foo
def __enter__(self) -> None:
# noinspection PyProtectedMember
self.foo._monitor_lock.release()
def __exit__(self, e1, e2, e3) -> bool:
# noinspection PyProtectedMember
self.foo._monitor_lock.acquire()
return False
class acquire:
"""
Returns a context manager object that can lock another object,
as long as that object is a monitor.
Consider foo, which is a monitor. If you needed to lock it from
outside, you would do:
>>> with Monitor.acquire(foo):
>>> .. do operations on foo that need mutual exclusion ..
"""
__slots__ = ('foo',)
def __init__(self, foo: 'Monitor'):
self.foo = foo
def __enter__(self) -> None:
# noinspection PyProtectedMember
self.foo._monitor_lock.acquire()
def __exit__(self, e1, e2, e3) -> bool:
# noinspection PyProtectedMember
self.foo._monitor_lock.release()
return False
@classmethod
def synchronize_on(cls, monitor: 'Monitor') -> tp.Callable[[tp.Callable], tp.Callable]:
"""
A decorator for locking on non-self Monitor objects
Use it like:
>>> class MasterClass(Monitor):
>>> def get_object(self):
>>> class SlaveClass:
>>> @Monitor.synchronize_on(self)
>>> def get_object(self2):
>>> ...
>>> return SlaveClass
"""
def outer(fun):
@wraps(fun)
def inner(*args, **kwargs):
with cls.acquire(monitor):
return fun(*args, **kwargs)
return inner
return outer
class RMonitor(Monitor):
"""
    Monitor, but using a reentrant lock instead of a normal one
"""
def __init__(self):
self._monitor_lock = threading.RLock() # type: threading.RLock
class MonitorList(tp.Generic[T], collections.UserList, Monitor):
"""
A list that is also a monitor.
    Note that access to its properties is not automatically synchronized; you have to
    invoke the monitor to implement opportunistic locking of your own choice
"""
def __init__(self, *args):
collections.UserList.__init__(self, *args)
Monitor.__init__(self)
def __copy__(self) -> 'MonitorList':
return MonitorList(copy.copy(self.data))
def __deepcopy__(self, memo) -> 'MonitorList':
return MonitorList(copy.deepcopy(self.data, memo=memo))
def __getitem__(self, item: tp.Union[slice, int]) -> T:
return self.data[item]
def __setitem__(self, key: int, value: T) -> None:
self.data[key] = value
def __delitem__(self, key: tp.Union[slice, int]) -> None:
del self.data[key]
class MonitorDict(tp.Generic[K, V], collections.UserDict, Monitor):
"""
A dict that is also a monitor.
    Note that access to its properties is not automatically synchronized; you have to
    invoke the monitor to implement opportunistic locking of your own choice
"""
def __init__(self, *args, **kwargs):
collections.UserDict.__init__(self, *args, **kwargs)
Monitor.__init__(self)
def __getitem__(self, item: K) -> V:
return self.data[item]
def __setitem__(self, key: K, value: V) -> None:
self.data[key] = value
def __delitem__(self, key: K) -> None:
del self.data[key]
def __copy__(self) -> 'MonitorDict':
return MonitorDict(copy.copy(self.data))
def __deepcopy__(self, memo) -> 'MonitorDict':
return MonitorDict(copy.deepcopy(self.data, memo=memo))
class MonitorSet(set, Monitor):
"""
    A set that allows an atomic insert-if-not-already-there operation
"""
def __init__(self, *args):
super().__init__(*args)
Monitor.__init__(self)
def insert_and_check(self, item) -> bool:
"""
Perform an atomic insert if not already in set
:param item: item to insert
:return: whether the item was successfully inserted
"""
with Monitor.acquire(self):
if item in self:
return False
self.add(item)
return True
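    # Illustrative usage (not from the original file): for ms = MonitorSet(),
    # ms.insert_and_check('job-1') returns True the first time and False on
    # every subsequent call with the same item.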
|
normal
|
{
"blob_id": "0528d7761cbbf3dbe881ff05b81060f3d97e7f6c",
"index": 742,
"step-1": "<mask token>\n\n\nclass MonitorList(tp.Generic[T], collections.UserList, Monitor):\n <mask token>\n\n def __init__(self, *args):\n collections.UserList.__init__(self, *args)\n Monitor.__init__(self)\n <mask token>\n <mask token>\n\n def __getitem__(self, item: tp.Union[slice, int]) ->T:\n return self.data[item]\n\n def __setitem__(self, key: int, value: T) ->None:\n self.data[key] = value\n\n def __delitem__(self, key: tp.Union[slice, int]) ->None:\n del self.data[key]\n\n\nclass MonitorDict(tp.Generic[K, V], collections.UserDict, Monitor):\n \"\"\"\n A dict that is also a monitor.\n\n Note that access to it's properties is not automatically synchronized, you got to\n invoke the monitor to implement an opportunistic locking of your own choice\n \"\"\"\n\n def __init__(self, *args, **kwargs):\n collections.UserDict.__init__(self, *args, **kwargs)\n Monitor.__init__(self)\n\n def __getitem__(self, item: K) ->V:\n return self.data[item]\n\n def __setitem__(self, key: K, value: V) ->None:\n self.data[key] = value\n\n def __delitem__(self, key: K) ->None:\n del self.data[key]\n\n def __copy__(self) ->'MonitorDict':\n return MonitorDict(copy.copy(self.data))\n\n def __deepcopy__(self, memo) ->'MonitorDict':\n return MonitorDict(copy.deepcopy(self.data, memo=memo))\n\n\nclass MonitorSet(set, Monitor):\n \"\"\"\n A set that allows atomic insert-if-not-already-there operation\n \"\"\"\n\n def __init__(self, *args):\n super().__init__(*args)\n Monitor.__init__(self)\n\n def insert_and_check(self, item) ->bool:\n \"\"\"\n Perform an atomic insert if not already in set\n\n :param item: item to insert\n :return: whether the item was successfully inserted\n \"\"\"\n with Monitor.acquire(self):\n if item in self:\n return False\n self.add(item)\n return True\n",
"step-2": "<mask token>\n\n\nclass MonitorList(tp.Generic[T], collections.UserList, Monitor):\n \"\"\"\n A list that is also a monitor.\n\n Note that access to it's properties is not automatically synchronized, you got to\n invoke the monitor to implement an opportunistic locking of your own choice\n \"\"\"\n\n def __init__(self, *args):\n collections.UserList.__init__(self, *args)\n Monitor.__init__(self)\n\n def __copy__(self) ->'MonitorList':\n return MonitorList(copy.copy(self.data))\n\n def __deepcopy__(self, memo) ->'MonitorList':\n return MonitorList(copy.deepcopy(self.data, memo=memo))\n\n def __getitem__(self, item: tp.Union[slice, int]) ->T:\n return self.data[item]\n\n def __setitem__(self, key: int, value: T) ->None:\n self.data[key] = value\n\n def __delitem__(self, key: tp.Union[slice, int]) ->None:\n del self.data[key]\n\n\nclass MonitorDict(tp.Generic[K, V], collections.UserDict, Monitor):\n \"\"\"\n A dict that is also a monitor.\n\n Note that access to it's properties is not automatically synchronized, you got to\n invoke the monitor to implement an opportunistic locking of your own choice\n \"\"\"\n\n def __init__(self, *args, **kwargs):\n collections.UserDict.__init__(self, *args, **kwargs)\n Monitor.__init__(self)\n\n def __getitem__(self, item: K) ->V:\n return self.data[item]\n\n def __setitem__(self, key: K, value: V) ->None:\n self.data[key] = value\n\n def __delitem__(self, key: K) ->None:\n del self.data[key]\n\n def __copy__(self) ->'MonitorDict':\n return MonitorDict(copy.copy(self.data))\n\n def __deepcopy__(self, memo) ->'MonitorDict':\n return MonitorDict(copy.deepcopy(self.data, memo=memo))\n\n\nclass MonitorSet(set, Monitor):\n \"\"\"\n A set that allows atomic insert-if-not-already-there operation\n \"\"\"\n\n def __init__(self, *args):\n super().__init__(*args)\n Monitor.__init__(self)\n\n def insert_and_check(self, item) ->bool:\n \"\"\"\n Perform an atomic insert if not already in set\n\n :param item: item to insert\n :return: whether the item was successfully inserted\n \"\"\"\n with Monitor.acquire(self):\n if item in self:\n return False\n self.add(item)\n return True\n",
"step-3": "<mask token>\n\n\nclass RMonitor(Monitor):\n <mask token>\n\n def __init__(self):\n self._monitor_lock = threading.RLock()\n\n\nclass MonitorList(tp.Generic[T], collections.UserList, Monitor):\n \"\"\"\n A list that is also a monitor.\n\n Note that access to it's properties is not automatically synchronized, you got to\n invoke the monitor to implement an opportunistic locking of your own choice\n \"\"\"\n\n def __init__(self, *args):\n collections.UserList.__init__(self, *args)\n Monitor.__init__(self)\n\n def __copy__(self) ->'MonitorList':\n return MonitorList(copy.copy(self.data))\n\n def __deepcopy__(self, memo) ->'MonitorList':\n return MonitorList(copy.deepcopy(self.data, memo=memo))\n\n def __getitem__(self, item: tp.Union[slice, int]) ->T:\n return self.data[item]\n\n def __setitem__(self, key: int, value: T) ->None:\n self.data[key] = value\n\n def __delitem__(self, key: tp.Union[slice, int]) ->None:\n del self.data[key]\n\n\nclass MonitorDict(tp.Generic[K, V], collections.UserDict, Monitor):\n \"\"\"\n A dict that is also a monitor.\n\n Note that access to it's properties is not automatically synchronized, you got to\n invoke the monitor to implement an opportunistic locking of your own choice\n \"\"\"\n\n def __init__(self, *args, **kwargs):\n collections.UserDict.__init__(self, *args, **kwargs)\n Monitor.__init__(self)\n\n def __getitem__(self, item: K) ->V:\n return self.data[item]\n\n def __setitem__(self, key: K, value: V) ->None:\n self.data[key] = value\n\n def __delitem__(self, key: K) ->None:\n del self.data[key]\n\n def __copy__(self) ->'MonitorDict':\n return MonitorDict(copy.copy(self.data))\n\n def __deepcopy__(self, memo) ->'MonitorDict':\n return MonitorDict(copy.deepcopy(self.data, memo=memo))\n\n\nclass MonitorSet(set, Monitor):\n \"\"\"\n A set that allows atomic insert-if-not-already-there operation\n \"\"\"\n\n def __init__(self, *args):\n super().__init__(*args)\n Monitor.__init__(self)\n\n def insert_and_check(self, item) ->bool:\n \"\"\"\n Perform an atomic insert if not already in set\n\n :param item: item to insert\n :return: whether the item was successfully inserted\n \"\"\"\n with Monitor.acquire(self):\n if item in self:\n return False\n self.add(item)\n return True\n",
"step-4": "<mask token>\n\n\nclass RMonitor(Monitor):\n \"\"\"\n Monitor, but using an reentrant lock instead of a normal one\n \"\"\"\n\n def __init__(self):\n self._monitor_lock = threading.RLock()\n\n\nclass MonitorList(tp.Generic[T], collections.UserList, Monitor):\n \"\"\"\n A list that is also a monitor.\n\n Note that access to it's properties is not automatically synchronized, you got to\n invoke the monitor to implement an opportunistic locking of your own choice\n \"\"\"\n\n def __init__(self, *args):\n collections.UserList.__init__(self, *args)\n Monitor.__init__(self)\n\n def __copy__(self) ->'MonitorList':\n return MonitorList(copy.copy(self.data))\n\n def __deepcopy__(self, memo) ->'MonitorList':\n return MonitorList(copy.deepcopy(self.data, memo=memo))\n\n def __getitem__(self, item: tp.Union[slice, int]) ->T:\n return self.data[item]\n\n def __setitem__(self, key: int, value: T) ->None:\n self.data[key] = value\n\n def __delitem__(self, key: tp.Union[slice, int]) ->None:\n del self.data[key]\n\n\nclass MonitorDict(tp.Generic[K, V], collections.UserDict, Monitor):\n \"\"\"\n A dict that is also a monitor.\n\n Note that access to it's properties is not automatically synchronized, you got to\n invoke the monitor to implement an opportunistic locking of your own choice\n \"\"\"\n\n def __init__(self, *args, **kwargs):\n collections.UserDict.__init__(self, *args, **kwargs)\n Monitor.__init__(self)\n\n def __getitem__(self, item: K) ->V:\n return self.data[item]\n\n def __setitem__(self, key: K, value: V) ->None:\n self.data[key] = value\n\n def __delitem__(self, key: K) ->None:\n del self.data[key]\n\n def __copy__(self) ->'MonitorDict':\n return MonitorDict(copy.copy(self.data))\n\n def __deepcopy__(self, memo) ->'MonitorDict':\n return MonitorDict(copy.deepcopy(self.data, memo=memo))\n\n\nclass MonitorSet(set, Monitor):\n \"\"\"\n A set that allows atomic insert-if-not-already-there operation\n \"\"\"\n\n def __init__(self, *args):\n super().__init__(*args)\n Monitor.__init__(self)\n\n def insert_and_check(self, item) ->bool:\n \"\"\"\n Perform an atomic insert if not already in set\n\n :param item: item to insert\n :return: whether the item was successfully inserted\n \"\"\"\n with Monitor.acquire(self):\n if item in self:\n return False\n self.add(item)\n return True\n",
"step-5": "import collections\nimport copy\nimport threading\nimport typing as tp\n\nfrom ..decorators.decorators import wraps\n\nfrom ..typing import K, V, T\n\n\nclass Monitor:\n \"\"\"\n Base utility class for creating monitors (the synchronization thingies!)\n\n These are NOT re-entrant!\n\n Use it like that:\n\n >>> class MyProtectedObject(Monitor):\n >>> def __init__(self, *args, **kwargs):\n >>> Monitor.__init__(self)\n >>> ... do your job ..\n\n >>> @Monitor.synchronized\n >>> def function_that_needs_mutual_exclusion(self):\n >>> .. do your threadsafe jobs ..\n\n >>> def function_that_partially_needs_protection(self):\n >>> .. do your jobs ..\n >>> with Monitor.acquire(self):\n >>> .. do your threadsafe jobs ..\n >>> .. do your jobs ..\n >>> with self:\n >>> .. do your threadsafe jobs ..\n \"\"\"\n\n def __enter__(self) -> 'Monitor':\n self._monitor_lock.acquire()\n return self\n\n def __exit__(self, exc_type, exc_val, exc_tb) -> bool:\n self._monitor_lock.release()\n return False\n\n def __init__(self):\n \"\"\"You need to invoke this at your constructor\n You can also use it to release locks of other objects.\"\"\"\n self._monitor_lock = threading.Lock() # type: threading.Lock\n\n @staticmethod\n def synchronize_on_attribute(attr_name: str):\n \"\"\"\n When a Monitor is an attribute of a class, and you have a method instance\n that you would like secure by acquiring that monitor, use this.\n\n The first argument taken by that method instance must be self.\n\n :param attr_name: name of the attribute that is the monitor\n \"\"\"\n\n def outer(fun):\n @wraps(fun)\n def method(self, *args, **kwargs):\n # noinspection PyProtectedMember\n with getattr(self, attr_name)._monitor_lock:\n return fun(self, *args, **kwargs)\n\n return method\n\n return outer\n\n @staticmethod\n def synchronized(fun: tp.Callable) -> tp.Callable:\n \"\"\"\n This is a decorator. Class method decorated with that will lock the\n global lock of given instance, making it threadsafe. Depending on\n usage pattern of your class and it's data semantics, your performance\n may vary\n \"\"\"\n\n @wraps(fun)\n def monitored(*args, **kwargs):\n # noinspection PyProtectedMember\n with args[0]._monitor_lock:\n return fun(*args, **kwargs)\n\n return monitored\n\n class release:\n \"\"\"\n Returns a context manager object that can release another object\n as long as that object is a monitor.\n\n Consider foo, which is a monitor. You have a protected function,\n but you feel that you can release it for a while as it would\n improve parallelism. You can use it as such:\n\n >>> @Monitor.synchronized\n >>> def protected_function(self):\n >>> .. do some stuff that needs mutual exclusion ..\n >>> with Monitor.release(self):\n >>> .. do some I/O that does not need mutual exclusion ..\n >>> .. back to protected stuff ..\n \"\"\"\n __slots__ = ('foo',)\n\n def __init__(self, foo: 'Monitor'):\n self.foo = foo\n\n def __enter__(self) -> None:\n # noinspection PyProtectedMember\n self.foo._monitor_lock.release()\n\n def __exit__(self, e1, e2, e3) -> bool:\n # noinspection PyProtectedMember\n self.foo._monitor_lock.acquire()\n return False\n\n class acquire:\n \"\"\"\n Returns a context manager object that can lock another object,\n as long as that object is a monitor.\n\n Consider foo, which is a monitor. If you needed to lock it from\n outside, you would do:\n\n >>> with Monitor.acquire(foo):\n >>> .. 
do operations on foo that need mutual exclusion ..\n \"\"\"\n __slots__ = ('foo',)\n\n def __init__(self, foo: 'Monitor'):\n self.foo = foo\n\n def __enter__(self) -> None:\n # noinspection PyProtectedMember\n self.foo._monitor_lock.acquire()\n\n def __exit__(self, e1, e2, e3) -> bool:\n # noinspection PyProtectedMember\n self.foo._monitor_lock.release()\n return False\n\n @classmethod\n def synchronize_on(cls, monitor: 'Monitor') -> tp.Callable[[tp.Callable], tp.Callable]:\n \"\"\"\n A decorator for locking on non-self Monitor objects\n\n Use it like:\n\n >>> class MasterClass(Monitor):\n >>> def get_object(self):\n >>> class SlaveClass:\n >>> @Monitor.synchronize_on(self)\n >>> def get_object(self2):\n >>> ...\n >>> return SlaveClass\n \"\"\"\n\n def outer(fun):\n @wraps(fun)\n def inner(*args, **kwargs):\n with cls.acquire(monitor):\n return fun(*args, **kwargs)\n\n return inner\n\n return outer\n\n\nclass RMonitor(Monitor):\n \"\"\"\n Monitor, but using an reentrant lock instead of a normal one\n \"\"\"\n\n def __init__(self):\n self._monitor_lock = threading.RLock() # type: threading.RLock\n\n\nclass MonitorList(tp.Generic[T], collections.UserList, Monitor):\n \"\"\"\n A list that is also a monitor.\n\n Note that access to it's properties is not automatically synchronized, you got to\n invoke the monitor to implement an opportunistic locking of your own choice\n \"\"\"\n\n def __init__(self, *args):\n collections.UserList.__init__(self, *args)\n Monitor.__init__(self)\n\n def __copy__(self) -> 'MonitorList':\n return MonitorList(copy.copy(self.data))\n\n def __deepcopy__(self, memo) -> 'MonitorList':\n return MonitorList(copy.deepcopy(self.data, memo=memo))\n\n def __getitem__(self, item: tp.Union[slice, int]) -> T:\n return self.data[item]\n\n def __setitem__(self, key: int, value: T) -> None:\n self.data[key] = value\n\n def __delitem__(self, key: tp.Union[slice, int]) -> None:\n del self.data[key]\n\n\nclass MonitorDict(tp.Generic[K, V], collections.UserDict, Monitor):\n \"\"\"\n A dict that is also a monitor.\n\n Note that access to it's properties is not automatically synchronized, you got to\n invoke the monitor to implement an opportunistic locking of your own choice\n \"\"\"\n\n def __init__(self, *args, **kwargs):\n collections.UserDict.__init__(self, *args, **kwargs)\n Monitor.__init__(self)\n\n def __getitem__(self, item: K) -> V:\n return self.data[item]\n\n def __setitem__(self, key: K, value: V) -> None:\n self.data[key] = value\n\n def __delitem__(self, key: K) -> None:\n del self.data[key]\n\n def __copy__(self) -> 'MonitorDict':\n return MonitorDict(copy.copy(self.data))\n\n def __deepcopy__(self, memo) -> 'MonitorDict':\n return MonitorDict(copy.deepcopy(self.data, memo=memo))\n\n\nclass MonitorSet(set, Monitor):\n \"\"\"\n A set that allows atomic insert-if-not-already-there operation\n \"\"\"\n\n def __init__(self, *args):\n super().__init__(*args)\n Monitor.__init__(self)\n\n def insert_and_check(self, item) -> bool:\n \"\"\"\n Perform an atomic insert if not already in set\n\n :param item: item to insert\n :return: whether the item was successfully inserted\n \"\"\"\n with Monitor.acquire(self):\n if item in self:\n return False\n self.add(item)\n return True\n",
"step-ids": [
17,
20,
22,
23,
33
]
}
|
[
17,
20,
22,
23,
33
] |
##############################################################################
# Name        : import.py
# Description : Reads the data produced by Transform.sh (initial node,
#               final node, and the distance computed with the HAVERSINE
#               formula) and writes link, controller and switch records.
#
# Parameters  :
# Written by  :
#
# CHANGE HISTORY:
# Richard Abuabara Caserta
#
##############################################################################
import re
from collections import defaultdict
#from pprint import pprint
from random import randint
data_from_file=open('newAtmnet.txt', 'r').read()
def transform_to_my_format(data):
d = defaultdict(dict)
for (i1, i2, i3) in re.findall(r'([\d\.]+)\s+([\d\.]+)\s+([\d\.]+)', data):
d[i1].update({i2: float(i3)})
return d
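# Illustrative note (example values, not from the original project): each
# line of 'newAtmnet.txt' is expected to look like "<source> <target> <weight>".
# A hypothetical input such as
#
#   0 1 12.5
#   1 2 7.3
#   0 2 25.0
#
# is parsed into {'0': {'1': 12.5, '2': 25.0}, '1': {'2': 7.3}}.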
Graph_Lat=transform_to_my_format(data_from_file)
def dijkstra_latency(start,goal):
    # Re-parse the graph on every call: unseenNodes aliases it below, and the
    # pop() inside the main loop would otherwise empty a shared copy.
    graph = transform_to_my_format(data_from_file)
    shortest_distance = {}
    predecessor = {}
    unseenNodes = graph
    infinity = 9999999
    path = []
for node in unseenNodes:
shortest_distance[node] = infinity
shortest_distance[start] = 0
while unseenNodes:
minNode = None
for node in unseenNodes:
if minNode is None:
minNode = node
elif shortest_distance[node] < shortest_distance[minNode]:
minNode = node
for childNode, weight in graph[minNode].items():
if weight + shortest_distance[minNode] < shortest_distance[childNode]:
shortest_distance[childNode] = weight + shortest_distance[minNode]
predecessor[childNode] = minNode
unseenNodes.pop(minNode)
currentNode = goal
while currentNode != start:
try:
path.insert(0,currentNode)
currentNode = predecessor[currentNode]
except KeyError:
print('Path not reachable')
break
path.insert(0,start)
if shortest_distance[goal] != infinity:
        dj2 = float(shortest_distance[goal])*1.1  # latency +/- 10
        dj3 = float(shortest_distance[goal])*1.2  # price +/- 20 (check this!)
f= open("output.txt","a+")
if (int(start) != int(goal)):
f.write('LC'+start+'_'+goal+','+'"LC'+start+'_'+goal+'",'+str(shortest_distance[goal])+','+'100'+',"Claro",'+'"S'+start+'",'+'"S'+goal+'"'+ "\n")
f.write('LM'+start+'_'+goal+','+'"LM'+start+'_'+goal+'",'+str(dj2)+','+'75'+',"Movistar",'+'"S'+start+'",'+'"S'+goal+'"'+ "\n")
f.write('LT'+start+'_'+goal+','+'"LT'+start+'_'+goal+'",'+str(dj3)+','+'60'+',"Tigo",'+'"S'+start+'",'+'"S'+goal+'"'+ "\n")
#f.write('mynet.addLink(LT'+start+'_'+goal+')'+ "\n")
else:
f.write('LC'+start+'_'+goal+','+'"LC'+start+'_'+goal+'",'+str(shortest_distance[goal])+','+'0'+',"Claro",'+'"S'+start+'",'+'"S'+goal+'"'+ "\n")
f.write('LM'+start+'_'+goal+','+'"LM'+start+'_'+goal+'",'+str(dj2)+','+'0'+',"Movistar",'+'"S'+start+'",'+'"S'+goal+'"'+ "\n")
f.write('LT'+start+'_'+goal+','+'"LT'+start+'_'+goal+'",'+str(dj3)+','+'0'+',"Tigo",'+'"S'+start+'",'+'"S'+goal+'"'+ "\n")
#f.write('mynet.addLink(LT'+start+'_'+goal+')'+ "\n")
f.close()
#### output module ####
num_nodes = len(Graph_Lat)  # note: counts only nodes that appear as sources
for i in range(num_nodes):  # loop over source nodes
    # print(i)
    for j in range(num_nodes):  # loop over destination nodes
        dijkstra_latency(str(i), str(j))
    # should print e.g. L571=Link("L571",77,770,"operador1",5,7)

######## Print the 2nd requirement ########

num_nodes = len(Graph_Lat)

for i in range(num_nodes):  # one controller per switch
    f = open("output.txt", "a+")
    f.write('C'+str(i)+',S'+str(i)+',priceController,False'+"\n")
    f.close()

# Switch creation and aggregation
for i in range(num_nodes):
    f = open("output.txt", "a+")
    # f.write('S'+str(i)+' = Switch("S'+str(i)+'", '+str(randint(10000,500000))+', "C'+str(i)+'", '+str(randint(2,10))+')'+"\n")
    f.write('S'+str(i)+','+str(randint(10000,500000))+','+str(randint(2,10))+"\n")
    f.close()

# S0 = Switch("S0", randint(10000,500000), "C0", randint(2,10))
# mynet.addSwitch(S0)
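# Illustrative summary of the records appended to output.txt (inferred from
# the f.write calls above; the field names are assumptions):
#   Link      : L<op><i>_<j>,"L<op><i>_<j>",<latency>,<bandwidth>,"<operator>","S<i>","S<j>"
#   Controller: C<i>,S<i>,priceController,False
#   Switch    : S<i>,<capacity in 10000..500000>,<ports in 2..10>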
|
normal
|
{
"blob_id": "0018cbb1d945ad1b6469804e7993afee44406fd1",
"index": 2895,
"step-1": "<mask token>\n\n\ndef transform_to_my_format(data):\n d = defaultdict(dict)\n for i1, i2, i3 in re.findall('([\\\\d\\\\.]+)\\\\s+([\\\\d\\\\.]+)\\\\s+([\\\\d\\\\.]+)',\n data):\n d[i1].update({i2: float(i3)})\n return d\n\n\n<mask token>\n\n\ndef dijkstra_latency(start, goal):\n Graph_Lat = transform_to_my_format(data_from_file)\n graph = Graph_Lat\n shortest_distance = {}\n predecessor = {}\n unseenNodes = {}\n unseenNodes = graph\n infinity = 9999999\n path = []\n for node in unseenNodes:\n shortest_distance[node] = infinity\n shortest_distance[start] = 0\n while unseenNodes:\n minNode = None\n for node in unseenNodes:\n if minNode is None:\n minNode = node\n elif shortest_distance[node] < shortest_distance[minNode]:\n minNode = node\n for childNode, weight in graph[minNode].items():\n if weight + shortest_distance[minNode] < shortest_distance[\n childNode]:\n shortest_distance[childNode] = weight + shortest_distance[\n minNode]\n predecessor[childNode] = minNode\n unseenNodes.pop(minNode)\n currentNode = goal\n while currentNode != start:\n try:\n path.insert(0, currentNode)\n currentNode = predecessor[currentNode]\n except KeyError:\n print('Path not reachable')\n break\n path.insert(0, start)\n if shortest_distance[goal] != infinity:\n dj2 = float(shortest_distance[goal]) * 1.1\n dj3 = float(shortest_distance[goal]) * 1.2\n f = open('output.txt', 'a+')\n if int(start) != int(goal):\n f.write('LC' + start + '_' + goal + ',' + '\"LC' + start + '_' +\n goal + '\",' + str(shortest_distance[goal]) + ',' + '100' +\n ',\"Claro\",' + '\"S' + start + '\",' + '\"S' + goal + '\"' + '\\n')\n f.write('LM' + start + '_' + goal + ',' + '\"LM' + start + '_' +\n goal + '\",' + str(dj2) + ',' + '75' + ',\"Movistar\",' + '\"S' +\n start + '\",' + '\"S' + goal + '\"' + '\\n')\n f.write('LT' + start + '_' + goal + ',' + '\"LT' + start + '_' +\n goal + '\",' + str(dj3) + ',' + '60' + ',\"Tigo\",' + '\"S' +\n start + '\",' + '\"S' + goal + '\"' + '\\n')\n else:\n f.write('LC' + start + '_' + goal + ',' + '\"LC' + start + '_' +\n goal + '\",' + str(shortest_distance[goal]) + ',' + '0' +\n ',\"Claro\",' + '\"S' + start + '\",' + '\"S' + goal + '\"' + '\\n')\n f.write('LM' + start + '_' + goal + ',' + '\"LM' + start + '_' +\n goal + '\",' + str(dj2) + ',' + '0' + ',\"Movistar\",' + '\"S' +\n start + '\",' + '\"S' + goal + '\"' + '\\n')\n f.write('LT' + start + '_' + goal + ',' + '\"LT' + start + '_' +\n goal + '\",' + str(dj3) + ',' + '0' + ',\"Tigo\",' + '\"S' +\n start + '\",' + '\"S' + goal + '\"' + '\\n')\n f.close()\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef transform_to_my_format(data):\n d = defaultdict(dict)\n for i1, i2, i3 in re.findall('([\\\\d\\\\.]+)\\\\s+([\\\\d\\\\.]+)\\\\s+([\\\\d\\\\.]+)',\n data):\n d[i1].update({i2: float(i3)})\n return d\n\n\n<mask token>\n\n\ndef dijkstra_latency(start, goal):\n Graph_Lat = transform_to_my_format(data_from_file)\n graph = Graph_Lat\n shortest_distance = {}\n predecessor = {}\n unseenNodes = {}\n unseenNodes = graph\n infinity = 9999999\n path = []\n for node in unseenNodes:\n shortest_distance[node] = infinity\n shortest_distance[start] = 0\n while unseenNodes:\n minNode = None\n for node in unseenNodes:\n if minNode is None:\n minNode = node\n elif shortest_distance[node] < shortest_distance[minNode]:\n minNode = node\n for childNode, weight in graph[minNode].items():\n if weight + shortest_distance[minNode] < shortest_distance[\n childNode]:\n shortest_distance[childNode] = weight + shortest_distance[\n minNode]\n predecessor[childNode] = minNode\n unseenNodes.pop(minNode)\n currentNode = goal\n while currentNode != start:\n try:\n path.insert(0, currentNode)\n currentNode = predecessor[currentNode]\n except KeyError:\n print('Path not reachable')\n break\n path.insert(0, start)\n if shortest_distance[goal] != infinity:\n dj2 = float(shortest_distance[goal]) * 1.1\n dj3 = float(shortest_distance[goal]) * 1.2\n f = open('output.txt', 'a+')\n if int(start) != int(goal):\n f.write('LC' + start + '_' + goal + ',' + '\"LC' + start + '_' +\n goal + '\",' + str(shortest_distance[goal]) + ',' + '100' +\n ',\"Claro\",' + '\"S' + start + '\",' + '\"S' + goal + '\"' + '\\n')\n f.write('LM' + start + '_' + goal + ',' + '\"LM' + start + '_' +\n goal + '\",' + str(dj2) + ',' + '75' + ',\"Movistar\",' + '\"S' +\n start + '\",' + '\"S' + goal + '\"' + '\\n')\n f.write('LT' + start + '_' + goal + ',' + '\"LT' + start + '_' +\n goal + '\",' + str(dj3) + ',' + '60' + ',\"Tigo\",' + '\"S' +\n start + '\",' + '\"S' + goal + '\"' + '\\n')\n else:\n f.write('LC' + start + '_' + goal + ',' + '\"LC' + start + '_' +\n goal + '\",' + str(shortest_distance[goal]) + ',' + '0' +\n ',\"Claro\",' + '\"S' + start + '\",' + '\"S' + goal + '\"' + '\\n')\n f.write('LM' + start + '_' + goal + ',' + '\"LM' + start + '_' +\n goal + '\",' + str(dj2) + ',' + '0' + ',\"Movistar\",' + '\"S' +\n start + '\",' + '\"S' + goal + '\"' + '\\n')\n f.write('LT' + start + '_' + goal + ',' + '\"LT' + start + '_' +\n goal + '\",' + str(dj3) + ',' + '0' + ',\"Tigo\",' + '\"S' +\n start + '\",' + '\"S' + goal + '\"' + '\\n')\n f.close()\n\n\n<mask token>\nfor i in range(max):\n for j in range(max):\n dijkstra_latency(str(i), str(j))\n<mask token>\nfor i in range(max):\n f = open('output.txt', 'a+')\n f.write('C' + str(i) + ',S' + str(i) + ',priceController,False' + '\\n')\n f.close()\nfor i in range(max):\n f = open('output.txt', 'a+')\n f.write('S' + str(i) + ',' + str(randint(10000, 500000)) + ',' + str(\n randint(2, 10)) + '\\n')\n f.close()\n",
"step-3": "<mask token>\ndata_from_file = open('newAtmnet.txt', 'r').read()\n\n\ndef transform_to_my_format(data):\n d = defaultdict(dict)\n for i1, i2, i3 in re.findall('([\\\\d\\\\.]+)\\\\s+([\\\\d\\\\.]+)\\\\s+([\\\\d\\\\.]+)',\n data):\n d[i1].update({i2: float(i3)})\n return d\n\n\nGraph_Lat = transform_to_my_format(data_from_file)\n\n\ndef dijkstra_latency(start, goal):\n Graph_Lat = transform_to_my_format(data_from_file)\n graph = Graph_Lat\n shortest_distance = {}\n predecessor = {}\n unseenNodes = {}\n unseenNodes = graph\n infinity = 9999999\n path = []\n for node in unseenNodes:\n shortest_distance[node] = infinity\n shortest_distance[start] = 0\n while unseenNodes:\n minNode = None\n for node in unseenNodes:\n if minNode is None:\n minNode = node\n elif shortest_distance[node] < shortest_distance[minNode]:\n minNode = node\n for childNode, weight in graph[minNode].items():\n if weight + shortest_distance[minNode] < shortest_distance[\n childNode]:\n shortest_distance[childNode] = weight + shortest_distance[\n minNode]\n predecessor[childNode] = minNode\n unseenNodes.pop(minNode)\n currentNode = goal\n while currentNode != start:\n try:\n path.insert(0, currentNode)\n currentNode = predecessor[currentNode]\n except KeyError:\n print('Path not reachable')\n break\n path.insert(0, start)\n if shortest_distance[goal] != infinity:\n dj2 = float(shortest_distance[goal]) * 1.1\n dj3 = float(shortest_distance[goal]) * 1.2\n f = open('output.txt', 'a+')\n if int(start) != int(goal):\n f.write('LC' + start + '_' + goal + ',' + '\"LC' + start + '_' +\n goal + '\",' + str(shortest_distance[goal]) + ',' + '100' +\n ',\"Claro\",' + '\"S' + start + '\",' + '\"S' + goal + '\"' + '\\n')\n f.write('LM' + start + '_' + goal + ',' + '\"LM' + start + '_' +\n goal + '\",' + str(dj2) + ',' + '75' + ',\"Movistar\",' + '\"S' +\n start + '\",' + '\"S' + goal + '\"' + '\\n')\n f.write('LT' + start + '_' + goal + ',' + '\"LT' + start + '_' +\n goal + '\",' + str(dj3) + ',' + '60' + ',\"Tigo\",' + '\"S' +\n start + '\",' + '\"S' + goal + '\"' + '\\n')\n else:\n f.write('LC' + start + '_' + goal + ',' + '\"LC' + start + '_' +\n goal + '\",' + str(shortest_distance[goal]) + ',' + '0' +\n ',\"Claro\",' + '\"S' + start + '\",' + '\"S' + goal + '\"' + '\\n')\n f.write('LM' + start + '_' + goal + ',' + '\"LM' + start + '_' +\n goal + '\",' + str(dj2) + ',' + '0' + ',\"Movistar\",' + '\"S' +\n start + '\",' + '\"S' + goal + '\"' + '\\n')\n f.write('LT' + start + '_' + goal + ',' + '\"LT' + start + '_' +\n goal + '\",' + str(dj3) + ',' + '0' + ',\"Tigo\",' + '\"S' +\n start + '\",' + '\"S' + goal + '\"' + '\\n')\n f.close()\n\n\nmax = len(Graph_Lat)\nfor i in range(max):\n for j in range(max):\n dijkstra_latency(str(i), str(j))\nmax = len(Graph_Lat)\nfor i in range(max):\n f = open('output.txt', 'a+')\n f.write('C' + str(i) + ',S' + str(i) + ',priceController,False' + '\\n')\n f.close()\nfor i in range(max):\n f = open('output.txt', 'a+')\n f.write('S' + str(i) + ',' + str(randint(10000, 500000)) + ',' + str(\n randint(2, 10)) + '\\n')\n f.close()\n",
"step-4": "import re\nfrom collections import defaultdict\nfrom random import randint\ndata_from_file = open('newAtmnet.txt', 'r').read()\n\n\ndef transform_to_my_format(data):\n d = defaultdict(dict)\n for i1, i2, i3 in re.findall('([\\\\d\\\\.]+)\\\\s+([\\\\d\\\\.]+)\\\\s+([\\\\d\\\\.]+)',\n data):\n d[i1].update({i2: float(i3)})\n return d\n\n\nGraph_Lat = transform_to_my_format(data_from_file)\n\n\ndef dijkstra_latency(start, goal):\n Graph_Lat = transform_to_my_format(data_from_file)\n graph = Graph_Lat\n shortest_distance = {}\n predecessor = {}\n unseenNodes = {}\n unseenNodes = graph\n infinity = 9999999\n path = []\n for node in unseenNodes:\n shortest_distance[node] = infinity\n shortest_distance[start] = 0\n while unseenNodes:\n minNode = None\n for node in unseenNodes:\n if minNode is None:\n minNode = node\n elif shortest_distance[node] < shortest_distance[minNode]:\n minNode = node\n for childNode, weight in graph[minNode].items():\n if weight + shortest_distance[minNode] < shortest_distance[\n childNode]:\n shortest_distance[childNode] = weight + shortest_distance[\n minNode]\n predecessor[childNode] = minNode\n unseenNodes.pop(minNode)\n currentNode = goal\n while currentNode != start:\n try:\n path.insert(0, currentNode)\n currentNode = predecessor[currentNode]\n except KeyError:\n print('Path not reachable')\n break\n path.insert(0, start)\n if shortest_distance[goal] != infinity:\n dj2 = float(shortest_distance[goal]) * 1.1\n dj3 = float(shortest_distance[goal]) * 1.2\n f = open('output.txt', 'a+')\n if int(start) != int(goal):\n f.write('LC' + start + '_' + goal + ',' + '\"LC' + start + '_' +\n goal + '\",' + str(shortest_distance[goal]) + ',' + '100' +\n ',\"Claro\",' + '\"S' + start + '\",' + '\"S' + goal + '\"' + '\\n')\n f.write('LM' + start + '_' + goal + ',' + '\"LM' + start + '_' +\n goal + '\",' + str(dj2) + ',' + '75' + ',\"Movistar\",' + '\"S' +\n start + '\",' + '\"S' + goal + '\"' + '\\n')\n f.write('LT' + start + '_' + goal + ',' + '\"LT' + start + '_' +\n goal + '\",' + str(dj3) + ',' + '60' + ',\"Tigo\",' + '\"S' +\n start + '\",' + '\"S' + goal + '\"' + '\\n')\n else:\n f.write('LC' + start + '_' + goal + ',' + '\"LC' + start + '_' +\n goal + '\",' + str(shortest_distance[goal]) + ',' + '0' +\n ',\"Claro\",' + '\"S' + start + '\",' + '\"S' + goal + '\"' + '\\n')\n f.write('LM' + start + '_' + goal + ',' + '\"LM' + start + '_' +\n goal + '\",' + str(dj2) + ',' + '0' + ',\"Movistar\",' + '\"S' +\n start + '\",' + '\"S' + goal + '\"' + '\\n')\n f.write('LT' + start + '_' + goal + ',' + '\"LT' + start + '_' +\n goal + '\",' + str(dj3) + ',' + '0' + ',\"Tigo\",' + '\"S' +\n start + '\",' + '\"S' + goal + '\"' + '\\n')\n f.close()\n\n\nmax = len(Graph_Lat)\nfor i in range(max):\n for j in range(max):\n dijkstra_latency(str(i), str(j))\nmax = len(Graph_Lat)\nfor i in range(max):\n f = open('output.txt', 'a+')\n f.write('C' + str(i) + ',S' + str(i) + ',priceController,False' + '\\n')\n f.close()\nfor i in range(max):\n f = open('output.txt', 'a+')\n f.write('S' + str(i) + ',' + str(randint(10000, 500000)) + ',' + str(\n randint(2, 10)) + '\\n')\n f.close()\n",
"step-5": "##############################################################################\n# Nombre : import.py\n# Descripción : It takes the information from Transfom.sh Initial Node\n# Final Node and HAVERSINE Formule\n# \n# Parámetros:\n# Realizado Por : \n#\n# HISTORIAL DE CAMBIOS:\n#Richard Abuabara Caserta\n# \n##############################################################################\nimport re\nfrom collections import defaultdict\n#from pprint import pprint\nfrom random import randint\n\ndata_from_file=open('newAtmnet.txt', 'r').read()\n\ndef transform_to_my_format(data):\n d = defaultdict(dict)\n for (i1, i2, i3) in re.findall(r'([\\d\\.]+)\\s+([\\d\\.]+)\\s+([\\d\\.]+)', data):\n d[i1].update({i2: float(i3)})\n return d\n\nGraph_Lat=transform_to_my_format(data_from_file)\n\ndef dijkstra_latency(start,goal):\n Graph_Lat=transform_to_my_format(data_from_file)\n graph=Graph_Lat\n shortest_distance = {}\n predecessor = {}\n unseenNodes= {}\n unseenNodes = graph\n infinity = 9999999\n path = []\n \n for node in unseenNodes:\n shortest_distance[node] = infinity\n shortest_distance[start] = 0\n \n while unseenNodes:\n minNode = None\n for node in unseenNodes:\n if minNode is None:\n minNode = node\n elif shortest_distance[node] < shortest_distance[minNode]:\n minNode = node\n \n for childNode, weight in graph[minNode].items():\n if weight + shortest_distance[minNode] < shortest_distance[childNode]:\n shortest_distance[childNode] = weight + shortest_distance[minNode]\n predecessor[childNode] = minNode\n unseenNodes.pop(minNode)\n \n currentNode = goal\n while currentNode != start:\n try:\n path.insert(0,currentNode)\n currentNode = predecessor[currentNode]\n except KeyError:\n print('Path not reachable')\n break\n path.insert(0,start)\n if shortest_distance[goal] != infinity:\n dj2=float(shortest_distance[goal])*1.1 #Latencia +/- 10\n dj3=float(shortest_distance[goal])*1.2 #Price +/- 20 Verificar ojooo\n f= open(\"output.txt\",\"a+\")\n if (int(start) != int(goal)):\n f.write('LC'+start+'_'+goal+','+'\"LC'+start+'_'+goal+'\",'+str(shortest_distance[goal])+','+'100'+',\"Claro\",'+'\"S'+start+'\",'+'\"S'+goal+'\"'+ \"\\n\")\n f.write('LM'+start+'_'+goal+','+'\"LM'+start+'_'+goal+'\",'+str(dj2)+','+'75'+',\"Movistar\",'+'\"S'+start+'\",'+'\"S'+goal+'\"'+ \"\\n\")\n f.write('LT'+start+'_'+goal+','+'\"LT'+start+'_'+goal+'\",'+str(dj3)+','+'60'+',\"Tigo\",'+'\"S'+start+'\",'+'\"S'+goal+'\"'+ \"\\n\")\n #f.write('mynet.addLink(LT'+start+'_'+goal+')'+ \"\\n\")\n else:\n f.write('LC'+start+'_'+goal+','+'\"LC'+start+'_'+goal+'\",'+str(shortest_distance[goal])+','+'0'+',\"Claro\",'+'\"S'+start+'\",'+'\"S'+goal+'\"'+ \"\\n\")\n f.write('LM'+start+'_'+goal+','+'\"LM'+start+'_'+goal+'\",'+str(dj2)+','+'0'+',\"Movistar\",'+'\"S'+start+'\",'+'\"S'+goal+'\"'+ \"\\n\")\n f.write('LT'+start+'_'+goal+','+'\"LT'+start+'_'+goal+'\",'+str(dj3)+','+'0'+',\"Tigo\",'+'\"S'+start+'\",'+'\"S'+goal+'\"'+ \"\\n\")\n #f.write('mynet.addLink(LT'+start+'_'+goal+')'+ \"\\n\")\n \n f.close()\n\n####modulo impresion######\nmax=(len(Graph_Lat))\nfor i in range(max): #este es el for - source\n #print (i)\n for j in range(max):\n dijkstra_latency(str(i), str(j)) \n\t#debo imprimir L571=Link(\"L571\",77,770,\"operador1\",5,7) \n########Imprimir 2do Rquerimiento################\n\nmax=(len(Graph_Lat))\n\nfor i in range(max): #este es el for - source\n f= open(\"output.txt\",\"a+\")\n f.write('C'+str(i)+',S'+str(i)+',priceController,False'+\"\\n\")\n f.close()\n\n\n#Switch creation and aggregation\nfor i in range(max): #este 
es el for - source\n f= open(\"output.txt\",\"a+\")\n #f.write('S'+str(i)+' = Switch(\"S'+str(i)+'\", '+str(randint(10000,500000))+', \"C'+str(i)+'\", '+str(randint(2,10))+')'+\"\\n\")\n f.write('S'+str(i)+','+str(randint(10000,500000))+','+str(randint(2,10))+\"\\n\")\n f.close()\n\n#S0 = Switch(\"S0\", randint(10000,500000), \"C0\", randint(2,10))\n#mynet.addSwitch(S0) \n",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
import torch
from torchvision import transforms
from torch.autograd import Variable
class NormalizeImageDict(object):
"""
    Normalize the images stored in a sample dictionary.
    If normalizeRange is True, each image is first divided by 255.
"""
def __init__(self, image_keys, normalizeRange=True):
self.image_keys = image_keys
self.normalizeRange = normalizeRange
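        # ImageNet channel statistics, the usual values for torchvision
        # pretrained models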
self.normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225])
def __call__(self, sample):
for key in self.image_keys:
if self.normalizeRange:
sample[key] /= 255.0
sample[key] = self.normalize(sample[key])
return sample
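# Minimal usage sketch (assumptions: images are 3xHxW float tensors in
# [0, 255], and the key name 'image' is hypothetical):
if __name__ == '__main__':
    sample = {'image': torch.rand(3, 224, 224) * 255}
    sample = NormalizeImageDict(['image'])(sample)
    print(sample['image'].mean())  # roughly zero-centered after normalization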
|
normal
|
{
"blob_id": "4293ad0b2a4a352d6bdc4b860448c4a3b14ca629",
"index": 8648,
"step-1": "<mask token>\n\n\nclass NormalizeImageDict(object):\n <mask token>\n <mask token>\n\n def __call__(self, sample):\n for key in self.image_keys:\n if self.normalizeRange:\n sample[key] /= 255.0\n sample[key] = self.normalize(sample[key])\n return sample\n",
"step-2": "<mask token>\n\n\nclass NormalizeImageDict(object):\n <mask token>\n\n def __init__(self, image_keys, normalizeRange=True):\n self.image_keys = image_keys\n self.normalizeRange = normalizeRange\n self.normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],\n std=[0.229, 0.224, 0.225])\n\n def __call__(self, sample):\n for key in self.image_keys:\n if self.normalizeRange:\n sample[key] /= 255.0\n sample[key] = self.normalize(sample[key])\n return sample\n",
"step-3": "<mask token>\n\n\nclass NormalizeImageDict(object):\n \"\"\"\n Normalize image in dictionary\n normalize range is True, the image is divided by 255\n \"\"\"\n\n def __init__(self, image_keys, normalizeRange=True):\n self.image_keys = image_keys\n self.normalizeRange = normalizeRange\n self.normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],\n std=[0.229, 0.224, 0.225])\n\n def __call__(self, sample):\n for key in self.image_keys:\n if self.normalizeRange:\n sample[key] /= 255.0\n sample[key] = self.normalize(sample[key])\n return sample\n",
"step-4": "import torch\nfrom torchvision import transforms\nfrom torch.autograd import Variable\n\n\nclass NormalizeImageDict(object):\n \"\"\"\n Normalize image in dictionary\n normalize range is True, the image is divided by 255\n \"\"\"\n\n def __init__(self, image_keys, normalizeRange=True):\n self.image_keys = image_keys\n self.normalizeRange = normalizeRange\n self.normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],\n std=[0.229, 0.224, 0.225])\n\n def __call__(self, sample):\n for key in self.image_keys:\n if self.normalizeRange:\n sample[key] /= 255.0\n sample[key] = self.normalize(sample[key])\n return sample\n",
"step-5": null,
"step-ids": [
2,
3,
4,
5
]
}
|
[
2,
3,
4,
5
] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Oct 25 19:21:32 2019
@author: Nikos
"""
import torch
import torch.optim as optim
from utilities import *
from model import *
from torch.autograd import Variable
import numpy as np
import random
class A2C_agent(object):
    def __init__(self, env, actor_hidden_size, actor_lr, actor_batch_size,
                 critic_gamma, mem_size, critic_hidden_size, critic_lr, critic_batch_size):

        self.env = env
        self.actor_hidden_size = actor_hidden_size
        self.actor_lr = actor_lr
        self.actor_batch_size = actor_batch_size

        self.critic_hidden_size = critic_hidden_size
        self.critic_lr = critic_lr
        self.critic_batch_size = critic_batch_size
        self.critic_gamma = critic_gamma

        self.mem_size = mem_size

        self.num_of_states = env.observation_space.shape[0]
        self.num_of_actions = env.action_space.n

        self.experience_replay_buffer = ReplayBuffer(self.mem_size)

        # initialize the Actor network (policy)
        self.actor_network = ActorNet(self.num_of_states, self.actor_hidden_size, self.num_of_actions)
        self.actor_optimizer = optim.Adam(self.actor_network.parameters(), lr=self.actor_lr)

        # initialize the Critic network (v-learning).
        # The difference between the critic in A2C (here) and the critic in
        # the "vanilla" Actor-Critic version is that the A2C critic models
        # the value function, so it only needs to output the value of each
        # state rather than a Q-value for every (state, action) pair.
        # The output size therefore has to be a scalar.
        self.critic_network = CriticNet(self.num_of_states, self.critic_hidden_size, 1)
        self.critic_optimizer = optim.Adam(self.critic_network.parameters(), lr=self.critic_lr)

    def act(self, state):
        # compute the action distribution for the current state via the policy net
        action_distribution = self.actor_network.forward(state)
        # pick an action based on that distribution
        action = np.random.choice(self.num_of_actions, p=action_distribution.detach().numpy())
        return action

    def memorize(self, state, action, new_state, reward, done):
        # store a transition (state, action, new_state, reward, done)
        # in the experience memory buffer
        self.experience_replay_buffer.push(state, action, new_state, reward, done)

    def learn(self, rewards_batch, states_batch, actions_batch, new_states_batch, new_actions_batch):
        states_batch = np.asarray(states_batch)
        actions_batch = torch.tensor(actions_batch, dtype=torch.long)
        rewards_batch = torch.tensor(rewards_batch, dtype=torch.float)
        new_states_batch = np.asarray(new_states_batch)
        new_actions_batch = torch.tensor(new_actions_batch, dtype=torch.long)
        V_batch = []
        V_prime_batch = []

        for state, new_state, new_action in zip(states_batch,
                                                new_states_batch,
                                                new_actions_batch):
            state = torch.Tensor(state)
            v_value = self.critic_network.forward(state)
            V_batch.append(v_value)

            new_state = torch.Tensor(new_state)
            v_prime_value = self.critic_network.forward(new_state)
            V_prime_batch.append(v_prime_value)

        # log of the probabilities the policy assigns to the states in the batch
        log_probs = torch.log(self.actor_network(states_batch))
        # keep only the log probabilities of the actions that were taken
        selected_log_probs = rewards_batch * log_probs[np.arange(len(actions_batch)), actions_batch]
        # Monte-Carlo estimate: average the weighted log-probs, then minimize
        # the negative of that average
        actor_loss = -selected_log_probs.mean()
        self.actor_optimizer.zero_grad()
        actor_loss.backward()
        # for smoother updates the grads could be clipped between -1 and 1:
        # for param in self.actor_network.parameters():
        #     param.grad.data.clamp_(-1, 1)
        self.actor_optimizer.step()

        # TD target for the V network
        V_prime_batch = torch.stack(V_prime_batch)
        V_batch = torch.stack(V_batch)
        # advantage A(s, a) = r + gamma * V(s') - V(s); computed for reference,
        # while the actor update above still weights log-probs by raw rewards
        advantage = rewards_batch + self.critic_gamma * V_prime_batch - V_batch
        critic_loss = (V_batch - (rewards_batch + self.critic_gamma * V_prime_batch)).pow(2).mean()
        self.critic_optimizer.zero_grad()
        critic_loss.backward()
        self.critic_optimizer.step()
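# Minimal wiring sketch (assumptions: a classic Gym-style env such as
# 'CartPole-v1', and every hyperparameter below is a hypothetical example):
if __name__ == '__main__':
    import gym
    env = gym.make('CartPole-v1')
    agent = A2C_agent(env, actor_hidden_size=64, actor_lr=1e-3,
                      actor_batch_size=32, critic_gamma=0.99, mem_size=10000,
                      critic_hidden_size=64, critic_lr=1e-3,
                      critic_batch_size=32)
    state = env.reset()
    action = agent.act(torch.Tensor(state))
    print('sampled action:', action)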
|
normal
|
{
"blob_id": "72b086e833ab3ee4ec3102869d74513ef3657675",
"index": 1926,
"step-1": "<mask token>\n\n\nclass A2C_agent(object):\n <mask token>\n\n def act(self, state):\n action_distribution = self.actor_network.forward(state)\n action = np.random.choice(self.num_of_actions, p=\n action_distribution.detach().numpy())\n return action\n\n def memorize(self, state, action, new_state, reward, done):\n self.experience_replay_buffer.push(state, action, new_state, reward,\n done)\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass A2C_agent(object):\n <mask token>\n\n def act(self, state):\n action_distribution = self.actor_network.forward(state)\n action = np.random.choice(self.num_of_actions, p=\n action_distribution.detach().numpy())\n return action\n\n def memorize(self, state, action, new_state, reward, done):\n self.experience_replay_buffer.push(state, action, new_state, reward,\n done)\n\n def learn(self, rewards_batch, states_batch, actions_batch,\n new_states_batch, new_actions_batch):\n states_batch = np.asarray(states_batch)\n actions_batch = torch.tensor(actions_batch, dtype=torch.long)\n rewards_batch = torch.tensor(rewards_batch, dtype=torch.float)\n new_states_batch = np.asarray(states_batch)\n new_actions_batch = torch.tensor(actions_batch, dtype=torch.long)\n V_batch = []\n V_prime_batch = []\n for state, new_state, new_action in zip(states_batch,\n new_states_batch, new_actions_batch):\n state = torch.Tensor(state)\n v_value = self.critic_network.forward(state)\n V_batch.append(v_value)\n new_state = torch.Tensor(new_state)\n v_prime_value = self.critic_network.forward(new_state)\n V_prime_batch.append(v_prime_value)\n log_probs = torch.log(self.actor_network(states_batch))\n selected_log_probs = rewards_batch * log_probs[np.arange(len(\n actions_batch)), actions_batch]\n actor_loss = -selected_log_probs.mean()\n self.actor_optimizer.zero_grad()\n actor_loss.backward()\n self.actor_optimizer.step()\n V_prime_batch = torch.stack(V_prime_batch)\n V_batch = torch.stack(V_batch)\n advantage = rewards_batch + self.critic_gamma * V_prime_batch - V_batch\n critic_loss = (V_batch - (rewards_batch + self.critic_gamma *\n V_prime_batch)).pow(2).mean()\n self.critic_optimizer.zero_grad()\n critic_loss.backward()\n self.critic_optimizer.step()\n",
"step-3": "<mask token>\n\n\nclass A2C_agent(object):\n\n def __init__(self, env, actor_hidden_size, actor_lr, actor_batch_size,\n critic_gamma, mem_size, critic_hidden_size, critic_lr,\n critic_batch_size):\n self.env = env\n self.actor_hidden_size = actor_hidden_size\n self.actor_lr = actor_lr\n self.actor_batch_size = actor_batch_size\n self.critic_hidden_size = critic_hidden_size\n self.critic_lr = critic_lr\n self.critic_batch_size = critic_batch_size\n self.critic_gamma = critic_gamma\n self.mem_size = mem_size\n self.num_of_states = env.observation_space.shape[0]\n self.num_of_actions = env.action_space.n\n self.experience_replay_buffer = ReplayBuffer(self.mem_size)\n self.actor_network = ActorNet(self.num_of_states, self.\n actor_hidden_size, self.num_of_actions)\n self.actor_optimizer = optim.Adam(self.actor_network.parameters(),\n lr=self.actor_lr)\n self.critic_network = CriticNet(self.num_of_states, self.\n critic_hidden_size, 1)\n self.critic_optimizer = optim.Adam(self.critic_network.parameters(),\n lr=self.critic_lr)\n\n def act(self, state):\n action_distribution = self.actor_network.forward(state)\n action = np.random.choice(self.num_of_actions, p=\n action_distribution.detach().numpy())\n return action\n\n def memorize(self, state, action, new_state, reward, done):\n self.experience_replay_buffer.push(state, action, new_state, reward,\n done)\n\n def learn(self, rewards_batch, states_batch, actions_batch,\n new_states_batch, new_actions_batch):\n states_batch = np.asarray(states_batch)\n actions_batch = torch.tensor(actions_batch, dtype=torch.long)\n rewards_batch = torch.tensor(rewards_batch, dtype=torch.float)\n new_states_batch = np.asarray(states_batch)\n new_actions_batch = torch.tensor(actions_batch, dtype=torch.long)\n V_batch = []\n V_prime_batch = []\n for state, new_state, new_action in zip(states_batch,\n new_states_batch, new_actions_batch):\n state = torch.Tensor(state)\n v_value = self.critic_network.forward(state)\n V_batch.append(v_value)\n new_state = torch.Tensor(new_state)\n v_prime_value = self.critic_network.forward(new_state)\n V_prime_batch.append(v_prime_value)\n log_probs = torch.log(self.actor_network(states_batch))\n selected_log_probs = rewards_batch * log_probs[np.arange(len(\n actions_batch)), actions_batch]\n actor_loss = -selected_log_probs.mean()\n self.actor_optimizer.zero_grad()\n actor_loss.backward()\n self.actor_optimizer.step()\n V_prime_batch = torch.stack(V_prime_batch)\n V_batch = torch.stack(V_batch)\n advantage = rewards_batch + self.critic_gamma * V_prime_batch - V_batch\n critic_loss = (V_batch - (rewards_batch + self.critic_gamma *\n V_prime_batch)).pow(2).mean()\n self.critic_optimizer.zero_grad()\n critic_loss.backward()\n self.critic_optimizer.step()\n",
"step-4": "<mask token>\nimport torch\nimport torch.optim as optim\nfrom utilities import *\nfrom model import *\nfrom torch.autograd import Variable\nimport numpy as np\nimport random\n\n\nclass A2C_agent(object):\n\n def __init__(self, env, actor_hidden_size, actor_lr, actor_batch_size,\n critic_gamma, mem_size, critic_hidden_size, critic_lr,\n critic_batch_size):\n self.env = env\n self.actor_hidden_size = actor_hidden_size\n self.actor_lr = actor_lr\n self.actor_batch_size = actor_batch_size\n self.critic_hidden_size = critic_hidden_size\n self.critic_lr = critic_lr\n self.critic_batch_size = critic_batch_size\n self.critic_gamma = critic_gamma\n self.mem_size = mem_size\n self.num_of_states = env.observation_space.shape[0]\n self.num_of_actions = env.action_space.n\n self.experience_replay_buffer = ReplayBuffer(self.mem_size)\n self.actor_network = ActorNet(self.num_of_states, self.\n actor_hidden_size, self.num_of_actions)\n self.actor_optimizer = optim.Adam(self.actor_network.parameters(),\n lr=self.actor_lr)\n self.critic_network = CriticNet(self.num_of_states, self.\n critic_hidden_size, 1)\n self.critic_optimizer = optim.Adam(self.critic_network.parameters(),\n lr=self.critic_lr)\n\n def act(self, state):\n action_distribution = self.actor_network.forward(state)\n action = np.random.choice(self.num_of_actions, p=\n action_distribution.detach().numpy())\n return action\n\n def memorize(self, state, action, new_state, reward, done):\n self.experience_replay_buffer.push(state, action, new_state, reward,\n done)\n\n def learn(self, rewards_batch, states_batch, actions_batch,\n new_states_batch, new_actions_batch):\n states_batch = np.asarray(states_batch)\n actions_batch = torch.tensor(actions_batch, dtype=torch.long)\n rewards_batch = torch.tensor(rewards_batch, dtype=torch.float)\n new_states_batch = np.asarray(states_batch)\n new_actions_batch = torch.tensor(actions_batch, dtype=torch.long)\n V_batch = []\n V_prime_batch = []\n for state, new_state, new_action in zip(states_batch,\n new_states_batch, new_actions_batch):\n state = torch.Tensor(state)\n v_value = self.critic_network.forward(state)\n V_batch.append(v_value)\n new_state = torch.Tensor(new_state)\n v_prime_value = self.critic_network.forward(new_state)\n V_prime_batch.append(v_prime_value)\n log_probs = torch.log(self.actor_network(states_batch))\n selected_log_probs = rewards_batch * log_probs[np.arange(len(\n actions_batch)), actions_batch]\n actor_loss = -selected_log_probs.mean()\n self.actor_optimizer.zero_grad()\n actor_loss.backward()\n self.actor_optimizer.step()\n V_prime_batch = torch.stack(V_prime_batch)\n V_batch = torch.stack(V_batch)\n advantage = rewards_batch + self.critic_gamma * V_prime_batch - V_batch\n critic_loss = (V_batch - (rewards_batch + self.critic_gamma *\n V_prime_batch)).pow(2).mean()\n self.critic_optimizer.zero_grad()\n critic_loss.backward()\n self.critic_optimizer.step()\n",
"step-5": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Oct 25 19:21:32 2019\n\n@author: Nikos\n\"\"\"\n\nimport torch\nimport torch.optim as optim\nfrom utilities import *\nfrom model import *\nfrom torch.autograd import Variable\nimport numpy as np\nimport random\n\nclass A2C_agent(object):\n\tdef __init__(self, env, actor_hidden_size, actor_lr, actor_batch_size, \n\t\tcritic_gamma, mem_size, critic_hidden_size, critic_lr, critic_batch_size):\n \n\t\tself.env = env\n\t\tself.actor_hidden_size = actor_hidden_size\n\t\tself.actor_lr = actor_lr\n\t\tself.actor_batch_size = actor_batch_size\n\n\t\tself.critic_hidden_size = critic_hidden_size\n\t\tself.critic_lr = critic_lr\n\t\tself.critic_batch_size = critic_batch_size\n\t\tself.critic_gamma = critic_gamma\n\n\t\tself.mem_size = mem_size\n \n\t\tself.num_of_states = env.observation_space.shape[0]\n\t\tself.num_of_actions = env.action_space.n\n\n\t\tself.experience_replay_buffer = ReplayBuffer(self.mem_size)\n \n # initialize the Actor network (policy)\n\t\tself.actor_network = ActorNet(self.num_of_states, self.actor_hidden_size, self.num_of_actions)\n \n\t\tself.actor_optimizer = optim.Adam(self.actor_network.parameters(), lr = self.actor_lr) \n\n\t\t# initialize the Critic network (v-learning)\n\t\t# The difference between the critic in A2C (here) and the \n\t# critic int he \"vanilla\" Actor-Critic version is that the\n\t# critic in A2C models the value function, hence it needs\n\t# to only output the value of each state and not the Q-value\n\t# for each (state, action) pair. Therefore, the output size\n\t# here needs to be a scalar.\n\t\tself.critic_network = CriticNet(self.num_of_states, self.critic_hidden_size, 1)\n \n\t\tself.critic_optimizer = optim.Adam(self.critic_network.parameters(), lr = self.critic_lr) \n \n\tdef act(self, state):\n \t# compute the action distribution based on the current state via the policy net\n\t\taction_distribution = self.actor_network.forward(state)\n\n # pick an action based on that distribution\n\t\taction = np.random.choice(self.num_of_actions, p = action_distribution.detach().numpy())\n\t\treturn action\n\t\t\n\tdef memorize(self, state, action, new_state, reward, done):\n # this function takes a transition (state, action, new_state, reward, done)\n # and stores it into the experience memory buffer\n\t\tself.experience_replay_buffer.push(state, action, new_state, reward, done)\n\n\tdef learn(self, rewards_batch, states_batch, actions_batch, new_states_batch, new_actions_batch):\n\n\t\t#states_batch = torch.tensor(states_batch, dtype=torch.float)\n\t\tstates_batch = np.asarray(states_batch)\n\t\tactions_batch = torch.tensor(actions_batch, dtype=torch.long)\n\t\trewards_batch = torch.tensor(rewards_batch, dtype=torch.float)\n\t\tnew_states_batch = np.asarray(states_batch)\n\t\tnew_actions_batch = torch.tensor(actions_batch, dtype=torch.long)\n\t\tV_batch = []\n\t\tV_prime_batch = []\n\n\t\tfor state, new_state, new_action in zip(states_batch,\\\n\t\t\tnew_states_batch, new_actions_batch):\n\t\t\tstate = torch.Tensor(state)\n\n\t\t\tv_value = self.critic_network.forward(state)\n\t\t\t# get q-value for specific action\n\t\t\t#Q = q_values.gather(-1, action)\n\t\t\tV_batch.append(v_value)\n\n\t\t\tnew_state = torch.Tensor(new_state)\n\t\t\tv_prime_value = self.critic_network.forward(new_state)\n\t\t\t#V_prime = q_prime_values.gather(-1, new_action)\n\t\t\tV_prime_batch.append(v_prime_value)\n \n # compute the log of the probabilities that the policy outputs for each 
state\n\t\tlog_probs = torch.log(self.actor_network(states_batch))\n # pick those log probabilities that correspond to the actions that were selected\n\t\tselected_log_probs = rewards_batch * log_probs[np.arange(len(actions_batch)), actions_batch]\n # compute the monte-carlo estimate by averaging the losses and then form the optimization\n # criterion, which will be the negative log probs.\n\t\tactor_loss = -selected_log_probs.mean()\n\t\tself.actor_optimizer.zero_grad()\n\t\tactor_loss.backward()\n \n # if we need smooth updates we clip the grads between -1 and 1\n #for param in self.online_dqn_network.parameters():\n # param.grad.data.clamp_(-1,1)\n\t\tself.actor_optimizer.step()\n\n\t\t# Compute TD error for V network\n\t\tV_prime_batch = torch.stack(V_prime_batch)\n\t\tV_batch = torch.stack(V_batch)\n\t\t# A(s, a) = r_prime + gamma * V_prime - V\n\t\tadvantage = rewards_batch + self.critic_gamma * V_prime_batch - V_batch\n\t\t#print(deltas)\n\n\t\tcritic_loss = (V_batch - (rewards_batch + self.critic_gamma * V_prime_batch)).pow(2).mean()\n\t\t#print(critic_loss)\n\t\tself.critic_optimizer.zero_grad()\n\t\tcritic_loss.backward()\n\t\tself.critic_optimizer.step()\n\n\n\t\t#return loss",
"step-ids": [
3,
4,
5,
6,
7
]
}
|
[
3,
4,
5,
6,
7
] |
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import numpy as np
def plot3D(xValues, labels, figure=0):
    # Scatter 3-D points, one colour per class label.
    minClass = min(labels)
    numberOfClasses = int(max(labels) - minClass)

    fig = plt.figure(figure)
    ax = plt.axes(projection='3d')
    colors = ["r", "b", "y", "c", "m"]
    for i in range(numberOfClasses + 1):
        classLocation = np.argwhere(labels == i + minClass)
        ax.scatter3D(xValues[classLocation, 0], xValues[classLocation, 1],
                     xValues[classLocation, 2], c=colors[i % len(colors)])
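# Hypothetical demo (values invented for illustration): two Gaussian blobs.
if __name__ == '__main__':
    pts = np.random.randn(100, 3)
    pts[50:] += 3.0  # shift the second class away from the first
    labels = np.array([0] * 50 + [1] * 50)
    plot3D(pts, labels)
    plt.show()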
|
normal
|
{
"blob_id": "8dfd92ab0ce0e71b41ce94bd8fcf057c8995a2a4",
"index": 1668,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef plot3D(xValues, labels, figure=0):\n minClass = min(labels)\n numberOfClasses = int(max(labels) - minClass)\n fig = plt.figure(figure)\n ax = plt.axes(projection='3d')\n colors = ['r', 'b', 'y', 'c', 'm']\n for i in range(numberOfClasses + 1):\n classLocation = np.argwhere(labels == i + minClass)\n ax.scatter3D(xValues[classLocation, 0], xValues[classLocation, 1],\n xValues[classLocation, 2])\n",
"step-3": "import matplotlib.pyplot as plt\nfrom mpl_toolkits.mplot3d import Axes3D\nimport numpy as np\n\n\ndef plot3D(xValues, labels, figure=0):\n minClass = min(labels)\n numberOfClasses = int(max(labels) - minClass)\n fig = plt.figure(figure)\n ax = plt.axes(projection='3d')\n colors = ['r', 'b', 'y', 'c', 'm']\n for i in range(numberOfClasses + 1):\n classLocation = np.argwhere(labels == i + minClass)\n ax.scatter3D(xValues[classLocation, 0], xValues[classLocation, 1],\n xValues[classLocation, 2])\n",
"step-4": "import matplotlib.pyplot as plt\nfrom mpl_toolkits.mplot3d import Axes3D\nimport numpy as np\n\ndef plot3D(xValues, labels, figure = 0):\n minClass = min(labels)\n numberOfClasses = int(max(labels) - minClass)\n\n fig = plt.figure(figure)\n ax = plt.axes(projection='3d')\n colors = [\"r\", \"b\", \"y\", \"c\", \"m\"]\n for i in range(numberOfClasses+1):\n classLocation = np.argwhere(labels == i+minClass)\n ax.scatter3D(xValues[classLocation, 0], xValues[classLocation, 1], xValues[classLocation, 2]) #3D\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
from math import pi
from root_regula_falsi import *
r = 1.0      # sphere radius
ρs = 200.0   # sphere density
ρw = 1000.0  # water density

def f(h):
    # Archimedes balance: weight of displaced water minus weight of the
    # sphere; by this formula, h is the height of the spherical cap that
    # emerges above the waterline.
    Vw = 4*pi*r**3/3 - pi*h**2/3*(3*r - h)  # displaced volume of water
    Vs = 4*pi*r**3/3                        # sphere volume
    return ρw*Vw - ρs*Vs
xr = root_regula_falsi(f, 0.0, 2*r)
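# Sanity check (assumption: root_regula_falsi returns the root as a plain
# float); the force balance should be close to zero at the computed height.
print("cap height above water:", xr)
print("residual force balance:", f(xr))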
|
normal
|
{
"blob_id": "3e7d2bacb15c39658ef5044685b73068deb1c145",
"index": 6060,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef f(h):\n Vw = 4 * pi * r ** 3 / 3 - pi * h ** 2 / 3 * (3 * r - h)\n Vs = 4 * pi * r ** 3 / 3\n return ρw * Vw - ρs * Vs\n\n\n<mask token>\n",
"step-3": "<mask token>\nr = 1.0\nρs = 200.0\nρw = 1000.0\n\n\ndef f(h):\n Vw = 4 * pi * r ** 3 / 3 - pi * h ** 2 / 3 * (3 * r - h)\n Vs = 4 * pi * r ** 3 / 3\n return ρw * Vw - ρs * Vs\n\n\nxr = root_regula_falsi(f, 0.0, 2 * r)\n",
"step-4": "from math import pi\nfrom root_regula_falsi import *\nr = 1.0\nρs = 200.0\nρw = 1000.0\n\n\ndef f(h):\n Vw = 4 * pi * r ** 3 / 3 - pi * h ** 2 / 3 * (3 * r - h)\n Vs = 4 * pi * r ** 3 / 3\n return ρw * Vw - ρs * Vs\n\n\nxr = root_regula_falsi(f, 0.0, 2 * r)\n",
"step-5": "from math import pi\nfrom root_regula_falsi import *\n\nr = 1.0\nρs = 200.0\nρw = 1000.0\n\ndef f(h):\n Vw = 4*pi*r**3/3 - pi*h**2/3*(3*r - h) # displaced volume of water\n Vs = 4*pi*r**3/3\n return ρw*Vw - ρs*Vs\n\n\nxr = root_regula_falsi(f, 0.0, 2*r)",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
from __future__ import print_function
import os
from twisted.internet.task import react
from twisted.internet.defer import Deferred, inlineCallbacks
from twisted.internet.protocol import Factory
from twisted.internet.protocol import Protocol
from twisted.internet.endpoints import TCP4ClientEndpoint, connectProtocol
from twisted.protocols.basic import LineReceiver
from twisted.internet.stdio import StandardIO

import msgpack
class ChatClient(Protocol):
def __init__(self, done):
self.done = done
self.unpacker = msgpack.Unpacker()
def connectionLost(self, reason):
print(reason.getErrorMessage())
self.done.callback(reason)
def sendMessage(self, nick, msg):
print("sending", nick, msg)
data = msgpack.packb([nick, msg])
self.transport.write(data)
def dataReceived(self, data):
        # Same caveat as on the server: messages can arrive split or in
        # "bursts", so feed a streaming Unpacker rather than calling a
        # one-shot msgpack.unpack(data) per dataReceived invocation.
self.unpacker.feed(data)
for msg in self.unpacker:
print("{}: {}".format(*msg))
class StdIOFactory(Factory):
def __init__(self, nick, proto):
self.nick = nick
self.proto = proto
def buildProtocol(self, addr):
return StandardInput(self.nick, self.proto)
class StandardInput(LineReceiver, StandardIO):
'''
Reads stdin and writes every line received as a message to the
server. No fancy editing or anything, simple pipe.
'''
delimiter = os.linesep
def lineReceived(self, line):
return self.protocol.sendMessage(self.nick, line)
def __init__(self, nick, proto):
self.nick = nick
self.protocol = proto
def connectionLost(self, reason):
self.protocol.transport.loseConnection()
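# Minimal connection sketch (assumptions: the host, port and nick are
# hypothetical, and the exact stdio wiring may differ in the full project):
@inlineCallbacks
def main(reactor, nick='guest'):
    done = Deferred()
    endpoint = TCP4ClientEndpoint(reactor, 'localhost', 6789)
    proto = yield connectProtocol(endpoint, ChatClient(done))
    StandardInput(nick, proto)  # mirror stdin lines to the server
    yield done

# react(main, ['alice'])  # uncomment to run the client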
|
normal
|
{
"blob_id": "532bcf8ae0ee40dc3eb4bd7170acfcb5d21cc4b9",
"index": 1984,
"step-1": "<mask token>\n\n\nclass StdIOFactory(Factory):\n <mask token>\n <mask token>\n\n\n<mask token>\n\n\nclass StandardInput(LineReceiver, StandardIO):\n \"\"\"\n Reads stdin and writes every line received as a message to the\n server. No fancy editing or anything, simple pipe.\n \"\"\"\n delimiter = os.linesep\n\n def lineReceived(self, line):\n return self.protocol.sendMessage(self.nick, line)\n\n def __init__(self, nick, proto):\n self.nick = nick\n self.protocol = proto\n\n def connectionLost(self, reason):\n self.protocol.transport.loseConnection()\n",
"step-2": "<mask token>\n\n\nclass ChatClient(Protocol):\n\n def __init__(self, done):\n self.done = done\n self.unpacker = msgpack.Unpacker()\n\n def connectionLost(self, reason):\n print(reason.getErrorMessage())\n self.done.callback(reason)\n <mask token>\n <mask token>\n\n\nclass StdIOFactory(Factory):\n\n def __init__(self, nick, proto):\n self.nick = nick\n self.proto = proto\n\n def buildProtocol(self, addr):\n return StandardInput(self.nick, self.proto)\n\n\n<mask token>\n\n\nclass StandardInput(LineReceiver, StandardIO):\n \"\"\"\n Reads stdin and writes every line received as a message to the\n server. No fancy editing or anything, simple pipe.\n \"\"\"\n delimiter = os.linesep\n\n def lineReceived(self, line):\n return self.protocol.sendMessage(self.nick, line)\n\n def __init__(self, nick, proto):\n self.nick = nick\n self.protocol = proto\n\n def connectionLost(self, reason):\n self.protocol.transport.loseConnection()\n",
"step-3": "<mask token>\n\n\nclass ChatClient(Protocol):\n\n def __init__(self, done):\n self.done = done\n self.unpacker = msgpack.Unpacker()\n\n def connectionLost(self, reason):\n print(reason.getErrorMessage())\n self.done.callback(reason)\n\n def sendMessage(self, nick, msg):\n print('sending', nick, msg)\n data = msgpack.packb([nick, msg])\n self.transport.write(data)\n <mask token>\n\n\nclass StdIOFactory(Factory):\n\n def __init__(self, nick, proto):\n self.nick = nick\n self.proto = proto\n\n def buildProtocol(self, addr):\n return StandardInput(self.nick, self.proto)\n\n\n<mask token>\n\n\nclass StandardInput(LineReceiver, StandardIO):\n \"\"\"\n Reads stdin and writes every line received as a message to the\n server. No fancy editing or anything, simple pipe.\n \"\"\"\n delimiter = os.linesep\n\n def lineReceived(self, line):\n return self.protocol.sendMessage(self.nick, line)\n\n def __init__(self, nick, proto):\n self.nick = nick\n self.protocol = proto\n\n def connectionLost(self, reason):\n self.protocol.transport.loseConnection()\n",
"step-4": "<mask token>\n\n\nclass ChatClient(Protocol):\n\n def __init__(self, done):\n self.done = done\n self.unpacker = msgpack.Unpacker()\n\n def connectionLost(self, reason):\n print(reason.getErrorMessage())\n self.done.callback(reason)\n\n def sendMessage(self, nick, msg):\n print('sending', nick, msg)\n data = msgpack.packb([nick, msg])\n self.transport.write(data)\n\n def dataReceived(self, data):\n self.unpacker.feed(data)\n for msg in self.unpacker:\n print('{}: {}'.format(*msg))\n\n\nclass StdIOFactory(Factory):\n\n def __init__(self, nick, proto):\n self.nick = nick\n self.proto = proto\n\n def buildProtocol(self, addr):\n return StandardInput(self.nick, self.proto)\n\n\n<mask token>\n\n\nclass StandardInput(LineReceiver, StandardIO):\n \"\"\"\n Reads stdin and writes every line received as a message to the\n server. No fancy editing or anything, simple pipe.\n \"\"\"\n delimiter = os.linesep\n\n def lineReceived(self, line):\n return self.protocol.sendMessage(self.nick, line)\n\n def __init__(self, nick, proto):\n self.nick = nick\n self.protocol = proto\n\n def connectionLost(self, reason):\n self.protocol.transport.loseConnection()\n",
"step-5": "from __future__ import print_function\nimport os\n\nfrom twisted.internet.task import react\nfrom twisted.internet.defer import Deferred, inlineCallbacks\n\nfrom twisted.internet.protocol import Factory\nfrom twisted.internet.protocol import Protocol\nfrom twisted.internet.endpoints import TCP4ClientEndpoint, connectProtocol\nfrom twisted.protocols.basic import LineReceiver\n\nimport msgpack\n\n\nclass ChatClient(Protocol):\n def __init__(self, done):\n self.done = done\n self.unpacker = msgpack.Unpacker()\n\n def connectionLost(self, reason):\n print(reason.getErrorMessage())\n self.done.callback(reason)\n\n def sendMessage(self, nick, msg):\n print(\"sending\", nick, msg)\n data = msgpack.packb([nick, msg])\n self.transport.write(data)\n\n def dataReceived(self, data):\n # ditto to server: go over what about \"burst\" messages?\n # (and do \"original\" code here at first: msg = msgpack.unpack(data)\n self.unpacker.feed(data)\n for msg in self.unpacker:\n print(\"{}: {}\".format(*msg))\n\n\nclass StdIOFactory(Factory):\n def __init__(self, nick, proto):\n self.nick = nick\n self.proto = proto\n\n def buildProtocol(self, addr):\n return StandardInput(self.nick, self.proto)\n\n\nfrom twisted.internet.stdio import StandardIO\nclass StandardInput(LineReceiver, StandardIO):\n '''\n Reads stdin and writes every line received as a message to the\n server. No fancy editing or anything, simple pipe.\n '''\n delimiter = os.linesep\n\n def lineReceived(self, line):\n return self.protocol.sendMessage(self.nick, line)\n\n def __init__(self, nick, proto):\n self.nick = nick\n self.protocol = proto\n\n def connectionLost(self, reason):\n self.protocol.transport.loseConnection()\n",
"step-ids": [
7,
12,
13,
14,
16
]
}
|
[
7,
12,
13,
14,
16
] |
string="Rutuja MaluSare"
print(string.casefold())
print(len(string))
"""string1=input("enter string 1")
print("string1")
print(len(string1))
string2=input("enter string 2")
print("string2")
print(len(string2))
string3=string1+string2
print(len(string3))"""
#lower case
print(string.lower())
#upper case
print(string.upper())
#strip =removes white spaces from start and end
a=" hello "
print(a)
print(a.strip())
#isdigit =returns True only if every character of a string is a digit
b= 12
print(b)
print(str(b).isdigit())
|
normal
|
{
"blob_id": "024bc95f7255bb8be5c3c4ade9d212c9555a4f01",
"index": 3034,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nprint(string.casefold())\nprint(len(string))\n<mask token>\nprint(string.lower())\nprint(string.upper())\n<mask token>\nprint(a)\nprint(a.strip())\n<mask token>\nprint(b)\n",
"step-3": "string = 'Rutuja MaluSare'\nprint(string.casefold())\nprint(len(string))\n<mask token>\nprint(string.lower())\nprint(string.upper())\na = ' hello '\nprint(a)\nprint(a.strip())\nb = 12\nprint(b)\n",
"step-4": "string=\"Rutuja MaluSare\"\nprint(string.casefold())\nprint(len(string))\n\n\"\"\"string1=input(\"enter string 1\")\nprint(\"string1\")\nprint(len(string1))\n\nstring2=input(\"enter string 2\")\nprint(\"string2\")\nprint(len(string2))\n\n\nstring3=string1+string2\nprint(len(string3))\"\"\"\n\n#lower case\nprint(string.lower())\n#upper case\nprint(string.upper())\n\n#strip =removes white spaces from start and end\na=\" hello \"\nprint(a)\nprint(a.strip())\n\n#isdigit\nb= 12\nprint(b)\n\n\n\n\n\n\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
import API.enum as enum
import re
# NOTE: SubMerchant (used in _check_with_subMerchantBankAccountValidator below)
# is never imported in the original file; it presumably lives in the project's
# models module, e.g. `from API.models import SubMerchant` (assumed path).
class ObjectValidator():
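    """
    Queues object-level checks (existence, duplication, user authentication)
    and runs them all at once via validate().
    """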
def __init__(self, validationData={}, *args, **kwargs):
self.data = validationData
self.statusCode = 200
self.validationPipeline = []
self.errors = {}
self.invalidFields = []
    def flush(self):
        # reassigning `self` only rebinds the local name; reset state in place
        self.__init__()
        return self
def setError(self, field, error):
if field not in self.invalidFields:
fieldErrors = self.errors.get(field, [])
if error[0] not in fieldErrors:
self.errors[field] = fieldErrors + [error[0]]
self.statusCode = error[1]
self.invalidFields.append(field)
def getErrors(self):
return self.errors
def validate(self):
for validation in self.validationPipeline:
try:
validation['validator'](validation['data'])
            except Exception:
self.setError(validation['data']['field'], enum.Error.INVALID_FIELD_DATA.value)
def addValidation(self, data, validatorFunction):
self.validationPipeline.append({
'data': data,
'validator': validatorFunction
})
def _check_with_authenticationValidator(self, data):
if not data['user'].is_authenticated:
self.setError(data['field'], enum.Error.UNAUTHORIZED.value)
def _check_with_nonDuplicateObjectValidator(self, data):
model = data['model']
if model.objects.filter(**data['filter']):
self.setError(data['field'], enum.Error.DUPLICATE_FIELDS.value)
def _check_with_ObjectExistenceValidator(self, data):
model = data['model']
if not model.objects.filter(**data['filter']):
self.setError(data['field'], enum.Error.GENERIC_OBJECT_NOT_FOUND.value)
def checkNonDuplicateObject(self, field, model, **filter):
self.addValidation({'field': field, 'model': model, 'filter': filter},
self._check_with_nonDuplicateObjectValidator)
return self
def checkObjectExistence(self, field, model, **filter):
self.addValidation({'field': field, 'model': model, 'filter': filter},
self._check_with_ObjectExistenceValidator)
return self
def checkUserAuthentication(self, field, user):
self.addValidation({'field': field, 'user': user},
self._check_with_authenticationValidator)
return self
#\b(?!(\d)\1{3})[13-9]{4}[1346-9][013-9]{5}\b
# postal code validation
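# A minimal sketch (not in the original) of the postal-code validation the
# regex above hints at; the helper name is hypothetical:
def _is_valid_postal_code(code):
    # 10 digits in the character classes the regex above encodes, rejecting
    # codes whose first four digits are all the same digit
    return re.fullmatch(r"(?!(\d)\1{3})[13-9]{4}[1346-9][013-9]{5}", code) is not None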
class FieldValidator():
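    """
    Queues per-field format checks (types, national codes, phone/email,
    IBAN, lengths) against the supplied data dict; run them via validate().
    """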
def __init__(self, validationData={}, *args, **kwargs):
self.data = validationData
self.validationPipeline = []
self.statusCode = 200
self.errors = {}
self.invalidFields = []
    def flush(self):
        # reset validator state in place (rebinding `self` had no effect)
        self.__init__()
        return self
def setError(self, field, error):
if field not in self.invalidFields:
fieldErrors = self.errors.get(field, [])
if error[0] not in fieldErrors:
self.errors[field] = fieldErrors + [error[0]]
self.statusCode = error[1]
self.invalidFields.append(field)
def getErrors(self):
return self.errors
def validate(self):
for validation in self.validationPipeline:
try:
validation['validator'](validation['data'])
            except Exception:
self.setError(validation['data']['field'], enum.Error.INVALID_FIELD_DATA.value)
return self
def addValidation(self, data, validatorFunction):
if (data['value'] == 'unAssigned') and data['field'] in self.data.keys():
data['value'] = self.data[data['field']]
elif data['value'] == 'unAssigned' and data['field'] not in self.data.keys():
data['value'] = None
self.validationPipeline.append({
'data': data,
'validator': validatorFunction
})
def _check_with_typeValidator(self, data):
if not isinstance(data['value'], data['type']):
self.setError(data['field'], enum.Error.INVALID_FIELD_DATA.value)
def _check_with_nationalLegalCodeValidator(self, data):
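        # checksum for 11-digit legal-entity IDs: each of the first 10 digits is
        # offset by (decimal digit + 2) and weighted, and the sum mod 11 (with
        # 10 mapped to 0) must equal the final check digit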
nationalLegalCode = data['value']
result = 0
validationList = [29, 27, 23, 19, 17, 29, 27, 23, 19, 17]
if len(nationalLegalCode) != 11:
self.setError(data['field'], enum.Error.INVALID_NATIONAL_LEGAL_CODE.value)
return
for i in range(10):
result += (int(nationalLegalCode[-2]) + 2 + int(nationalLegalCode[i])) * validationList[i]
if result % 11 == 10:
reminder = 0
else:
reminder = result % 11
if reminder == int(nationalLegalCode[-1]):
valid = True
else:
valid = False
if valid is False:
self.setError(data['field'], enum.Error.INVALID_NATIONAL_LEGAL_CODE.value)
def _check_with_nationalCodeValidator(self, data):
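        # checksum for 10-digit national codes: weighted sum of the first nine
        # digits (weights 10 down to 2) mod 11 determines the check digit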
nCode = data['value']
valid = True
if len(nCode) != 10:
valid = False
self.setError(data['field'], enum.Error.INVALID_FIELD_DATA.value)
return
sum = 0
for i in range(9):
sum += int(nCode[i]) * (10 - i)
r = sum % 11
if (r < 2 and r == int(nCode[9])) or r >= 2 and r == 11 - int(nCode[9]):
valid = valid and True
if valid is False:
self.setError(data['field'], enum.Error.INVALID_FIELD_DATA.value)
def _check_with_officer1NationalCodeValidator(self, data):
nCode = data['value']
valid = True
if len(nCode) != 10:
self.setError(data['field'], enum.Error.INVALID_FIELD_DATA.value)
return
sum = 0
for i in range(9):
sum += int(nCode[i]) * (10 - i)
r = sum % 11
if (r < 2 and r == int(nCode[9])) or r >= 2 and r == 11 - int(nCode[9]):
valid = valid and True
if valid is False:
self.setError(data['field'], enum.Error.INVALID_FIELD_DATA.value)
def _check_with_officer2NationalCodeValidator(self, data):
nCode = data['value']
valid = True
if len(nCode) != 10:
valid = False
self.setError(data['field'], enum.Error.INVALID_FIELD_DATA.value)
return
sum = 0
for i in range(9):
sum += int(nCode[i]) * (10 - i)
r = sum % 11
if (r < 2 and r == int(nCode[9])) or r >= 2 and r == 11 - int(nCode[9]):
valid = valid and True
if valid is False:
self.setError(data['field'], enum.Error.INVALID_FIELD_DATA.value)
    def _check_with_featuresValidator(self, data):
        # allowed features (Persian): "in-person payment platform",
        # "customer club", "internet payment gateway"
        for i in data['value']:
            if i not in ["پلتفرم پرداخت در محل", "باشگاه مشتریان", "درگاه پرداخت اینترنتی"]:
                self.setError(data['field'], enum.Error.INVALID_FIELD_DATA.value)
                break
def _check_with_userNameValidator(self, data):
username = re.match(r"^[A-Za-z]+(?:[ _-][A-Za-z0-9]+)*$", data["value"])
if 'admin' in data['value'] or 'zibal' in data['value'] or username is None:
self.setError(data['field'], enum.Error.INVALID_USERNAME.value)
    def _check_with_phoneNumberValidator(self, data):
        if data['value'] is None or len(data['value']) < 1:
            self.setError(data['field'], enum.Error.PHONE_INCORRECT_TEMPLATE.value)
def _check_with_mobileValidator(self, data):
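        # Iranian mobile numbers: exactly 11 digits starting with "09"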
mobileNumber = data['value']
if mobileNumber is None:
self.setError(data['field'], enum.Error.EMPTY_INPUT_FIELD.value)
return
match_object = re.match(r"(^09[0-9]{9}$)", mobileNumber)
if match_object is None or mobileNumber is None:
self.setError(data['field'], enum.Error.INVALID_FIELD_DATA.value)
def _check_with_emailValidator(self, data):
email = data['value']
if email is None:
self.setError(data['field'], enum.Error.EMPTY_INPUT_FIELD.value)
return
match_object = re.match(r"(^[a-zA-Z0-9_.+-]+@[a-zA-Z0-9-]+\.[a-zA-Z0-9-.]+$)", email)
if match_object is None or email is None:
self.setError(data['field'], enum.Error.INVALID_FIELD_DATA.value)
def _check_with_noneValidator(self, data):
if data['value'] is None or data['value'] == "":
self.setError(data['field'], enum.Error.EMPTY_INPUT_FIELD.value)
def _check_with_fileValidator(self, data):
file = data['value']
field = data['field']
if file is None:
self.setError(field, enum.Error.EMPTY_INPUT_FIELD.value)
return
elif file.size > enum.Limits.FILE_SIZE_LIMIT.value:
self.setError(field, enum.Error.FILE_SIZE_EXCEED.value)
types = data['options'].get('types', None)
valid = False
if types is not None:
for type in types:
valid = valid or type in file.content_type
if valid is False:
self.setError(field, enum.Error.REQUEST_TYPE_ERROR.value)
def _check_with_IBANValidator(self, data):
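        # ISO 7064 mod-97 check: 26 characters starting with "IR"; rotate the
        # first four characters to the end, substitute I=18 and R=27, and the
        # resulting number mod 97 must equal 1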
iban = data['value']
        if len(iban) != 26 or not iban.startswith("IR"):
self.setError(data['field'], enum.Error.IBAN_ERROR.value)
return
        code = iban[4:] + iban[:4]
        code = code.replace('I', '18').replace('R', '27')
        if int(code) % 97 != 1:
self.setError(data['field'], enum.Error.IBAN_ERROR.value)
def _check_with_subMerchantBankAccountValidator(self, data):
if not SubMerchant.objects.filter(idsql=data['value']['userId'], ID=data['value']['subId'], status=1).exists():
self.setError(data['field'], enum.Error.IMPOSSIBLE_BANK_ACCOUNT_DESTINATION.value)
def _check_with_minDataLengthValidator(self, data):
if data['value'] is None or len(data['value']) < data['length']:
self.setError(data['field'], (enum.Error.MESSAGE_INSUFFICIENT_LENGTH.value[0].format(data['length']),
enum.Error.MESSAGE_INSUFFICIENT_LENGTH.value[1]))
def _check_with_maxDataLengthValidator(self, data):
if data['value'] is None or len(data['value']) > data['length']:
self.setError(data['field'], (enum.Error.MESSAGE_INSUFFICIENT_LENGTH.value[0].format(data['length']),
enum.Error.MESSAGE_INSUFFICIENT_LENGTH.value[1]))
def _check_with_equalDataLengthValidator(self, data):
if data['value'] is None or len(data['value']) != data['length']:
self.setError(data['field'], (enum.Error.MESSAGE_INSUFFICIENT_LENGTH.value[0].format(data['length']),
enum.Error.MESSAGE_INSUFFICIENT_LENGTH.value[1]))
def _check_with_inputValidator(self, data):
if data['value'] is None or len(data['value']) < 1:
self.setError(data['field'], enum.Error.EMPTY_INPUT_FIELD.value)
def _check_with_IbanTransferable(self, data):
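        # chars 4:7 of an Iranian IBAN hold the bank code; this particular
        # bank/account-segment pair is flagged as not transferable by IBAN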
        if data['value'][4:7] == '062' and data['value'][-13:-10] == '080':
self.setError(data['field'], enum.Error.NOT_IBAN_TRANSFERABLE.value)
def _check_with_username(self, data):
username = re.match(r"^[a-zA-Z0-9_.-]+$", data["value"])
if username is None:
self.setError(data['field'], enum.Error.INVALID_USERNAME.value)
#############################################################################
def checkType(self, field, type, value="unAssigned"):
self.addValidation({'field': field, 'type': type, 'value': value}, self._check_with_typeValidator)
return self
def checkNationalLegalCode(self, field, code="unAssigned"):
self.addValidation({'field': field, 'value': code}, self._check_with_nationalLegalCodeValidator)
return self
def checkOfficer1NationalCode(self, field, code="unAssigned"):
self.addValidation({'field': field, 'value': code}, self._check_with_officer1NationalCodeValidator)
return self
def checkOfficer2NationalCode(self, field, code="unAssigned"):
self.addValidation({'field': field, 'value': code}, self._check_with_officer2NationalCodeValidator)
return self
def checkNationalCode(self, field, code="unAssigned"):
self.addValidation({'field': field, 'value': code}, self._check_with_nationalCodeValidator)
return self
def checkFeatures(self, field, features="unAssigned"):
self.addValidation({'field': field, 'value': features}, self._check_with_featuresValidator)
return self
def checkUserName(self, field, username="unAssigned"):
self.addValidation({'field': field, 'value': username}, self._check_with_userNameValidator)
return self
def checkPhone(self, field, data="unAssigned"):
self.addValidation({'field': field, 'value': data}, self._check_with_phoneNumberValidator)
return self
def checkMobile(self, field, data="unAssigned"):
self.addValidation({'field': field, 'value': data}, self._check_with_mobileValidator)
return self
def checkEmail(self, field, data="unAssigned"):
self.addValidation({'field': field, 'value': data}, self._check_with_emailValidator)
return self
def checkNotNone(self, field, data="unAssigned"):
self.addValidation({'field': field, 'value': data}, self._check_with_noneValidator)
return self
def checkFile(self, field, data, **options):
self.addValidation({'field': field, 'value': data, 'options': options}, self._check_with_fileValidator)
return self
def checkIBAN(self, field, data="unAssigned"):
self.addValidation({'field': field, 'value': data}, self._check_with_IBANValidator)
return self
def checkBankAccountDestinationForSubmerchant(self, field, userId, subId):
data = {
'userId': userId,
'subId': subId
}
self.addValidation({'field': field, 'value': data}, self._check_with_subMerchantBankAccountValidator)
return self
def checkDataLength(self, field, length,mode='equal', data="unAssigned"):
if mode == 'equal':
validatorFunction = self._check_with_equalDataLengthValidator
if mode == 'min':
validatorFunction = self._check_with_minDataLengthValidator
        if mode == 'max':
            validatorFunction = self._check_with_maxDataLengthValidator
self.addValidation({'field': field, 'value': data, 'length': length}, validatorFunction)
return self
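    # e.g. checkDataLength('nationalCode', 10) enforces an exact length, while
    # mode='min' / mode='max' enforce lower / upper bounds (illustrative usage)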
def checkInputData(self, field, data="unAssigned"):
self.addValidation({'field': field, 'value': data}, self._check_with_inputValidator)
return self
    def checkTelephone(self, field, data="unAssigned"):  ##TODO: dedicated landline check; reuses the basic phone validator for now
self.addValidation({'field': field, 'value': data}, self._check_with_phoneNumberValidator)
return self
def checkIsIbanTransferable(self, field, data="unAssigned"):
self.addValidation({'field': field, 'value': data}, self._check_with_IbanTransferable)
return self
    def checkUsername(self, field, data="unAssigned"):
        self.addValidation({'field': field, 'value': data}, self._check_with_username)
        return self
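# A minimal chaining sketch (field names are illustrative, not from the original):
# FieldValidator({'mobile': '09123456789', 'email': 'a@b.com'}) \
#     .checkMobile('mobile').checkEmail('email').validate().getErrors()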
class DataValidator:
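    """
    Facade that bundles a FieldValidator and an ObjectValidator and merges
    their errors and status codes into one result.
    """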
def __init__(self, data={}):
self.fieldValidator = FieldValidator(data)
self.objectValidator = ObjectValidator()
self.errors = {}
self.statusCode = 200
def getValidatorsErrors(self):
self.objectValidator.validate()
self.fieldValidator.validate()
for key in self.fieldValidator.getErrors().keys():
self.errors[key] = self.errors.get(key, []) + self.fieldValidator.getErrors()[key]
self.statusCode = self.fieldValidator.statusCode
for key in self.objectValidator.getErrors().keys():
self.errors[key] = self.errors.get(key, []) + self.objectValidator.getErrors()[key]
self.statusCode = self.objectValidator.statusCode if self.objectValidator.statusCode != 200 else self.statusCode
return self.errors
    def generateMessage(self):
        messages = []
        errorKeys = self.errors.keys()
        if 'email' in errorKeys:
            messages.append(' آدرس ایمیل نامعتبر است')  # "The email address is invalid"
        if 'name' in errorKeys:
            messages.append('نام را وارد کنید')  # "Enter your name"
        if 'username' in errorKeys:
            messages.append('نام کاربری را وارد کنید')  # "Enter your username"
        if 'password' in errorKeys:
            messages.append('رمز عبور را وارد کنید')  # "Enter your password"
        if 'mobile' in errorKeys:
            messages.append('تلفن همراه خود را وارد کنید.')  # "Enter your mobile number."
        if 'phone' in errorKeys:
            messages.append('تلفن ثابت را به فرمت 02122407556 و 11 رقمی وارد کنید')  # "Enter the landline as 11 digits, e.g. 02122407556"
        if 'iban' in errorKeys or 'IBAN' in errorKeys:
            messages.append('شماره شبای وارد شده معتبر نیست. 26 کاراکتر و شروع با IR و بدون خط تیره (-) و فاصله')  # "The IBAN is invalid: 26 characters, starting with IR, no dashes or spaces"
        if 'user' in errorKeys:
            messages.append('لطفا وارد شوید')  # "Please log in"
        return messages
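# A minimal end-to-end sketch (assumed input data; not part of the original file):
# dv = DataValidator({'email': 'user@example.com', 'mobile': '09123456789'})
# dv.fieldValidator.checkEmail('email').checkMobile('mobile')
# errors = dv.getValidatorsErrors()
# if errors:
#     print(dv.statusCode, dv.generateMessage())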
|
normal
|
{
"blob_id": "e8daf03f987c7512ff245bfbe16c447acd6b5986",
"index": 7574,
"step-1": "<mask token>\n\n\nclass FieldValidator:\n\n def __init__(self, validationData={}, *args, **kwargs):\n self.data = validationData\n self.validationPipeline = []\n self.statusCode = 200\n self.errors = {}\n self.invalidFields = []\n\n def flush(self):\n self = FieldValidator()\n\n def setError(self, field, error):\n if field not in self.invalidFields:\n fieldErrors = self.errors.get(field, [])\n if error[0] not in fieldErrors:\n self.errors[field] = fieldErrors + [error[0]]\n self.statusCode = error[1]\n self.invalidFields.append(field)\n\n def getErrors(self):\n return self.errors\n\n def validate(self):\n for validation in self.validationPipeline:\n try:\n validation['validator'](validation['data'])\n except:\n self.setError(validation['data']['field'], enum.Error.\n INVALID_FIELD_DATA.value)\n return self\n\n def addValidation(self, data, validatorFunction):\n if data['value'] == 'unAssigned' and data['field'] in self.data.keys():\n data['value'] = self.data[data['field']]\n elif data['value'] == 'unAssigned' and data['field'\n ] not in self.data.keys():\n data['value'] = None\n self.validationPipeline.append({'data': data, 'validator':\n validatorFunction})\n\n def _check_with_typeValidator(self, data):\n if not isinstance(data['value'], data['type']):\n self.setError(data['field'], enum.Error.INVALID_FIELD_DATA.value)\n\n def _check_with_nationalLegalCodeValidator(self, data):\n nationalLegalCode = data['value']\n result = 0\n validationList = [29, 27, 23, 19, 17, 29, 27, 23, 19, 17]\n if len(nationalLegalCode) != 11:\n self.setError(data['field'], enum.Error.\n INVALID_NATIONAL_LEGAL_CODE.value)\n return\n for i in range(10):\n result += (int(nationalLegalCode[-2]) + 2 + int(\n nationalLegalCode[i])) * validationList[i]\n if result % 11 == 10:\n reminder = 0\n else:\n reminder = result % 11\n if reminder == int(nationalLegalCode[-1]):\n valid = True\n else:\n valid = False\n if valid is False:\n self.setError(data['field'], enum.Error.\n INVALID_NATIONAL_LEGAL_CODE.value)\n <mask token>\n\n def _check_with_officer1NationalCodeValidator(self, data):\n nCode = data['value']\n valid = True\n if len(nCode) != 10:\n self.setError(data['field'], enum.Error.INVALID_FIELD_DATA.value)\n return\n sum = 0\n for i in range(9):\n sum += int(nCode[i]) * (10 - i)\n r = sum % 11\n if r < 2 and r == int(nCode[9]) or r >= 2 and r == 11 - int(nCode[9]):\n valid = valid and True\n if valid is False:\n self.setError(data['field'], enum.Error.INVALID_FIELD_DATA.value)\n\n def _check_with_officer2NationalCodeValidator(self, data):\n nCode = data['value']\n valid = True\n if len(nCode) != 10:\n valid = False\n self.setError(data['field'], enum.Error.INVALID_FIELD_DATA.value)\n return\n sum = 0\n for i in range(9):\n sum += int(nCode[i]) * (10 - i)\n r = sum % 11\n if r < 2 and r == int(nCode[9]) or r >= 2 and r == 11 - int(nCode[9]):\n valid = valid and True\n if valid is False:\n self.setError(data['field'], enum.Error.INVALID_FIELD_DATA.value)\n\n def _check_with_featuresValidator(self, data):\n for i in data['value']:\n if i not in ['پلتفرم پرداخت در محل', 'باشگاه مشتریان',\n 'درگاه پرداخت اینترنتی']:\n self.setError(data['field'], enum.Error.INVALID_FIELD_DATA.\n value)\n break\n\n def _check_with_userNameValidator(self, data):\n username = re.match('^[A-Za-z]+(?:[ _-][A-Za-z0-9]+)*$', data['value'])\n if 'admin' in data['value'] or 'zibal' in data['value'\n ] or username is None:\n self.setError(data['field'], enum.Error.INVALID_USERNAME.value)\n\n def _check_with_phoneNumberValidator(self, 
data):\n if data['value'] is None or len(data) < 1:\n self.setError(data['field'], enum.Error.\n PHONE_INCORRECT_TEMPLATE.value)\n <mask token>\n <mask token>\n <mask token>\n\n def _check_with_fileValidator(self, data):\n file = data['value']\n field = data['field']\n if file is None:\n self.setError(field, enum.Error.EMPTY_INPUT_FIELD.value)\n return\n elif file.size > enum.Limits.FILE_SIZE_LIMIT.value:\n self.setError(field, enum.Error.FILE_SIZE_EXCEED.value)\n types = data['options'].get('types', None)\n valid = False\n if types is not None:\n for type in types:\n valid = valid or type in file.content_type\n if valid is False:\n self.setError(field, enum.Error.REQUEST_TYPE_ERROR.value)\n <mask token>\n\n def _check_with_subMerchantBankAccountValidator(self, data):\n if not SubMerchant.objects.filter(idsql=data['value']['userId'], ID\n =data['value']['subId'], status=1).exists():\n self.setError(data['field'], enum.Error.\n IMPOSSIBLE_BANK_ACCOUNT_DESTINATION.value)\n\n def _check_with_minDataLengthValidator(self, data):\n if data['value'] is None or len(data['value']) < data['length']:\n self.setError(data['field'], (enum.Error.\n MESSAGE_INSUFFICIENT_LENGTH.value[0].format(data['length']),\n enum.Error.MESSAGE_INSUFFICIENT_LENGTH.value[1]))\n\n def _check_with_maxDataLengthValidator(self, data):\n if data['value'] is None or len(data['value']) > data['length']:\n self.setError(data['field'], (enum.Error.\n MESSAGE_INSUFFICIENT_LENGTH.value[0].format(data['length']),\n enum.Error.MESSAGE_INSUFFICIENT_LENGTH.value[1]))\n\n def _check_with_equalDataLengthValidator(self, data):\n if data['value'] is None or len(data['value']) != data['length']:\n self.setError(data['field'], (enum.Error.\n MESSAGE_INSUFFICIENT_LENGTH.value[0].format(data['length']),\n enum.Error.MESSAGE_INSUFFICIENT_LENGTH.value[1]))\n\n def _check_with_inputValidator(self, data):\n if data['value'] is None or len(data['value']) < 1:\n self.setError(data['field'], enum.Error.EMPTY_INPUT_FIELD.value)\n\n def _check_with_IbanTransferable(self, data):\n if data['value'][4:7] == '062' and data['value'][-13:-10] == '080':\n self.setError(data['field'], enum.Error.NOT_IBAN_TRANSFERABLE.value\n )\n\n def _check_with_username(self, data):\n username = re.match('^[a-zA-Z0-9_.-]+$', data['value'])\n if username is None:\n self.setError(data['field'], enum.Error.INVALID_USERNAME.value)\n\n def checkType(self, field, type, value='unAssigned'):\n self.addValidation({'field': field, 'type': type, 'value': value},\n self._check_with_typeValidator)\n return self\n\n def checkNationalLegalCode(self, field, code='unAssigned'):\n self.addValidation({'field': field, 'value': code}, self.\n _check_with_nationalLegalCodeValidator)\n return self\n\n def checkOfficer1NationalCode(self, field, code='unAssigned'):\n self.addValidation({'field': field, 'value': code}, self.\n _check_with_officer1NationalCodeValidator)\n return self\n\n def checkOfficer2NationalCode(self, field, code='unAssigned'):\n self.addValidation({'field': field, 'value': code}, self.\n _check_with_officer2NationalCodeValidator)\n return self\n\n def checkNationalCode(self, field, code='unAssigned'):\n self.addValidation({'field': field, 'value': code}, self.\n _check_with_nationalCodeValidator)\n return self\n\n def checkFeatures(self, field, features='unAssigned'):\n self.addValidation({'field': field, 'value': features}, self.\n _check_with_featuresValidator)\n return self\n <mask token>\n <mask token>\n <mask token>\n\n def checkEmail(self, field, data='unAssigned'):\n 
self.addValidation({'field': field, 'value': data}, self.\n _check_with_emailValidator)\n return self\n\n def checkNotNone(self, field, data='unAssigned'):\n self.addValidation({'field': field, 'value': data}, self.\n _check_with_noneValidator)\n return self\n\n def checkFile(self, field, data, **options):\n self.addValidation({'field': field, 'value': data, 'options':\n options}, self._check_with_fileValidator)\n return self\n\n def checkIBAN(self, field, data='unAssigned'):\n self.addValidation({'field': field, 'value': data}, self.\n _check_with_IBANValidator)\n return self\n\n def checkBankAccountDestinationForSubmerchant(self, field, userId, subId):\n data = {'userId': userId, 'subId': subId}\n self.addValidation({'field': field, 'value': data}, self.\n _check_with_subMerchantBankAccountValidator)\n return self\n\n def checkDataLength(self, field, length, mode='equal', data='unAssigned'):\n if mode == 'equal':\n validatorFunction = self._check_with_equalDataLengthValidator\n if mode == 'min':\n validatorFunction = self._check_with_minDataLengthValidator\n if mode == 'max':\n validatorFunction = self._check_with_minDataLengthValidator\n self.addValidation({'field': field, 'value': data, 'length': length\n }, validatorFunction)\n return self\n\n def checkInputData(self, field, data='unAssigned'):\n self.addValidation({'field': field, 'value': data}, self.\n _check_with_inputValidator)\n return self\n <mask token>\n\n def checkIsIbanTransferable(self, field, data='unAssigned'):\n self.addValidation({'field': field, 'value': data}, self.\n _check_with_IbanTransferable)\n return self\n <mask token>\n\n\nclass DataValidator:\n\n def __init__(self, data={}):\n self.fieldValidator = FieldValidator(data)\n self.objectValidator = ObjectValidator()\n self.errors = {}\n self.statusCode = 200\n\n def getValidatorsErrors(self):\n self.objectValidator.validate()\n self.fieldValidator.validate()\n for key in self.fieldValidator.getErrors().keys():\n self.errors[key] = self.errors.get(key, []\n ) + self.fieldValidator.getErrors()[key]\n self.statusCode = self.fieldValidator.statusCode\n for key in self.objectValidator.getErrors().keys():\n self.errors[key] = self.errors.get(key, []\n ) + self.objectValidator.getErrors()[key]\n self.statusCode = (self.objectValidator.statusCode if self.\n objectValidator.statusCode != 200 else self.statusCode)\n return self.errors\n\n def generateMessage(self):\n messages = []\n errorKeys = self.errors.keys()\n if 'email' in errorKeys:\n messages.append(' آدرس ایمیل نامعتبر است')\n if 'name' in errorKeys:\n messages.append('نام را وارد کنید')\n if 'username' in errorKeys:\n messages.append('نام کاربری را وارد کنید')\n if 'password' in errorKeys:\n messages.append('رمز عبور را وارد کنید')\n if 'mobile' in errorKeys:\n messages.append('تلفن همراه خود را وارد کنید.')\n if 'phone' in errorKeys:\n messages.append(\n 'تلفن ثابت را به فرمت 02122407556 و 11 رقمی وارد کنید')\n if 'iban' in errorKeys or 'IBAN' in errorKeys:\n messages.append(\n 'شماره شبای وارد شده معتبر نیست. 26 کاراکتر و شروع با IR و بدون خط تیره (-) و فاصله'\n )\n if 'user' in errorKeys:\n messages.append('لطفا وارد شوید')\n return messages\n",
"step-2": "<mask token>\n\n\nclass ObjectValidator:\n\n def __init__(self, validationData={}, *args, **kwargs):\n self.data = validationData\n self.statusCode = 200\n self.validationPipeline = []\n self.errors = {}\n self.invalidFields = []\n\n def flush(self):\n self = ObjectValidator()\n return self\n <mask token>\n <mask token>\n <mask token>\n\n def addValidation(self, data, validatorFunction):\n self.validationPipeline.append({'data': data, 'validator':\n validatorFunction})\n\n def _check_with_authenticationValidator(self, data):\n if not data['user'].is_authenticated:\n self.setError(data['field'], enum.Error.UNAUTHORIZED.value)\n <mask token>\n\n def _check_with_ObjectExistenceValidator(self, data):\n model = data['model']\n if not model.objects.filter(**data['filter']):\n self.setError(data['field'], enum.Error.\n GENERIC_OBJECT_NOT_FOUND.value)\n <mask token>\n\n def checkObjectExistence(self, field, model, **filter):\n self.addValidation({'field': field, 'model': model, 'filter':\n filter}, self._check_with_ObjectExistenceValidator)\n return self\n\n def checkUserAuthentication(self, field, user):\n self.addValidation({'field': field, 'user': user}, self.\n _check_with_authenticationValidator)\n return self\n\n\nclass FieldValidator:\n\n def __init__(self, validationData={}, *args, **kwargs):\n self.data = validationData\n self.validationPipeline = []\n self.statusCode = 200\n self.errors = {}\n self.invalidFields = []\n\n def flush(self):\n self = FieldValidator()\n\n def setError(self, field, error):\n if field not in self.invalidFields:\n fieldErrors = self.errors.get(field, [])\n if error[0] not in fieldErrors:\n self.errors[field] = fieldErrors + [error[0]]\n self.statusCode = error[1]\n self.invalidFields.append(field)\n\n def getErrors(self):\n return self.errors\n\n def validate(self):\n for validation in self.validationPipeline:\n try:\n validation['validator'](validation['data'])\n except:\n self.setError(validation['data']['field'], enum.Error.\n INVALID_FIELD_DATA.value)\n return self\n\n def addValidation(self, data, validatorFunction):\n if data['value'] == 'unAssigned' and data['field'] in self.data.keys():\n data['value'] = self.data[data['field']]\n elif data['value'] == 'unAssigned' and data['field'\n ] not in self.data.keys():\n data['value'] = None\n self.validationPipeline.append({'data': data, 'validator':\n validatorFunction})\n\n def _check_with_typeValidator(self, data):\n if not isinstance(data['value'], data['type']):\n self.setError(data['field'], enum.Error.INVALID_FIELD_DATA.value)\n\n def _check_with_nationalLegalCodeValidator(self, data):\n nationalLegalCode = data['value']\n result = 0\n validationList = [29, 27, 23, 19, 17, 29, 27, 23, 19, 17]\n if len(nationalLegalCode) != 11:\n self.setError(data['field'], enum.Error.\n INVALID_NATIONAL_LEGAL_CODE.value)\n return\n for i in range(10):\n result += (int(nationalLegalCode[-2]) + 2 + int(\n nationalLegalCode[i])) * validationList[i]\n if result % 11 == 10:\n reminder = 0\n else:\n reminder = result % 11\n if reminder == int(nationalLegalCode[-1]):\n valid = True\n else:\n valid = False\n if valid is False:\n self.setError(data['field'], enum.Error.\n INVALID_NATIONAL_LEGAL_CODE.value)\n\n def _check_with_nationalCodeValidator(self, data):\n nCode = data['value']\n valid = True\n if len(nCode) != 10:\n valid = False\n self.setError(data['field'], enum.Error.INVALID_FIELD_DATA.value)\n return\n sum = 0\n for i in range(9):\n sum += int(nCode[i]) * (10 - i)\n r = sum % 11\n if r < 2 and r == 
int(nCode[9]) or r >= 2 and r == 11 - int(nCode[9]):\n valid = valid and True\n if valid is False:\n self.setError(data['field'], enum.Error.INVALID_FIELD_DATA.value)\n\n def _check_with_officer1NationalCodeValidator(self, data):\n nCode = data['value']\n valid = True\n if len(nCode) != 10:\n self.setError(data['field'], enum.Error.INVALID_FIELD_DATA.value)\n return\n sum = 0\n for i in range(9):\n sum += int(nCode[i]) * (10 - i)\n r = sum % 11\n if r < 2 and r == int(nCode[9]) or r >= 2 and r == 11 - int(nCode[9]):\n valid = valid and True\n if valid is False:\n self.setError(data['field'], enum.Error.INVALID_FIELD_DATA.value)\n\n def _check_with_officer2NationalCodeValidator(self, data):\n nCode = data['value']\n valid = True\n if len(nCode) != 10:\n valid = False\n self.setError(data['field'], enum.Error.INVALID_FIELD_DATA.value)\n return\n sum = 0\n for i in range(9):\n sum += int(nCode[i]) * (10 - i)\n r = sum % 11\n if r < 2 and r == int(nCode[9]) or r >= 2 and r == 11 - int(nCode[9]):\n valid = valid and True\n if valid is False:\n self.setError(data['field'], enum.Error.INVALID_FIELD_DATA.value)\n\n def _check_with_featuresValidator(self, data):\n for i in data['value']:\n if i not in ['پلتفرم پرداخت در محل', 'باشگاه مشتریان',\n 'درگاه پرداخت اینترنتی']:\n self.setError(data['field'], enum.Error.INVALID_FIELD_DATA.\n value)\n break\n\n def _check_with_userNameValidator(self, data):\n username = re.match('^[A-Za-z]+(?:[ _-][A-Za-z0-9]+)*$', data['value'])\n if 'admin' in data['value'] or 'zibal' in data['value'\n ] or username is None:\n self.setError(data['field'], enum.Error.INVALID_USERNAME.value)\n\n def _check_with_phoneNumberValidator(self, data):\n if data['value'] is None or len(data) < 1:\n self.setError(data['field'], enum.Error.\n PHONE_INCORRECT_TEMPLATE.value)\n\n def _check_with_mobileValidator(self, data):\n mobileNumber = data['value']\n if mobileNumber is None:\n self.setError(data['field'], enum.Error.EMPTY_INPUT_FIELD.value)\n return\n match_object = re.match('(^09[0-9]{9}$)', mobileNumber)\n if match_object is None or mobileNumber is None:\n self.setError(data['field'], enum.Error.INVALID_FIELD_DATA.value)\n\n def _check_with_emailValidator(self, data):\n email = data['value']\n if email is None:\n self.setError(data['field'], enum.Error.EMPTY_INPUT_FIELD.value)\n return\n match_object = re.match(\n '(^[a-zA-Z0-9_.+-]+@[a-zA-Z0-9-]+\\\\.[a-zA-Z0-9-.]+$)', email)\n if match_object is None or email is None:\n self.setError(data['field'], enum.Error.INVALID_FIELD_DATA.value)\n\n def _check_with_noneValidator(self, data):\n if data['value'] is None or data['value'] == '':\n self.setError(data['field'], enum.Error.EMPTY_INPUT_FIELD.value)\n\n def _check_with_fileValidator(self, data):\n file = data['value']\n field = data['field']\n if file is None:\n self.setError(field, enum.Error.EMPTY_INPUT_FIELD.value)\n return\n elif file.size > enum.Limits.FILE_SIZE_LIMIT.value:\n self.setError(field, enum.Error.FILE_SIZE_EXCEED.value)\n types = data['options'].get('types', None)\n valid = False\n if types is not None:\n for type in types:\n valid = valid or type in file.content_type\n if valid is False:\n self.setError(field, enum.Error.REQUEST_TYPE_ERROR.value)\n\n def _check_with_IBANValidator(self, data):\n iban = data['value']\n if len(iban) != 26 or not iban.startswith('IR'):\n self.setError(data['field'], enum.Error.IBAN_ERROR.value)\n return\n code = iban[4:] + iban[:4]\n code = code.replace('I', '18').replace('R', '27')\n if int(code) % 97 != 1:\n 
self.setError(data['field'], enum.Error.IBAN_ERROR.value)\n\n def _check_with_subMerchantBankAccountValidator(self, data):\n if not SubMerchant.objects.filter(idsql=data['value']['userId'], ID\n =data['value']['subId'], status=1).exists():\n self.setError(data['field'], enum.Error.\n IMPOSSIBLE_BANK_ACCOUNT_DESTINATION.value)\n\n def _check_with_minDataLengthValidator(self, data):\n if data['value'] is None or len(data['value']) < data['length']:\n self.setError(data['field'], (enum.Error.\n MESSAGE_INSUFFICIENT_LENGTH.value[0].format(data['length']),\n enum.Error.MESSAGE_INSUFFICIENT_LENGTH.value[1]))\n\n def _check_with_maxDataLengthValidator(self, data):\n if data['value'] is None or len(data['value']) > data['length']:\n self.setError(data['field'], (enum.Error.\n MESSAGE_INSUFFICIENT_LENGTH.value[0].format(data['length']),\n enum.Error.MESSAGE_INSUFFICIENT_LENGTH.value[1]))\n\n def _check_with_equalDataLengthValidator(self, data):\n if data['value'] is None or len(data['value']) != data['length']:\n self.setError(data['field'], (enum.Error.\n MESSAGE_INSUFFICIENT_LENGTH.value[0].format(data['length']),\n enum.Error.MESSAGE_INSUFFICIENT_LENGTH.value[1]))\n\n def _check_with_inputValidator(self, data):\n if data['value'] is None or len(data['value']) < 1:\n self.setError(data['field'], enum.Error.EMPTY_INPUT_FIELD.value)\n\n def _check_with_IbanTransferable(self, data):\n if data['value'][4:7] == '062' and data['value'][-13:-10] == '080':\n self.setError(data['field'], enum.Error.NOT_IBAN_TRANSFERABLE.value\n )\n\n def _check_with_username(self, data):\n username = re.match('^[a-zA-Z0-9_.-]+$', data['value'])\n if username is None:\n self.setError(data['field'], enum.Error.INVALID_USERNAME.value)\n\n def checkType(self, field, type, value='unAssigned'):\n self.addValidation({'field': field, 'type': type, 'value': value},\n self._check_with_typeValidator)\n return self\n\n def checkNationalLegalCode(self, field, code='unAssigned'):\n self.addValidation({'field': field, 'value': code}, self.\n _check_with_nationalLegalCodeValidator)\n return self\n\n def checkOfficer1NationalCode(self, field, code='unAssigned'):\n self.addValidation({'field': field, 'value': code}, self.\n _check_with_officer1NationalCodeValidator)\n return self\n\n def checkOfficer2NationalCode(self, field, code='unAssigned'):\n self.addValidation({'field': field, 'value': code}, self.\n _check_with_officer2NationalCodeValidator)\n return self\n\n def checkNationalCode(self, field, code='unAssigned'):\n self.addValidation({'field': field, 'value': code}, self.\n _check_with_nationalCodeValidator)\n return self\n\n def checkFeatures(self, field, features='unAssigned'):\n self.addValidation({'field': field, 'value': features}, self.\n _check_with_featuresValidator)\n return self\n\n def checkUserName(self, field, username='unAssigned'):\n self.addValidation({'field': field, 'value': username}, self.\n _check_with_userNameValidator)\n return self\n\n def checkPhone(self, field, data='unAssigned'):\n self.addValidation({'field': field, 'value': data}, self.\n _check_with_phoneNumberValidator)\n return self\n\n def checkMobile(self, field, data='unAssigned'):\n self.addValidation({'field': field, 'value': data}, self.\n _check_with_mobileValidator)\n return self\n\n def checkEmail(self, field, data='unAssigned'):\n self.addValidation({'field': field, 'value': data}, self.\n _check_with_emailValidator)\n return self\n\n def checkNotNone(self, field, data='unAssigned'):\n self.addValidation({'field': field, 'value': data}, 
self.\n _check_with_noneValidator)\n return self\n\n def checkFile(self, field, data, **options):\n self.addValidation({'field': field, 'value': data, 'options':\n options}, self._check_with_fileValidator)\n return self\n\n def checkIBAN(self, field, data='unAssigned'):\n self.addValidation({'field': field, 'value': data}, self.\n _check_with_IBANValidator)\n return self\n\n def checkBankAccountDestinationForSubmerchant(self, field, userId, subId):\n data = {'userId': userId, 'subId': subId}\n self.addValidation({'field': field, 'value': data}, self.\n _check_with_subMerchantBankAccountValidator)\n return self\n\n def checkDataLength(self, field, length, mode='equal', data='unAssigned'):\n if mode == 'equal':\n validatorFunction = self._check_with_equalDataLengthValidator\n if mode == 'min':\n validatorFunction = self._check_with_minDataLengthValidator\n if mode == 'max':\n validatorFunction = self._check_with_minDataLengthValidator\n self.addValidation({'field': field, 'value': data, 'length': length\n }, validatorFunction)\n return self\n\n def checkInputData(self, field, data='unAssigned'):\n self.addValidation({'field': field, 'value': data}, self.\n _check_with_inputValidator)\n return self\n\n def checkTelephone(self, field, data='unAssigned'):\n self.addValidation({'field': field, 'value': data}, self.\n _check_with_phoneNumberValidator)\n return self\n\n def checkIsIbanTransferable(self, field, data='unAssigned'):\n self.addValidation({'field': field, 'value': data}, self.\n _check_with_IbanTransferable)\n return self\n\n def checkUsername(self, field, data='unAssigned'):\n self.addValidation({'field': field, 'value': data}, self.\n _check_with_username())\n\n\nclass DataValidator:\n\n def __init__(self, data={}):\n self.fieldValidator = FieldValidator(data)\n self.objectValidator = ObjectValidator()\n self.errors = {}\n self.statusCode = 200\n\n def getValidatorsErrors(self):\n self.objectValidator.validate()\n self.fieldValidator.validate()\n for key in self.fieldValidator.getErrors().keys():\n self.errors[key] = self.errors.get(key, []\n ) + self.fieldValidator.getErrors()[key]\n self.statusCode = self.fieldValidator.statusCode\n for key in self.objectValidator.getErrors().keys():\n self.errors[key] = self.errors.get(key, []\n ) + self.objectValidator.getErrors()[key]\n self.statusCode = (self.objectValidator.statusCode if self.\n objectValidator.statusCode != 200 else self.statusCode)\n return self.errors\n\n def generateMessage(self):\n messages = []\n errorKeys = self.errors.keys()\n if 'email' in errorKeys:\n messages.append(' آدرس ایمیل نامعتبر است')\n if 'name' in errorKeys:\n messages.append('نام را وارد کنید')\n if 'username' in errorKeys:\n messages.append('نام کاربری را وارد کنید')\n if 'password' in errorKeys:\n messages.append('رمز عبور را وارد کنید')\n if 'mobile' in errorKeys:\n messages.append('تلفن همراه خود را وارد کنید.')\n if 'phone' in errorKeys:\n messages.append(\n 'تلفن ثابت را به فرمت 02122407556 و 11 رقمی وارد کنید')\n if 'iban' in errorKeys or 'IBAN' in errorKeys:\n messages.append(\n 'شماره شبای وارد شده معتبر نیست. 26 کاراکتر و شروع با IR و بدون خط تیره (-) و فاصله'\n )\n if 'user' in errorKeys:\n messages.append('لطفا وارد شوید')\n return messages\n",
"step-3": "<mask token>\n\n\nclass ObjectValidator:\n\n def __init__(self, validationData={}, *args, **kwargs):\n self.data = validationData\n self.statusCode = 200\n self.validationPipeline = []\n self.errors = {}\n self.invalidFields = []\n\n def flush(self):\n self = ObjectValidator()\n return self\n <mask token>\n\n def getErrors(self):\n return self.errors\n\n def validate(self):\n for validation in self.validationPipeline:\n try:\n validation['validator'](validation['data'])\n except:\n self.setError(validation['data']['field'], enum.Error.\n INVALID_FIELD_DATA.value)\n\n def addValidation(self, data, validatorFunction):\n self.validationPipeline.append({'data': data, 'validator':\n validatorFunction})\n\n def _check_with_authenticationValidator(self, data):\n if not data['user'].is_authenticated:\n self.setError(data['field'], enum.Error.UNAUTHORIZED.value)\n\n def _check_with_nonDuplicateObjectValidator(self, data):\n model = data['model']\n if model.objects.filter(**data['filter']):\n self.setError(data['field'], enum.Error.DUPLICATE_FIELDS.value)\n\n def _check_with_ObjectExistenceValidator(self, data):\n model = data['model']\n if not model.objects.filter(**data['filter']):\n self.setError(data['field'], enum.Error.\n GENERIC_OBJECT_NOT_FOUND.value)\n\n def checkNonDuplicateObject(self, field, model, **filter):\n self.addValidation({'field': field, 'model': model, 'filter':\n filter}, self._check_with_nonDuplicateObjectValidator)\n return self\n\n def checkObjectExistence(self, field, model, **filter):\n self.addValidation({'field': field, 'model': model, 'filter':\n filter}, self._check_with_ObjectExistenceValidator)\n return self\n\n def checkUserAuthentication(self, field, user):\n self.addValidation({'field': field, 'user': user}, self.\n _check_with_authenticationValidator)\n return self\n\n\nclass FieldValidator:\n\n def __init__(self, validationData={}, *args, **kwargs):\n self.data = validationData\n self.validationPipeline = []\n self.statusCode = 200\n self.errors = {}\n self.invalidFields = []\n\n def flush(self):\n self = FieldValidator()\n\n def setError(self, field, error):\n if field not in self.invalidFields:\n fieldErrors = self.errors.get(field, [])\n if error[0] not in fieldErrors:\n self.errors[field] = fieldErrors + [error[0]]\n self.statusCode = error[1]\n self.invalidFields.append(field)\n\n def getErrors(self):\n return self.errors\n\n def validate(self):\n for validation in self.validationPipeline:\n try:\n validation['validator'](validation['data'])\n except:\n self.setError(validation['data']['field'], enum.Error.\n INVALID_FIELD_DATA.value)\n return self\n\n def addValidation(self, data, validatorFunction):\n if data['value'] == 'unAssigned' and data['field'] in self.data.keys():\n data['value'] = self.data[data['field']]\n elif data['value'] == 'unAssigned' and data['field'\n ] not in self.data.keys():\n data['value'] = None\n self.validationPipeline.append({'data': data, 'validator':\n validatorFunction})\n\n def _check_with_typeValidator(self, data):\n if not isinstance(data['value'], data['type']):\n self.setError(data['field'], enum.Error.INVALID_FIELD_DATA.value)\n\n def _check_with_nationalLegalCodeValidator(self, data):\n nationalLegalCode = data['value']\n result = 0\n validationList = [29, 27, 23, 19, 17, 29, 27, 23, 19, 17]\n if len(nationalLegalCode) != 11:\n self.setError(data['field'], enum.Error.\n INVALID_NATIONAL_LEGAL_CODE.value)\n return\n for i in range(10):\n result += (int(nationalLegalCode[-2]) + 2 + int(\n 
nationalLegalCode[i])) * validationList[i]\n if result % 11 == 10:\n reminder = 0\n else:\n reminder = result % 11\n if reminder == int(nationalLegalCode[-1]):\n valid = True\n else:\n valid = False\n if valid is False:\n self.setError(data['field'], enum.Error.\n INVALID_NATIONAL_LEGAL_CODE.value)\n\n def _check_with_nationalCodeValidator(self, data):\n nCode = data['value']\n valid = True\n if len(nCode) != 10:\n valid = False\n self.setError(data['field'], enum.Error.INVALID_FIELD_DATA.value)\n return\n sum = 0\n for i in range(9):\n sum += int(nCode[i]) * (10 - i)\n r = sum % 11\n if r < 2 and r == int(nCode[9]) or r >= 2 and r == 11 - int(nCode[9]):\n valid = valid and True\n if valid is False:\n self.setError(data['field'], enum.Error.INVALID_FIELD_DATA.value)\n\n def _check_with_officer1NationalCodeValidator(self, data):\n nCode = data['value']\n valid = True\n if len(nCode) != 10:\n self.setError(data['field'], enum.Error.INVALID_FIELD_DATA.value)\n return\n sum = 0\n for i in range(9):\n sum += int(nCode[i]) * (10 - i)\n r = sum % 11\n if r < 2 and r == int(nCode[9]) or r >= 2 and r == 11 - int(nCode[9]):\n valid = valid and True\n if valid is False:\n self.setError(data['field'], enum.Error.INVALID_FIELD_DATA.value)\n\n def _check_with_officer2NationalCodeValidator(self, data):\n nCode = data['value']\n valid = True\n if len(nCode) != 10:\n valid = False\n self.setError(data['field'], enum.Error.INVALID_FIELD_DATA.value)\n return\n sum = 0\n for i in range(9):\n sum += int(nCode[i]) * (10 - i)\n r = sum % 11\n if r < 2 and r == int(nCode[9]) or r >= 2 and r == 11 - int(nCode[9]):\n valid = valid and True\n if valid is False:\n self.setError(data['field'], enum.Error.INVALID_FIELD_DATA.value)\n\n def _check_with_featuresValidator(self, data):\n for i in data['value']:\n if i not in ['پلتفرم پرداخت در محل', 'باشگاه مشتریان',\n 'درگاه پرداخت اینترنتی']:\n self.setError(data['field'], enum.Error.INVALID_FIELD_DATA.\n value)\n break\n\n def _check_with_userNameValidator(self, data):\n username = re.match('^[A-Za-z]+(?:[ _-][A-Za-z0-9]+)*$', data['value'])\n if 'admin' in data['value'] or 'zibal' in data['value'\n ] or username is None:\n self.setError(data['field'], enum.Error.INVALID_USERNAME.value)\n\n def _check_with_phoneNumberValidator(self, data):\n if data['value'] is None or len(data) < 1:\n self.setError(data['field'], enum.Error.\n PHONE_INCORRECT_TEMPLATE.value)\n\n def _check_with_mobileValidator(self, data):\n mobileNumber = data['value']\n if mobileNumber is None:\n self.setError(data['field'], enum.Error.EMPTY_INPUT_FIELD.value)\n return\n match_object = re.match('(^09[0-9]{9}$)', mobileNumber)\n if match_object is None or mobileNumber is None:\n self.setError(data['field'], enum.Error.INVALID_FIELD_DATA.value)\n\n def _check_with_emailValidator(self, data):\n email = data['value']\n if email is None:\n self.setError(data['field'], enum.Error.EMPTY_INPUT_FIELD.value)\n return\n match_object = re.match(\n '(^[a-zA-Z0-9_.+-]+@[a-zA-Z0-9-]+\\\\.[a-zA-Z0-9-.]+$)', email)\n if match_object is None or email is None:\n self.setError(data['field'], enum.Error.INVALID_FIELD_DATA.value)\n\n def _check_with_noneValidator(self, data):\n if data['value'] is None or data['value'] == '':\n self.setError(data['field'], enum.Error.EMPTY_INPUT_FIELD.value)\n\n def _check_with_fileValidator(self, data):\n file = data['value']\n field = data['field']\n if file is None:\n self.setError(field, enum.Error.EMPTY_INPUT_FIELD.value)\n return\n elif file.size > 
enum.Limits.FILE_SIZE_LIMIT.value:\n self.setError(field, enum.Error.FILE_SIZE_EXCEED.value)\n types = data['options'].get('types', None)\n valid = False\n if types is not None:\n for type in types:\n valid = valid or type in file.content_type\n if valid is False:\n self.setError(field, enum.Error.REQUEST_TYPE_ERROR.value)\n\n def _check_with_IBANValidator(self, data):\n iban = data['value']\n if len(iban) != 26 or not iban.startswith('IR'):\n self.setError(data['field'], enum.Error.IBAN_ERROR.value)\n return\n code = iban[4:] + iban[:4]\n code = code.replace('I', '18').replace('R', '27')\n if int(code) % 97 != 1:\n self.setError(data['field'], enum.Error.IBAN_ERROR.value)\n\n def _check_with_subMerchantBankAccountValidator(self, data):\n if not SubMerchant.objects.filter(idsql=data['value']['userId'], ID\n =data['value']['subId'], status=1).exists():\n self.setError(data['field'], enum.Error.\n IMPOSSIBLE_BANK_ACCOUNT_DESTINATION.value)\n\n def _check_with_minDataLengthValidator(self, data):\n if data['value'] is None or len(data['value']) < data['length']:\n self.setError(data['field'], (enum.Error.\n MESSAGE_INSUFFICIENT_LENGTH.value[0].format(data['length']),\n enum.Error.MESSAGE_INSUFFICIENT_LENGTH.value[1]))\n\n def _check_with_maxDataLengthValidator(self, data):\n if data['value'] is None or len(data['value']) > data['length']:\n self.setError(data['field'], (enum.Error.\n MESSAGE_INSUFFICIENT_LENGTH.value[0].format(data['length']),\n enum.Error.MESSAGE_INSUFFICIENT_LENGTH.value[1]))\n\n def _check_with_equalDataLengthValidator(self, data):\n if data['value'] is None or len(data['value']) != data['length']:\n self.setError(data['field'], (enum.Error.\n MESSAGE_INSUFFICIENT_LENGTH.value[0].format(data['length']),\n enum.Error.MESSAGE_INSUFFICIENT_LENGTH.value[1]))\n\n def _check_with_inputValidator(self, data):\n if data['value'] is None or len(data['value']) < 1:\n self.setError(data['field'], enum.Error.EMPTY_INPUT_FIELD.value)\n\n def _check_with_IbanTransferable(self, data):\n if data['value'][4:7] == '062' and data['value'][-13:-10] == '080':\n self.setError(data['field'], enum.Error.NOT_IBAN_TRANSFERABLE.value\n )\n\n def _check_with_username(self, data):\n username = re.match('^[a-zA-Z0-9_.-]+$', data['value'])\n if username is None:\n self.setError(data['field'], enum.Error.INVALID_USERNAME.value)\n\n def checkType(self, field, type, value='unAssigned'):\n self.addValidation({'field': field, 'type': type, 'value': value},\n self._check_with_typeValidator)\n return self\n\n def checkNationalLegalCode(self, field, code='unAssigned'):\n self.addValidation({'field': field, 'value': code}, self.\n _check_with_nationalLegalCodeValidator)\n return self\n\n def checkOfficer1NationalCode(self, field, code='unAssigned'):\n self.addValidation({'field': field, 'value': code}, self.\n _check_with_officer1NationalCodeValidator)\n return self\n\n def checkOfficer2NationalCode(self, field, code='unAssigned'):\n self.addValidation({'field': field, 'value': code}, self.\n _check_with_officer2NationalCodeValidator)\n return self\n\n def checkNationalCode(self, field, code='unAssigned'):\n self.addValidation({'field': field, 'value': code}, self.\n _check_with_nationalCodeValidator)\n return self\n\n def checkFeatures(self, field, features='unAssigned'):\n self.addValidation({'field': field, 'value': features}, self.\n _check_with_featuresValidator)\n return self\n\n def checkUserName(self, field, username='unAssigned'):\n self.addValidation({'field': field, 'value': username}, self.\n 
_check_with_userNameValidator)\n return self\n\n def checkPhone(self, field, data='unAssigned'):\n self.addValidation({'field': field, 'value': data}, self.\n _check_with_phoneNumberValidator)\n return self\n\n def checkMobile(self, field, data='unAssigned'):\n self.addValidation({'field': field, 'value': data}, self.\n _check_with_mobileValidator)\n return self\n\n def checkEmail(self, field, data='unAssigned'):\n self.addValidation({'field': field, 'value': data}, self.\n _check_with_emailValidator)\n return self\n\n def checkNotNone(self, field, data='unAssigned'):\n self.addValidation({'field': field, 'value': data}, self.\n _check_with_noneValidator)\n return self\n\n def checkFile(self, field, data, **options):\n self.addValidation({'field': field, 'value': data, 'options':\n options}, self._check_with_fileValidator)\n return self\n\n def checkIBAN(self, field, data='unAssigned'):\n self.addValidation({'field': field, 'value': data}, self.\n _check_with_IBANValidator)\n return self\n\n def checkBankAccountDestinationForSubmerchant(self, field, userId, subId):\n data = {'userId': userId, 'subId': subId}\n self.addValidation({'field': field, 'value': data}, self.\n _check_with_subMerchantBankAccountValidator)\n return self\n\n def checkDataLength(self, field, length, mode='equal', data='unAssigned'):\n if mode == 'equal':\n validatorFunction = self._check_with_equalDataLengthValidator\n if mode == 'min':\n validatorFunction = self._check_with_minDataLengthValidator\n if mode == 'max':\n validatorFunction = self._check_with_minDataLengthValidator\n self.addValidation({'field': field, 'value': data, 'length': length\n }, validatorFunction)\n return self\n\n def checkInputData(self, field, data='unAssigned'):\n self.addValidation({'field': field, 'value': data}, self.\n _check_with_inputValidator)\n return self\n\n def checkTelephone(self, field, data='unAssigned'):\n self.addValidation({'field': field, 'value': data}, self.\n _check_with_phoneNumberValidator)\n return self\n\n def checkIsIbanTransferable(self, field, data='unAssigned'):\n self.addValidation({'field': field, 'value': data}, self.\n _check_with_IbanTransferable)\n return self\n\n def checkUsername(self, field, data='unAssigned'):\n self.addValidation({'field': field, 'value': data}, self.\n _check_with_username())\n\n\nclass DataValidator:\n\n def __init__(self, data={}):\n self.fieldValidator = FieldValidator(data)\n self.objectValidator = ObjectValidator()\n self.errors = {}\n self.statusCode = 200\n\n def getValidatorsErrors(self):\n self.objectValidator.validate()\n self.fieldValidator.validate()\n for key in self.fieldValidator.getErrors().keys():\n self.errors[key] = self.errors.get(key, []\n ) + self.fieldValidator.getErrors()[key]\n self.statusCode = self.fieldValidator.statusCode\n for key in self.objectValidator.getErrors().keys():\n self.errors[key] = self.errors.get(key, []\n ) + self.objectValidator.getErrors()[key]\n self.statusCode = (self.objectValidator.statusCode if self.\n objectValidator.statusCode != 200 else self.statusCode)\n return self.errors\n\n def generateMessage(self):\n messages = []\n errorKeys = self.errors.keys()\n if 'email' in errorKeys:\n messages.append(' آدرس ایمیل نامعتبر است')\n if 'name' in errorKeys:\n messages.append('نام را وارد کنید')\n if 'username' in errorKeys:\n messages.append('نام کاربری را وارد کنید')\n if 'password' in errorKeys:\n messages.append('رمز عبور را وارد کنید')\n if 'mobile' in errorKeys:\n messages.append('تلفن همراه خود را وارد کنید.')\n if 'phone' in 
errorKeys:\n messages.append(\n 'تلفن ثابت را به فرمت 02122407556 و 11 رقمی وارد کنید')\n if 'iban' in errorKeys or 'IBAN' in errorKeys:\n messages.append(\n 'شماره شبای وارد شده معتبر نیست. 26 کاراکتر و شروع با IR و بدون خط تیره (-) و فاصله'\n )\n if 'user' in errorKeys:\n messages.append('لطفا وارد شوید')\n return messages\n",
"step-4": "<mask token>\n\n\nclass ObjectValidator:\n\n def __init__(self, validationData={}, *args, **kwargs):\n self.data = validationData\n self.statusCode = 200\n self.validationPipeline = []\n self.errors = {}\n self.invalidFields = []\n\n def flush(self):\n self = ObjectValidator()\n return self\n\n def setError(self, field, error):\n if field not in self.invalidFields:\n fieldErrors = self.errors.get(field, [])\n if error[0] not in fieldErrors:\n self.errors[field] = fieldErrors + [error[0]]\n self.statusCode = error[1]\n self.invalidFields.append(field)\n\n def getErrors(self):\n return self.errors\n\n def validate(self):\n for validation in self.validationPipeline:\n try:\n validation['validator'](validation['data'])\n except:\n self.setError(validation['data']['field'], enum.Error.\n INVALID_FIELD_DATA.value)\n\n def addValidation(self, data, validatorFunction):\n self.validationPipeline.append({'data': data, 'validator':\n validatorFunction})\n\n def _check_with_authenticationValidator(self, data):\n if not data['user'].is_authenticated:\n self.setError(data['field'], enum.Error.UNAUTHORIZED.value)\n\n def _check_with_nonDuplicateObjectValidator(self, data):\n model = data['model']\n if model.objects.filter(**data['filter']):\n self.setError(data['field'], enum.Error.DUPLICATE_FIELDS.value)\n\n def _check_with_ObjectExistenceValidator(self, data):\n model = data['model']\n if not model.objects.filter(**data['filter']):\n self.setError(data['field'], enum.Error.\n GENERIC_OBJECT_NOT_FOUND.value)\n\n def checkNonDuplicateObject(self, field, model, **filter):\n self.addValidation({'field': field, 'model': model, 'filter':\n filter}, self._check_with_nonDuplicateObjectValidator)\n return self\n\n def checkObjectExistence(self, field, model, **filter):\n self.addValidation({'field': field, 'model': model, 'filter':\n filter}, self._check_with_ObjectExistenceValidator)\n return self\n\n def checkUserAuthentication(self, field, user):\n self.addValidation({'field': field, 'user': user}, self.\n _check_with_authenticationValidator)\n return self\n\n\nclass FieldValidator:\n\n def __init__(self, validationData={}, *args, **kwargs):\n self.data = validationData\n self.validationPipeline = []\n self.statusCode = 200\n self.errors = {}\n self.invalidFields = []\n\n def flush(self):\n self = FieldValidator()\n\n def setError(self, field, error):\n if field not in self.invalidFields:\n fieldErrors = self.errors.get(field, [])\n if error[0] not in fieldErrors:\n self.errors[field] = fieldErrors + [error[0]]\n self.statusCode = error[1]\n self.invalidFields.append(field)\n\n def getErrors(self):\n return self.errors\n\n def validate(self):\n for validation in self.validationPipeline:\n try:\n validation['validator'](validation['data'])\n except:\n self.setError(validation['data']['field'], enum.Error.\n INVALID_FIELD_DATA.value)\n return self\n\n def addValidation(self, data, validatorFunction):\n if data['value'] == 'unAssigned' and data['field'] in self.data.keys():\n data['value'] = self.data[data['field']]\n elif data['value'] == 'unAssigned' and data['field'\n ] not in self.data.keys():\n data['value'] = None\n self.validationPipeline.append({'data': data, 'validator':\n validatorFunction})\n\n def _check_with_typeValidator(self, data):\n if not isinstance(data['value'], data['type']):\n self.setError(data['field'], enum.Error.INVALID_FIELD_DATA.value)\n\n def _check_with_nationalLegalCodeValidator(self, data):\n nationalLegalCode = data['value']\n result = 0\n validationList = [29, 27, 
23, 19, 17, 29, 27, 23, 19, 17]\n if len(nationalLegalCode) != 11:\n self.setError(data['field'], enum.Error.\n INVALID_NATIONAL_LEGAL_CODE.value)\n return\n for i in range(10):\n result += (int(nationalLegalCode[-2]) + 2 + int(\n nationalLegalCode[i])) * validationList[i]\n if result % 11 == 10:\n reminder = 0\n else:\n reminder = result % 11\n if reminder == int(nationalLegalCode[-1]):\n valid = True\n else:\n valid = False\n if valid is False:\n self.setError(data['field'], enum.Error.\n INVALID_NATIONAL_LEGAL_CODE.value)\n\n def _check_with_nationalCodeValidator(self, data):\n nCode = data['value']\n valid = True\n if len(nCode) != 10:\n valid = False\n self.setError(data['field'], enum.Error.INVALID_FIELD_DATA.value)\n return\n sum = 0\n for i in range(9):\n sum += int(nCode[i]) * (10 - i)\n r = sum % 11\n if r < 2 and r == int(nCode[9]) or r >= 2 and r == 11 - int(nCode[9]):\n valid = valid and True\n if valid is False:\n self.setError(data['field'], enum.Error.INVALID_FIELD_DATA.value)\n\n def _check_with_officer1NationalCodeValidator(self, data):\n nCode = data['value']\n valid = True\n if len(nCode) != 10:\n self.setError(data['field'], enum.Error.INVALID_FIELD_DATA.value)\n return\n sum = 0\n for i in range(9):\n sum += int(nCode[i]) * (10 - i)\n r = sum % 11\n if r < 2 and r == int(nCode[9]) or r >= 2 and r == 11 - int(nCode[9]):\n valid = valid and True\n if valid is False:\n self.setError(data['field'], enum.Error.INVALID_FIELD_DATA.value)\n\n def _check_with_officer2NationalCodeValidator(self, data):\n nCode = data['value']\n valid = True\n if len(nCode) != 10:\n valid = False\n self.setError(data['field'], enum.Error.INVALID_FIELD_DATA.value)\n return\n sum = 0\n for i in range(9):\n sum += int(nCode[i]) * (10 - i)\n r = sum % 11\n if r < 2 and r == int(nCode[9]) or r >= 2 and r == 11 - int(nCode[9]):\n valid = valid and True\n if valid is False:\n self.setError(data['field'], enum.Error.INVALID_FIELD_DATA.value)\n\n def _check_with_featuresValidator(self, data):\n for i in data['value']:\n if i not in ['پلتفرم پرداخت در محل', 'باشگاه مشتریان',\n 'درگاه پرداخت اینترنتی']:\n self.setError(data['field'], enum.Error.INVALID_FIELD_DATA.\n value)\n break\n\n def _check_with_userNameValidator(self, data):\n username = re.match('^[A-Za-z]+(?:[ _-][A-Za-z0-9]+)*$', data['value'])\n if 'admin' in data['value'] or 'zibal' in data['value'\n ] or username is None:\n self.setError(data['field'], enum.Error.INVALID_USERNAME.value)\n\n def _check_with_phoneNumberValidator(self, data):\n if data['value'] is None or len(data) < 1:\n self.setError(data['field'], enum.Error.\n PHONE_INCORRECT_TEMPLATE.value)\n\n def _check_with_mobileValidator(self, data):\n mobileNumber = data['value']\n if mobileNumber is None:\n self.setError(data['field'], enum.Error.EMPTY_INPUT_FIELD.value)\n return\n match_object = re.match('(^09[0-9]{9}$)', mobileNumber)\n if match_object is None or mobileNumber is None:\n self.setError(data['field'], enum.Error.INVALID_FIELD_DATA.value)\n\n def _check_with_emailValidator(self, data):\n email = data['value']\n if email is None:\n self.setError(data['field'], enum.Error.EMPTY_INPUT_FIELD.value)\n return\n match_object = re.match(\n '(^[a-zA-Z0-9_.+-]+@[a-zA-Z0-9-]+\\\\.[a-zA-Z0-9-.]+$)', email)\n if match_object is None or email is None:\n self.setError(data['field'], enum.Error.INVALID_FIELD_DATA.value)\n\n def _check_with_noneValidator(self, data):\n if data['value'] is None or data['value'] == '':\n self.setError(data['field'], enum.Error.EMPTY_INPUT_FIELD.value)\n\n 
def _check_with_fileValidator(self, data):\n file = data['value']\n field = data['field']\n if file is None:\n self.setError(field, enum.Error.EMPTY_INPUT_FIELD.value)\n return\n elif file.size > enum.Limits.FILE_SIZE_LIMIT.value:\n self.setError(field, enum.Error.FILE_SIZE_EXCEED.value)\n types = data['options'].get('types', None)\n valid = False\n if types is not None:\n for type in types:\n valid = valid or type in file.content_type\n if valid is False:\n self.setError(field, enum.Error.REQUEST_TYPE_ERROR.value)\n\n def _check_with_IBANValidator(self, data):\n iban = data['value']\n if len(iban) != 26 or not iban.startswith('IR'):\n self.setError(data['field'], enum.Error.IBAN_ERROR.value)\n return\n code = iban[4:] + iban[:4]\n code = code.replace('I', '18').replace('R', '27')\n if int(code) % 97 != 1:\n self.setError(data['field'], enum.Error.IBAN_ERROR.value)\n\n def _check_with_subMerchantBankAccountValidator(self, data):\n if not SubMerchant.objects.filter(idsql=data['value']['userId'], ID\n =data['value']['subId'], status=1).exists():\n self.setError(data['field'], enum.Error.\n IMPOSSIBLE_BANK_ACCOUNT_DESTINATION.value)\n\n def _check_with_minDataLengthValidator(self, data):\n if data['value'] is None or len(data['value']) < data['length']:\n self.setError(data['field'], (enum.Error.\n MESSAGE_INSUFFICIENT_LENGTH.value[0].format(data['length']),\n enum.Error.MESSAGE_INSUFFICIENT_LENGTH.value[1]))\n\n def _check_with_maxDataLengthValidator(self, data):\n if data['value'] is None or len(data['value']) > data['length']:\n self.setError(data['field'], (enum.Error.\n MESSAGE_INSUFFICIENT_LENGTH.value[0].format(data['length']),\n enum.Error.MESSAGE_INSUFFICIENT_LENGTH.value[1]))\n\n def _check_with_equalDataLengthValidator(self, data):\n if data['value'] is None or len(data['value']) != data['length']:\n self.setError(data['field'], (enum.Error.\n MESSAGE_INSUFFICIENT_LENGTH.value[0].format(data['length']),\n enum.Error.MESSAGE_INSUFFICIENT_LENGTH.value[1]))\n\n def _check_with_inputValidator(self, data):\n if data['value'] is None or len(data['value']) < 1:\n self.setError(data['field'], enum.Error.EMPTY_INPUT_FIELD.value)\n\n def _check_with_IbanTransferable(self, data):\n if data['value'][4:7] == '062' and data['value'][-13:-10] == '080':\n self.setError(data['field'], enum.Error.NOT_IBAN_TRANSFERABLE.value\n )\n\n def _check_with_username(self, data):\n username = re.match('^[a-zA-Z0-9_.-]+$', data['value'])\n if username is None:\n self.setError(data['field'], enum.Error.INVALID_USERNAME.value)\n\n def checkType(self, field, type, value='unAssigned'):\n self.addValidation({'field': field, 'type': type, 'value': value},\n self._check_with_typeValidator)\n return self\n\n def checkNationalLegalCode(self, field, code='unAssigned'):\n self.addValidation({'field': field, 'value': code}, self.\n _check_with_nationalLegalCodeValidator)\n return self\n\n def checkOfficer1NationalCode(self, field, code='unAssigned'):\n self.addValidation({'field': field, 'value': code}, self.\n _check_with_officer1NationalCodeValidator)\n return self\n\n def checkOfficer2NationalCode(self, field, code='unAssigned'):\n self.addValidation({'field': field, 'value': code}, self.\n _check_with_officer2NationalCodeValidator)\n return self\n\n def checkNationalCode(self, field, code='unAssigned'):\n self.addValidation({'field': field, 'value': code}, self.\n _check_with_nationalCodeValidator)\n return self\n\n def checkFeatures(self, field, features='unAssigned'):\n self.addValidation({'field': field, 'value': 
features}, self.\n _check_with_featuresValidator)\n return self\n\n def checkUserName(self, field, username='unAssigned'):\n self.addValidation({'field': field, 'value': username}, self.\n _check_with_userNameValidator)\n return self\n\n def checkPhone(self, field, data='unAssigned'):\n self.addValidation({'field': field, 'value': data}, self.\n _check_with_phoneNumberValidator)\n return self\n\n def checkMobile(self, field, data='unAssigned'):\n self.addValidation({'field': field, 'value': data}, self.\n _check_with_mobileValidator)\n return self\n\n def checkEmail(self, field, data='unAssigned'):\n self.addValidation({'field': field, 'value': data}, self.\n _check_with_emailValidator)\n return self\n\n def checkNotNone(self, field, data='unAssigned'):\n self.addValidation({'field': field, 'value': data}, self.\n _check_with_noneValidator)\n return self\n\n def checkFile(self, field, data, **options):\n self.addValidation({'field': field, 'value': data, 'options':\n options}, self._check_with_fileValidator)\n return self\n\n def checkIBAN(self, field, data='unAssigned'):\n self.addValidation({'field': field, 'value': data}, self.\n _check_with_IBANValidator)\n return self\n\n def checkBankAccountDestinationForSubmerchant(self, field, userId, subId):\n data = {'userId': userId, 'subId': subId}\n self.addValidation({'field': field, 'value': data}, self.\n _check_with_subMerchantBankAccountValidator)\n return self\n\n def checkDataLength(self, field, length, mode='equal', data='unAssigned'):\n if mode == 'equal':\n validatorFunction = self._check_with_equalDataLengthValidator\n if mode == 'min':\n validatorFunction = self._check_with_minDataLengthValidator\n if mode == 'max':\n validatorFunction = self._check_with_minDataLengthValidator\n self.addValidation({'field': field, 'value': data, 'length': length\n }, validatorFunction)\n return self\n\n def checkInputData(self, field, data='unAssigned'):\n self.addValidation({'field': field, 'value': data}, self.\n _check_with_inputValidator)\n return self\n\n def checkTelephone(self, field, data='unAssigned'):\n self.addValidation({'field': field, 'value': data}, self.\n _check_with_phoneNumberValidator)\n return self\n\n def checkIsIbanTransferable(self, field, data='unAssigned'):\n self.addValidation({'field': field, 'value': data}, self.\n _check_with_IbanTransferable)\n return self\n\n def checkUsername(self, field, data='unAssigned'):\n self.addValidation({'field': field, 'value': data}, self.\n _check_with_username())\n\n\nclass DataValidator:\n\n def __init__(self, data={}):\n self.fieldValidator = FieldValidator(data)\n self.objectValidator = ObjectValidator()\n self.errors = {}\n self.statusCode = 200\n\n def getValidatorsErrors(self):\n self.objectValidator.validate()\n self.fieldValidator.validate()\n for key in self.fieldValidator.getErrors().keys():\n self.errors[key] = self.errors.get(key, []\n ) + self.fieldValidator.getErrors()[key]\n self.statusCode = self.fieldValidator.statusCode\n for key in self.objectValidator.getErrors().keys():\n self.errors[key] = self.errors.get(key, []\n ) + self.objectValidator.getErrors()[key]\n self.statusCode = (self.objectValidator.statusCode if self.\n objectValidator.statusCode != 200 else self.statusCode)\n return self.errors\n\n def generateMessage(self):\n messages = []\n errorKeys = self.errors.keys()\n if 'email' in errorKeys:\n messages.append(' آدرس ایمیل نامعتبر است')\n if 'name' in errorKeys:\n messages.append('نام را وارد کنید')\n if 'username' in errorKeys:\n messages.append('نام کاربری 
را وارد کنید')\n if 'password' in errorKeys:\n messages.append('رمز عبور را وارد کنید')\n if 'mobile' in errorKeys:\n messages.append('تلفن همراه خود را وارد کنید.')\n if 'phone' in errorKeys:\n messages.append(\n 'تلفن ثابت را به فرمت 02122407556 و 11 رقمی وارد کنید')\n if 'iban' in errorKeys or 'IBAN' in errorKeys:\n messages.append(\n 'شماره شبای وارد شده معتبر نیست. 26 کاراکتر و شروع با IR و بدون خط تیره (-) و فاصله'\n )\n if 'user' in errorKeys:\n messages.append('لطفا وارد شوید')\n return messages\n",
"step-5": "import API.enum as enum\nimport re\n\nclass ObjectValidator():\n\n def __init__(self, validationData={}, *args, **kwargs):\n self.data = validationData\n self.statusCode = 200\n self.validationPipeline = []\n self.errors = {}\n self.invalidFields = []\n\n def flush(self):\n self = ObjectValidator()\n return self\n\n def setError(self, field, error):\n if field not in self.invalidFields:\n fieldErrors = self.errors.get(field, [])\n if error[0] not in fieldErrors:\n self.errors[field] = fieldErrors + [error[0]]\n self.statusCode = error[1]\n self.invalidFields.append(field)\n def getErrors(self):\n return self.errors\n\n def validate(self):\n for validation in self.validationPipeline:\n try:\n validation['validator'](validation['data'])\n except:\n self.setError(validation['data']['field'], enum.Error.INVALID_FIELD_DATA.value)\n\n def addValidation(self, data, validatorFunction):\n self.validationPipeline.append({\n 'data': data,\n 'validator': validatorFunction\n })\n\n def _check_with_authenticationValidator(self, data):\n if not data['user'].is_authenticated:\n self.setError(data['field'], enum.Error.UNAUTHORIZED.value)\n\n def _check_with_nonDuplicateObjectValidator(self, data):\n model = data['model']\n if model.objects.filter(**data['filter']):\n self.setError(data['field'], enum.Error.DUPLICATE_FIELDS.value)\n\n def _check_with_ObjectExistenceValidator(self, data):\n model = data['model']\n if not model.objects.filter(**data['filter']):\n self.setError(data['field'], enum.Error.GENERIC_OBJECT_NOT_FOUND.value)\n\n def checkNonDuplicateObject(self, field, model, **filter):\n self.addValidation({'field': field, 'model': model, 'filter': filter},\n self._check_with_nonDuplicateObjectValidator)\n return self\n\n def checkObjectExistence(self, field, model, **filter):\n self.addValidation({'field': field, 'model': model, 'filter': filter},\n self._check_with_ObjectExistenceValidator)\n return self\n\n def checkUserAuthentication(self, field, user):\n self.addValidation({'field': field, 'user': user},\n self._check_with_authenticationValidator)\n return self\n\n\n#\\b(?!(\\d)\\1{3})[13-9]{4}[1346-9][013-9]{5}\\b\n# postal code validation\n\n\nclass FieldValidator():\n\n def __init__(self, validationData={}, *args, **kwargs):\n self.data = validationData\n self.validationPipeline = []\n self.statusCode = 200\n self.errors = {}\n self.invalidFields = []\n\n def flush(self):\n self = FieldValidator()\n\n def setError(self, field, error):\n if field not in self.invalidFields:\n fieldErrors = self.errors.get(field, [])\n if error[0] not in fieldErrors:\n self.errors[field] = fieldErrors + [error[0]]\n self.statusCode = error[1]\n self.invalidFields.append(field)\n\n def getErrors(self):\n return self.errors\n\n def validate(self):\n for validation in self.validationPipeline:\n try:\n validation['validator'](validation['data'])\n except:\n self.setError(validation['data']['field'], enum.Error.INVALID_FIELD_DATA.value)\n return self\n def addValidation(self, data, validatorFunction):\n if (data['value'] == 'unAssigned') and data['field'] in self.data.keys():\n data['value'] = self.data[data['field']]\n elif data['value'] == 'unAssigned' and data['field'] not in self.data.keys():\n data['value'] = None\n self.validationPipeline.append({\n 'data': data,\n 'validator': validatorFunction\n })\n\n def _check_with_typeValidator(self, data):\n if not isinstance(data['value'], data['type']):\n self.setError(data['field'], enum.Error.INVALID_FIELD_DATA.value)\n\n def 
_check_with_nationalLegalCodeValidator(self, data):\n nationalLegalCode = data['value']\n result = 0\n validationList = [29, 27, 23, 19, 17, 29, 27, 23, 19, 17]\n if len(nationalLegalCode) != 11:\n self.setError(data['field'], enum.Error.INVALID_NATIONAL_LEGAL_CODE.value)\n return\n for i in range(10):\n result += (int(nationalLegalCode[-2]) + 2 + int(nationalLegalCode[i])) * validationList[i]\n if result % 11 == 10:\n reminder = 0\n else:\n reminder = result % 11\n if reminder == int(nationalLegalCode[-1]):\n valid = True\n else:\n valid = False\n if valid is False:\n self.setError(data['field'], enum.Error.INVALID_NATIONAL_LEGAL_CODE.value)\n\n def _check_with_nationalCodeValidator(self, data):\n nCode = data['value']\n valid = True\n if len(nCode) != 10:\n valid = False\n self.setError(data['field'], enum.Error.INVALID_FIELD_DATA.value)\n return\n sum = 0\n for i in range(9):\n sum += int(nCode[i]) * (10 - i)\n r = sum % 11\n if (r < 2 and r == int(nCode[9])) or r >= 2 and r == 11 - int(nCode[9]):\n valid = valid and True\n if valid is False:\n self.setError(data['field'], enum.Error.INVALID_FIELD_DATA.value)\n def _check_with_officer1NationalCodeValidator(self, data):\n nCode = data['value']\n valid = True\n if len(nCode) != 10:\n self.setError(data['field'], enum.Error.INVALID_FIELD_DATA.value)\n return\n sum = 0\n for i in range(9):\n sum += int(nCode[i]) * (10 - i)\n r = sum % 11\n if (r < 2 and r == int(nCode[9])) or r >= 2 and r == 11 - int(nCode[9]):\n valid = valid and True\n if valid is False:\n self.setError(data['field'], enum.Error.INVALID_FIELD_DATA.value)\n def _check_with_officer2NationalCodeValidator(self, data):\n nCode = data['value']\n valid = True\n if len(nCode) != 10:\n valid = False\n self.setError(data['field'], enum.Error.INVALID_FIELD_DATA.value)\n return\n sum = 0\n for i in range(9):\n sum += int(nCode[i]) * (10 - i)\n r = sum % 11\n if (r < 2 and r == int(nCode[9])) or r >= 2 and r == 11 - int(nCode[9]):\n valid = valid and True\n if valid is False:\n self.setError(data['field'], enum.Error.INVALID_FIELD_DATA.value)\n\n def _check_with_featuresValidator(self, data):\n for i in data['value']:\n if i not in [\"پلتفرم پرداخت در محل\", \"باشگاه مشتریان\", \"درگاه پرداخت اینترنتی\"]:\n self.setError(data['field'], enum.Error.INVALID_FIELD_DATA.value)\n break\n\n def _check_with_userNameValidator(self, data):\n username = re.match(r\"^[A-Za-z]+(?:[ _-][A-Za-z0-9]+)*$\", data[\"value\"])\n if 'admin' in data['value'] or 'zibal' in data['value'] or username is None:\n self.setError(data['field'], enum.Error.INVALID_USERNAME.value)\n\n def _check_with_phoneNumberValidator(self, data):\n if data['value'] is None or len(data) < 1:\n self.setError(data['field'], enum.Error.PHONE_INCORRECT_TEMPLATE.value)\n\n def _check_with_mobileValidator(self, data):\n mobileNumber = data['value']\n if mobileNumber is None:\n self.setError(data['field'], enum.Error.EMPTY_INPUT_FIELD.value)\n return\n match_object = re.match(r\"(^09[0-9]{9}$)\", mobileNumber)\n if match_object is None or mobileNumber is None:\n self.setError(data['field'], enum.Error.INVALID_FIELD_DATA.value)\n\n def _check_with_emailValidator(self, data):\n email = data['value']\n if email is None:\n self.setError(data['field'], enum.Error.EMPTY_INPUT_FIELD.value)\n return\n match_object = re.match(r\"(^[a-zA-Z0-9_.+-]+@[a-zA-Z0-9-]+\\.[a-zA-Z0-9-.]+$)\", email)\n if match_object is None or email is None:\n self.setError(data['field'], enum.Error.INVALID_FIELD_DATA.value)\n\n def _check_with_noneValidator(self, 
data):\n if data['value'] is None or data['value'] == \"\":\n self.setError(data['field'], enum.Error.EMPTY_INPUT_FIELD.value)\n\n def _check_with_fileValidator(self, data):\n\n file = data['value']\n field = data['field']\n if file is None:\n self.setError(field, enum.Error.EMPTY_INPUT_FIELD.value)\n return\n elif file.size > enum.Limits.FILE_SIZE_LIMIT.value:\n self.setError(field, enum.Error.FILE_SIZE_EXCEED.value)\n types = data['options'].get('types', None)\n valid = False\n if types is not None:\n for type in types:\n valid = valid or type in file.content_type\n if valid is False:\n self.setError(field, enum.Error.REQUEST_TYPE_ERROR.value)\n\n def _check_with_IBANValidator(self, data):\n iban = data['value']\n if len(iban)!=26 or not iban.startswith(\"IR\"):\n self.setError(data['field'], enum.Error.IBAN_ERROR.value)\n return\n code = iban[4:]+iban[:4]\n code = code.replace('I','18').replace('R','27')\n if int(code)%97!=1:\n self.setError(data['field'], enum.Error.IBAN_ERROR.value)\n\n def _check_with_subMerchantBankAccountValidator(self, data):\n if not SubMerchant.objects.filter(idsql=data['value']['userId'], ID=data['value']['subId'], status=1).exists():\n self.setError(data['field'], enum.Error.IMPOSSIBLE_BANK_ACCOUNT_DESTINATION.value)\n\n def _check_with_minDataLengthValidator(self, data):\n if data['value'] is None or len(data['value']) < data['length']:\n self.setError(data['field'], (enum.Error.MESSAGE_INSUFFICIENT_LENGTH.value[0].format(data['length']),\n enum.Error.MESSAGE_INSUFFICIENT_LENGTH.value[1]))\n\n def _check_with_maxDataLengthValidator(self, data):\n if data['value'] is None or len(data['value']) > data['length']:\n self.setError(data['field'], (enum.Error.MESSAGE_INSUFFICIENT_LENGTH.value[0].format(data['length']),\n enum.Error.MESSAGE_INSUFFICIENT_LENGTH.value[1]))\n\n def _check_with_equalDataLengthValidator(self, data):\n if data['value'] is None or len(data['value']) != data['length']:\n self.setError(data['field'], (enum.Error.MESSAGE_INSUFFICIENT_LENGTH.value[0].format(data['length']),\n enum.Error.MESSAGE_INSUFFICIENT_LENGTH.value[1]))\n\n def _check_with_inputValidator(self, data):\n if data['value'] is None or len(data['value']) < 1:\n self.setError(data['field'], enum.Error.EMPTY_INPUT_FIELD.value)\n\n def _check_with_IbanTransferable(self, data):\n if data['value'][4:7]=='062' and data['value'][-13:-10]=='080':\n self.setError(data['field'], enum.Error.NOT_IBAN_TRANSFERABLE.value)\n\n def _check_with_username(self, data):\n username = re.match(r\"^[a-zA-Z0-9_.-]+$\", data[\"value\"])\n if username is None:\n self.setError(data['field'], enum.Error.INVALID_USERNAME.value)\n\n #############################################################################\n\n def checkType(self, field, type, value=\"unAssigned\"):\n self.addValidation({'field': field, 'type': type, 'value': value}, self._check_with_typeValidator)\n return self\n\n def checkNationalLegalCode(self, field, code=\"unAssigned\"):\n self.addValidation({'field': field, 'value': code}, self._check_with_nationalLegalCodeValidator)\n return self\n\n def checkOfficer1NationalCode(self, field, code=\"unAssigned\"):\n self.addValidation({'field': field, 'value': code}, self._check_with_officer1NationalCodeValidator)\n return self\n\n def checkOfficer2NationalCode(self, field, code=\"unAssigned\"):\n self.addValidation({'field': field, 'value': code}, self._check_with_officer2NationalCodeValidator)\n return self\n\n def checkNationalCode(self, field, code=\"unAssigned\"):\n self.addValidation({'field': 
field, 'value': code}, self._check_with_nationalCodeValidator)\n return self\n\n def checkFeatures(self, field, features=\"unAssigned\"):\n self.addValidation({'field': field, 'value': features}, self._check_with_featuresValidator)\n return self\n\n def checkUserName(self, field, username=\"unAssigned\"):\n self.addValidation({'field': field, 'value': username}, self._check_with_userNameValidator)\n return self\n\n def checkPhone(self, field, data=\"unAssigned\"):\n self.addValidation({'field': field, 'value': data}, self._check_with_phoneNumberValidator)\n return self\n\n def checkMobile(self, field, data=\"unAssigned\"):\n self.addValidation({'field': field, 'value': data}, self._check_with_mobileValidator)\n return self\n\n def checkEmail(self, field, data=\"unAssigned\"):\n self.addValidation({'field': field, 'value': data}, self._check_with_emailValidator)\n return self\n\n def checkNotNone(self, field, data=\"unAssigned\"):\n self.addValidation({'field': field, 'value': data}, self._check_with_noneValidator)\n return self\n\n def checkFile(self, field, data, **options):\n self.addValidation({'field': field, 'value': data, 'options': options}, self._check_with_fileValidator)\n return self\n\n def checkIBAN(self, field, data=\"unAssigned\"):\n self.addValidation({'field': field, 'value': data}, self._check_with_IBANValidator)\n return self\n\n def checkBankAccountDestinationForSubmerchant(self, field, userId, subId):\n data = {\n 'userId': userId,\n 'subId': subId\n }\n self.addValidation({'field': field, 'value': data}, self._check_with_subMerchantBankAccountValidator)\n return self\n\n def checkDataLength(self, field, length,mode='equal', data=\"unAssigned\"):\n if mode == 'equal':\n validatorFunction = self._check_with_equalDataLengthValidator\n if mode == 'min':\n validatorFunction = self._check_with_minDataLengthValidator\n if mode == 'max':\n validatorFunction = self._check_with_minDataLengthValidator\n\n self.addValidation({'field': field, 'value': data, 'length': length}, validatorFunction)\n\n return self\n\n def checkInputData(self, field, data=\"unAssigned\"):\n self.addValidation({'field': field, 'value': data}, self._check_with_inputValidator)\n return self\n\n def checkTelephone(self, field, data=\"unAssigned\"): ##TODO\n self.addValidation({'field': field, 'value': data}, self._check_with_phoneNumberValidator)\n return self\n\n def checkIsIbanTransferable(self, field, data=\"unAssigned\"):\n self.addValidation({'field': field, 'value': data}, self._check_with_IbanTransferable)\n return self\n\n def checkUsername(self, field, data=\"unAssigned\"):\n self.addValidation({'field': field, 'value': data}, self._check_with_username())\n\n\nclass DataValidator:\n\n def __init__(self, data={}):\n self.fieldValidator = FieldValidator(data)\n self.objectValidator = ObjectValidator()\n self.errors = {}\n self.statusCode = 200\n\n def getValidatorsErrors(self):\n self.objectValidator.validate()\n self.fieldValidator.validate()\n for key in self.fieldValidator.getErrors().keys():\n self.errors[key] = self.errors.get(key, []) + self.fieldValidator.getErrors()[key]\n self.statusCode = self.fieldValidator.statusCode\n for key in self.objectValidator.getErrors().keys():\n self.errors[key] = self.errors.get(key, []) + self.objectValidator.getErrors()[key]\n self.statusCode = self.objectValidator.statusCode if self.objectValidator.statusCode != 200 else self.statusCode\n return self.errors\n\n def generateMessage(self):\n messages = []\n errorKeys = self.errors.keys()\n if 'email' in 
errorKeys:\n            messages.append(' The email address is invalid')\n\n        if \"name\" in errorKeys:\n            messages.append('Enter the name')\n\n        if 'username' in errorKeys:\n            messages.append('Enter the username')\n\n        if 'password' in errorKeys:\n            messages.append('Enter the password')\n\n        if 'mobile' in errorKeys:\n            messages.append('Enter your mobile number.')\n\n        if 'phone' in errorKeys:\n            messages.append('Enter the landline number as 11 digits in the format 02122407556')\n        if 'iban' in errorKeys or 'IBAN' in errorKeys:\n            messages.append('The entered IBAN is not valid: 26 characters, starting with IR, without a dash (-) or spaces')\n        if 'user' in errorKeys:\n            messages.append('Please log in')\n\n        return messages",
"step-ids": [
40,
58,
62,
63,
65
]
}
|
[
40,
58,
62,
63,
65
] |
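A note on the record above: its core pattern is a fluent validation pipeline. Each check*() method queues a {data, validator} pair and returns self so calls can be chained, and validate() drains the queue, mapping any exception to INVALID_FIELD_DATA. A minimal standalone sketch of that pattern (the Pipeline, checkNotEmpty and checkMinLength names are illustrative, not from the record):

class Pipeline(object):
    def __init__(self, data):
        self.data = data
        self.pipeline = []     # queued (field, check) pairs
        self.errors = {}

    def checkNotEmpty(self, field):
        self.pipeline.append((field, lambda v: bool(v)))
        return self            # returning self is what makes chaining work

    def checkMinLength(self, field, length):
        self.pipeline.append((field, lambda v: v is not None and len(v) >= length))
        return self

    def validate(self):
        for field, check in self.pipeline:
            try:
                ok = check(self.data.get(field))
            except Exception:
                ok = False     # a crashing check counts as invalid data
            if not ok:
                self.errors.setdefault(field, []).append('invalid')
        return self.errors

print(Pipeline({'mobile': ''}).checkNotEmpty('mobile').checkMinLength('mobile', 11).validate())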
from accounts.models import User
from django.forms import ModelForm
from django import forms
from django.contrib.auth.forms import UserCreationForm
class UserRegistrationForm(UserCreationForm):
email = forms.EmailField(required=True)
password1 = forms.CharField(
widget=forms.PasswordInput,
# help_text=password_validation.password_validators_help_text_html(),
)
class Meta:
model = User
fields = ("first_name","last_name","email", "password1", "password2")
def save(self, commit=True):
user = super(UserRegistrationForm, self).save(commit=False)
user.email = self.cleaned_data['email']
user.user_type = 2
if commit:
user.save()
return user
class UserLoginForm(forms.Form):
username=forms.CharField(label='',widget=forms.TextInput(attrs={'placeholder':'Username'}))
password=forms.CharField(label='',widget=forms.PasswordInput(attrs={'placeholder':'Password'}))
|
normal
|
{
"blob_id": "e50517910e191594034f60a021647f4415b6f1c4",
"index": 2822,
"step-1": "<mask token>\n\n\nclass UserRegistrationForm(UserCreationForm):\n <mask token>\n <mask token>\n\n\n class Meta:\n model = User\n fields = 'first_name', 'last_name', 'email', 'password1', 'password2'\n <mask token>\n\n\nclass UserLoginForm(forms.Form):\n username = forms.CharField(label='', widget=forms.TextInput(attrs={\n 'placeholder': 'Username'}))\n password = forms.CharField(label='', widget=forms.PasswordInput(attrs={\n 'placeholder': 'Password'}))\n",
"step-2": "<mask token>\n\n\nclass UserRegistrationForm(UserCreationForm):\n <mask token>\n <mask token>\n\n\n class Meta:\n model = User\n fields = 'first_name', 'last_name', 'email', 'password1', 'password2'\n\n def save(self, commit=True):\n user = super(UserRegistrationForm, self).save(commit=False)\n user.email = self.cleaned_data['email']\n user.user_type = 2\n if commit:\n user.save()\n return user\n\n\nclass UserLoginForm(forms.Form):\n username = forms.CharField(label='', widget=forms.TextInput(attrs={\n 'placeholder': 'Username'}))\n password = forms.CharField(label='', widget=forms.PasswordInput(attrs={\n 'placeholder': 'Password'}))\n",
"step-3": "<mask token>\n\n\nclass UserRegistrationForm(UserCreationForm):\n email = forms.EmailField(required=True)\n password1 = forms.CharField(widget=forms.PasswordInput)\n\n\n class Meta:\n model = User\n fields = 'first_name', 'last_name', 'email', 'password1', 'password2'\n\n def save(self, commit=True):\n user = super(UserRegistrationForm, self).save(commit=False)\n user.email = self.cleaned_data['email']\n user.user_type = 2\n if commit:\n user.save()\n return user\n\n\nclass UserLoginForm(forms.Form):\n username = forms.CharField(label='', widget=forms.TextInput(attrs={\n 'placeholder': 'Username'}))\n password = forms.CharField(label='', widget=forms.PasswordInput(attrs={\n 'placeholder': 'Password'}))\n",
"step-4": "from accounts.models import User\nfrom django.forms import ModelForm\nfrom django import forms\nfrom django.contrib.auth.forms import UserCreationForm\n\n\nclass UserRegistrationForm(UserCreationForm):\n email = forms.EmailField(required=True)\n password1 = forms.CharField(widget=forms.PasswordInput)\n\n\n class Meta:\n model = User\n fields = 'first_name', 'last_name', 'email', 'password1', 'password2'\n\n def save(self, commit=True):\n user = super(UserRegistrationForm, self).save(commit=False)\n user.email = self.cleaned_data['email']\n user.user_type = 2\n if commit:\n user.save()\n return user\n\n\nclass UserLoginForm(forms.Form):\n username = forms.CharField(label='', widget=forms.TextInput(attrs={\n 'placeholder': 'Username'}))\n password = forms.CharField(label='', widget=forms.PasswordInput(attrs={\n 'placeholder': 'Password'}))\n",
"step-5": "from accounts.models import User\nfrom django.forms import ModelForm\nfrom django import forms\nfrom django.contrib.auth.forms import UserCreationForm\n\nclass UserRegistrationForm(UserCreationForm):\n\temail = forms.EmailField(required=True)\n\tpassword1 = forms.CharField(\n\n\t\twidget=forms.PasswordInput,\n\t\t# help_text=password_validation.password_validators_help_text_html(),\n\t)\n\n\tclass Meta:\n\t\tmodel = User\n\t\tfields = (\"first_name\",\"last_name\",\"email\", \"password1\", \"password2\")\n\n\n\tdef save(self, commit=True):\n\t\tuser = super(UserRegistrationForm, self).save(commit=False)\n\t\tuser.email = self.cleaned_data['email']\n\t\tuser.user_type = 2\n\t\tif commit:\n\t\t\tuser.save()\n\t\treturn user\n\n\nclass UserLoginForm(forms.Form):\n username=forms.CharField(label='',widget=forms.TextInput(attrs={'placeholder':'Username'}))\n password=forms.CharField(label='',widget=forms.PasswordInput(attrs={'placeholder':'Password'}))\n\n",
"step-ids": [
3,
4,
5,
6,
7
]
}
|
[
3,
4,
5,
6,
7
] |
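The UserRegistrationForm record above hinges on overriding save(commit=False) to stamp the email and user_type before the user row is persisted. A sketch of driving it from a view (the module path accounts.forms, the URL name 'login' and the template 'register.html' are assumptions, not from the record):

from django.shortcuts import render, redirect
from accounts.forms import UserRegistrationForm   # assumed module path

def register(request):
    form = UserRegistrationForm(request.POST or None)
    if request.method == 'POST' and form.is_valid():
        form.save()    # runs the overridden save(), which sets user_type = 2
        return redirect('login')
    return render(request, 'register.html', {'form': form})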
import boto3
import json
from botocore.exceptions import ClientError
# upload_to_s3("abc.png", 1)
def upload_to_s3(file_name, node_number):
try:
key_info_json = open("awsinfo.json").read()
except FileNotFoundError:
        print("awsinfo.json does not exist in the directory.")
exit(-1)
data=json.loads(key_info_json)
s3 = boto3.client(
's3',
aws_access_key_id = data['accessKeyId'],
aws_secret_access_key = data['secretAccessKey']
)
with open(file_name, "rb") as f:
s3.upload_fileobj(f,"capstone12", str(node_number)+"/"+file_name,
ExtraArgs={'ACL' : 'public-read-write'}
)
print("File Upload Complete to " + str(node_number) + "/" + file_name)
|
normal
|
{
"blob_id": "2f0d611fecdb5717029938d2ec2cd2db345b8f3a",
"index": 8176,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef upload_to_s3(file_name, node_number):\n try:\n key_info_json = open('awsinfo.json').read()\n except FileNotFoundError:\n print('awsinfo.json is not exist in dir.')\n exit(-1)\n data = json.loads(key_info_json)\n s3 = boto3.client('s3', aws_access_key_id=data['accessKeyId'],\n aws_secret_access_key=data['secretAccessKey'])\n with open(file_name, 'rb') as f:\n s3.upload_fileobj(f, 'capstone12', str(node_number) + '/' +\n file_name, ExtraArgs={'ACL': 'public-read-write'})\n print('File Upload Complete to ' + str(node_number) + '/' + file_name)\n",
"step-3": "import boto3\nimport json\nfrom botocore.exceptions import ClientError\n\n\ndef upload_to_s3(file_name, node_number):\n try:\n key_info_json = open('awsinfo.json').read()\n except FileNotFoundError:\n print('awsinfo.json is not exist in dir.')\n exit(-1)\n data = json.loads(key_info_json)\n s3 = boto3.client('s3', aws_access_key_id=data['accessKeyId'],\n aws_secret_access_key=data['secretAccessKey'])\n with open(file_name, 'rb') as f:\n s3.upload_fileobj(f, 'capstone12', str(node_number) + '/' +\n file_name, ExtraArgs={'ACL': 'public-read-write'})\n print('File Upload Complete to ' + str(node_number) + '/' + file_name)\n",
"step-4": "import boto3\r\nimport json\r\nfrom botocore.exceptions import ClientError\r\n\r\n# upload_to_s3(\"abc.png\", 1)\r\ndef upload_to_s3(file_name, node_number):\r\n try:\r\n key_info_json = open(\"awsinfo.json\").read()\r\n except FileNotFoundError:\r\n print(\"awsinfo.json is not exist in dir.\")\r\n exit(-1)\r\n\r\n data=json.loads(key_info_json)\r\n\r\n s3 = boto3.client(\r\n 's3',\r\n aws_access_key_id = data['accessKeyId'],\r\n aws_secret_access_key = data['secretAccessKey']\r\n )\r\n\r\n with open(file_name, \"rb\") as f:\r\n s3.upload_fileobj(f,\"capstone12\", str(node_number)+\"/\"+file_name,\r\n ExtraArgs={'ACL' : 'public-read-write'}\r\n )\r\n print(\"File Upload Complete to \" + str(node_number) + \"/\" + file_name)",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
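The S3 record above imports botocore's ClientError but never uses it; upload failures (missing bucket, denied ACL, bad credentials) surface as that exception. A hedged sketch of wiring it in (safe_upload is an illustrative name, not part of the record):

from botocore.exceptions import ClientError

def safe_upload(s3, file_name, node_number, bucket='capstone12'):
    try:
        with open(file_name, 'rb') as f:
            s3.upload_fileobj(f, bucket, str(node_number) + '/' + file_name)
    except ClientError as e:
        # botocore wraps S3-side failures in a ClientError with a structured response
        print('Upload failed:', e.response['Error']['Code'])
        return False
    return True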
from fabric.api import local,run
INSTALL_STEPS = ['yes | sudo apt-get install libmysqlclient-dev python-dev python-mysqldb python-virtualenv',
'virtualenv --no-site-packages env',
'. env/bin/activate;pip install -r requirements.txt']
def deps_local():
for step in INSTALL_STEPS:
local(step)
def deps_remote():
for step in INSTALL_STEPS:
run(step)
|
normal
|
{
"blob_id": "d64140466e62b78506d0f200f451649023697a3b",
"index": 1386,
"step-1": "<mask token>\n\n\ndef deps_remote():\n for step in INSTALL_STEPS:\n run(step)\n",
"step-2": "<mask token>\n\n\ndef deps_local():\n for step in INSTALL_STEPS:\n local(step)\n\n\ndef deps_remote():\n for step in INSTALL_STEPS:\n run(step)\n",
"step-3": "<mask token>\nINSTALL_STEPS = [\n 'yes | sudo apt-get install libmysqlclient-dev\\t python-dev python-mysqldb python-virtualenv'\n , 'virtualenv --no-site-packages env',\n '. env/bin/activate;pip install -r requirements.txt']\n\n\ndef deps_local():\n for step in INSTALL_STEPS:\n local(step)\n\n\ndef deps_remote():\n for step in INSTALL_STEPS:\n run(step)\n",
"step-4": "from fabric.api import local, run\nINSTALL_STEPS = [\n 'yes | sudo apt-get install libmysqlclient-dev\\t python-dev python-mysqldb python-virtualenv'\n , 'virtualenv --no-site-packages env',\n '. env/bin/activate;pip install -r requirements.txt']\n\n\ndef deps_local():\n for step in INSTALL_STEPS:\n local(step)\n\n\ndef deps_remote():\n for step in INSTALL_STEPS:\n run(step)\n",
"step-5": "from fabric.api import local,run\nINSTALL_STEPS = ['yes | sudo apt-get install libmysqlclient-dev\t python-dev python-mysqldb python-virtualenv',\n 'virtualenv --no-site-packages env',\n '. env/bin/activate;pip install -r requirements.txt']\ndef deps_local():\n for step in INSTALL_STEPS:\n \tlocal(step)\ndef deps_remote():\n for step in INSTALL_STEPS:\n \trun(step)\t\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
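In the Fabric record above, deps_local() and deps_remote() run the same three steps through local() and run(); which machine run() targets is decided at invocation time. A sketch of pinning hosts in the fabfile (the hostname is a placeholder; alternatively pass -H on the command line, e.g. fab -H deploy@10.0.0.5 deps_remote):

from fabric.api import env

env.hosts = ['deploy@10.0.0.5']   # placeholder host list for Fabric 1.x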
'''
@Description:
@Version: 1.0
@Author: Henggao
@Date: 2020-02-20 16:17:05
@LastEditors: Henggao
@LastEditTime: 2020-02-20 16:32:45
'''
name = "henggao"
def change():
name = "Brill"
print(name)
print(locals())
print(globals())
change()
print(name)
|
normal
|
{
"blob_id": "6c7162a9bd81d618abda204c24031c5a5acc61b4",
"index": 7967,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef change():\n name = 'Brill'\n print(name)\n print(locals())\n print(globals())\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef change():\n name = 'Brill'\n print(name)\n print(locals())\n print(globals())\n\n\nchange()\nprint(name)\n",
"step-4": "<mask token>\nname = 'henggao'\n\n\ndef change():\n name = 'Brill'\n print(name)\n print(locals())\n print(globals())\n\n\nchange()\nprint(name)\n",
"step-5": "'''\n@Description: \n@Version: 1.0\n@Autor: Henggao\n@Date: 2020-02-20 16:17:05\n@LastEditors: Henggao\n@LastEditTime: 2020-02-20 16:32:45\n'''\nname = \"henggao\"\ndef change():\n name = \"Brill\"\n print(name)\n print(locals())\n print(globals())\n \nchange() \n\nprint(name)",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
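The record above shows that assignment inside change() creates a new local binding: locals() there holds {'name': 'Brill'} while globals()['name'] stays 'henggao', so the final print still shows henggao. To rebind the module-level name instead, the function must declare it global (a minimal sketch):

name = 'henggao'

def change_global():
    global name    # rebind the module-level name rather than creating a local
    name = 'Brill'

change_global()
print(name)        # Brill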
# binary search
# iterative
def Iter_BinarySearch(array,b,e,value):
    while(b<=e):# note the <=, so a one-element range is still searched
        mid=(b+e)/2# floor
        if (array[mid]<value):# value can only be in (mid,e]
            b=mid+1
        elif (array[mid]>value):# value can only be in [b,mid)
            e=mid-1
        else:
            print "find it! the index is: ", mid
            return mid
    print "cannot find it!"
    return -1


# test code for iterative Iter_BinarySearch(array,b,e,value)
array=[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]
Iter_BinarySearch(array,0,15,15)


# recursive
def Recur_BinarySearch(array,b,e,value):
    if (b<=e):
        mid=(b+e)/2# floor
        if (array[mid]<value):# value can only be in (mid,e]
            return Recur_BinarySearch(array,mid+1,e,value)
        elif (array[mid]>value):# value can only be in [b,mid)
            return Recur_BinarySearch(array,b,mid-1,value)
        else:
            print "find it! the index is: ", mid
            return mid
    else:
        print "cannot find it!"
        return -1

# test code for recursive Recur_BinarySearch
array=[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]
Recur_BinarySearch(array,0,15,16)
|
normal
|
{
"blob_id": "f2d7f0b0d27bd43223d0eb6a6279b67968461dad",
"index": 9499,
"step-1": "# binary search\n\n# iterative\ndef Iter_BinarySearch(array,b,e,value):\n while(b<=e):#pay attention to the judgement!\n mid=(b+e)/2#floor\n if (array[mid]<value):#value in [mid,e]\n b=mid+1\n elif (array[mid]>value):#value in [b,mid]\n e=mid-1\n else:\n print \"find it! the index is: \", mid\n return mid\n print \"cannot fint it!\"\n return -1\n\n\n# test code for iterative BinarySearch(array,b,e,value)\narray=[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]\nIter_BinarySearch(array,0,15,15)\n\n\n# recursive\ndef Recur_BinarySearch(arrray,b,e,value):\n mid=(b+e)/2#floor\n if (b<=e):\n if (array[mid]<value):#value in [mid,e]\n b=mid+1\n elif (array[mid]>value):#value in [b,mid]\n e=mid-1\n else:\n print \"find it! the index is: \", mid\n return mid\n else:\n print \"cannot find it\"\n return\n Recur_BinarySearch(array,b,e,value)\n\n# test code for recursive BinarySearch\narray=[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]\nIter_BinarySearch(array,0,15,16)",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
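The handwritten searches in the record above can be cross-checked against the standard library: bisect_left returns the leftmost insertion point in O(log n), and an equality test at that index answers membership (a sketch in Python 3):

from bisect import bisect_left

def binary_search(array, value):
    i = bisect_left(array, value)    # first index with array[i] >= value
    if i < len(array) and array[i] == value:
        return i
    return -1

array = list(range(16))
print(binary_search(array, 15))   # 15
print(binary_search(array, 16))   # -1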
'''
The Python open() function opens a file and creates a file object; the related
methods can then be called on that object to read and write the file.
For more file operations see: Python File I/O.
Function syntax
open(name[, mode[, buffering]])
Parameters:
name : a string containing the name of the file you want to access.
mode : determines the mode in which the file is opened: read-only, write, append, and so on. All possible values are listed in the full table below. This parameter is optional; the default access mode is read-only (r).
buffering : if buffering is set to 0, no buffering takes place. If buffering is 1, the file is line-buffered. An integer greater than 1 gives the size of the buffer. A negative value means the system default buffer size is used.
Full list of file-opening modes:
r    : open for reading only; the file pointer is placed at the beginning of the file. This is the default mode.
rb   : open in binary format for reading only; the file pointer is placed at the beginning of the file.
r+   : open for reading and writing; the file pointer is placed at the beginning of the file.
rb+  : open in binary format for reading and writing; the file pointer is placed at the beginning of the file.
w    : open for writing only. If the file exists it is opened and truncated, i.e. the existing content is deleted; if it does not exist, a new file is created.
wb   : open in binary format for writing only; truncates an existing file or creates a new one.
w+   : open for reading and writing; truncates an existing file or creates a new one.
wb+  : open in binary format for reading and writing; truncates an existing file or creates a new one.
a    : open for appending. If the file exists the pointer is placed at the end, so new content is written after the existing content; otherwise a new file is created for writing.
ab   : open in binary format for appending; same behaviour as a.
a+   : open for reading and writing. If the file exists the pointer is placed at the end and the file opens in append mode; otherwise a new file is created for reading and writing.
ab+  : open in binary format for appending. If the file exists the pointer is placed at the end; otherwise a new file is created for reading and writing.
file object methods
file.read([size]): returns the whole file if size is omitted; this is a problem when the file is larger than about twice the available memory. f.read() returns "" (an empty string) at end of file.
file.readline(): returns one line.
file.readlines([size]): returns a list of size lines, or all lines if size is omitted.
for line in f: print line : access the file through its iterator.
f.write("hello\n"): data other than strings must be converted to a string before writing.
f.tell(): returns an integer giving the current position of the file pointer (the number of bytes from the start of the file).
f.seek(offset[, whence]): moves the file pointer.
offset: in bytes; may be positive or negative
whence: 0 - start of file (default); 1 - current position; 2 - end of file
f.close() closes the file

open(filename [, mode [, bufsize]])
Opens a file and returns a file object. If the file cannot be opened, IOError is raised.
open() should be used instead of calling the file type's constructor directly.
The filename parameter is the path string of the file to be opened;
the mode parameter is the opening mode; the most common modes are 'r' to read text, 'w' to write a text file, and 'a' to append.
The default value of mode is 'r'.
When working with binary files, just add 'b' to the mode value; this improves program portability.
The optional bufsize parameter defines the file buffer size: 0 means unbuffered; 1 means line-buffered; any other positive number means a buffer of that size;
a negative number means the system default buffer size, which is usually line buffering for tty devices and full buffering for other files. If the parameter is omitted,
the system default is used.
'''
f=open('1.txt','r',encoding='utf-8')
print(f.read())
'''
Output...
ltf
zhongguo
shanxi
yuncheng
male
20
'''
# Reference blog: https://www.cnblogs.com/Devilf/p/8006663.html
|
normal
|
{
"blob_id": "3a65565af4c55fa5479e323a737c48f7f2fdb8ce",
"index": 596,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nprint(f.read())\n<mask token>\n",
"step-3": "<mask token>\nf = open('1.txt', 'r', encoding='utf-8')\nprint(f.read())\n<mask token>\n",
"step-4": "'''\npython open() 函数用于打开一个文件,创建一个 file 对象,相关的方法才可以调用它进行读写。\n更多文件操作可参考:Python 文件I/O。\n函数语法\nopen(name[, mode[, buffering]])\n参数说明:\nname : 一个包含了你要访问的文件名称的字符串值。\nmode : mode 决定了打开文件的模式:只读,写入,追加等。所有可取值见如下的完全列表。这个参数是非强制的,默认文件访问模式为只读(r)。\nbuffering : 如果 buffering 的值被设为 0,就不会有寄存。如果 buffering 的值取 1,访问文件时会寄存行。如果将 buffering 的值设为大于 1 的整数,表明了这就是的寄存区的缓冲大小。如果取负值,寄存区的缓冲大小则为系统默认。\n不同模式打开文件的完全列表:\n模式\n描述\nr\n以只读方式打开文件。文件的指针将会放在文件的开头。这是默认模式。\nrb\n以二进制格式打开一个文件用于只读。文件指针将会放在文件的开头。这是默认模式。\nr+\n打开一个文件用于读写。文件指针将会放在文件的开头。\nrb+\n以二进制格式打开一个文件用于读写。文件指针将会放在文件的开头。\nw\n打开一个文件只用于写入。如果该文件已存在则打开文件,并从开头开始编辑,即原有内容会被删除。如果该文件不存在,创建新文件。\nwb\n以二进制格式打开一个文件只用于写入。如果该文件已存在则打开文件,并从开头开始编辑,即原有内容会被删除。如果该文件不存在,创建新文件。\nw+\n打开一个文件用于读写。如果该文件已存在则打开文件,并从开头开始编辑,即原有内容会被删除。如果该文件不存在,创建新文件。\nwb+\n以二进制格式打开一个文件用于读写。如果该文件已存在则打开文件,并从开头开始编辑,即原有内容会被删除。如果该文件不存在,创建新文件。\na\n打开一个文件用于追加。如果该文件已存在,文件指针将会放在文件的结尾。也就是说,新的内容将会被写入到已有内容之后。如果该文件不存在,创建新文件进行写入。\nab\n以二进制格式打开一个文件用于追加。如果该文件已存在,文件指针将会放在文件的结尾。也就是说,新的内容将会被写入到已有内容之后。如果该文件不存在,创建新文件进行写入。\na+\n打开一个文件用于读写。如果该文件已存在,文件指针将会放在文件的结尾。文件打开时会是追加模式。如果该文件不存在,创建新文件用于读写。\nab+\n以二进制格式打开一个文件用于追加。如果该文件已存在,文件指针将会放在文件的结尾。如果该文件不存在,创建新文件用于读写。\nfile 对象方法\nfile.read([size]):size 未指定则返回整个文件,如果文件大小 >2 倍内存则有问题,f.read()读到文件尾时返回\"\"(空字串)。\nfile.readline():返回一行。\nfile.readlines([size]) :返回包含size行的列表, size 未指定则返回全部行。\nfor line in f: print line :通过迭代器访问。\nf.write(\"hello\\n\"):如果要写入字符串以外的数据,先将他转换为字符串。\nf.tell():返回一个整数,表示当前文件指针的位置(就是到文件头的比特数)。\nf.seek(偏移量,[起始位置]):用来移动文件指针。\n偏移量: 单位为比特,可正可负\n起始位置: 0 - 文件头, 默认值; 1 - 当前位置; 2 - 文件尾\nf.close() 关闭文件\n\n\nopen(filename [, mode [, bufsize]])\n打开一个文件,返回一个file对象。 如果文件无法打开,将处罚IOError异常。\n应该使用open()来代替直接使用file类型的构造函数打开文件。\n参数filename表示将要被打开的文件的路径字符串;\n参数mode表示打开的模式,最常用的模式有:'r'表示读文本,'w'表示写文本文件,'a'表示在文件中追加。\nMode的默认值是'r'。\n当操作的是二进制文件时,只要在模式值上添加'b'。这样提高了程序的可移植性。\n可选参数bufsize定义了文件缓冲区的大小。0表示不缓冲;1表示行缓冲;任何其他正数表示使用该大小的缓冲区;\n负数表示使用系统默认缓冲区大小,对于tty设备它往往是行缓冲,而对于其他文件往往完全缓冲。如果参数值被省却。\n使用系统默认值。\n'''\n\nf=open('1.txt','r',encoding='utf-8')\nprint(f.read())\n'''\n输出...\nltf\nzhongguo\nshanxi\nyuncheng\n男\n20\n'''\n\n#参考博客 https://www.cnblogs.com/Devilf/p/8006663.html\n\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
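The mode table in the record above is easiest to verify by exercising a scratch file: 'w' truncates or creates, 'a' appends, and tell()/seek() report and move the byte offset (the file name scratch.txt is arbitrary):

with open('scratch.txt', 'w') as f:    # truncate / create
    f.write('hello\n')
with open('scratch.txt', 'a') as f:    # append after the existing content
    f.write('world\n')
with open('scratch.txt', 'r') as f:
    print(f.readline(), end='')        # hello
    print(f.tell())                    # byte offset just past the first line
    f.seek(0)                          # back to the start of the file
    print(f.read(), end='')            # hello, then world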
from django.db import models
from django.contrib.auth.models import User
from django.utils.encoding import smart_unicode
from django.core.validators import MinValueValidator
from django.utils import timezone
from concurrency.fields import IntegerVersionField
class ProductCategory(models.Model):
name = models.CharField(max_length=20)
class Meta:
unique_together = (("name"),)
def __unicode__(self):
return smart_unicode(self.name)
class Product(models.Model):
name = models.CharField(max_length=20)
seller = models.ForeignKey(User, verbose_name="seller")
initial_price = models.DecimalField(max_digits=10, decimal_places=2,
validators=[MinValueValidator(0)], verbose_name="starting bid")
description = models.TextField(max_length=280)
    timestamp = models.DateTimeField(auto_now_add=True, auto_now=False)
product_category = models.ForeignKey(ProductCategory, verbose_name="product category")
version = IntegerVersionField()
def __unicode__(self):
return smart_unicode(self.name)
class AuctionStatus(models.Model):
name = models.CharField(max_length=20)
version = IntegerVersionField()
class Meta:
unique_together = (("name"),)
def __unicode__(self):
return smart_unicode(self.name)
class Auction(models.Model):
title = models.CharField(max_length=20)
current_price = models.DecimalField(max_digits=10, decimal_places=2, default=0,
null=True, blank=True, verbose_name="current bid")
    updated_time = models.DateTimeField(auto_now_add=False, auto_now=True)
end_time = models.DateTimeField(verbose_name="end time")
product = models.OneToOneField(Product, related_name='product')
status = models.ForeignKey(AuctionStatus, verbose_name="auction status")
version = IntegerVersionField()
class Meta:
unique_together = (("title"),)
ordering = ['end_time']
def __unicode__(self):
return smart_unicode(self.title)
@classmethod
def fetchActiveAuctions(cls):
try:
queryset = cls.objects.filter(status_id=1).order_by('-end_time').reverse()
return queryset
except IndexError:
return None
@classmethod
def getAuctionByID(cls, aucid):
try:
return cls.objects.get(id=aucid, status_id=1)
        except cls.DoesNotExist:
return None
@classmethod
def getAuctionByCategory(cls, catid):
try:
prodcat = Product.objects.filter(product_category=catid)
            queryset = Auction.objects.filter(product__in=prodcat, status_id=1)
return queryset
except IndexError:
return None
@classmethod
def getAuctionByOwner(cls, ownerid):
try:
myprod = Product.objects.filter(seller_id=ownerid)
            queryset = Auction.objects.filter(product__in=myprod, status_id=1)
return queryset
except IndexError:
return None
@classmethod
def getOwnerByAuctionID(cls, aucid):
try:
queryset = Auction.objects.get(id=aucid, status_id=1)
myprod = Product.objects.get(id=queryset.product_id)
seller = myprod.seller
return seller
        except (Auction.DoesNotExist, Product.DoesNotExist):
return None
@classmethod
def getAuctionByProductID(cls, product_id):
try:
queryset = Auction.objects.get(product=product_id, status_id=1)
return queryset
        except Auction.DoesNotExist:
return None
class Bidder(models.Model):
contender = models.ForeignKey(User, related_name='buyer', verbose_name='contender')
auctions = models.ManyToManyField(Auction, related_name='auctions', through='AuctionBidder')
version = IntegerVersionField()
def __unicode__(self):
return smart_unicode(self.contender)
class Meta:
ordering = ["contender"]
class AuctionBidder(models.Model):
unique_bidder = models.ForeignKey(Bidder, related_name='unique_bidder')
auc = models.ForeignKey(Auction, related_name='unique_auction')
bid_amount = models.DecimalField(max_digits=10, decimal_places=2,
verbose_name="bid amount")
    bid_time = models.DateTimeField(auto_now_add=False, auto_now=True)
version = IntegerVersionField()
def __unicode__(self):
return smart_unicode(self.auc)
class Meta:
ordering = ["bid_time"]
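Every model in this record carries an IntegerVersionField, so django-concurrency can reject a save() whose in-memory version is stale. A sketch of the bid path that protects (place_bid is an illustrative name, not part of the record; RecordModifiedError is the exception django-concurrency raises on a version conflict):

from concurrency.exceptions import RecordModifiedError

def place_bid(auction_id, user, amount):
    auction = Auction.getAuctionByID(auction_id)
    if auction is None or amount <= (auction.current_price or auction.product.initial_price):
        return False
    auction.current_price = amount
    try:
        auction.save()    # version check: fails if another bid landed first
    except RecordModifiedError:
        return False      # stale read; the caller should refetch and retry
    bidder, _ = Bidder.objects.get_or_create(contender=user)
    AuctionBidder.objects.create(unique_bidder=bidder, auc=auction, bid_amount=amount)
    return True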
|
normal
|
{
"blob_id": "9bb15842b39c7fd3e6f6c0048a51c2b2112ddb94",
"index": 8082,
"step-1": "<mask token>\n\n\nclass Auction(models.Model):\n title = models.CharField(max_length=20)\n current_price = models.DecimalField(max_digits=10, decimal_places=2,\n default=0, null=True, blank=True, verbose_name='current bid')\n updated_time = models.DateTimeField(auto_now_add=False, auto_now=\n timezone.now())\n end_time = models.DateTimeField(verbose_name='end time')\n product = models.OneToOneField(Product, related_name='product')\n status = models.ForeignKey(AuctionStatus, verbose_name='auction status')\n version = IntegerVersionField()\n\n\n class Meta:\n unique_together = 'title',\n ordering = ['end_time']\n\n def __unicode__(self):\n return smart_unicode(self.title)\n\n @classmethod\n def fetchActiveAuctions(cls):\n try:\n queryset = cls.objects.filter(status_id=1).order_by('-end_time'\n ).reverse()\n return queryset\n except IndexError:\n return None\n\n @classmethod\n def getAuctionByID(cls, aucid):\n try:\n return cls.objects.get(id=aucid, status_id=1)\n except IndexError:\n return None\n\n @classmethod\n def getAuctionByCategory(cls, catid):\n try:\n prodcat = Product.objects.filter(product_category=catid)\n queryset = Auction.objects.filter(product_id=prodcat, status_id=1)\n return queryset\n except IndexError:\n return None\n\n @classmethod\n def getAuctionByOwner(cls, ownerid):\n try:\n myprod = Product.objects.filter(seller_id=ownerid)\n queryset = Auction.objects.filter(product_id=myprod, status_id=1)\n return queryset\n except IndexError:\n return None\n\n @classmethod\n def getOwnerByAuctionID(cls, aucid):\n try:\n queryset = Auction.objects.get(id=aucid, status_id=1)\n myprod = Product.objects.get(id=queryset.product_id)\n seller = myprod.seller\n return seller\n except IndexError:\n return None\n\n @classmethod\n def getAuctionByProductID(cls, product_id):\n try:\n queryset = Auction.objects.get(product=product_id, status_id=1)\n return queryset\n except IndexError:\n return None\n\n\nclass Bidder(models.Model):\n contender = models.ForeignKey(User, related_name='buyer', verbose_name=\n 'contender')\n auctions = models.ManyToManyField(Auction, related_name='auctions',\n through='AuctionBidder')\n version = IntegerVersionField()\n\n def __unicode__(self):\n return smart_unicode(self.contender)\n\n\n class Meta:\n ordering = ['contender']\n\n\nclass AuctionBidder(models.Model):\n unique_bidder = models.ForeignKey(Bidder, related_name='unique_bidder')\n auc = models.ForeignKey(Auction, related_name='unique_auction')\n bid_amount = models.DecimalField(max_digits=10, decimal_places=2,\n verbose_name='bid amount')\n bid_time = models.DateTimeField(auto_now_add=False, auto_now=timezone.now()\n )\n version = IntegerVersionField()\n\n def __unicode__(self):\n return smart_unicode(self.auc)\n\n\n class Meta:\n ordering = ['bid_time']\n",
"step-2": "<mask token>\n\n\nclass Product(models.Model):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def __unicode__(self):\n return smart_unicode(self.name)\n\n\nclass AuctionStatus(models.Model):\n name = models.CharField(max_length=20)\n version = IntegerVersionField()\n\n\n class Meta:\n unique_together = 'name',\n\n def __unicode__(self):\n return smart_unicode(self.name)\n\n\nclass Auction(models.Model):\n title = models.CharField(max_length=20)\n current_price = models.DecimalField(max_digits=10, decimal_places=2,\n default=0, null=True, blank=True, verbose_name='current bid')\n updated_time = models.DateTimeField(auto_now_add=False, auto_now=\n timezone.now())\n end_time = models.DateTimeField(verbose_name='end time')\n product = models.OneToOneField(Product, related_name='product')\n status = models.ForeignKey(AuctionStatus, verbose_name='auction status')\n version = IntegerVersionField()\n\n\n class Meta:\n unique_together = 'title',\n ordering = ['end_time']\n\n def __unicode__(self):\n return smart_unicode(self.title)\n\n @classmethod\n def fetchActiveAuctions(cls):\n try:\n queryset = cls.objects.filter(status_id=1).order_by('-end_time'\n ).reverse()\n return queryset\n except IndexError:\n return None\n\n @classmethod\n def getAuctionByID(cls, aucid):\n try:\n return cls.objects.get(id=aucid, status_id=1)\n except IndexError:\n return None\n\n @classmethod\n def getAuctionByCategory(cls, catid):\n try:\n prodcat = Product.objects.filter(product_category=catid)\n queryset = Auction.objects.filter(product_id=prodcat, status_id=1)\n return queryset\n except IndexError:\n return None\n\n @classmethod\n def getAuctionByOwner(cls, ownerid):\n try:\n myprod = Product.objects.filter(seller_id=ownerid)\n queryset = Auction.objects.filter(product_id=myprod, status_id=1)\n return queryset\n except IndexError:\n return None\n\n @classmethod\n def getOwnerByAuctionID(cls, aucid):\n try:\n queryset = Auction.objects.get(id=aucid, status_id=1)\n myprod = Product.objects.get(id=queryset.product_id)\n seller = myprod.seller\n return seller\n except IndexError:\n return None\n\n @classmethod\n def getAuctionByProductID(cls, product_id):\n try:\n queryset = Auction.objects.get(product=product_id, status_id=1)\n return queryset\n except IndexError:\n return None\n\n\nclass Bidder(models.Model):\n contender = models.ForeignKey(User, related_name='buyer', verbose_name=\n 'contender')\n auctions = models.ManyToManyField(Auction, related_name='auctions',\n through='AuctionBidder')\n version = IntegerVersionField()\n\n def __unicode__(self):\n return smart_unicode(self.contender)\n\n\n class Meta:\n ordering = ['contender']\n\n\nclass AuctionBidder(models.Model):\n unique_bidder = models.ForeignKey(Bidder, related_name='unique_bidder')\n auc = models.ForeignKey(Auction, related_name='unique_auction')\n bid_amount = models.DecimalField(max_digits=10, decimal_places=2,\n verbose_name='bid amount')\n bid_time = models.DateTimeField(auto_now_add=False, auto_now=timezone.now()\n )\n version = IntegerVersionField()\n\n def __unicode__(self):\n return smart_unicode(self.auc)\n\n\n class Meta:\n ordering = ['bid_time']\n",
"step-3": "<mask token>\n\n\nclass ProductCategory(models.Model):\n <mask token>\n\n\n class Meta:\n unique_together = 'name',\n\n def __unicode__(self):\n return smart_unicode(self.name)\n\n\nclass Product(models.Model):\n name = models.CharField(max_length=20)\n seller = models.ForeignKey(User, verbose_name='seller')\n initial_price = models.DecimalField(max_digits=10, decimal_places=2,\n validators=[MinValueValidator(0)], verbose_name='starting bid')\n description = models.TextField(max_length=280)\n timestamp = models.DateTimeField(auto_now_add=timezone.now(), auto_now=\n False)\n product_category = models.ForeignKey(ProductCategory, verbose_name=\n 'product category')\n version = IntegerVersionField()\n\n def __unicode__(self):\n return smart_unicode(self.name)\n\n\nclass AuctionStatus(models.Model):\n name = models.CharField(max_length=20)\n version = IntegerVersionField()\n\n\n class Meta:\n unique_together = 'name',\n\n def __unicode__(self):\n return smart_unicode(self.name)\n\n\nclass Auction(models.Model):\n title = models.CharField(max_length=20)\n current_price = models.DecimalField(max_digits=10, decimal_places=2,\n default=0, null=True, blank=True, verbose_name='current bid')\n updated_time = models.DateTimeField(auto_now_add=False, auto_now=\n timezone.now())\n end_time = models.DateTimeField(verbose_name='end time')\n product = models.OneToOneField(Product, related_name='product')\n status = models.ForeignKey(AuctionStatus, verbose_name='auction status')\n version = IntegerVersionField()\n\n\n class Meta:\n unique_together = 'title',\n ordering = ['end_time']\n\n def __unicode__(self):\n return smart_unicode(self.title)\n\n @classmethod\n def fetchActiveAuctions(cls):\n try:\n queryset = cls.objects.filter(status_id=1).order_by('-end_time'\n ).reverse()\n return queryset\n except IndexError:\n return None\n\n @classmethod\n def getAuctionByID(cls, aucid):\n try:\n return cls.objects.get(id=aucid, status_id=1)\n except IndexError:\n return None\n\n @classmethod\n def getAuctionByCategory(cls, catid):\n try:\n prodcat = Product.objects.filter(product_category=catid)\n queryset = Auction.objects.filter(product_id=prodcat, status_id=1)\n return queryset\n except IndexError:\n return None\n\n @classmethod\n def getAuctionByOwner(cls, ownerid):\n try:\n myprod = Product.objects.filter(seller_id=ownerid)\n queryset = Auction.objects.filter(product_id=myprod, status_id=1)\n return queryset\n except IndexError:\n return None\n\n @classmethod\n def getOwnerByAuctionID(cls, aucid):\n try:\n queryset = Auction.objects.get(id=aucid, status_id=1)\n myprod = Product.objects.get(id=queryset.product_id)\n seller = myprod.seller\n return seller\n except IndexError:\n return None\n\n @classmethod\n def getAuctionByProductID(cls, product_id):\n try:\n queryset = Auction.objects.get(product=product_id, status_id=1)\n return queryset\n except IndexError:\n return None\n\n\nclass Bidder(models.Model):\n contender = models.ForeignKey(User, related_name='buyer', verbose_name=\n 'contender')\n auctions = models.ManyToManyField(Auction, related_name='auctions',\n through='AuctionBidder')\n version = IntegerVersionField()\n\n def __unicode__(self):\n return smart_unicode(self.contender)\n\n\n class Meta:\n ordering = ['contender']\n\n\nclass AuctionBidder(models.Model):\n unique_bidder = models.ForeignKey(Bidder, related_name='unique_bidder')\n auc = models.ForeignKey(Auction, related_name='unique_auction')\n bid_amount = models.DecimalField(max_digits=10, decimal_places=2,\n verbose_name='bid 
amount')\n bid_time = models.DateTimeField(auto_now_add=False, auto_now=timezone.now()\n )\n version = IntegerVersionField()\n\n def __unicode__(self):\n return smart_unicode(self.auc)\n\n\n class Meta:\n ordering = ['bid_time']\n",
"step-4": "from django.db import models\nfrom django.contrib.auth.models import User\nfrom django.utils.encoding import smart_unicode\nfrom django.core.validators import MinValueValidator\nfrom django.utils import timezone\nfrom concurrency.fields import IntegerVersionField\n\n\nclass ProductCategory(models.Model):\n name = models.CharField(max_length=20)\n\n\n class Meta:\n unique_together = 'name',\n\n def __unicode__(self):\n return smart_unicode(self.name)\n\n\nclass Product(models.Model):\n name = models.CharField(max_length=20)\n seller = models.ForeignKey(User, verbose_name='seller')\n initial_price = models.DecimalField(max_digits=10, decimal_places=2,\n validators=[MinValueValidator(0)], verbose_name='starting bid')\n description = models.TextField(max_length=280)\n timestamp = models.DateTimeField(auto_now_add=timezone.now(), auto_now=\n False)\n product_category = models.ForeignKey(ProductCategory, verbose_name=\n 'product category')\n version = IntegerVersionField()\n\n def __unicode__(self):\n return smart_unicode(self.name)\n\n\nclass AuctionStatus(models.Model):\n name = models.CharField(max_length=20)\n version = IntegerVersionField()\n\n\n class Meta:\n unique_together = 'name',\n\n def __unicode__(self):\n return smart_unicode(self.name)\n\n\nclass Auction(models.Model):\n title = models.CharField(max_length=20)\n current_price = models.DecimalField(max_digits=10, decimal_places=2,\n default=0, null=True, blank=True, verbose_name='current bid')\n updated_time = models.DateTimeField(auto_now_add=False, auto_now=\n timezone.now())\n end_time = models.DateTimeField(verbose_name='end time')\n product = models.OneToOneField(Product, related_name='product')\n status = models.ForeignKey(AuctionStatus, verbose_name='auction status')\n version = IntegerVersionField()\n\n\n class Meta:\n unique_together = 'title',\n ordering = ['end_time']\n\n def __unicode__(self):\n return smart_unicode(self.title)\n\n @classmethod\n def fetchActiveAuctions(cls):\n try:\n queryset = cls.objects.filter(status_id=1).order_by('-end_time'\n ).reverse()\n return queryset\n except IndexError:\n return None\n\n @classmethod\n def getAuctionByID(cls, aucid):\n try:\n return cls.objects.get(id=aucid, status_id=1)\n except IndexError:\n return None\n\n @classmethod\n def getAuctionByCategory(cls, catid):\n try:\n prodcat = Product.objects.filter(product_category=catid)\n queryset = Auction.objects.filter(product_id=prodcat, status_id=1)\n return queryset\n except IndexError:\n return None\n\n @classmethod\n def getAuctionByOwner(cls, ownerid):\n try:\n myprod = Product.objects.filter(seller_id=ownerid)\n queryset = Auction.objects.filter(product_id=myprod, status_id=1)\n return queryset\n except IndexError:\n return None\n\n @classmethod\n def getOwnerByAuctionID(cls, aucid):\n try:\n queryset = Auction.objects.get(id=aucid, status_id=1)\n myprod = Product.objects.get(id=queryset.product_id)\n seller = myprod.seller\n return seller\n except IndexError:\n return None\n\n @classmethod\n def getAuctionByProductID(cls, product_id):\n try:\n queryset = Auction.objects.get(product=product_id, status_id=1)\n return queryset\n except IndexError:\n return None\n\n\nclass Bidder(models.Model):\n contender = models.ForeignKey(User, related_name='buyer', verbose_name=\n 'contender')\n auctions = models.ManyToManyField(Auction, related_name='auctions',\n through='AuctionBidder')\n version = IntegerVersionField()\n\n def __unicode__(self):\n return smart_unicode(self.contender)\n\n\n class Meta:\n ordering = 
['contender']\n\n\nclass AuctionBidder(models.Model):\n unique_bidder = models.ForeignKey(Bidder, related_name='unique_bidder')\n auc = models.ForeignKey(Auction, related_name='unique_auction')\n bid_amount = models.DecimalField(max_digits=10, decimal_places=2,\n verbose_name='bid amount')\n bid_time = models.DateTimeField(auto_now_add=False, auto_now=timezone.now()\n )\n version = IntegerVersionField()\n\n def __unicode__(self):\n return smart_unicode(self.auc)\n\n\n class Meta:\n ordering = ['bid_time']\n",
"step-5": "from django.db import models\nfrom django.contrib.auth.models import User\nfrom django.utils.encoding import smart_unicode\nfrom django.core.validators import MinValueValidator\nfrom django.utils import timezone\nfrom concurrency.fields import IntegerVersionField\n\n\nclass ProductCategory(models.Model):\n name = models.CharField(max_length=20)\n\n class Meta:\n unique_together = ((\"name\"),)\n\n def __unicode__(self):\n return smart_unicode(self.name)\n\n\nclass Product(models.Model):\n name = models.CharField(max_length=20)\n seller = models.ForeignKey(User, verbose_name=\"seller\")\n initial_price = models.DecimalField(max_digits=10, decimal_places=2,\n validators=[MinValueValidator(0)], verbose_name=\"starting bid\")\n description = models.TextField(max_length=280)\n timestamp = models.DateTimeField(auto_now_add=timezone.now(), auto_now=False)\n product_category = models.ForeignKey(ProductCategory, verbose_name=\"product category\")\n version = IntegerVersionField()\n\n def __unicode__(self):\n return smart_unicode(self.name)\n\n\nclass AuctionStatus(models.Model):\n name = models.CharField(max_length=20)\n version = IntegerVersionField()\n\n class Meta:\n unique_together = ((\"name\"),)\n\n def __unicode__(self):\n return smart_unicode(self.name)\n\n\nclass Auction(models.Model):\n title = models.CharField(max_length=20)\n current_price = models.DecimalField(max_digits=10, decimal_places=2, default=0,\n null=True, blank=True, verbose_name=\"current bid\")\n updated_time = models.DateTimeField(auto_now_add=False, auto_now=timezone.now())\n end_time = models.DateTimeField(verbose_name=\"end time\")\n product = models.OneToOneField(Product, related_name='product')\n status = models.ForeignKey(AuctionStatus, verbose_name=\"auction status\")\n version = IntegerVersionField()\n\n class Meta:\n unique_together = ((\"title\"),)\n ordering = ['end_time']\n\n def __unicode__(self):\n return smart_unicode(self.title)\n\n\n @classmethod\n def fetchActiveAuctions(cls):\n try:\n queryset = cls.objects.filter(status_id=1).order_by('-end_time').reverse()\n return queryset\n except IndexError:\n return None\n\n @classmethod\n def getAuctionByID(cls, aucid):\n try:\n return cls.objects.get(id=aucid, status_id=1)\n except IndexError:\n return None\n\n @classmethod\n def getAuctionByCategory(cls, catid):\n try:\n prodcat = Product.objects.filter(product_category=catid)\n queryset = Auction.objects.filter(product_id=prodcat, status_id=1)\n return queryset\n except IndexError:\n return None\n\n @classmethod\n def getAuctionByOwner(cls, ownerid):\n try:\n myprod = Product.objects.filter(seller_id=ownerid)\n queryset = Auction.objects.filter(product_id=myprod, status_id=1)\n return queryset\n except IndexError:\n return None\n\n @classmethod\n def getOwnerByAuctionID(cls, aucid):\n try:\n queryset = Auction.objects.get(id=aucid, status_id=1)\n myprod = Product.objects.get(id=queryset.product_id)\n seller = myprod.seller\n return seller\n except IndexError:\n return None\n\n @classmethod\n def getAuctionByProductID(cls, product_id):\n try:\n queryset = Auction.objects.get(product=product_id, status_id=1)\n return queryset\n except IndexError:\n return None\n\n\nclass Bidder(models.Model):\n contender = models.ForeignKey(User, related_name='buyer', verbose_name='contender')\n auctions = models.ManyToManyField(Auction, related_name='auctions', through='AuctionBidder')\n version = IntegerVersionField()\n\n def __unicode__(self):\n return smart_unicode(self.contender)\n\n class Meta:\n ordering = 
[\"contender\"]\n\n\nclass AuctionBidder(models.Model):\n unique_bidder = models.ForeignKey(Bidder, related_name='unique_bidder')\n auc = models.ForeignKey(Auction, related_name='unique_auction')\n bid_amount = models.DecimalField(max_digits=10, decimal_places=2,\n verbose_name=\"bid amount\")\n bid_time = models.DateTimeField(auto_now_add=False, auto_now=timezone.now(), )\n version = IntegerVersionField()\n\n def __unicode__(self):\n return smart_unicode(self.auc)\n\n class Meta:\n ordering = [\"bid_time\"]\n\n\n\n\n",
"step-ids": [
15,
20,
23,
25,
26
]
}
|
[
15,
20,
23,
25,
26
] |
# DISCLAIMER
# The "Math" code was taken from http://depado.markdownblog.com/2015-09-29-mistune-parser-syntax-highlighter-mathjax-support-and-centered-images
# The HighlightRenderer code was taken from https://github.com/rupeshk/MarkdownHighlighter
# MarkdownHighlighter is a simple syntax highlighter for Markdown syntax.
# The initial code for MarkdownHighlighter was taken from niwmarkdowneditor by John Schember
# Copyright 2009 John Schember, Copyright 2012 Rupesh Kumar
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
# MA 02110-1301, USA.
import re
import mistune
import pygments
from pygments.lexers import get_lexer_by_name
from pygments.formatters import html
class HighlightRenderer(mistune.Renderer):
def block_code(self, code, lang):
if not lang:
return '\n<pre><code>%s</code></pre>\n' % \
mistune.escape(code)
try:
lexer = get_lexer_by_name(lang, stripall=True)
except pygments.util.ClassNotFound:
return '\n<pre><code>%s</code></pre>\n' % \
mistune.escape(code)
formatter = html.HtmlFormatter()
return pygments.highlight(code, lexer, formatter)
# Pass math through unaltered - mathjax does the rendering in the browser
def block_math(self, text):
return '$$%s$$' % text
def latex_environment(self, name, text):
return r'\begin{%s}%s\end{%s}' % (name, text, name)
def inline_math(self, text):
return '$%s$' % text
class MathBlockGrammar(mistune.BlockGrammar):
block_math = re.compile(r"^\$\$(.*?)\$\$", re.DOTALL)
latex_environment = re.compile(
r"^\\begin\{([a-z]*\*?)\}(.*?)\\end\{\1\}", re.DOTALL)
class MathBlockLexer(mistune.BlockLexer):
default_rules = [
'block_math', 'latex_environment'] + mistune.BlockLexer.default_rules
def __init__(self, rules=None, **kwargs):
if rules is None:
rules = MathBlockGrammar()
super(MathBlockLexer, self).__init__(rules, **kwargs)
def parse_block_math(self, m):
"""Parse a $$math$$ block"""
self.tokens.append({
'type': 'block_math',
'text': m.group(1)
})
def parse_latex_environment(self, m):
self.tokens.append({
'type': 'latex_environment',
'name': m.group(1),
'text': m.group(2)
})
class MathInlineGrammar(mistune.InlineGrammar):
math = re.compile(r"^\$(.+?)\$", re.DOTALL)
block_math = re.compile(r"^\$\$(.+?)\$\$", re.DOTALL)
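    # The default `text` rule is overridden below to also stop at `$`, so the
    # two math rules above get a chance to match before plain text consumes it.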
text = re.compile(r'^[\s\S]+?(?=[\\<!\[_*`~$]|https?://| {2,}\n|$)')
class MathInlineLexer(mistune.InlineLexer):
default_rules = ['block_math', 'math'] + mistune.InlineLexer.default_rules
def __init__(self, renderer, rules=None, **kwargs):
if rules is None:
rules = MathInlineGrammar()
super(MathInlineLexer, self).__init__(renderer, rules, **kwargs)
def output_math(self, m):
return self.renderer.inline_math(m.group(1))
def output_block_math(self, m):
return self.renderer.block_math(m.group(1))
class MarkdownWithMath(mistune.Markdown):
def __init__(self, renderer, **kwargs):
if 'inline' not in kwargs:
kwargs['inline'] = MathInlineLexer
if 'block' not in kwargs:
kwargs['block'] = MathBlockLexer
super(MarkdownWithMath, self).__init__(renderer, **kwargs)
def output_block_math(self):
return self.renderer.block_math(self.token['text'])
def output_latex_environment(self):
return self.renderer.latex_environment(self.token['name'],
self.token['text'])
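# A minimal usage sketch, assuming the mistune 0.8.x API these classes are
# written against; the sample markdown below is illustrative only:
if __name__ == '__main__':
    renderer = HighlightRenderer()
    markdown = MarkdownWithMath(renderer=renderer)
    # Inline math is passed through as $...$, display math as $$...$$,
    # so MathJax can render both in the browser.
    print(markdown('Inline math $a^2 + b^2 = c^2$\n\n$$E = mc^2$$'))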
|
normal
|
{
"blob_id": "a6c45ab3df0a692cd625d8203e1152e942a4cd6c",
"index": 5908,
"step-1": "<mask token>\n\n\nclass MathBlockLexer(mistune.BlockLexer):\n <mask token>\n\n def __init__(self, rules=None, **kwargs):\n if rules is None:\n rules = MathBlockGrammar()\n super(MathBlockLexer, self).__init__(rules, **kwargs)\n\n def parse_block_math(self, m):\n \"\"\"Parse a $$math$$ block\"\"\"\n self.tokens.append({'type': 'block_math', 'text': m.group(1)})\n\n def parse_latex_environment(self, m):\n self.tokens.append({'type': 'latex_environment', 'name': m.group(1),\n 'text': m.group(2)})\n\n\nclass MathInlineGrammar(mistune.InlineGrammar):\n math = re.compile('^\\\\$(.+?)\\\\$', re.DOTALL)\n block_math = re.compile('^\\\\$\\\\$(.+?)\\\\$\\\\$', re.DOTALL)\n text = re.compile('^[\\\\s\\\\S]+?(?=[\\\\\\\\<!\\\\[_*`~$]|https?://| {2,}\\\\n|$)')\n\n\nclass MathInlineLexer(mistune.InlineLexer):\n default_rules = ['block_math', 'math'] + mistune.InlineLexer.default_rules\n\n def __init__(self, renderer, rules=None, **kwargs):\n if rules is None:\n rules = MathInlineGrammar()\n super(MathInlineLexer, self).__init__(renderer, rules, **kwargs)\n\n def output_math(self, m):\n return self.renderer.inline_math(m.group(1))\n\n def output_block_math(self, m):\n return self.renderer.block_math(m.group(1))\n\n\nclass MarkdownWithMath(mistune.Markdown):\n\n def __init__(self, renderer, **kwargs):\n if 'inline' not in kwargs:\n kwargs['inline'] = MathInlineLexer\n if 'block' not in kwargs:\n kwargs['block'] = MathBlockLexer\n super(MarkdownWithMath, self).__init__(renderer, **kwargs)\n\n def output_block_math(self):\n return self.renderer.block_math(self.token['text'])\n\n def output_latex_environment(self):\n return self.renderer.latex_environment(self.token['name'], self.\n token['text'])\n",
"step-2": "<mask token>\n\n\nclass MathBlockGrammar(mistune.BlockGrammar):\n <mask token>\n <mask token>\n\n\nclass MathBlockLexer(mistune.BlockLexer):\n default_rules = ['block_math', 'latex_environment'\n ] + mistune.BlockLexer.default_rules\n\n def __init__(self, rules=None, **kwargs):\n if rules is None:\n rules = MathBlockGrammar()\n super(MathBlockLexer, self).__init__(rules, **kwargs)\n\n def parse_block_math(self, m):\n \"\"\"Parse a $$math$$ block\"\"\"\n self.tokens.append({'type': 'block_math', 'text': m.group(1)})\n\n def parse_latex_environment(self, m):\n self.tokens.append({'type': 'latex_environment', 'name': m.group(1),\n 'text': m.group(2)})\n\n\nclass MathInlineGrammar(mistune.InlineGrammar):\n math = re.compile('^\\\\$(.+?)\\\\$', re.DOTALL)\n block_math = re.compile('^\\\\$\\\\$(.+?)\\\\$\\\\$', re.DOTALL)\n text = re.compile('^[\\\\s\\\\S]+?(?=[\\\\\\\\<!\\\\[_*`~$]|https?://| {2,}\\\\n|$)')\n\n\nclass MathInlineLexer(mistune.InlineLexer):\n default_rules = ['block_math', 'math'] + mistune.InlineLexer.default_rules\n\n def __init__(self, renderer, rules=None, **kwargs):\n if rules is None:\n rules = MathInlineGrammar()\n super(MathInlineLexer, self).__init__(renderer, rules, **kwargs)\n\n def output_math(self, m):\n return self.renderer.inline_math(m.group(1))\n\n def output_block_math(self, m):\n return self.renderer.block_math(m.group(1))\n\n\nclass MarkdownWithMath(mistune.Markdown):\n\n def __init__(self, renderer, **kwargs):\n if 'inline' not in kwargs:\n kwargs['inline'] = MathInlineLexer\n if 'block' not in kwargs:\n kwargs['block'] = MathBlockLexer\n super(MarkdownWithMath, self).__init__(renderer, **kwargs)\n\n def output_block_math(self):\n return self.renderer.block_math(self.token['text'])\n\n def output_latex_environment(self):\n return self.renderer.latex_environment(self.token['name'], self.\n token['text'])\n",
"step-3": "<mask token>\n\n\nclass HighlightRenderer(mistune.Renderer):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n\nclass MathBlockGrammar(mistune.BlockGrammar):\n block_math = re.compile('^\\\\$\\\\$(.*?)\\\\$\\\\$', re.DOTALL)\n latex_environment = re.compile(\n '^\\\\\\\\begin\\\\{([a-z]*\\\\*?)\\\\}(.*?)\\\\\\\\end\\\\{\\\\1\\\\}', re.DOTALL)\n\n\nclass MathBlockLexer(mistune.BlockLexer):\n default_rules = ['block_math', 'latex_environment'\n ] + mistune.BlockLexer.default_rules\n\n def __init__(self, rules=None, **kwargs):\n if rules is None:\n rules = MathBlockGrammar()\n super(MathBlockLexer, self).__init__(rules, **kwargs)\n\n def parse_block_math(self, m):\n \"\"\"Parse a $$math$$ block\"\"\"\n self.tokens.append({'type': 'block_math', 'text': m.group(1)})\n\n def parse_latex_environment(self, m):\n self.tokens.append({'type': 'latex_environment', 'name': m.group(1),\n 'text': m.group(2)})\n\n\nclass MathInlineGrammar(mistune.InlineGrammar):\n math = re.compile('^\\\\$(.+?)\\\\$', re.DOTALL)\n block_math = re.compile('^\\\\$\\\\$(.+?)\\\\$\\\\$', re.DOTALL)\n text = re.compile('^[\\\\s\\\\S]+?(?=[\\\\\\\\<!\\\\[_*`~$]|https?://| {2,}\\\\n|$)')\n\n\nclass MathInlineLexer(mistune.InlineLexer):\n default_rules = ['block_math', 'math'] + mistune.InlineLexer.default_rules\n\n def __init__(self, renderer, rules=None, **kwargs):\n if rules is None:\n rules = MathInlineGrammar()\n super(MathInlineLexer, self).__init__(renderer, rules, **kwargs)\n\n def output_math(self, m):\n return self.renderer.inline_math(m.group(1))\n\n def output_block_math(self, m):\n return self.renderer.block_math(m.group(1))\n\n\nclass MarkdownWithMath(mistune.Markdown):\n\n def __init__(self, renderer, **kwargs):\n if 'inline' not in kwargs:\n kwargs['inline'] = MathInlineLexer\n if 'block' not in kwargs:\n kwargs['block'] = MathBlockLexer\n super(MarkdownWithMath, self).__init__(renderer, **kwargs)\n\n def output_block_math(self):\n return self.renderer.block_math(self.token['text'])\n\n def output_latex_environment(self):\n return self.renderer.latex_environment(self.token['name'], self.\n token['text'])\n",
"step-4": "import re\nimport mistune\nimport pygments\nfrom pygments.lexers import get_lexer_by_name\nfrom pygments.formatters import html\n\n\nclass HighlightRenderer(mistune.Renderer):\n\n def block_code(self, code, lang):\n if not lang:\n return '\\n<pre><code>%s</code></pre>\\n' % mistune.escape(code)\n try:\n lexer = get_lexer_by_name(lang, stripall=True)\n except pygments.util.ClassNotFound:\n return '\\n<pre><code>%s</code></pre>\\n' % mistune.escape(code)\n formatter = html.HtmlFormatter()\n return pygments.highlight(code, lexer, formatter)\n\n def block_math(self, text):\n return '$$%s$$' % text\n\n def latex_environment(self, name, text):\n return '\\\\begin{%s}%s\\\\end{%s}' % (name, text, name)\n\n def inline_math(self, text):\n return '$%s$' % text\n\n\nclass MathBlockGrammar(mistune.BlockGrammar):\n block_math = re.compile('^\\\\$\\\\$(.*?)\\\\$\\\\$', re.DOTALL)\n latex_environment = re.compile(\n '^\\\\\\\\begin\\\\{([a-z]*\\\\*?)\\\\}(.*?)\\\\\\\\end\\\\{\\\\1\\\\}', re.DOTALL)\n\n\nclass MathBlockLexer(mistune.BlockLexer):\n default_rules = ['block_math', 'latex_environment'\n ] + mistune.BlockLexer.default_rules\n\n def __init__(self, rules=None, **kwargs):\n if rules is None:\n rules = MathBlockGrammar()\n super(MathBlockLexer, self).__init__(rules, **kwargs)\n\n def parse_block_math(self, m):\n \"\"\"Parse a $$math$$ block\"\"\"\n self.tokens.append({'type': 'block_math', 'text': m.group(1)})\n\n def parse_latex_environment(self, m):\n self.tokens.append({'type': 'latex_environment', 'name': m.group(1),\n 'text': m.group(2)})\n\n\nclass MathInlineGrammar(mistune.InlineGrammar):\n math = re.compile('^\\\\$(.+?)\\\\$', re.DOTALL)\n block_math = re.compile('^\\\\$\\\\$(.+?)\\\\$\\\\$', re.DOTALL)\n text = re.compile('^[\\\\s\\\\S]+?(?=[\\\\\\\\<!\\\\[_*`~$]|https?://| {2,}\\\\n|$)')\n\n\nclass MathInlineLexer(mistune.InlineLexer):\n default_rules = ['block_math', 'math'] + mistune.InlineLexer.default_rules\n\n def __init__(self, renderer, rules=None, **kwargs):\n if rules is None:\n rules = MathInlineGrammar()\n super(MathInlineLexer, self).__init__(renderer, rules, **kwargs)\n\n def output_math(self, m):\n return self.renderer.inline_math(m.group(1))\n\n def output_block_math(self, m):\n return self.renderer.block_math(m.group(1))\n\n\nclass MarkdownWithMath(mistune.Markdown):\n\n def __init__(self, renderer, **kwargs):\n if 'inline' not in kwargs:\n kwargs['inline'] = MathInlineLexer\n if 'block' not in kwargs:\n kwargs['block'] = MathBlockLexer\n super(MarkdownWithMath, self).__init__(renderer, **kwargs)\n\n def output_block_math(self):\n return self.renderer.block_math(self.token['text'])\n\n def output_latex_environment(self):\n return self.renderer.latex_environment(self.token['name'], self.\n token['text'])\n",
"step-5": "# DISCLAIMER\n# The \"Math\" code was taken from http://depado.markdownblog.com/2015-09-29-mistune-parser-syntax-highlighter-mathjax-support-and-centered-images\n# The HighlightRenderer code was taken from https://github.com/rupeshk/MarkdownHighlighter\n\n\n# MarkdownHighlighter is a simple syntax highlighter for Markdown syntax.\n# The initial code for MarkdownHighlighter was taken from niwmarkdowneditor by John Schember\n# Copyright 2009 John Schember, Copyright 2012 Rupesh Kumar\n\n# This program is free software; you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation; either version 2 of the License, or\n# (at your option) any later version.\n\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n\n# You should have received a copy of the GNU General Public License\n# along with this program; if not, write to the Free Software\n# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,\n# MA 02110-1301, USA.\n\n\nimport re\nimport mistune\nimport pygments\nfrom pygments.lexers import get_lexer_by_name\nfrom pygments.formatters import html\n\n\nclass HighlightRenderer(mistune.Renderer):\n\n def block_code(self, code, lang):\n if not lang:\n return '\\n<pre><code>%s</code></pre>\\n' % \\\n mistune.escape(code)\n try:\n lexer = get_lexer_by_name(lang, stripall=True)\n except pygments.util.ClassNotFound:\n return '\\n<pre><code>%s</code></pre>\\n' % \\\n mistune.escape(code)\n\n formatter = html.HtmlFormatter()\n return pygments.highlight(code, lexer, formatter)\n\n # Pass math through unaltered - mathjax does the rendering in the browser\n def block_math(self, text):\n return '$$%s$$' % text\n\n def latex_environment(self, name, text):\n return r'\\begin{%s}%s\\end{%s}' % (name, text, name)\n\n def inline_math(self, text):\n return '$%s$' % text\n\n\nclass MathBlockGrammar(mistune.BlockGrammar):\n block_math = re.compile(r\"^\\$\\$(.*?)\\$\\$\", re.DOTALL)\n latex_environment = re.compile(\n r\"^\\\\begin\\{([a-z]*\\*?)\\}(.*?)\\\\end\\{\\1\\}\", re.DOTALL)\n\n\nclass MathBlockLexer(mistune.BlockLexer):\n default_rules = [\n 'block_math', 'latex_environment'] + mistune.BlockLexer.default_rules\n\n def __init__(self, rules=None, **kwargs):\n if rules is None:\n rules = MathBlockGrammar()\n super(MathBlockLexer, self).__init__(rules, **kwargs)\n\n def parse_block_math(self, m):\n \"\"\"Parse a $$math$$ block\"\"\"\n self.tokens.append({\n 'type': 'block_math',\n 'text': m.group(1)\n })\n\n def parse_latex_environment(self, m):\n self.tokens.append({\n 'type': 'latex_environment',\n 'name': m.group(1),\n 'text': m.group(2)\n })\n\n\nclass MathInlineGrammar(mistune.InlineGrammar):\n math = re.compile(r\"^\\$(.+?)\\$\", re.DOTALL)\n block_math = re.compile(r\"^\\$\\$(.+?)\\$\\$\", re.DOTALL)\n text = re.compile(r'^[\\s\\S]+?(?=[\\\\<!\\[_*`~$]|https?://| {2,}\\n|$)')\n\n\nclass MathInlineLexer(mistune.InlineLexer):\n default_rules = ['block_math', 'math'] + mistune.InlineLexer.default_rules\n\n def __init__(self, renderer, rules=None, **kwargs):\n if rules is None:\n rules = MathInlineGrammar()\n super(MathInlineLexer, self).__init__(renderer, rules, **kwargs)\n\n def output_math(self, m):\n return self.renderer.inline_math(m.group(1))\n\n def output_block_math(self, m):\n return 
self.renderer.block_math(m.group(1))\n\n\nclass MarkdownWithMath(mistune.Markdown):\n def __init__(self, renderer, **kwargs):\n if 'inline' not in kwargs:\n kwargs['inline'] = MathInlineLexer\n if 'block' not in kwargs:\n kwargs['block'] = MathBlockLexer\n super(MarkdownWithMath, self).__init__(renderer, **kwargs)\n\n def output_block_math(self):\n return self.renderer.block_math(self.token['text'])\n\n def output_latex_environment(self):\n return self.renderer.latex_environment(self.token['name'],\n self.token['text'])\n",
"step-ids": [
15,
17,
19,
24,
25
]
}
|
[
15,
17,
19,
24,
25
] |
import time
from selenium import webdriver
from selenium.webdriver.support.wait import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
def open_browser(browser="chrome"):
driver = None
if browser == "chrome":
driver = webdriver.Chrome()
elif browser == "firefox":
driver = webdriver.Firefox()
elif browser == "ie":
driver = webdriver.Ie()
else:
# driver = None
print("请输入正确的浏览器,例如'chrome','Firefox','ie'")
return driver
class Base:
def __init__(self, driver):
self.driver = driver
def open_url(self, url):
self.driver.get(url)
        self.driver.maximize_window()  # maximize the window
def find_element(self, locator, timeout=10):
element = WebDriverWait(self.driver, timeout).until(EC.presence_of_element_located(locator))
return element
def find_elements(self, locator, timeout=10):
elements = WebDriverWait(self.driver, timeout).until(EC.presence_of_all_elements_located(locator))
return elements
def click(self, locator, timeout=10):
element = self.find_element(locator=locator, timeout=timeout)
element.click()
def send_keys(self, locator, text, timeout=10):
element = self.find_element(locator=locator, timeout=timeout)
element.clear()
element.send_keys(text)
def is_text_in_element(self, locator, text, timeout=10):
try:
result = WebDriverWait(self.driver, timeout=timeout).until(EC.text_to_be_present_in_element(locator, text))
return result
except:
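            # Bare except: a timeout (or any other error) is reported as
            # "text not present" rather than raised to the caller.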
return False
def is_value_in_element(self, locator, value, timeout=10):
try:
result = WebDriverWait(self.driver, timeout=timeout).until(
EC.text_to_be_present_in_element_value(locator, value))
return result
except:
return False
def close_browser(self):
self.driver.quit()
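# A minimal usage sketch; the URL and locator are illustrative, not taken
# from a real test, and chromedriver is assumed to be available on PATH:
if __name__ == '__main__':
    from selenium.webdriver.common.by import By
    driver = open_browser('chrome')
    if driver is not None:
        page = Base(driver)
        page.open_url('https://example.com')
        print(page.is_text_in_element((By.TAG_NAME, 'h1'), 'Example'))
        page.close_browser()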
|
normal
|
{
"blob_id": "82fc86e44d02c45d7904139e4dfdff069e2bdb90",
"index": 5634,
"step-1": "<mask token>\n\n\nclass Base:\n <mask token>\n\n def open_url(self, url):\n self.driver.get(url)\n self.driver.maximize_window()\n\n def find_element(self, locator, timeout=10):\n element = WebDriverWait(self.driver, timeout).until(EC.\n presence_of_element_located(locator))\n return element\n <mask token>\n <mask token>\n\n def send_keys(self, locator, text, timeout=10):\n element = self.find_element(locator=locator, timeout=timeout)\n element.clear()\n element.send_keys(text)\n\n def is_text_in_element(self, locator, text, timeout=10):\n try:\n result = WebDriverWait(self.driver, timeout=timeout).until(EC.\n text_to_be_present_in_element(locator, text))\n return result\n except:\n return False\n\n def is_value_in_element(self, locator, value, timeout=10):\n try:\n result = WebDriverWait(self.driver, timeout=timeout).until(EC.\n text_to_be_present_in_element_value(locator, value))\n return result\n except:\n return False\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass Base:\n\n def __init__(self, driver):\n self.driver = driver\n\n def open_url(self, url):\n self.driver.get(url)\n self.driver.maximize_window()\n\n def find_element(self, locator, timeout=10):\n element = WebDriverWait(self.driver, timeout).until(EC.\n presence_of_element_located(locator))\n return element\n\n def find_elements(self, locator, timeout=10):\n elements = WebDriverWait(self.driver, timeout).until(EC.\n presence_of_all_elements_located(locator))\n return elements\n <mask token>\n\n def send_keys(self, locator, text, timeout=10):\n element = self.find_element(locator=locator, timeout=timeout)\n element.clear()\n element.send_keys(text)\n\n def is_text_in_element(self, locator, text, timeout=10):\n try:\n result = WebDriverWait(self.driver, timeout=timeout).until(EC.\n text_to_be_present_in_element(locator, text))\n return result\n except:\n return False\n\n def is_value_in_element(self, locator, value, timeout=10):\n try:\n result = WebDriverWait(self.driver, timeout=timeout).until(EC.\n text_to_be_present_in_element_value(locator, value))\n return result\n except:\n return False\n\n def close_browser(self):\n self.driver.quit()\n",
"step-3": "<mask token>\n\n\ndef open_browser(browser='chrome'):\n driver = None\n if browser == 'chrome':\n driver = webdriver.Chrome()\n elif browser == 'firefox':\n driver = webdriver.Firefox()\n elif browser == 'ie':\n driver = webdriver.Ie()\n else:\n print(\"请输入正确的浏览器,例如'chrome','Firefox','ie'\")\n return driver\n\n\nclass Base:\n\n def __init__(self, driver):\n self.driver = driver\n\n def open_url(self, url):\n self.driver.get(url)\n self.driver.maximize_window()\n\n def find_element(self, locator, timeout=10):\n element = WebDriverWait(self.driver, timeout).until(EC.\n presence_of_element_located(locator))\n return element\n\n def find_elements(self, locator, timeout=10):\n elements = WebDriverWait(self.driver, timeout).until(EC.\n presence_of_all_elements_located(locator))\n return elements\n\n def click(self, locator, timeout=10):\n element = self.find_element(locator=locator, timeout=timeout)\n element.click()\n\n def send_keys(self, locator, text, timeout=10):\n element = self.find_element(locator=locator, timeout=timeout)\n element.clear()\n element.send_keys(text)\n\n def is_text_in_element(self, locator, text, timeout=10):\n try:\n result = WebDriverWait(self.driver, timeout=timeout).until(EC.\n text_to_be_present_in_element(locator, text))\n return result\n except:\n return False\n\n def is_value_in_element(self, locator, value, timeout=10):\n try:\n result = WebDriverWait(self.driver, timeout=timeout).until(EC.\n text_to_be_present_in_element_value(locator, value))\n return result\n except:\n return False\n\n def close_browser(self):\n self.driver.quit()\n",
"step-4": "import time\nfrom selenium import webdriver\nfrom selenium.webdriver.support.wait import WebDriverWait\nfrom selenium.webdriver.support import expected_conditions as EC\n\n\ndef open_browser(browser='chrome'):\n driver = None\n if browser == 'chrome':\n driver = webdriver.Chrome()\n elif browser == 'firefox':\n driver = webdriver.Firefox()\n elif browser == 'ie':\n driver = webdriver.Ie()\n else:\n print(\"请输入正确的浏览器,例如'chrome','Firefox','ie'\")\n return driver\n\n\nclass Base:\n\n def __init__(self, driver):\n self.driver = driver\n\n def open_url(self, url):\n self.driver.get(url)\n self.driver.maximize_window()\n\n def find_element(self, locator, timeout=10):\n element = WebDriverWait(self.driver, timeout).until(EC.\n presence_of_element_located(locator))\n return element\n\n def find_elements(self, locator, timeout=10):\n elements = WebDriverWait(self.driver, timeout).until(EC.\n presence_of_all_elements_located(locator))\n return elements\n\n def click(self, locator, timeout=10):\n element = self.find_element(locator=locator, timeout=timeout)\n element.click()\n\n def send_keys(self, locator, text, timeout=10):\n element = self.find_element(locator=locator, timeout=timeout)\n element.clear()\n element.send_keys(text)\n\n def is_text_in_element(self, locator, text, timeout=10):\n try:\n result = WebDriverWait(self.driver, timeout=timeout).until(EC.\n text_to_be_present_in_element(locator, text))\n return result\n except:\n return False\n\n def is_value_in_element(self, locator, value, timeout=10):\n try:\n result = WebDriverWait(self.driver, timeout=timeout).until(EC.\n text_to_be_present_in_element_value(locator, value))\n return result\n except:\n return False\n\n def close_browser(self):\n self.driver.quit()\n",
"step-5": "import time\n\nfrom selenium import webdriver\nfrom selenium.webdriver.support.wait import WebDriverWait\nfrom selenium.webdriver.support import expected_conditions as EC\n\n\ndef open_browser(browser=\"chrome\"):\n driver = None\n if browser == \"chrome\":\n driver = webdriver.Chrome()\n elif browser == \"firefox\":\n driver = webdriver.Firefox()\n elif browser == \"ie\":\n driver = webdriver.Ie()\n else:\n # driver = None\n print(\"请输入正确的浏览器,例如'chrome','Firefox','ie'\")\n return driver\n\n\nclass Base:\n def __init__(self, driver):\n self.driver = driver\n\n def open_url(self, url):\n self.driver.get(url)\n self.driver.maximize_window() # 窗口最大化\n\n def find_element(self, locator, timeout=10):\n element = WebDriverWait(self.driver, timeout).until(EC.presence_of_element_located(locator))\n return element\n\n def find_elements(self, locator, timeout=10):\n elements = WebDriverWait(self.driver, timeout).until(EC.presence_of_all_elements_located(locator))\n return elements\n\n def click(self, locator, timeout=10):\n element = self.find_element(locator=locator, timeout=timeout)\n element.click()\n\n def send_keys(self, locator, text, timeout=10):\n element = self.find_element(locator=locator, timeout=timeout)\n element.clear()\n element.send_keys(text)\n\n def is_text_in_element(self, locator, text, timeout=10):\n try:\n result = WebDriverWait(self.driver, timeout=timeout).until(EC.text_to_be_present_in_element(locator, text))\n return result\n except:\n return False\n\n def is_value_in_element(self, locator, value, timeout=10):\n try:\n result = WebDriverWait(self.driver, timeout=timeout).until(\n EC.text_to_be_present_in_element_value(locator, value))\n return result\n except:\n return False\n\n def close_browser(self):\n self.driver.quit()\n\n\n\n",
"step-ids": [
6,
9,
11,
12,
13
]
}
|
[
6,
9,
11,
12,
13
] |
"""
Test /cohort/:id/user/:id
"""
import re
from unittest.mock import patch
from django.urls.base import reverse_lazy
from rest_framework import status
from breathecode.tests.mocks import (
GOOGLE_CLOUD_PATH,
apply_google_cloud_client_mock,
apply_google_cloud_bucket_mock,
apply_google_cloud_blob_mock,
)
from ..mixins import AdmissionsTestCase
class CohortIdUserIdTestSuite(AdmissionsTestCase):
"""Test /cohort/:id/user/:id"""
@patch(GOOGLE_CLOUD_PATH['client'], apply_google_cloud_client_mock())
@patch(GOOGLE_CLOUD_PATH['bucket'], apply_google_cloud_bucket_mock())
@patch(GOOGLE_CLOUD_PATH['blob'], apply_google_cloud_blob_mock())
def test_cohort_id_user_id_without_auth(self):
"""Test /cohort/:id/user/:id without auth"""
url = reverse_lazy('admissions:cohort_id_user_id', kwargs={'cohort_id': 1, 'user_id': 1})
response = self.client.get(url)
json = response.json()
self.assertEqual(
json, {
'detail': 'Authentication credentials were not provided.',
'status_code': status.HTTP_401_UNAUTHORIZED
})
self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)
@patch(GOOGLE_CLOUD_PATH['client'], apply_google_cloud_client_mock())
@patch(GOOGLE_CLOUD_PATH['bucket'], apply_google_cloud_bucket_mock())
@patch(GOOGLE_CLOUD_PATH['blob'], apply_google_cloud_blob_mock())
def test_cohort_id_user_id_put_with_bad_cohort_id(self):
"""Test /cohort/:id/user/:id without auth"""
model = self.generate_models(authenticate=True)
url = reverse_lazy('admissions:cohort_id_user_id', kwargs={'cohort_id': 1, 'user_id': 1})
data = {}
response = self.client.put(url, data)
json = response.json()
expected = {'status_code': 400, 'detail': 'invalid cohort_id'}
self.assertEqual(json, expected)
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
@patch(GOOGLE_CLOUD_PATH['client'], apply_google_cloud_client_mock())
@patch(GOOGLE_CLOUD_PATH['bucket'], apply_google_cloud_bucket_mock())
@patch(GOOGLE_CLOUD_PATH['blob'], apply_google_cloud_blob_mock())
def test_cohort_id_user_id_put_with_bad_user_id(self):
"""Test /cohort/:id/user/:id without auth"""
model = self.generate_models(authenticate=True, cohort=True)
url = reverse_lazy('admissions:cohort_id_user_id',
kwargs={
'cohort_id': model.cohort.id,
'user_id': 999
})
data = {}
response = self.client.put(url, data)
json = response.json()
expected = {'status_code': 400, 'detail': 'invalid user_id'}
self.assertEqual(json, expected)
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
@patch(GOOGLE_CLOUD_PATH['client'], apply_google_cloud_client_mock())
@patch(GOOGLE_CLOUD_PATH['bucket'], apply_google_cloud_bucket_mock())
@patch(GOOGLE_CLOUD_PATH['blob'], apply_google_cloud_blob_mock())
def test_cohort_id_user_id_put_with_bad_id(self):
"""Test /cohort/:id/user/:id without auth"""
model = self.generate_models(authenticate=True, cohort=True, user=True)
url = reverse_lazy('admissions:cohort_id_user_id',
kwargs={
'cohort_id': model.cohort.id,
'user_id': model.user.id
})
data = {}
response = self.client.put(url, data)
json = response.json()
expected = {'status_code': 400, 'detail': 'Specified cohort not be found'}
self.assertEqual(json, expected)
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
@patch(GOOGLE_CLOUD_PATH['client'], apply_google_cloud_client_mock())
@patch(GOOGLE_CLOUD_PATH['bucket'], apply_google_cloud_bucket_mock())
@patch(GOOGLE_CLOUD_PATH['blob'], apply_google_cloud_blob_mock())
def test_cohort_id_user_id_put_with_id_but_without_user(self):
"""Test /cohort/:id/user/:id without auth"""
model = self.generate_models(authenticate=True, cohort=True)
url = reverse_lazy('admissions:cohort_id_user_id',
kwargs={
'cohort_id': model.cohort.id,
'user_id': model.user.id
})
data = {}
response = self.client.put(url, data)
json = response.json()
expected = {'status_code': 400, 'detail': 'Specified cohort not be found'}
self.assertEqual(json, expected)
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
@patch(GOOGLE_CLOUD_PATH['client'], apply_google_cloud_client_mock())
@patch(GOOGLE_CLOUD_PATH['bucket'], apply_google_cloud_bucket_mock())
@patch(GOOGLE_CLOUD_PATH['blob'], apply_google_cloud_blob_mock())
def test_cohort_id_user_id_put_with_id_but_with_user(self):
"""Test /cohort/:id/user/:id without auth"""
model = self.generate_models(authenticate=True, cohort=True, user=True)
url = reverse_lazy('admissions:cohort_id_user_id',
kwargs={
'cohort_id': model.cohort.id,
'user_id': model.user.id
})
data = {}
response = self.client.put(url, data)
json = response.json()
expected = {'status_code': 400, 'detail': 'Specified cohort not be found'}
self.assertEqual(json, expected)
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
@patch(GOOGLE_CLOUD_PATH['client'], apply_google_cloud_client_mock())
@patch(GOOGLE_CLOUD_PATH['bucket'], apply_google_cloud_bucket_mock())
@patch(GOOGLE_CLOUD_PATH['blob'], apply_google_cloud_blob_mock())
def test_cohort_id_user_id_put_with_id(self):
"""Test /cohort/:id/user/:id without auth"""
model = self.generate_models(authenticate=True,
cohort=True,
user=True,
specialty_mode=True,
profile_academy=True,
cohort_user=True)
model_dict = self.get_cohort_user_dict(1)
url = reverse_lazy('admissions:cohort_id_user_id',
kwargs={
'cohort_id': model.cohort.id,
'user_id': model.user.id
})
data = {'specialty_mode': model.specialty_mode.id}
response = self.client.put(url, data)
json = response.json()
expected = {
'id': model.cohort_user.id,
'role': model.cohort_user.role,
'educational_status': model.cohort_user.educational_status,
'finantial_status': model.cohort_user.finantial_status,
}
self.assertEqual(json, expected)
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(self.count_cohort_user(), 1)
self.assertEqual(self.get_cohort_user_dict(1), model_dict)
@patch(GOOGLE_CLOUD_PATH['client'], apply_google_cloud_client_mock())
@patch(GOOGLE_CLOUD_PATH['bucket'], apply_google_cloud_bucket_mock())
@patch(GOOGLE_CLOUD_PATH['blob'], apply_google_cloud_blob_mock())
def test_cohort_id_user_id_delete_with_id_with_bad_user_id(self):
"""Test /cohort/:id/user/:id without auth"""
model = self.generate_models(authenticate=True,
cohort=True,
user=True,
specialty_mode=True,
profile_academy=True,
cohort_user=True)
url = reverse_lazy('admissions:cohort_id_user_id',
kwargs={
'cohort_id': model.cohort.id,
'user_id': 9999
})
data = {'specialty_mode': model.specialty_mode.id}
response = self.client.delete(url, data)
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
@patch(GOOGLE_CLOUD_PATH['client'], apply_google_cloud_client_mock())
@patch(GOOGLE_CLOUD_PATH['bucket'], apply_google_cloud_bucket_mock())
@patch(GOOGLE_CLOUD_PATH['blob'], apply_google_cloud_blob_mock())
def test_cohort_id_user_id_delete_with_id_with_bad_cohort_id(self):
"""Test /cohort/:id/user/:id without auth"""
model = self.generate_models(authenticate=True,
cohort=True,
user=True,
specialty_mode=True,
profile_academy=True,
cohort_user=True)
url = reverse_lazy('admissions:cohort_id_user_id',
kwargs={
'cohort_id': 9999,
'user_id': model.user.id
})
data = {'specialty_mode': model.specialty_mode.id}
response = self.client.delete(url, data)
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
@patch(GOOGLE_CLOUD_PATH['client'], apply_google_cloud_client_mock())
@patch(GOOGLE_CLOUD_PATH['bucket'], apply_google_cloud_bucket_mock())
@patch(GOOGLE_CLOUD_PATH['blob'], apply_google_cloud_blob_mock())
def test_cohort_id_user_id_delete_with_id(self):
"""Test /cohort/:id/user/:id without auth"""
model = self.generate_models(authenticate=True,
cohort=True,
user=True,
specialty_mode=True,
profile_academy=True,
cohort_user=True)
url = reverse_lazy('admissions:cohort_id_user_id',
kwargs={
'cohort_id': model.cohort.id,
'user_id': model.user.id
})
data = {'specialty_mode': model.specialty_mode.id}
response = self.client.delete(url, data)
self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT)
self.assertEqual(self.count_cohort_user(), 0)
@patch(GOOGLE_CLOUD_PATH['client'], apply_google_cloud_client_mock())
@patch(GOOGLE_CLOUD_PATH['bucket'], apply_google_cloud_bucket_mock())
@patch(GOOGLE_CLOUD_PATH['blob'], apply_google_cloud_blob_mock())
def test_cohort_id_user_id_put_with_unsuccess_task(self):
"""Test /cohort/:id/user/:id without auth"""
model = self.generate_models(authenticate=True,
cohort=True,
user=True,
profile_academy=True,
cohort_user=True,
task=True,
task_status='PENDING',
task_type='PROJECT')
url = reverse_lazy('admissions:cohort_id_user_id',
kwargs={
'cohort_id': model.cohort.id,
'user_id': model.user.id
})
data = {
'educational_status': 'GRADUATED',
}
response = self.client.put(url, data)
json = response.json()
expected = {
'status_code': 400,
'detail': 'User has tasks with status pending the educational status cannot be GRADUATED',
}
self.assertEqual(json, expected)
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
@patch(GOOGLE_CLOUD_PATH['client'], apply_google_cloud_client_mock())
@patch(GOOGLE_CLOUD_PATH['bucket'], apply_google_cloud_bucket_mock())
@patch(GOOGLE_CLOUD_PATH['blob'], apply_google_cloud_blob_mock())
def test_cohort_id_user_id_put_with_unsuccess_finantial_status(self):
"""Test /cohort/:id/user/:id without auth"""
model = self.generate_models(authenticate=True,
cohort=True,
user=True,
profile_academy=True,
cohort_user=True)
url = reverse_lazy('admissions:cohort_id_user_id',
kwargs={
'cohort_id': model.cohort.id,
'user_id': model.user.id
})
data = {
'educational_status': 'GRADUATED',
'finantial_status': 'LATE',
}
response = self.client.put(url, data)
json = response.json()
expected = {
'status_code': 400,
'detail': 'Cannot be marked as `GRADUATED` if its financial status is `LATE`',
}
self.assertEqual(json, expected)
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
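# These tests would normally run through Django's test runner; the dotted
# path below is an assumption inferred from the imports above, not confirmed
# by the original repo:
#   python manage.py test breathecode.admissions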
|
normal
|
{
"blob_id": "937711546271c145d0f0df2981bdd7d1e9297e3a",
"index": 3788,
"step-1": "<mask token>\n\n\nclass CohortIdUserIdTestSuite(AdmissionsTestCase):\n <mask token>\n\n @patch(GOOGLE_CLOUD_PATH['client'], apply_google_cloud_client_mock())\n @patch(GOOGLE_CLOUD_PATH['bucket'], apply_google_cloud_bucket_mock())\n @patch(GOOGLE_CLOUD_PATH['blob'], apply_google_cloud_blob_mock())\n def test_cohort_id_user_id_without_auth(self):\n \"\"\"Test /cohort/:id/user/:id without auth\"\"\"\n url = reverse_lazy('admissions:cohort_id_user_id', kwargs={\n 'cohort_id': 1, 'user_id': 1})\n response = self.client.get(url)\n json = response.json()\n self.assertEqual(json, {'detail':\n 'Authentication credentials were not provided.', 'status_code':\n status.HTTP_401_UNAUTHORIZED})\n self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)\n\n @patch(GOOGLE_CLOUD_PATH['client'], apply_google_cloud_client_mock())\n @patch(GOOGLE_CLOUD_PATH['bucket'], apply_google_cloud_bucket_mock())\n @patch(GOOGLE_CLOUD_PATH['blob'], apply_google_cloud_blob_mock())\n def test_cohort_id_user_id_put_with_bad_cohort_id(self):\n \"\"\"Test /cohort/:id/user/:id without auth\"\"\"\n model = self.generate_models(authenticate=True)\n url = reverse_lazy('admissions:cohort_id_user_id', kwargs={\n 'cohort_id': 1, 'user_id': 1})\n data = {}\n response = self.client.put(url, data)\n json = response.json()\n expected = {'status_code': 400, 'detail': 'invalid cohort_id'}\n self.assertEqual(json, expected)\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)\n <mask token>\n <mask token>\n\n @patch(GOOGLE_CLOUD_PATH['client'], apply_google_cloud_client_mock())\n @patch(GOOGLE_CLOUD_PATH['bucket'], apply_google_cloud_bucket_mock())\n @patch(GOOGLE_CLOUD_PATH['blob'], apply_google_cloud_blob_mock())\n def test_cohort_id_user_id_put_with_id_but_without_user(self):\n \"\"\"Test /cohort/:id/user/:id without auth\"\"\"\n model = self.generate_models(authenticate=True, cohort=True)\n url = reverse_lazy('admissions:cohort_id_user_id', kwargs={\n 'cohort_id': model.cohort.id, 'user_id': model.user.id})\n data = {}\n response = self.client.put(url, data)\n json = response.json()\n expected = {'status_code': 400, 'detail':\n 'Specified cohort not be found'}\n self.assertEqual(json, expected)\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)\n\n @patch(GOOGLE_CLOUD_PATH['client'], apply_google_cloud_client_mock())\n @patch(GOOGLE_CLOUD_PATH['bucket'], apply_google_cloud_bucket_mock())\n @patch(GOOGLE_CLOUD_PATH['blob'], apply_google_cloud_blob_mock())\n def test_cohort_id_user_id_put_with_id_but_with_user(self):\n \"\"\"Test /cohort/:id/user/:id without auth\"\"\"\n model = self.generate_models(authenticate=True, cohort=True, user=True)\n url = reverse_lazy('admissions:cohort_id_user_id', kwargs={\n 'cohort_id': model.cohort.id, 'user_id': model.user.id})\n data = {}\n response = self.client.put(url, data)\n json = response.json()\n expected = {'status_code': 400, 'detail':\n 'Specified cohort not be found'}\n self.assertEqual(json, expected)\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)\n\n @patch(GOOGLE_CLOUD_PATH['client'], apply_google_cloud_client_mock())\n @patch(GOOGLE_CLOUD_PATH['bucket'], apply_google_cloud_bucket_mock())\n @patch(GOOGLE_CLOUD_PATH['blob'], apply_google_cloud_blob_mock())\n def test_cohort_id_user_id_put_with_id(self):\n \"\"\"Test /cohort/:id/user/:id without auth\"\"\"\n model = self.generate_models(authenticate=True, cohort=True, user=\n True, specialty_mode=True, profile_academy=True, cohort_user=True)\n model_dict = 
self.get_cohort_user_dict(1)\n url = reverse_lazy('admissions:cohort_id_user_id', kwargs={\n 'cohort_id': model.cohort.id, 'user_id': model.user.id})\n data = {'specialty_mode': model.specialty_mode.id}\n response = self.client.put(url, data)\n json = response.json()\n expected = {'id': model.cohort_user.id, 'role': model.cohort_user.\n role, 'educational_status': model.cohort_user.\n educational_status, 'finantial_status': model.cohort_user.\n finantial_status}\n self.assertEqual(json, expected)\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n self.assertEqual(self.count_cohort_user(), 1)\n self.assertEqual(self.get_cohort_user_dict(1), model_dict)\n <mask token>\n <mask token>\n\n @patch(GOOGLE_CLOUD_PATH['client'], apply_google_cloud_client_mock())\n @patch(GOOGLE_CLOUD_PATH['bucket'], apply_google_cloud_bucket_mock())\n @patch(GOOGLE_CLOUD_PATH['blob'], apply_google_cloud_blob_mock())\n def test_cohort_id_user_id_delete_with_id(self):\n \"\"\"Test /cohort/:id/user/:id without auth\"\"\"\n model = self.generate_models(authenticate=True, cohort=True, user=\n True, specialty_mode=True, profile_academy=True, cohort_user=True)\n url = reverse_lazy('admissions:cohort_id_user_id', kwargs={\n 'cohort_id': model.cohort.id, 'user_id': model.user.id})\n data = {'specialty_mode': model.specialty_mode.id}\n response = self.client.delete(url, data)\n self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT)\n self.assertEqual(self.count_cohort_user(), 0)\n\n @patch(GOOGLE_CLOUD_PATH['client'], apply_google_cloud_client_mock())\n @patch(GOOGLE_CLOUD_PATH['bucket'], apply_google_cloud_bucket_mock())\n @patch(GOOGLE_CLOUD_PATH['blob'], apply_google_cloud_blob_mock())\n def test_cohort_id_user_id_put_with_unsuccess_task(self):\n \"\"\"Test /cohort/:id/user/:id without auth\"\"\"\n model = self.generate_models(authenticate=True, cohort=True, user=\n True, profile_academy=True, cohort_user=True, task=True,\n task_status='PENDING', task_type='PROJECT')\n url = reverse_lazy('admissions:cohort_id_user_id', kwargs={\n 'cohort_id': model.cohort.id, 'user_id': model.user.id})\n data = {'educational_status': 'GRADUATED'}\n response = self.client.put(url, data)\n json = response.json()\n expected = {'status_code': 400, 'detail':\n 'User has tasks with status pending the educational status cannot be GRADUATED'\n }\n self.assertEqual(json, expected)\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)\n\n @patch(GOOGLE_CLOUD_PATH['client'], apply_google_cloud_client_mock())\n @patch(GOOGLE_CLOUD_PATH['bucket'], apply_google_cloud_bucket_mock())\n @patch(GOOGLE_CLOUD_PATH['blob'], apply_google_cloud_blob_mock())\n def test_cohort_id_user_id_put_with_unsuccess_finantial_status(self):\n \"\"\"Test /cohort/:id/user/:id without auth\"\"\"\n model = self.generate_models(authenticate=True, cohort=True, user=\n True, profile_academy=True, cohort_user=True)\n url = reverse_lazy('admissions:cohort_id_user_id', kwargs={\n 'cohort_id': model.cohort.id, 'user_id': model.user.id})\n data = {'educational_status': 'GRADUATED', 'finantial_status': 'LATE'}\n response = self.client.put(url, data)\n json = response.json()\n expected = {'status_code': 400, 'detail':\n 'Cannot be marked as `GRADUATED` if its financial status is `LATE`'\n }\n self.assertEqual(json, expected)\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)\n",
"step-2": "<mask token>\n\n\nclass CohortIdUserIdTestSuite(AdmissionsTestCase):\n <mask token>\n\n @patch(GOOGLE_CLOUD_PATH['client'], apply_google_cloud_client_mock())\n @patch(GOOGLE_CLOUD_PATH['bucket'], apply_google_cloud_bucket_mock())\n @patch(GOOGLE_CLOUD_PATH['blob'], apply_google_cloud_blob_mock())\n def test_cohort_id_user_id_without_auth(self):\n \"\"\"Test /cohort/:id/user/:id without auth\"\"\"\n url = reverse_lazy('admissions:cohort_id_user_id', kwargs={\n 'cohort_id': 1, 'user_id': 1})\n response = self.client.get(url)\n json = response.json()\n self.assertEqual(json, {'detail':\n 'Authentication credentials were not provided.', 'status_code':\n status.HTTP_401_UNAUTHORIZED})\n self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)\n\n @patch(GOOGLE_CLOUD_PATH['client'], apply_google_cloud_client_mock())\n @patch(GOOGLE_CLOUD_PATH['bucket'], apply_google_cloud_bucket_mock())\n @patch(GOOGLE_CLOUD_PATH['blob'], apply_google_cloud_blob_mock())\n def test_cohort_id_user_id_put_with_bad_cohort_id(self):\n \"\"\"Test /cohort/:id/user/:id without auth\"\"\"\n model = self.generate_models(authenticate=True)\n url = reverse_lazy('admissions:cohort_id_user_id', kwargs={\n 'cohort_id': 1, 'user_id': 1})\n data = {}\n response = self.client.put(url, data)\n json = response.json()\n expected = {'status_code': 400, 'detail': 'invalid cohort_id'}\n self.assertEqual(json, expected)\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)\n\n @patch(GOOGLE_CLOUD_PATH['client'], apply_google_cloud_client_mock())\n @patch(GOOGLE_CLOUD_PATH['bucket'], apply_google_cloud_bucket_mock())\n @patch(GOOGLE_CLOUD_PATH['blob'], apply_google_cloud_blob_mock())\n def test_cohort_id_user_id_put_with_bad_user_id(self):\n \"\"\"Test /cohort/:id/user/:id without auth\"\"\"\n model = self.generate_models(authenticate=True, cohort=True)\n url = reverse_lazy('admissions:cohort_id_user_id', kwargs={\n 'cohort_id': model.cohort.id, 'user_id': 999})\n data = {}\n response = self.client.put(url, data)\n json = response.json()\n expected = {'status_code': 400, 'detail': 'invalid user_id'}\n self.assertEqual(json, expected)\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)\n\n @patch(GOOGLE_CLOUD_PATH['client'], apply_google_cloud_client_mock())\n @patch(GOOGLE_CLOUD_PATH['bucket'], apply_google_cloud_bucket_mock())\n @patch(GOOGLE_CLOUD_PATH['blob'], apply_google_cloud_blob_mock())\n def test_cohort_id_user_id_put_with_bad_id(self):\n \"\"\"Test /cohort/:id/user/:id without auth\"\"\"\n model = self.generate_models(authenticate=True, cohort=True, user=True)\n url = reverse_lazy('admissions:cohort_id_user_id', kwargs={\n 'cohort_id': model.cohort.id, 'user_id': model.user.id})\n data = {}\n response = self.client.put(url, data)\n json = response.json()\n expected = {'status_code': 400, 'detail':\n 'Specified cohort not be found'}\n self.assertEqual(json, expected)\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)\n\n @patch(GOOGLE_CLOUD_PATH['client'], apply_google_cloud_client_mock())\n @patch(GOOGLE_CLOUD_PATH['bucket'], apply_google_cloud_bucket_mock())\n @patch(GOOGLE_CLOUD_PATH['blob'], apply_google_cloud_blob_mock())\n def test_cohort_id_user_id_put_with_id_but_without_user(self):\n \"\"\"Test /cohort/:id/user/:id without auth\"\"\"\n model = self.generate_models(authenticate=True, cohort=True)\n url = reverse_lazy('admissions:cohort_id_user_id', kwargs={\n 'cohort_id': model.cohort.id, 'user_id': model.user.id})\n data = {}\n response = 
self.client.put(url, data)\n json = response.json()\n expected = {'status_code': 400, 'detail':\n 'Specified cohort not be found'}\n self.assertEqual(json, expected)\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)\n\n @patch(GOOGLE_CLOUD_PATH['client'], apply_google_cloud_client_mock())\n @patch(GOOGLE_CLOUD_PATH['bucket'], apply_google_cloud_bucket_mock())\n @patch(GOOGLE_CLOUD_PATH['blob'], apply_google_cloud_blob_mock())\n def test_cohort_id_user_id_put_with_id_but_with_user(self):\n \"\"\"Test /cohort/:id/user/:id without auth\"\"\"\n model = self.generate_models(authenticate=True, cohort=True, user=True)\n url = reverse_lazy('admissions:cohort_id_user_id', kwargs={\n 'cohort_id': model.cohort.id, 'user_id': model.user.id})\n data = {}\n response = self.client.put(url, data)\n json = response.json()\n expected = {'status_code': 400, 'detail':\n 'Specified cohort not be found'}\n self.assertEqual(json, expected)\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)\n\n @patch(GOOGLE_CLOUD_PATH['client'], apply_google_cloud_client_mock())\n @patch(GOOGLE_CLOUD_PATH['bucket'], apply_google_cloud_bucket_mock())\n @patch(GOOGLE_CLOUD_PATH['blob'], apply_google_cloud_blob_mock())\n def test_cohort_id_user_id_put_with_id(self):\n \"\"\"Test /cohort/:id/user/:id without auth\"\"\"\n model = self.generate_models(authenticate=True, cohort=True, user=\n True, specialty_mode=True, profile_academy=True, cohort_user=True)\n model_dict = self.get_cohort_user_dict(1)\n url = reverse_lazy('admissions:cohort_id_user_id', kwargs={\n 'cohort_id': model.cohort.id, 'user_id': model.user.id})\n data = {'specialty_mode': model.specialty_mode.id}\n response = self.client.put(url, data)\n json = response.json()\n expected = {'id': model.cohort_user.id, 'role': model.cohort_user.\n role, 'educational_status': model.cohort_user.\n educational_status, 'finantial_status': model.cohort_user.\n finantial_status}\n self.assertEqual(json, expected)\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n self.assertEqual(self.count_cohort_user(), 1)\n self.assertEqual(self.get_cohort_user_dict(1), model_dict)\n\n @patch(GOOGLE_CLOUD_PATH['client'], apply_google_cloud_client_mock())\n @patch(GOOGLE_CLOUD_PATH['bucket'], apply_google_cloud_bucket_mock())\n @patch(GOOGLE_CLOUD_PATH['blob'], apply_google_cloud_blob_mock())\n def test_cohort_id_user_id_delete_with_id_with_bad_user_id(self):\n \"\"\"Test /cohort/:id/user/:id without auth\"\"\"\n model = self.generate_models(authenticate=True, cohort=True, user=\n True, specialty_mode=True, profile_academy=True, cohort_user=True)\n url = reverse_lazy('admissions:cohort_id_user_id', kwargs={\n 'cohort_id': model.cohort.id, 'user_id': 9999})\n data = {'specialty_mode': model.specialty_mode.id}\n response = self.client.delete(url, data)\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)\n\n @patch(GOOGLE_CLOUD_PATH['client'], apply_google_cloud_client_mock())\n @patch(GOOGLE_CLOUD_PATH['bucket'], apply_google_cloud_bucket_mock())\n @patch(GOOGLE_CLOUD_PATH['blob'], apply_google_cloud_blob_mock())\n def test_cohort_id_user_id_delete_with_id_with_bad_cohort_id(self):\n \"\"\"Test /cohort/:id/user/:id without auth\"\"\"\n model = self.generate_models(authenticate=True, cohort=True, user=\n True, specialty_mode=True, profile_academy=True, cohort_user=True)\n url = reverse_lazy('admissions:cohort_id_user_id', kwargs={\n 'cohort_id': 9999, 'user_id': model.user.id})\n data = {'specialty_mode': model.specialty_mode.id}\n 
response = self.client.delete(url, data)\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)\n\n @patch(GOOGLE_CLOUD_PATH['client'], apply_google_cloud_client_mock())\n @patch(GOOGLE_CLOUD_PATH['bucket'], apply_google_cloud_bucket_mock())\n @patch(GOOGLE_CLOUD_PATH['blob'], apply_google_cloud_blob_mock())\n def test_cohort_id_user_id_delete_with_id(self):\n \"\"\"Test /cohort/:id/user/:id without auth\"\"\"\n model = self.generate_models(authenticate=True, cohort=True, user=\n True, specialty_mode=True, profile_academy=True, cohort_user=True)\n url = reverse_lazy('admissions:cohort_id_user_id', kwargs={\n 'cohort_id': model.cohort.id, 'user_id': model.user.id})\n data = {'specialty_mode': model.specialty_mode.id}\n response = self.client.delete(url, data)\n self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT)\n self.assertEqual(self.count_cohort_user(), 0)\n\n @patch(GOOGLE_CLOUD_PATH['client'], apply_google_cloud_client_mock())\n @patch(GOOGLE_CLOUD_PATH['bucket'], apply_google_cloud_bucket_mock())\n @patch(GOOGLE_CLOUD_PATH['blob'], apply_google_cloud_blob_mock())\n def test_cohort_id_user_id_put_with_unsuccess_task(self):\n \"\"\"Test /cohort/:id/user/:id without auth\"\"\"\n model = self.generate_models(authenticate=True, cohort=True, user=\n True, profile_academy=True, cohort_user=True, task=True,\n task_status='PENDING', task_type='PROJECT')\n url = reverse_lazy('admissions:cohort_id_user_id', kwargs={\n 'cohort_id': model.cohort.id, 'user_id': model.user.id})\n data = {'educational_status': 'GRADUATED'}\n response = self.client.put(url, data)\n json = response.json()\n expected = {'status_code': 400, 'detail':\n 'User has tasks with status pending the educational status cannot be GRADUATED'\n }\n self.assertEqual(json, expected)\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)\n\n @patch(GOOGLE_CLOUD_PATH['client'], apply_google_cloud_client_mock())\n @patch(GOOGLE_CLOUD_PATH['bucket'], apply_google_cloud_bucket_mock())\n @patch(GOOGLE_CLOUD_PATH['blob'], apply_google_cloud_blob_mock())\n def test_cohort_id_user_id_put_with_unsuccess_finantial_status(self):\n \"\"\"Test /cohort/:id/user/:id without auth\"\"\"\n model = self.generate_models(authenticate=True, cohort=True, user=\n True, profile_academy=True, cohort_user=True)\n url = reverse_lazy('admissions:cohort_id_user_id', kwargs={\n 'cohort_id': model.cohort.id, 'user_id': model.user.id})\n data = {'educational_status': 'GRADUATED', 'finantial_status': 'LATE'}\n response = self.client.put(url, data)\n json = response.json()\n expected = {'status_code': 400, 'detail':\n 'Cannot be marked as `GRADUATED` if its financial status is `LATE`'\n }\n self.assertEqual(json, expected)\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)\n",
"step-3": "<mask token>\n\n\nclass CohortIdUserIdTestSuite(AdmissionsTestCase):\n \"\"\"Test /cohort/:id/user/:id\"\"\"\n\n @patch(GOOGLE_CLOUD_PATH['client'], apply_google_cloud_client_mock())\n @patch(GOOGLE_CLOUD_PATH['bucket'], apply_google_cloud_bucket_mock())\n @patch(GOOGLE_CLOUD_PATH['blob'], apply_google_cloud_blob_mock())\n def test_cohort_id_user_id_without_auth(self):\n \"\"\"Test /cohort/:id/user/:id without auth\"\"\"\n url = reverse_lazy('admissions:cohort_id_user_id', kwargs={\n 'cohort_id': 1, 'user_id': 1})\n response = self.client.get(url)\n json = response.json()\n self.assertEqual(json, {'detail':\n 'Authentication credentials were not provided.', 'status_code':\n status.HTTP_401_UNAUTHORIZED})\n self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)\n\n @patch(GOOGLE_CLOUD_PATH['client'], apply_google_cloud_client_mock())\n @patch(GOOGLE_CLOUD_PATH['bucket'], apply_google_cloud_bucket_mock())\n @patch(GOOGLE_CLOUD_PATH['blob'], apply_google_cloud_blob_mock())\n def test_cohort_id_user_id_put_with_bad_cohort_id(self):\n \"\"\"Test /cohort/:id/user/:id without auth\"\"\"\n model = self.generate_models(authenticate=True)\n url = reverse_lazy('admissions:cohort_id_user_id', kwargs={\n 'cohort_id': 1, 'user_id': 1})\n data = {}\n response = self.client.put(url, data)\n json = response.json()\n expected = {'status_code': 400, 'detail': 'invalid cohort_id'}\n self.assertEqual(json, expected)\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)\n\n @patch(GOOGLE_CLOUD_PATH['client'], apply_google_cloud_client_mock())\n @patch(GOOGLE_CLOUD_PATH['bucket'], apply_google_cloud_bucket_mock())\n @patch(GOOGLE_CLOUD_PATH['blob'], apply_google_cloud_blob_mock())\n def test_cohort_id_user_id_put_with_bad_user_id(self):\n \"\"\"Test /cohort/:id/user/:id without auth\"\"\"\n model = self.generate_models(authenticate=True, cohort=True)\n url = reverse_lazy('admissions:cohort_id_user_id', kwargs={\n 'cohort_id': model.cohort.id, 'user_id': 999})\n data = {}\n response = self.client.put(url, data)\n json = response.json()\n expected = {'status_code': 400, 'detail': 'invalid user_id'}\n self.assertEqual(json, expected)\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)\n\n @patch(GOOGLE_CLOUD_PATH['client'], apply_google_cloud_client_mock())\n @patch(GOOGLE_CLOUD_PATH['bucket'], apply_google_cloud_bucket_mock())\n @patch(GOOGLE_CLOUD_PATH['blob'], apply_google_cloud_blob_mock())\n def test_cohort_id_user_id_put_with_bad_id(self):\n \"\"\"Test /cohort/:id/user/:id without auth\"\"\"\n model = self.generate_models(authenticate=True, cohort=True, user=True)\n url = reverse_lazy('admissions:cohort_id_user_id', kwargs={\n 'cohort_id': model.cohort.id, 'user_id': model.user.id})\n data = {}\n response = self.client.put(url, data)\n json = response.json()\n expected = {'status_code': 400, 'detail':\n 'Specified cohort not be found'}\n self.assertEqual(json, expected)\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)\n\n @patch(GOOGLE_CLOUD_PATH['client'], apply_google_cloud_client_mock())\n @patch(GOOGLE_CLOUD_PATH['bucket'], apply_google_cloud_bucket_mock())\n @patch(GOOGLE_CLOUD_PATH['blob'], apply_google_cloud_blob_mock())\n def test_cohort_id_user_id_put_with_id_but_without_user(self):\n \"\"\"Test /cohort/:id/user/:id without auth\"\"\"\n model = self.generate_models(authenticate=True, cohort=True)\n url = reverse_lazy('admissions:cohort_id_user_id', kwargs={\n 'cohort_id': model.cohort.id, 'user_id': model.user.id})\n data 
= {}\n response = self.client.put(url, data)\n json = response.json()\n expected = {'status_code': 400, 'detail':\n 'Specified cohort not be found'}\n self.assertEqual(json, expected)\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)\n\n @patch(GOOGLE_CLOUD_PATH['client'], apply_google_cloud_client_mock())\n @patch(GOOGLE_CLOUD_PATH['bucket'], apply_google_cloud_bucket_mock())\n @patch(GOOGLE_CLOUD_PATH['blob'], apply_google_cloud_blob_mock())\n def test_cohort_id_user_id_put_with_id_but_with_user(self):\n \"\"\"Test /cohort/:id/user/:id without auth\"\"\"\n model = self.generate_models(authenticate=True, cohort=True, user=True)\n url = reverse_lazy('admissions:cohort_id_user_id', kwargs={\n 'cohort_id': model.cohort.id, 'user_id': model.user.id})\n data = {}\n response = self.client.put(url, data)\n json = response.json()\n expected = {'status_code': 400, 'detail':\n 'Specified cohort not be found'}\n self.assertEqual(json, expected)\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)\n\n @patch(GOOGLE_CLOUD_PATH['client'], apply_google_cloud_client_mock())\n @patch(GOOGLE_CLOUD_PATH['bucket'], apply_google_cloud_bucket_mock())\n @patch(GOOGLE_CLOUD_PATH['blob'], apply_google_cloud_blob_mock())\n def test_cohort_id_user_id_put_with_id(self):\n \"\"\"Test /cohort/:id/user/:id without auth\"\"\"\n model = self.generate_models(authenticate=True, cohort=True, user=\n True, specialty_mode=True, profile_academy=True, cohort_user=True)\n model_dict = self.get_cohort_user_dict(1)\n url = reverse_lazy('admissions:cohort_id_user_id', kwargs={\n 'cohort_id': model.cohort.id, 'user_id': model.user.id})\n data = {'specialty_mode': model.specialty_mode.id}\n response = self.client.put(url, data)\n json = response.json()\n expected = {'id': model.cohort_user.id, 'role': model.cohort_user.\n role, 'educational_status': model.cohort_user.\n educational_status, 'finantial_status': model.cohort_user.\n finantial_status}\n self.assertEqual(json, expected)\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n self.assertEqual(self.count_cohort_user(), 1)\n self.assertEqual(self.get_cohort_user_dict(1), model_dict)\n\n @patch(GOOGLE_CLOUD_PATH['client'], apply_google_cloud_client_mock())\n @patch(GOOGLE_CLOUD_PATH['bucket'], apply_google_cloud_bucket_mock())\n @patch(GOOGLE_CLOUD_PATH['blob'], apply_google_cloud_blob_mock())\n def test_cohort_id_user_id_delete_with_id_with_bad_user_id(self):\n \"\"\"Test /cohort/:id/user/:id without auth\"\"\"\n model = self.generate_models(authenticate=True, cohort=True, user=\n True, specialty_mode=True, profile_academy=True, cohort_user=True)\n url = reverse_lazy('admissions:cohort_id_user_id', kwargs={\n 'cohort_id': model.cohort.id, 'user_id': 9999})\n data = {'specialty_mode': model.specialty_mode.id}\n response = self.client.delete(url, data)\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)\n\n @patch(GOOGLE_CLOUD_PATH['client'], apply_google_cloud_client_mock())\n @patch(GOOGLE_CLOUD_PATH['bucket'], apply_google_cloud_bucket_mock())\n @patch(GOOGLE_CLOUD_PATH['blob'], apply_google_cloud_blob_mock())\n def test_cohort_id_user_id_delete_with_id_with_bad_cohort_id(self):\n \"\"\"Test /cohort/:id/user/:id without auth\"\"\"\n model = self.generate_models(authenticate=True, cohort=True, user=\n True, specialty_mode=True, profile_academy=True, cohort_user=True)\n url = reverse_lazy('admissions:cohort_id_user_id', kwargs={\n 'cohort_id': 9999, 'user_id': model.user.id})\n data = {'specialty_mode': 
model.specialty_mode.id}\n response = self.client.delete(url, data)\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)\n\n @patch(GOOGLE_CLOUD_PATH['client'], apply_google_cloud_client_mock())\n @patch(GOOGLE_CLOUD_PATH['bucket'], apply_google_cloud_bucket_mock())\n @patch(GOOGLE_CLOUD_PATH['blob'], apply_google_cloud_blob_mock())\n def test_cohort_id_user_id_delete_with_id(self):\n \"\"\"Test /cohort/:id/user/:id without auth\"\"\"\n model = self.generate_models(authenticate=True, cohort=True, user=\n True, specialty_mode=True, profile_academy=True, cohort_user=True)\n url = reverse_lazy('admissions:cohort_id_user_id', kwargs={\n 'cohort_id': model.cohort.id, 'user_id': model.user.id})\n data = {'specialty_mode': model.specialty_mode.id}\n response = self.client.delete(url, data)\n self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT)\n self.assertEqual(self.count_cohort_user(), 0)\n\n @patch(GOOGLE_CLOUD_PATH['client'], apply_google_cloud_client_mock())\n @patch(GOOGLE_CLOUD_PATH['bucket'], apply_google_cloud_bucket_mock())\n @patch(GOOGLE_CLOUD_PATH['blob'], apply_google_cloud_blob_mock())\n def test_cohort_id_user_id_put_with_unsuccess_task(self):\n \"\"\"Test /cohort/:id/user/:id without auth\"\"\"\n model = self.generate_models(authenticate=True, cohort=True, user=\n True, profile_academy=True, cohort_user=True, task=True,\n task_status='PENDING', task_type='PROJECT')\n url = reverse_lazy('admissions:cohort_id_user_id', kwargs={\n 'cohort_id': model.cohort.id, 'user_id': model.user.id})\n data = {'educational_status': 'GRADUATED'}\n response = self.client.put(url, data)\n json = response.json()\n expected = {'status_code': 400, 'detail':\n 'User has tasks with status pending the educational status cannot be GRADUATED'\n }\n self.assertEqual(json, expected)\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)\n\n @patch(GOOGLE_CLOUD_PATH['client'], apply_google_cloud_client_mock())\n @patch(GOOGLE_CLOUD_PATH['bucket'], apply_google_cloud_bucket_mock())\n @patch(GOOGLE_CLOUD_PATH['blob'], apply_google_cloud_blob_mock())\n def test_cohort_id_user_id_put_with_unsuccess_finantial_status(self):\n \"\"\"Test /cohort/:id/user/:id without auth\"\"\"\n model = self.generate_models(authenticate=True, cohort=True, user=\n True, profile_academy=True, cohort_user=True)\n url = reverse_lazy('admissions:cohort_id_user_id', kwargs={\n 'cohort_id': model.cohort.id, 'user_id': model.user.id})\n data = {'educational_status': 'GRADUATED', 'finantial_status': 'LATE'}\n response = self.client.put(url, data)\n json = response.json()\n expected = {'status_code': 400, 'detail':\n 'Cannot be marked as `GRADUATED` if its financial status is `LATE`'\n }\n self.assertEqual(json, expected)\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)\n",
"step-4": "<mask token>\nimport re\nfrom unittest.mock import patch\nfrom django.urls.base import reverse_lazy\nfrom rest_framework import status\nfrom breathecode.tests.mocks import GOOGLE_CLOUD_PATH, apply_google_cloud_client_mock, apply_google_cloud_bucket_mock, apply_google_cloud_blob_mock\nfrom ..mixins import AdmissionsTestCase\n\n\nclass CohortIdUserIdTestSuite(AdmissionsTestCase):\n \"\"\"Test /cohort/:id/user/:id\"\"\"\n\n @patch(GOOGLE_CLOUD_PATH['client'], apply_google_cloud_client_mock())\n @patch(GOOGLE_CLOUD_PATH['bucket'], apply_google_cloud_bucket_mock())\n @patch(GOOGLE_CLOUD_PATH['blob'], apply_google_cloud_blob_mock())\n def test_cohort_id_user_id_without_auth(self):\n \"\"\"Test /cohort/:id/user/:id without auth\"\"\"\n url = reverse_lazy('admissions:cohort_id_user_id', kwargs={\n 'cohort_id': 1, 'user_id': 1})\n response = self.client.get(url)\n json = response.json()\n self.assertEqual(json, {'detail':\n 'Authentication credentials were not provided.', 'status_code':\n status.HTTP_401_UNAUTHORIZED})\n self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)\n\n @patch(GOOGLE_CLOUD_PATH['client'], apply_google_cloud_client_mock())\n @patch(GOOGLE_CLOUD_PATH['bucket'], apply_google_cloud_bucket_mock())\n @patch(GOOGLE_CLOUD_PATH['blob'], apply_google_cloud_blob_mock())\n def test_cohort_id_user_id_put_with_bad_cohort_id(self):\n \"\"\"Test /cohort/:id/user/:id without auth\"\"\"\n model = self.generate_models(authenticate=True)\n url = reverse_lazy('admissions:cohort_id_user_id', kwargs={\n 'cohort_id': 1, 'user_id': 1})\n data = {}\n response = self.client.put(url, data)\n json = response.json()\n expected = {'status_code': 400, 'detail': 'invalid cohort_id'}\n self.assertEqual(json, expected)\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)\n\n @patch(GOOGLE_CLOUD_PATH['client'], apply_google_cloud_client_mock())\n @patch(GOOGLE_CLOUD_PATH['bucket'], apply_google_cloud_bucket_mock())\n @patch(GOOGLE_CLOUD_PATH['blob'], apply_google_cloud_blob_mock())\n def test_cohort_id_user_id_put_with_bad_user_id(self):\n \"\"\"Test /cohort/:id/user/:id without auth\"\"\"\n model = self.generate_models(authenticate=True, cohort=True)\n url = reverse_lazy('admissions:cohort_id_user_id', kwargs={\n 'cohort_id': model.cohort.id, 'user_id': 999})\n data = {}\n response = self.client.put(url, data)\n json = response.json()\n expected = {'status_code': 400, 'detail': 'invalid user_id'}\n self.assertEqual(json, expected)\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)\n\n @patch(GOOGLE_CLOUD_PATH['client'], apply_google_cloud_client_mock())\n @patch(GOOGLE_CLOUD_PATH['bucket'], apply_google_cloud_bucket_mock())\n @patch(GOOGLE_CLOUD_PATH['blob'], apply_google_cloud_blob_mock())\n def test_cohort_id_user_id_put_with_bad_id(self):\n \"\"\"Test /cohort/:id/user/:id without auth\"\"\"\n model = self.generate_models(authenticate=True, cohort=True, user=True)\n url = reverse_lazy('admissions:cohort_id_user_id', kwargs={\n 'cohort_id': model.cohort.id, 'user_id': model.user.id})\n data = {}\n response = self.client.put(url, data)\n json = response.json()\n expected = {'status_code': 400, 'detail':\n 'Specified cohort not be found'}\n self.assertEqual(json, expected)\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)\n\n @patch(GOOGLE_CLOUD_PATH['client'], apply_google_cloud_client_mock())\n @patch(GOOGLE_CLOUD_PATH['bucket'], apply_google_cloud_bucket_mock())\n @patch(GOOGLE_CLOUD_PATH['blob'], 
apply_google_cloud_blob_mock())\n def test_cohort_id_user_id_put_with_id_but_without_user(self):\n \"\"\"Test /cohort/:id/user/:id without auth\"\"\"\n model = self.generate_models(authenticate=True, cohort=True)\n url = reverse_lazy('admissions:cohort_id_user_id', kwargs={\n 'cohort_id': model.cohort.id, 'user_id': model.user.id})\n data = {}\n response = self.client.put(url, data)\n json = response.json()\n expected = {'status_code': 400, 'detail':\n 'Specified cohort not be found'}\n self.assertEqual(json, expected)\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)\n\n @patch(GOOGLE_CLOUD_PATH['client'], apply_google_cloud_client_mock())\n @patch(GOOGLE_CLOUD_PATH['bucket'], apply_google_cloud_bucket_mock())\n @patch(GOOGLE_CLOUD_PATH['blob'], apply_google_cloud_blob_mock())\n def test_cohort_id_user_id_put_with_id_but_with_user(self):\n \"\"\"Test /cohort/:id/user/:id without auth\"\"\"\n model = self.generate_models(authenticate=True, cohort=True, user=True)\n url = reverse_lazy('admissions:cohort_id_user_id', kwargs={\n 'cohort_id': model.cohort.id, 'user_id': model.user.id})\n data = {}\n response = self.client.put(url, data)\n json = response.json()\n expected = {'status_code': 400, 'detail':\n 'Specified cohort not be found'}\n self.assertEqual(json, expected)\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)\n\n @patch(GOOGLE_CLOUD_PATH['client'], apply_google_cloud_client_mock())\n @patch(GOOGLE_CLOUD_PATH['bucket'], apply_google_cloud_bucket_mock())\n @patch(GOOGLE_CLOUD_PATH['blob'], apply_google_cloud_blob_mock())\n def test_cohort_id_user_id_put_with_id(self):\n \"\"\"Test /cohort/:id/user/:id without auth\"\"\"\n model = self.generate_models(authenticate=True, cohort=True, user=\n True, specialty_mode=True, profile_academy=True, cohort_user=True)\n model_dict = self.get_cohort_user_dict(1)\n url = reverse_lazy('admissions:cohort_id_user_id', kwargs={\n 'cohort_id': model.cohort.id, 'user_id': model.user.id})\n data = {'specialty_mode': model.specialty_mode.id}\n response = self.client.put(url, data)\n json = response.json()\n expected = {'id': model.cohort_user.id, 'role': model.cohort_user.\n role, 'educational_status': model.cohort_user.\n educational_status, 'finantial_status': model.cohort_user.\n finantial_status}\n self.assertEqual(json, expected)\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n self.assertEqual(self.count_cohort_user(), 1)\n self.assertEqual(self.get_cohort_user_dict(1), model_dict)\n\n @patch(GOOGLE_CLOUD_PATH['client'], apply_google_cloud_client_mock())\n @patch(GOOGLE_CLOUD_PATH['bucket'], apply_google_cloud_bucket_mock())\n @patch(GOOGLE_CLOUD_PATH['blob'], apply_google_cloud_blob_mock())\n def test_cohort_id_user_id_delete_with_id_with_bad_user_id(self):\n \"\"\"Test /cohort/:id/user/:id without auth\"\"\"\n model = self.generate_models(authenticate=True, cohort=True, user=\n True, specialty_mode=True, profile_academy=True, cohort_user=True)\n url = reverse_lazy('admissions:cohort_id_user_id', kwargs={\n 'cohort_id': model.cohort.id, 'user_id': 9999})\n data = {'specialty_mode': model.specialty_mode.id}\n response = self.client.delete(url, data)\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)\n\n @patch(GOOGLE_CLOUD_PATH['client'], apply_google_cloud_client_mock())\n @patch(GOOGLE_CLOUD_PATH['bucket'], apply_google_cloud_bucket_mock())\n @patch(GOOGLE_CLOUD_PATH['blob'], apply_google_cloud_blob_mock())\n def 
test_cohort_id_user_id_delete_with_id_with_bad_cohort_id(self):\n \"\"\"Test /cohort/:id/user/:id without auth\"\"\"\n model = self.generate_models(authenticate=True, cohort=True, user=\n True, specialty_mode=True, profile_academy=True, cohort_user=True)\n url = reverse_lazy('admissions:cohort_id_user_id', kwargs={\n 'cohort_id': 9999, 'user_id': model.user.id})\n data = {'specialty_mode': model.specialty_mode.id}\n response = self.client.delete(url, data)\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)\n\n @patch(GOOGLE_CLOUD_PATH['client'], apply_google_cloud_client_mock())\n @patch(GOOGLE_CLOUD_PATH['bucket'], apply_google_cloud_bucket_mock())\n @patch(GOOGLE_CLOUD_PATH['blob'], apply_google_cloud_blob_mock())\n def test_cohort_id_user_id_delete_with_id(self):\n \"\"\"Test /cohort/:id/user/:id without auth\"\"\"\n model = self.generate_models(authenticate=True, cohort=True, user=\n True, specialty_mode=True, profile_academy=True, cohort_user=True)\n url = reverse_lazy('admissions:cohort_id_user_id', kwargs={\n 'cohort_id': model.cohort.id, 'user_id': model.user.id})\n data = {'specialty_mode': model.specialty_mode.id}\n response = self.client.delete(url, data)\n self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT)\n self.assertEqual(self.count_cohort_user(), 0)\n\n @patch(GOOGLE_CLOUD_PATH['client'], apply_google_cloud_client_mock())\n @patch(GOOGLE_CLOUD_PATH['bucket'], apply_google_cloud_bucket_mock())\n @patch(GOOGLE_CLOUD_PATH['blob'], apply_google_cloud_blob_mock())\n def test_cohort_id_user_id_put_with_unsuccess_task(self):\n \"\"\"Test /cohort/:id/user/:id without auth\"\"\"\n model = self.generate_models(authenticate=True, cohort=True, user=\n True, profile_academy=True, cohort_user=True, task=True,\n task_status='PENDING', task_type='PROJECT')\n url = reverse_lazy('admissions:cohort_id_user_id', kwargs={\n 'cohort_id': model.cohort.id, 'user_id': model.user.id})\n data = {'educational_status': 'GRADUATED'}\n response = self.client.put(url, data)\n json = response.json()\n expected = {'status_code': 400, 'detail':\n 'User has tasks with status pending the educational status cannot be GRADUATED'\n }\n self.assertEqual(json, expected)\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)\n\n @patch(GOOGLE_CLOUD_PATH['client'], apply_google_cloud_client_mock())\n @patch(GOOGLE_CLOUD_PATH['bucket'], apply_google_cloud_bucket_mock())\n @patch(GOOGLE_CLOUD_PATH['blob'], apply_google_cloud_blob_mock())\n def test_cohort_id_user_id_put_with_unsuccess_finantial_status(self):\n \"\"\"Test /cohort/:id/user/:id without auth\"\"\"\n model = self.generate_models(authenticate=True, cohort=True, user=\n True, profile_academy=True, cohort_user=True)\n url = reverse_lazy('admissions:cohort_id_user_id', kwargs={\n 'cohort_id': model.cohort.id, 'user_id': model.user.id})\n data = {'educational_status': 'GRADUATED', 'finantial_status': 'LATE'}\n response = self.client.put(url, data)\n json = response.json()\n expected = {'status_code': 400, 'detail':\n 'Cannot be marked as `GRADUATED` if its financial status is `LATE`'\n }\n self.assertEqual(json, expected)\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)\n",
"step-5": "\"\"\"\nTest /cohort/:id/user/:id\n\"\"\"\nimport re\nfrom unittest.mock import patch\nfrom django.urls.base import reverse_lazy\nfrom rest_framework import status\nfrom breathecode.tests.mocks import (\n GOOGLE_CLOUD_PATH,\n apply_google_cloud_client_mock,\n apply_google_cloud_bucket_mock,\n apply_google_cloud_blob_mock,\n)\nfrom ..mixins import AdmissionsTestCase\n\n\nclass CohortIdUserIdTestSuite(AdmissionsTestCase):\n \"\"\"Test /cohort/:id/user/:id\"\"\"\n @patch(GOOGLE_CLOUD_PATH['client'], apply_google_cloud_client_mock())\n @patch(GOOGLE_CLOUD_PATH['bucket'], apply_google_cloud_bucket_mock())\n @patch(GOOGLE_CLOUD_PATH['blob'], apply_google_cloud_blob_mock())\n def test_cohort_id_user_id_without_auth(self):\n \"\"\"Test /cohort/:id/user/:id without auth\"\"\"\n url = reverse_lazy('admissions:cohort_id_user_id', kwargs={'cohort_id': 1, 'user_id': 1})\n response = self.client.get(url)\n json = response.json()\n\n self.assertEqual(\n json, {\n 'detail': 'Authentication credentials were not provided.',\n 'status_code': status.HTTP_401_UNAUTHORIZED\n })\n self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)\n\n @patch(GOOGLE_CLOUD_PATH['client'], apply_google_cloud_client_mock())\n @patch(GOOGLE_CLOUD_PATH['bucket'], apply_google_cloud_bucket_mock())\n @patch(GOOGLE_CLOUD_PATH['blob'], apply_google_cloud_blob_mock())\n def test_cohort_id_user_id_put_with_bad_cohort_id(self):\n \"\"\"Test /cohort/:id/user/:id without auth\"\"\"\n model = self.generate_models(authenticate=True)\n url = reverse_lazy('admissions:cohort_id_user_id', kwargs={'cohort_id': 1, 'user_id': 1})\n data = {}\n response = self.client.put(url, data)\n json = response.json()\n expected = {'status_code': 400, 'detail': 'invalid cohort_id'}\n\n self.assertEqual(json, expected)\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)\n\n @patch(GOOGLE_CLOUD_PATH['client'], apply_google_cloud_client_mock())\n @patch(GOOGLE_CLOUD_PATH['bucket'], apply_google_cloud_bucket_mock())\n @patch(GOOGLE_CLOUD_PATH['blob'], apply_google_cloud_blob_mock())\n def test_cohort_id_user_id_put_with_bad_user_id(self):\n \"\"\"Test /cohort/:id/user/:id without auth\"\"\"\n model = self.generate_models(authenticate=True, cohort=True)\n url = reverse_lazy('admissions:cohort_id_user_id',\n kwargs={\n 'cohort_id': model.cohort.id,\n 'user_id': 999\n })\n data = {}\n response = self.client.put(url, data)\n json = response.json()\n expected = {'status_code': 400, 'detail': 'invalid user_id'}\n\n self.assertEqual(json, expected)\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)\n\n @patch(GOOGLE_CLOUD_PATH['client'], apply_google_cloud_client_mock())\n @patch(GOOGLE_CLOUD_PATH['bucket'], apply_google_cloud_bucket_mock())\n @patch(GOOGLE_CLOUD_PATH['blob'], apply_google_cloud_blob_mock())\n def test_cohort_id_user_id_put_with_bad_id(self):\n \"\"\"Test /cohort/:id/user/:id without auth\"\"\"\n model = self.generate_models(authenticate=True, cohort=True, user=True)\n url = reverse_lazy('admissions:cohort_id_user_id',\n kwargs={\n 'cohort_id': model.cohort.id,\n 'user_id': model.user.id\n })\n data = {}\n response = self.client.put(url, data)\n json = response.json()\n expected = {'status_code': 400, 'detail': 'Specified cohort not be found'}\n\n self.assertEqual(json, expected)\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)\n\n @patch(GOOGLE_CLOUD_PATH['client'], apply_google_cloud_client_mock())\n @patch(GOOGLE_CLOUD_PATH['bucket'], apply_google_cloud_bucket_mock())\n 
@patch(GOOGLE_CLOUD_PATH['blob'], apply_google_cloud_blob_mock())\n def test_cohort_id_user_id_put_with_id_but_without_user(self):\n \"\"\"Test /cohort/:id/user/:id without auth\"\"\"\n model = self.generate_models(authenticate=True, cohort=True)\n url = reverse_lazy('admissions:cohort_id_user_id',\n kwargs={\n 'cohort_id': model.cohort.id,\n 'user_id': model.user.id\n })\n data = {}\n response = self.client.put(url, data)\n json = response.json()\n expected = {'status_code': 400, 'detail': 'Specified cohort not be found'}\n\n self.assertEqual(json, expected)\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)\n\n @patch(GOOGLE_CLOUD_PATH['client'], apply_google_cloud_client_mock())\n @patch(GOOGLE_CLOUD_PATH['bucket'], apply_google_cloud_bucket_mock())\n @patch(GOOGLE_CLOUD_PATH['blob'], apply_google_cloud_blob_mock())\n def test_cohort_id_user_id_put_with_id_but_with_user(self):\n \"\"\"Test /cohort/:id/user/:id without auth\"\"\"\n model = self.generate_models(authenticate=True, cohort=True, user=True)\n url = reverse_lazy('admissions:cohort_id_user_id',\n kwargs={\n 'cohort_id': model.cohort.id,\n 'user_id': model.user.id\n })\n data = {}\n response = self.client.put(url, data)\n json = response.json()\n expected = {'status_code': 400, 'detail': 'Specified cohort not be found'}\n\n self.assertEqual(json, expected)\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)\n\n @patch(GOOGLE_CLOUD_PATH['client'], apply_google_cloud_client_mock())\n @patch(GOOGLE_CLOUD_PATH['bucket'], apply_google_cloud_bucket_mock())\n @patch(GOOGLE_CLOUD_PATH['blob'], apply_google_cloud_blob_mock())\n def test_cohort_id_user_id_put_with_id(self):\n \"\"\"Test /cohort/:id/user/:id without auth\"\"\"\n model = self.generate_models(authenticate=True,\n cohort=True,\n user=True,\n specialty_mode=True,\n profile_academy=True,\n cohort_user=True)\n model_dict = self.get_cohort_user_dict(1)\n url = reverse_lazy('admissions:cohort_id_user_id',\n kwargs={\n 'cohort_id': model.cohort.id,\n 'user_id': model.user.id\n })\n data = {'specialty_mode': model.specialty_mode.id}\n response = self.client.put(url, data)\n json = response.json()\n expected = {\n 'id': model.cohort_user.id,\n 'role': model.cohort_user.role,\n 'educational_status': model.cohort_user.educational_status,\n 'finantial_status': model.cohort_user.finantial_status,\n }\n\n self.assertEqual(json, expected)\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n self.assertEqual(self.count_cohort_user(), 1)\n self.assertEqual(self.get_cohort_user_dict(1), model_dict)\n\n @patch(GOOGLE_CLOUD_PATH['client'], apply_google_cloud_client_mock())\n @patch(GOOGLE_CLOUD_PATH['bucket'], apply_google_cloud_bucket_mock())\n @patch(GOOGLE_CLOUD_PATH['blob'], apply_google_cloud_blob_mock())\n def test_cohort_id_user_id_delete_with_id_with_bad_user_id(self):\n \"\"\"Test /cohort/:id/user/:id without auth\"\"\"\n model = self.generate_models(authenticate=True,\n cohort=True,\n user=True,\n specialty_mode=True,\n profile_academy=True,\n cohort_user=True)\n url = reverse_lazy('admissions:cohort_id_user_id',\n kwargs={\n 'cohort_id': model.cohort.id,\n 'user_id': 9999\n })\n data = {'specialty_mode': model.specialty_mode.id}\n response = self.client.delete(url, data)\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)\n\n @patch(GOOGLE_CLOUD_PATH['client'], apply_google_cloud_client_mock())\n @patch(GOOGLE_CLOUD_PATH['bucket'], apply_google_cloud_bucket_mock())\n @patch(GOOGLE_CLOUD_PATH['blob'], 
apply_google_cloud_blob_mock())\n def test_cohort_id_user_id_delete_with_id_with_bad_cohort_id(self):\n \"\"\"Test /cohort/:id/user/:id without auth\"\"\"\n model = self.generate_models(authenticate=True,\n cohort=True,\n user=True,\n specialty_mode=True,\n profile_academy=True,\n cohort_user=True)\n url = reverse_lazy('admissions:cohort_id_user_id',\n kwargs={\n 'cohort_id': 9999,\n 'user_id': model.user.id\n })\n data = {'specialty_mode': model.specialty_mode.id}\n response = self.client.delete(url, data)\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)\n\n @patch(GOOGLE_CLOUD_PATH['client'], apply_google_cloud_client_mock())\n @patch(GOOGLE_CLOUD_PATH['bucket'], apply_google_cloud_bucket_mock())\n @patch(GOOGLE_CLOUD_PATH['blob'], apply_google_cloud_blob_mock())\n def test_cohort_id_user_id_delete_with_id(self):\n \"\"\"Test /cohort/:id/user/:id without auth\"\"\"\n model = self.generate_models(authenticate=True,\n cohort=True,\n user=True,\n specialty_mode=True,\n profile_academy=True,\n cohort_user=True)\n url = reverse_lazy('admissions:cohort_id_user_id',\n kwargs={\n 'cohort_id': model.cohort.id,\n 'user_id': model.user.id\n })\n data = {'specialty_mode': model.specialty_mode.id}\n response = self.client.delete(url, data)\n self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT)\n self.assertEqual(self.count_cohort_user(), 0)\n\n @patch(GOOGLE_CLOUD_PATH['client'], apply_google_cloud_client_mock())\n @patch(GOOGLE_CLOUD_PATH['bucket'], apply_google_cloud_bucket_mock())\n @patch(GOOGLE_CLOUD_PATH['blob'], apply_google_cloud_blob_mock())\n def test_cohort_id_user_id_put_with_unsuccess_task(self):\n \"\"\"Test /cohort/:id/user/:id without auth\"\"\"\n model = self.generate_models(authenticate=True,\n cohort=True,\n user=True,\n profile_academy=True,\n cohort_user=True,\n task=True,\n task_status='PENDING',\n task_type='PROJECT')\n url = reverse_lazy('admissions:cohort_id_user_id',\n kwargs={\n 'cohort_id': model.cohort.id,\n 'user_id': model.user.id\n })\n data = {\n 'educational_status': 'GRADUATED',\n }\n response = self.client.put(url, data)\n json = response.json()\n expected = {\n 'status_code': 400,\n 'detail': 'User has tasks with status pending the educational status cannot be GRADUATED',\n }\n\n self.assertEqual(json, expected)\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)\n\n @patch(GOOGLE_CLOUD_PATH['client'], apply_google_cloud_client_mock())\n @patch(GOOGLE_CLOUD_PATH['bucket'], apply_google_cloud_bucket_mock())\n @patch(GOOGLE_CLOUD_PATH['blob'], apply_google_cloud_blob_mock())\n def test_cohort_id_user_id_put_with_unsuccess_finantial_status(self):\n \"\"\"Test /cohort/:id/user/:id without auth\"\"\"\n model = self.generate_models(authenticate=True,\n cohort=True,\n user=True,\n profile_academy=True,\n cohort_user=True)\n url = reverse_lazy('admissions:cohort_id_user_id',\n kwargs={\n 'cohort_id': model.cohort.id,\n 'user_id': model.user.id\n })\n data = {\n 'educational_status': 'GRADUATED',\n 'finantial_status': 'LATE',\n }\n response = self.client.put(url, data)\n json = response.json()\n expected = {\n 'status_code': 400,\n 'detail': 'Cannot be marked as `GRADUATED` if its financial status is `LATE`',\n }\n\n self.assertEqual(json, expected)\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)\n",
"step-ids": [
9,
13,
14,
15,
16
]
}
|
[
9,
13,
14,
15,
16
] |
# Evolutionary Trees: algorithms for inferring phylogenetic relationships among species.
# Main algorithms: UPGMA (guide-tree construction) and CLUSTALW (progressive multiple alignment).
from dataclasses import dataclass
import FormattingET
@dataclass
class Node:
age: int
num: int
label: str
    alignment: list
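    # The generated dataclass __init__ is overridden below: only the child
    # links are set at construction; age/num/label/alignment are filled in
    # later by upgma and clustalw. Leaves have child1 == child2 == None.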
def __init__(self, child1=None, child2=None):
self.child1 = child1
self.child2 = child2
#UPGMA algos
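# initializeMatrix returns an m x n matrix of zeros.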
def initializeMatrix(m, n):
mtx = [[0 for x in range(n)] for y in range(m)]
return mtx
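# initializeClusters returns the leaf nodes of the tree, which start out as singleton clusters.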
def initializeClusters(t):
numNodes = len(t)
    numLeaves = (numNodes + 1) // 2
    clusters = [0] * numLeaves
    for i in range(numLeaves):
        clusters[i] = t[i]
return clusters
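# initializeTree allocates the 2n - 1 nodes of a binary tree on n leaves; the
# first n nodes get the species names, the rest are labeled as ancestors.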
def initializeTree(speciesNames):
numLeaves = len(speciesNames)
    t = [None] * (2 * numLeaves - 1)
for i in range(len(t)):
vx = Node()
if i < numLeaves:
vx.label = speciesNames[i]
else:
vx.label = "Ancestor species" + str(i)
vx.num = i
t[i] = vx
return t
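# countLeaves returns the number of leaves in the subtree rooted at v; a node
# missing either child is treated as a leaf.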
def countLeaves(v: Node):
if v.child1 is None or v.child2 is None:
return 1
return countLeaves(v.child1) + countLeaves(v.child2)
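# delClusters removes the two clusters that were just merged; col is deleted
# first because col > row, which keeps the index of row valid.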
def delClusters(clusters, row, col):
del clusters[col]
del clusters[row]
return clusters
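# findMinElement scans the upper triangle of the distance matrix for the
# smallest off-diagonal entry and returns (row, col, value).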
def findMinElement(mtx):
minRow = 0
minCol = 1
minElement = mtx[0][1]
for row in range(0, len(mtx)):
for col in range(row+1, len(mtx)):
if mtx[row][col] < minElement:
minRow = row
minCol = col
minElement = mtx[row][col]
return minRow, minCol, minElement
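# delRowCol removes rows and columns row and col from the matrix, again
# deleting col before row so the indices stay valid.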
def delRowCol(mtx, row, col):
del mtx[col]
del mtx[row]
for i in range(len(mtx)):
del mtx[i][col]
del mtx[i][row]
return mtx
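# addRowCol appends a row and column of distances from the newly merged
# cluster: each entry is the size-weighted average of the two merged clusters'
# distances to cluster i (the UPGMA update rule).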
def addRowCol(mtx, clusters, row, col):
newRow = [0]*(len(mtx) + 1)
for i in range(len(newRow) - 1):
if i != row and i != col:
size1 = countLeaves(clusters[row])
size2 = countLeaves(clusters[col])
avg = (size1*mtx[row][i] + size2*mtx[i][col]) / (size1 + size2)
newRow[i] = avg
mtx.append(newRow)
for i in range(len(newRow) - 1):
mtx[i].append(newRow[i])
return mtx
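# upgma builds an ultrametric guide tree by repeatedly merging the two closest
# clusters; each new internal node's age is half the distance between the
# clusters it joins.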
def upgma(mtx, speciesNames):
tree = initializeTree(speciesNames)
clusters = initializeClusters(tree)
numLeaves = len(mtx)
for i in range(numLeaves, 2*numLeaves - 1):
        row, col, minDist = findMinElement(mtx)
        tree[i].age = minDist / 2
tree[i].child1 = clusters[row]
tree[i].child2 = clusters[col]
mtx = addRowCol(mtx, clusters, row, col)
clusters.append(tree[i])
mtx = delRowCol(mtx, row, col)
clusters = delClusters(clusters, row, col)
return tree
#CLUSTALW algos
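# sumPairScores computes the sum-of-pairs score between column idx1 of align1
# and column idx2 of align2: matches add match, mismatches subtract mismatch,
# a symbol against a gap subtracts gap, and gap-gap pairs score 0.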
def sumPairScores(align1, align2, idx1, idx2, match, mismatch, gap):
alignment1 = ['']*len(align1)
for i in range(len(align1)):
alignment1[i] = align1[i][idx1]
alignment2 = [''] * len(align2)
for i in range(len(align2)):
alignment2[i] = align2[i][idx2]
score = 0.0
for char in alignment1:
for char2 in alignment2:
if char == '-' and char2 == '-':
continue
elif char == char2:
score += match
elif char != '-' and char2 != '-':
score -= mismatch
else:
score -= gap
return score
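# generateScoreTable fills a Needleman-Wunsch dynamic-programming table for
# aligning two alignments (profiles); supergap is the penalty for inserting a
# whole gap column into one of the alignments.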
def generateScoreTable(align1, align2, match, mismatch, gap, supergap):
scoreTable = [[0 for j in range(len(align2[0]) + 1)] for i in range(len(align1[0]) + 1)]
for i in range(len(scoreTable)):
scoreTable[i][0] = i * (-supergap)
for i in range(len(scoreTable[0])):
scoreTable[0][i] = i * (-supergap)
for i in range(1, len(align1[0]) + 1):
for j in range(1, len(align2[0]) + 1):
up = scoreTable[i-1][j] - supergap
left = scoreTable[i][j-1] - supergap
diag = scoreTable[i-1][j-1] + sumPairScores(align1, align2, i-1, j-1, match, mismatch, gap)
scoreTable[i][j] = max(up, left, diag)
return scoreTable
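# progressiveBacktrack records, for each cell of the score table, which
# neighbor (UP, LEFT, or DIAG) produced its value, so the optimal alignment
# can be traced back afterwards.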
def progressiveBacktrack(scoreTable, align1, align2, match, mismatch, gap, supergap):
numRows = len(align1[0]) + 1
numCols = len(align2[0]) + 1
backtrack = [['' for i in range(numCols)] for j in range(numRows)]
for i in range(1, numCols):
backtrack[0][i] = "LEFT"
for i in range(1, numRows):
backtrack[i][0] = "UP"
for i in range(1, numRows):
for j in range(1, numCols):
            if scoreTable[i][j] == scoreTable[i-1][j] - supergap:
backtrack[i][j] = "UP"
elif scoreTable[i][j] == scoreTable[i][j-1] - supergap:
backtrack[i][j] = "LEFT"
else:
backtrack[i][j] = "DIAG"
return backtrack
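# backtracker walks the pointer table from the bottom-right corner to the
# origin and rebuilds one aligned row; orientation says whether the string
# belongs to the side alignment (align1) or the top alignment (align2), which
# determines where gaps are inserted.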
def backtracker(string, backtrack, orientation):
aligned = ""
row = len(backtrack) - 1
col = len(backtrack[0]) - 1
    while row != 0 or col != 0:
k = len(string)
if backtrack[row][col] == "UP":
if (orientation == "top"):
aligned = "-" + aligned
elif orientation == "side":
aligned = str(string[k - 1]) + aligned
string = string[:k - 1]
row -= 1
elif backtrack[row][col] == "LEFT":
if (orientation == "side"):
aligned = "-" + aligned
elif orientation == "top":
aligned = str(string[k-1]) + aligned
string = string[:k-1]
col -= 1
else:
aligned = str(string[k-1]) + aligned
string = string[:k-1]
row -= 1
col -= 1
return aligned
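# outputProgressiveAlign applies the backtrack to every row of both input
# alignments, producing the rows of the merged alignment.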
def outputProgressiveAlign(align1, align2, backtrack):
a = [[""] for i in range(len(align1) + len(align2))]
for i in range(len(align1)):
a[i] = backtracker(align1[i], backtrack, "side")
for j in range(len(align1), len(align2) + len(align1)):
a[j] = backtracker(align2[j - len(align1)], backtrack, "top")
return a
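# progressiveAlign aligns two existing alignments: build the score table,
# derive the backtrack pointers, then emit the merged rows.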
def progressiveAlign(align1, align2, match, mismatch, gap, supergap):
scoreTable = generateScoreTable(align1, align2, match, mismatch, gap, supergap)
backtrack = progressiveBacktrack(scoreTable, align1, align2, match, mismatch, gap, supergap)
opt = outputProgressiveAlign(align1, align2, backtrack)
return opt
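# clustalw seeds each leaf of the guide tree with its raw sequence, then
# progressively aligns the two child alignments at every internal node (upgma
# guarantees children precede parents in the node list); the root's alignment
# is the final multiple sequence alignment.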
def clustalw(guideTree, dnaStrings, match, mismatch, gap, supergap):
for i in range(len(dnaStrings)):
guideTree[i].alignment = [dnaStrings[i]]
for j in range(len(dnaStrings), len(guideTree)):
child1 = guideTree[j].child1
child2 = guideTree[j].child2
guideTree[j].alignment = progressiveAlign(child1.alignment, child2.alignment, match, mismatch, gap, supergap)
return guideTree[len(guideTree) - 1].alignment
#main
if __name__ == "__main__":
print("UPGMA Test")
mtx = [[0, 3, 4, 3], [3, 0, 4, 5], [4, 4, 0, 2], [3, 5, 2, 0]]
labels = ["H", "C", "W", "S"]
tree = upgma(mtx, labels)
print("CLUSTALW Test")
#cats = ["USA", "CHN", "ITA"]
mtxreturn = FormattingET.readMatrixFromFile("Datasets/Input/Test-Example/distance.mtx")
mtx1 = mtxreturn[0]
labels1 = mtxreturn[1]
t = upgma(mtx1, labels1)
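    # Scoring parameters; mismatch, gap, and supergap are penalty magnitudes
    # (they are subtracted during scoring).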
match = 1.0
mismatch = 1.0
gap = 1.0
supergap = 6.0
dnaMap = FormattingET.readDNAStringsFromFile("Datasets/Input/Test-Example/RAW/toy-example.fasta")
keyvalues = FormattingET.getKeyValues(dnaMap)
newLabels = keyvalues[0]
newDnaStrings = keyvalues[1]
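    # Presumably reorders the sequences to match the leaf order of the guide tree (labels1).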
dnaStrings = FormattingET.rearrangeStrings(labels1, newLabels, newDnaStrings)
align = clustalw(t, dnaStrings, match, mismatch, gap, supergap)
FormattingET.writeAlignmentToFile(align, labels1, "Datasets/Output/Test-Example", "toy.aln")
print(align)
|
normal
|
{
"blob_id": "53cf2dfe3319c39ca6f1dc890eea578fae654b5b",
"index": 8847,
"step-1": "<mask token>\n\n\n@dataclass\nclass Node:\n age: int\n num: int\n label: str\n alignment: []\n\n def __init__(self, child1=None, child2=None):\n self.child1 = child1\n self.child2 = child2\n\n\n<mask token>\n\n\ndef initializeClusters(t):\n numNodes = len(t)\n numLeaves = (numNodes + 1) / 2\n clusters = [0] * int(numLeaves)\n for i in range(int(numLeaves)):\n clusters[i] = t[i]\n return clusters\n\n\n<mask token>\n\n\ndef upgma(mtx, speciesNames):\n tree = initializeTree(speciesNames)\n clusters = initializeClusters(tree)\n numLeaves = len(mtx)\n for i in range(numLeaves, 2 * numLeaves - 1):\n minElements = findMinElement(mtx)\n row = minElements[0]\n col = minElements[1]\n min = minElements[2]\n tree[i].age = min / 2\n tree[i].child1 = clusters[row]\n tree[i].child2 = clusters[col]\n mtx = addRowCol(mtx, clusters, row, col)\n clusters.append(tree[i])\n mtx = delRowCol(mtx, row, col)\n clusters = delClusters(clusters, row, col)\n return tree\n\n\ndef sumPairScores(align1, align2, idx1, idx2, match, mismatch, gap):\n alignment1 = [''] * len(align1)\n for i in range(len(align1)):\n alignment1[i] = align1[i][idx1]\n alignment2 = [''] * len(align2)\n for i in range(len(align2)):\n alignment2[i] = align2[i][idx2]\n score = 0.0\n for char in alignment1:\n for char2 in alignment2:\n if char == '-' and char2 == '-':\n continue\n elif char == char2:\n score += match\n elif char != '-' and char2 != '-':\n score -= mismatch\n else:\n score -= gap\n return score\n\n\n<mask token>\n\n\ndef progressiveBacktrack(scoreTable, align1, align2, match, mismatch, gap,\n supergap):\n numRows = len(align1[0]) + 1\n numCols = len(align2[0]) + 1\n backtrack = [['' for i in range(numCols)] for j in range(numRows)]\n for i in range(1, numCols):\n backtrack[0][i] = 'LEFT'\n for i in range(1, numRows):\n backtrack[i][0] = 'UP'\n for i in range(1, numRows):\n for j in range(1, numCols):\n if scoreTable[i][j] == scoreTable[i - 1][j] - supergap:\n backtrack[i][j] = 'UP'\n elif scoreTable[i][j] == scoreTable[i][j - 1] - supergap:\n backtrack[i][j] = 'LEFT'\n else:\n backtrack[i][j] = 'DIAG'\n return backtrack\n\n\ndef backtracker(string, backtrack, orientation):\n aligned = ''\n row = len(backtrack) - 1\n col = len(backtrack[0]) - 1\n while row != 0 or col != 0:\n k = len(string)\n if backtrack[row][col] == 'UP':\n if orientation == 'top':\n aligned = '-' + aligned\n elif orientation == 'side':\n aligned = str(string[k - 1]) + aligned\n string = string[:k - 1]\n row -= 1\n elif backtrack[row][col] == 'LEFT':\n if orientation == 'side':\n aligned = '-' + aligned\n elif orientation == 'top':\n aligned = str(string[k - 1]) + aligned\n string = string[:k - 1]\n col -= 1\n else:\n aligned = str(string[k - 1]) + aligned\n string = string[:k - 1]\n row -= 1\n col -= 1\n return aligned\n\n\ndef outputProgressiveAlign(align1, align2, backtrack):\n a = [[''] for i in range(len(align1) + len(align2))]\n for i in range(len(align1)):\n a[i] = backtracker(align1[i], backtrack, 'side')\n for j in range(len(align1), len(align2) + len(align1)):\n a[j] = backtracker(align2[j - len(align1)], backtrack, 'top')\n return a\n\n\n<mask token>\n\n\ndef clustalw(guideTree, dnaStrings, match, mismatch, gap, supergap):\n for i in range(len(dnaStrings)):\n guideTree[i].alignment = [dnaStrings[i]]\n for j in range(len(dnaStrings), len(guideTree)):\n child1 = guideTree[j].child1\n child2 = guideTree[j].child2\n guideTree[j].alignment = progressiveAlign(child1.alignment, child2.\n alignment, match, mismatch, gap, supergap)\n return 
guideTree[len(guideTree) - 1].alignment\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\n@dataclass\nclass Node:\n age: int\n num: int\n label: str\n alignment: []\n\n def __init__(self, child1=None, child2=None):\n self.child1 = child1\n self.child2 = child2\n\n\n<mask token>\n\n\ndef initializeClusters(t):\n numNodes = len(t)\n numLeaves = (numNodes + 1) / 2\n clusters = [0] * int(numLeaves)\n for i in range(int(numLeaves)):\n clusters[i] = t[i]\n return clusters\n\n\n<mask token>\n\n\ndef countLeaves(v: Node):\n if v.child1 is None or v.child2 is None:\n return 1\n return countLeaves(v.child1) + countLeaves(v.child2)\n\n\n<mask token>\n\n\ndef findMinElement(mtx):\n minRow = 0\n minCol = 1\n minElement = mtx[0][1]\n for row in range(0, len(mtx)):\n for col in range(row + 1, len(mtx)):\n if mtx[row][col] < minElement:\n minRow = row\n minCol = col\n minElement = mtx[row][col]\n return minRow, minCol, minElement\n\n\n<mask token>\n\n\ndef upgma(mtx, speciesNames):\n tree = initializeTree(speciesNames)\n clusters = initializeClusters(tree)\n numLeaves = len(mtx)\n for i in range(numLeaves, 2 * numLeaves - 1):\n minElements = findMinElement(mtx)\n row = minElements[0]\n col = minElements[1]\n min = minElements[2]\n tree[i].age = min / 2\n tree[i].child1 = clusters[row]\n tree[i].child2 = clusters[col]\n mtx = addRowCol(mtx, clusters, row, col)\n clusters.append(tree[i])\n mtx = delRowCol(mtx, row, col)\n clusters = delClusters(clusters, row, col)\n return tree\n\n\ndef sumPairScores(align1, align2, idx1, idx2, match, mismatch, gap):\n alignment1 = [''] * len(align1)\n for i in range(len(align1)):\n alignment1[i] = align1[i][idx1]\n alignment2 = [''] * len(align2)\n for i in range(len(align2)):\n alignment2[i] = align2[i][idx2]\n score = 0.0\n for char in alignment1:\n for char2 in alignment2:\n if char == '-' and char2 == '-':\n continue\n elif char == char2:\n score += match\n elif char != '-' and char2 != '-':\n score -= mismatch\n else:\n score -= gap\n return score\n\n\ndef generateScoreTable(align1, align2, match, mismatch, gap, supergap):\n scoreTable = [[(0) for j in range(len(align2[0]) + 1)] for i in range(\n len(align1[0]) + 1)]\n for i in range(len(scoreTable)):\n scoreTable[i][0] = i * -supergap\n for i in range(len(scoreTable[0])):\n scoreTable[0][i] = i * -supergap\n for i in range(1, len(align1[0]) + 1):\n for j in range(1, len(align2[0]) + 1):\n up = scoreTable[i - 1][j] - supergap\n left = scoreTable[i][j - 1] - supergap\n diag = scoreTable[i - 1][j - 1] + sumPairScores(align1, align2,\n i - 1, j - 1, match, mismatch, gap)\n scoreTable[i][j] = max(up, left, diag)\n return scoreTable\n\n\ndef progressiveBacktrack(scoreTable, align1, align2, match, mismatch, gap,\n supergap):\n numRows = len(align1[0]) + 1\n numCols = len(align2[0]) + 1\n backtrack = [['' for i in range(numCols)] for j in range(numRows)]\n for i in range(1, numCols):\n backtrack[0][i] = 'LEFT'\n for i in range(1, numRows):\n backtrack[i][0] = 'UP'\n for i in range(1, numRows):\n for j in range(1, numCols):\n if scoreTable[i][j] == scoreTable[i - 1][j] - supergap:\n backtrack[i][j] = 'UP'\n elif scoreTable[i][j] == scoreTable[i][j - 1] - supergap:\n backtrack[i][j] = 'LEFT'\n else:\n backtrack[i][j] = 'DIAG'\n return backtrack\n\n\ndef backtracker(string, backtrack, orientation):\n aligned = ''\n row = len(backtrack) - 1\n col = len(backtrack[0]) - 1\n while row != 0 or col != 0:\n k = len(string)\n if backtrack[row][col] == 'UP':\n if orientation == 'top':\n aligned = '-' + aligned\n elif orientation == 'side':\n aligned = str(string[k - 1]) + aligned\n string = 
string[:k - 1]\n row -= 1\n elif backtrack[row][col] == 'LEFT':\n if orientation == 'side':\n aligned = '-' + aligned\n elif orientation == 'top':\n aligned = str(string[k - 1]) + aligned\n string = string[:k - 1]\n col -= 1\n else:\n aligned = str(string[k - 1]) + aligned\n string = string[:k - 1]\n row -= 1\n col -= 1\n return aligned\n\n\ndef outputProgressiveAlign(align1, align2, backtrack):\n a = [[''] for i in range(len(align1) + len(align2))]\n for i in range(len(align1)):\n a[i] = backtracker(align1[i], backtrack, 'side')\n for j in range(len(align1), len(align2) + len(align1)):\n a[j] = backtracker(align2[j - len(align1)], backtrack, 'top')\n return a\n\n\n<mask token>\n\n\ndef clustalw(guideTree, dnaStrings, match, mismatch, gap, supergap):\n for i in range(len(dnaStrings)):\n guideTree[i].alignment = [dnaStrings[i]]\n for j in range(len(dnaStrings), len(guideTree)):\n child1 = guideTree[j].child1\n child2 = guideTree[j].child2\n guideTree[j].alignment = progressiveAlign(child1.alignment, child2.\n alignment, match, mismatch, gap, supergap)\n return guideTree[len(guideTree) - 1].alignment\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\n@dataclass\nclass Node:\n age: int\n num: int\n label: str\n alignment: []\n\n def __init__(self, child1=None, child2=None):\n self.child1 = child1\n self.child2 = child2\n\n\ndef initializeMatrix(m, n):\n mtx = [[(0) for x in range(n)] for y in range(m)]\n return mtx\n\n\ndef initializeClusters(t):\n numNodes = len(t)\n numLeaves = (numNodes + 1) / 2\n clusters = [0] * int(numLeaves)\n for i in range(int(numLeaves)):\n clusters[i] = t[i]\n return clusters\n\n\ndef initializeTree(speciesNames):\n numLeaves = len(speciesNames)\n t = [Node] * (2 * numLeaves - 1)\n for i in range(len(t)):\n vx = Node()\n if i < numLeaves:\n vx.label = speciesNames[i]\n else:\n vx.label = 'Ancestor species' + str(i)\n vx.num = i\n t[i] = vx\n return t\n\n\ndef countLeaves(v: Node):\n if v.child1 is None or v.child2 is None:\n return 1\n return countLeaves(v.child1) + countLeaves(v.child2)\n\n\n<mask token>\n\n\ndef findMinElement(mtx):\n minRow = 0\n minCol = 1\n minElement = mtx[0][1]\n for row in range(0, len(mtx)):\n for col in range(row + 1, len(mtx)):\n if mtx[row][col] < minElement:\n minRow = row\n minCol = col\n minElement = mtx[row][col]\n return minRow, minCol, minElement\n\n\n<mask token>\n\n\ndef addRowCol(mtx, clusters, row, col):\n newRow = [0] * (len(mtx) + 1)\n for i in range(len(newRow) - 1):\n if i != row and i != col:\n size1 = countLeaves(clusters[row])\n size2 = countLeaves(clusters[col])\n avg = (size1 * mtx[row][i] + size2 * mtx[i][col]) / (size1 + size2)\n newRow[i] = avg\n mtx.append(newRow)\n for i in range(len(newRow) - 1):\n mtx[i].append(newRow[i])\n return mtx\n\n\ndef upgma(mtx, speciesNames):\n tree = initializeTree(speciesNames)\n clusters = initializeClusters(tree)\n numLeaves = len(mtx)\n for i in range(numLeaves, 2 * numLeaves - 1):\n minElements = findMinElement(mtx)\n row = minElements[0]\n col = minElements[1]\n min = minElements[2]\n tree[i].age = min / 2\n tree[i].child1 = clusters[row]\n tree[i].child2 = clusters[col]\n mtx = addRowCol(mtx, clusters, row, col)\n clusters.append(tree[i])\n mtx = delRowCol(mtx, row, col)\n clusters = delClusters(clusters, row, col)\n return tree\n\n\ndef sumPairScores(align1, align2, idx1, idx2, match, mismatch, gap):\n alignment1 = [''] * len(align1)\n for i in range(len(align1)):\n alignment1[i] = align1[i][idx1]\n alignment2 = [''] * len(align2)\n for i in range(len(align2)):\n alignment2[i] = align2[i][idx2]\n score = 0.0\n for char in alignment1:\n for char2 in alignment2:\n if char == '-' and char2 == '-':\n continue\n elif char == char2:\n score += match\n elif char != '-' and char2 != '-':\n score -= mismatch\n else:\n score -= gap\n return score\n\n\ndef generateScoreTable(align1, align2, match, mismatch, gap, supergap):\n scoreTable = [[(0) for j in range(len(align2[0]) + 1)] for i in range(\n len(align1[0]) + 1)]\n for i in range(len(scoreTable)):\n scoreTable[i][0] = i * -supergap\n for i in range(len(scoreTable[0])):\n scoreTable[0][i] = i * -supergap\n for i in range(1, len(align1[0]) + 1):\n for j in range(1, len(align2[0]) + 1):\n up = scoreTable[i - 1][j] - supergap\n left = scoreTable[i][j - 1] - supergap\n diag = scoreTable[i - 1][j - 1] + sumPairScores(align1, align2,\n i - 1, j - 1, match, mismatch, gap)\n scoreTable[i][j] = max(up, left, diag)\n return scoreTable\n\n\ndef progressiveBacktrack(scoreTable, align1, align2, match, mismatch, gap,\n supergap):\n numRows = len(align1[0]) + 1\n numCols = len(align2[0]) + 1\n backtrack = [['' for i in range(numCols)] for j in range(numRows)]\n 
for i in range(1, numCols):\n backtrack[0][i] = 'LEFT'\n for i in range(1, numRows):\n backtrack[i][0] = 'UP'\n for i in range(1, numRows):\n for j in range(1, numCols):\n if scoreTable[i][j] == scoreTable[i - 1][j] - supergap:\n backtrack[i][j] = 'UP'\n elif scoreTable[i][j] == scoreTable[i][j - 1] - supergap:\n backtrack[i][j] = 'LEFT'\n else:\n backtrack[i][j] = 'DIAG'\n return backtrack\n\n\ndef backtracker(string, backtrack, orientation):\n aligned = ''\n row = len(backtrack) - 1\n col = len(backtrack[0]) - 1\n while row != 0 or col != 0:\n k = len(string)\n if backtrack[row][col] == 'UP':\n if orientation == 'top':\n aligned = '-' + aligned\n elif orientation == 'side':\n aligned = str(string[k - 1]) + aligned\n string = string[:k - 1]\n row -= 1\n elif backtrack[row][col] == 'LEFT':\n if orientation == 'side':\n aligned = '-' + aligned\n elif orientation == 'top':\n aligned = str(string[k - 1]) + aligned\n string = string[:k - 1]\n col -= 1\n else:\n aligned = str(string[k - 1]) + aligned\n string = string[:k - 1]\n row -= 1\n col -= 1\n return aligned\n\n\ndef outputProgressiveAlign(align1, align2, backtrack):\n a = [[''] for i in range(len(align1) + len(align2))]\n for i in range(len(align1)):\n a[i] = backtracker(align1[i], backtrack, 'side')\n for j in range(len(align1), len(align2) + len(align1)):\n a[j] = backtracker(align2[j - len(align1)], backtrack, 'top')\n return a\n\n\ndef progressiveAlign(align1, align2, match, mismatch, gap, supergap):\n scoreTable = generateScoreTable(align1, align2, match, mismatch, gap,\n supergap)\n backtrack = progressiveBacktrack(scoreTable, align1, align2, match,\n mismatch, gap, supergap)\n opt = outputProgressiveAlign(align1, align2, backtrack)\n return opt\n\n\ndef clustalw(guideTree, dnaStrings, match, mismatch, gap, supergap):\n for i in range(len(dnaStrings)):\n guideTree[i].alignment = [dnaStrings[i]]\n for j in range(len(dnaStrings), len(guideTree)):\n child1 = guideTree[j].child1\n child2 = guideTree[j].child2\n guideTree[j].alignment = progressiveAlign(child1.alignment, child2.\n alignment, match, mismatch, gap, supergap)\n return guideTree[len(guideTree) - 1].alignment\n\n\n<mask token>\n",
"step-4": "<mask token>\n\n\n@dataclass\nclass Node:\n age: int\n num: int\n label: str\n alignment: []\n\n def __init__(self, child1=None, child2=None):\n self.child1 = child1\n self.child2 = child2\n\n\ndef initializeMatrix(m, n):\n mtx = [[(0) for x in range(n)] for y in range(m)]\n return mtx\n\n\ndef initializeClusters(t):\n numNodes = len(t)\n numLeaves = (numNodes + 1) / 2\n clusters = [0] * int(numLeaves)\n for i in range(int(numLeaves)):\n clusters[i] = t[i]\n return clusters\n\n\ndef initializeTree(speciesNames):\n numLeaves = len(speciesNames)\n t = [Node] * (2 * numLeaves - 1)\n for i in range(len(t)):\n vx = Node()\n if i < numLeaves:\n vx.label = speciesNames[i]\n else:\n vx.label = 'Ancestor species' + str(i)\n vx.num = i\n t[i] = vx\n return t\n\n\ndef countLeaves(v: Node):\n if v.child1 is None or v.child2 is None:\n return 1\n return countLeaves(v.child1) + countLeaves(v.child2)\n\n\ndef delClusters(clusters, row, col):\n del clusters[col]\n del clusters[row]\n return clusters\n\n\ndef findMinElement(mtx):\n minRow = 0\n minCol = 1\n minElement = mtx[0][1]\n for row in range(0, len(mtx)):\n for col in range(row + 1, len(mtx)):\n if mtx[row][col] < minElement:\n minRow = row\n minCol = col\n minElement = mtx[row][col]\n return minRow, minCol, minElement\n\n\n<mask token>\n\n\ndef addRowCol(mtx, clusters, row, col):\n newRow = [0] * (len(mtx) + 1)\n for i in range(len(newRow) - 1):\n if i != row and i != col:\n size1 = countLeaves(clusters[row])\n size2 = countLeaves(clusters[col])\n avg = (size1 * mtx[row][i] + size2 * mtx[i][col]) / (size1 + size2)\n newRow[i] = avg\n mtx.append(newRow)\n for i in range(len(newRow) - 1):\n mtx[i].append(newRow[i])\n return mtx\n\n\ndef upgma(mtx, speciesNames):\n tree = initializeTree(speciesNames)\n clusters = initializeClusters(tree)\n numLeaves = len(mtx)\n for i in range(numLeaves, 2 * numLeaves - 1):\n minElements = findMinElement(mtx)\n row = minElements[0]\n col = minElements[1]\n min = minElements[2]\n tree[i].age = min / 2\n tree[i].child1 = clusters[row]\n tree[i].child2 = clusters[col]\n mtx = addRowCol(mtx, clusters, row, col)\n clusters.append(tree[i])\n mtx = delRowCol(mtx, row, col)\n clusters = delClusters(clusters, row, col)\n return tree\n\n\ndef sumPairScores(align1, align2, idx1, idx2, match, mismatch, gap):\n alignment1 = [''] * len(align1)\n for i in range(len(align1)):\n alignment1[i] = align1[i][idx1]\n alignment2 = [''] * len(align2)\n for i in range(len(align2)):\n alignment2[i] = align2[i][idx2]\n score = 0.0\n for char in alignment1:\n for char2 in alignment2:\n if char == '-' and char2 == '-':\n continue\n elif char == char2:\n score += match\n elif char != '-' and char2 != '-':\n score -= mismatch\n else:\n score -= gap\n return score\n\n\ndef generateScoreTable(align1, align2, match, mismatch, gap, supergap):\n scoreTable = [[(0) for j in range(len(align2[0]) + 1)] for i in range(\n len(align1[0]) + 1)]\n for i in range(len(scoreTable)):\n scoreTable[i][0] = i * -supergap\n for i in range(len(scoreTable[0])):\n scoreTable[0][i] = i * -supergap\n for i in range(1, len(align1[0]) + 1):\n for j in range(1, len(align2[0]) + 1):\n up = scoreTable[i - 1][j] - supergap\n left = scoreTable[i][j - 1] - supergap\n diag = scoreTable[i - 1][j - 1] + sumPairScores(align1, align2,\n i - 1, j - 1, match, mismatch, gap)\n scoreTable[i][j] = max(up, left, diag)\n return scoreTable\n\n\ndef progressiveBacktrack(scoreTable, align1, align2, match, mismatch, gap,\n supergap):\n numRows = len(align1[0]) + 1\n numCols = 
len(align2[0]) + 1\n backtrack = [['' for i in range(numCols)] for j in range(numRows)]\n for i in range(1, numCols):\n backtrack[0][i] = 'LEFT'\n for i in range(1, numRows):\n backtrack[i][0] = 'UP'\n for i in range(1, numRows):\n for j in range(1, numCols):\n if scoreTable[i][j] == scoreTable[i - 1][j] - supergap:\n backtrack[i][j] = 'UP'\n elif scoreTable[i][j] == scoreTable[i][j - 1] - supergap:\n backtrack[i][j] = 'LEFT'\n else:\n backtrack[i][j] = 'DIAG'\n return backtrack\n\n\ndef backtracker(string, backtrack, orientation):\n aligned = ''\n row = len(backtrack) - 1\n col = len(backtrack[0]) - 1\n while row != 0 or col != 0:\n k = len(string)\n if backtrack[row][col] == 'UP':\n if orientation == 'top':\n aligned = '-' + aligned\n elif orientation == 'side':\n aligned = str(string[k - 1]) + aligned\n string = string[:k - 1]\n row -= 1\n elif backtrack[row][col] == 'LEFT':\n if orientation == 'side':\n aligned = '-' + aligned\n elif orientation == 'top':\n aligned = str(string[k - 1]) + aligned\n string = string[:k - 1]\n col -= 1\n else:\n aligned = str(string[k - 1]) + aligned\n string = string[:k - 1]\n row -= 1\n col -= 1\n return aligned\n\n\ndef outputProgressiveAlign(align1, align2, backtrack):\n a = [[''] for i in range(len(align1) + len(align2))]\n for i in range(len(align1)):\n a[i] = backtracker(align1[i], backtrack, 'side')\n for j in range(len(align1), len(align2) + len(align1)):\n a[j] = backtracker(align2[j - len(align1)], backtrack, 'top')\n return a\n\n\ndef progressiveAlign(align1, align2, match, mismatch, gap, supergap):\n scoreTable = generateScoreTable(align1, align2, match, mismatch, gap,\n supergap)\n backtrack = progressiveBacktrack(scoreTable, align1, align2, match,\n mismatch, gap, supergap)\n opt = outputProgressiveAlign(align1, align2, backtrack)\n return opt\n\n\ndef clustalw(guideTree, dnaStrings, match, mismatch, gap, supergap):\n for i in range(len(dnaStrings)):\n guideTree[i].alignment = [dnaStrings[i]]\n for j in range(len(dnaStrings), len(guideTree)):\n child1 = guideTree[j].child1\n child2 = guideTree[j].child2\n guideTree[j].alignment = progressiveAlign(child1.alignment, child2.\n alignment, match, mismatch, gap, supergap)\n return guideTree[len(guideTree) - 1].alignment\n\n\n<mask token>\n",
"step-5": "# Evolutionary Trees contains algorithms and methods used in determining phylogenetic inheritance of various species.\n# Main algos UPGMA and CLUSTALW\nfrom dataclasses import dataclass\nimport FormattingET\n\n@dataclass\nclass Node:\n age: int\n num: int\n label: str\n alignment: []\n def __init__(self, child1=None, child2=None):\n self.child1 = child1\n self.child2 = child2\n\n#UPGMA algos\n\ndef initializeMatrix(m, n):\n mtx = [[0 for x in range(n)] for y in range(m)]\n return mtx\n\ndef initializeClusters(t):\n numNodes = len(t)\n numLeaves = (numNodes + 1) / 2\n clusters = [0]*int(numLeaves)\n\n for i in range(int(numLeaves)):\n clusters[i] = t[i]\n\n return clusters\n\ndef initializeTree(speciesNames):\n numLeaves = len(speciesNames)\n\n t = [Node]*(2*numLeaves - 1)\n\n for i in range(len(t)):\n vx = Node()\n\n if i < numLeaves:\n vx.label = speciesNames[i]\n else:\n vx.label = \"Ancestor species\" + str(i)\n vx.num = i\n t[i] = vx\n\n return t\n\ndef countLeaves(v: Node):\n if v.child1 is None or v.child2 is None:\n return 1\n\n return countLeaves(v.child1) + countLeaves(v.child2)\n\ndef delClusters(clusters, row, col):\n del clusters[col]\n del clusters[row]\n return clusters\n\ndef findMinElement(mtx):\n minRow = 0\n minCol = 1\n minElement = mtx[0][1]\n for row in range(0, len(mtx)):\n for col in range(row+1, len(mtx)):\n if mtx[row][col] < minElement:\n minRow = row\n minCol = col\n minElement = mtx[row][col]\n\n return minRow, minCol, minElement\n\ndef delRowCol(mtx, row, col):\n del mtx[col]\n del mtx[row]\n\n for i in range(len(mtx)):\n del mtx[i][col]\n del mtx[i][row]\n\n return mtx\n\ndef addRowCol(mtx, clusters, row, col):\n newRow = [0]*(len(mtx) + 1)\n\n for i in range(len(newRow) - 1):\n if i != row and i != col:\n size1 = countLeaves(clusters[row])\n size2 = countLeaves(clusters[col])\n avg = (size1*mtx[row][i] + size2*mtx[i][col]) / (size1 + size2)\n newRow[i] = avg\n\n mtx.append(newRow)\n\n for i in range(len(newRow) - 1):\n mtx[i].append(newRow[i])\n\n return mtx\n\ndef upgma(mtx, speciesNames):\n tree = initializeTree(speciesNames)\n clusters = initializeClusters(tree)\n numLeaves = len(mtx)\n\n for i in range(numLeaves, 2*numLeaves - 1):\n minElements = findMinElement(mtx)\n row = minElements[0]\n col = minElements[1]\n min = minElements[2]\n\n tree[i].age = min/2\n tree[i].child1 = clusters[row]\n tree[i].child2 = clusters[col]\n\n mtx = addRowCol(mtx, clusters, row, col)\n clusters.append(tree[i])\n mtx = delRowCol(mtx, row, col)\n\n clusters = delClusters(clusters, row, col)\n\n return tree\n\n#CLUSTALW algos\n\ndef sumPairScores(align1, align2, idx1, idx2, match, mismatch, gap):\n alignment1 = ['']*len(align1)\n for i in range(len(align1)):\n alignment1[i] = align1[i][idx1]\n\n alignment2 = [''] * len(align2)\n for i in range(len(align2)):\n alignment2[i] = align2[i][idx2]\n\n score = 0.0\n\n for char in alignment1:\n for char2 in alignment2:\n if char == '-' and char2 == '-':\n continue\n elif char == char2:\n score += match\n elif char != '-' and char2 != '-':\n score -= mismatch\n else:\n score -= gap\n\n return score\n\ndef generateScoreTable(align1, align2, match, mismatch, gap, supergap):\n scoreTable = [[0 for j in range(len(align2[0]) + 1)] for i in range(len(align1[0]) + 1)]\n\n for i in range(len(scoreTable)):\n scoreTable[i][0] = i * (-supergap)\n for i in range(len(scoreTable[0])):\n scoreTable[0][i] = i * (-supergap)\n\n for i in range(1, len(align1[0]) + 1):\n for j in range(1, len(align2[0]) + 1):\n\n up = scoreTable[i-1][j] - 
supergap\n left = scoreTable[i][j-1] - supergap\n diag = scoreTable[i-1][j-1] + sumPairScores(align1, align2, i-1, j-1, match, mismatch, gap)\n\n scoreTable[i][j] = max(up, left, diag)\n\n return scoreTable\n\ndef progressiveBacktrack(scoreTable, align1, align2, match, mismatch, gap, supergap):\n numRows = len(align1[0]) + 1\n numCols = len(align2[0]) + 1\n\n backtrack = [['' for i in range(numCols)] for j in range(numRows)]\n\n for i in range(1, numCols):\n backtrack[0][i] = \"LEFT\"\n for i in range(1, numRows):\n backtrack[i][0] = \"UP\"\n\n for i in range(1, numRows):\n for j in range(1, numCols):\n if (scoreTable[i][j] == scoreTable[i-1][j] - supergap):\n backtrack[i][j] = \"UP\"\n elif scoreTable[i][j] == scoreTable[i][j-1] - supergap:\n backtrack[i][j] = \"LEFT\"\n else:\n backtrack[i][j] = \"DIAG\"\n\n return backtrack\n\ndef backtracker(string, backtrack, orientation):\n aligned = \"\"\n\n row = len(backtrack) - 1\n col = len(backtrack[0]) - 1\n\n while(row != 0 or col != 0):\n k = len(string)\n\n if backtrack[row][col] == \"UP\":\n if (orientation == \"top\"):\n aligned = \"-\" + aligned\n elif orientation == \"side\":\n aligned = str(string[k - 1]) + aligned\n string = string[:k - 1]\n row -= 1\n elif backtrack[row][col] == \"LEFT\":\n if (orientation == \"side\"):\n aligned = \"-\" + aligned\n elif orientation == \"top\":\n aligned = str(string[k-1]) + aligned\n string = string[:k-1]\n col -= 1\n else:\n aligned = str(string[k-1]) + aligned\n string = string[:k-1]\n row -= 1\n col -= 1\n\n return aligned\n\ndef outputProgressiveAlign(align1, align2, backtrack):\n a = [[\"\"] for i in range(len(align1) + len(align2))]\n\n for i in range(len(align1)):\n a[i] = backtracker(align1[i], backtrack, \"side\")\n for j in range(len(align1), len(align2) + len(align1)):\n a[j] = backtracker(align2[j - len(align1)], backtrack, \"top\")\n\n return a\n\ndef progressiveAlign(align1, align2, match, mismatch, gap, supergap):\n scoreTable = generateScoreTable(align1, align2, match, mismatch, gap, supergap)\n backtrack = progressiveBacktrack(scoreTable, align1, align2, match, mismatch, gap, supergap)\n opt = outputProgressiveAlign(align1, align2, backtrack)\n\n return opt\n\ndef clustalw(guideTree, dnaStrings, match, mismatch, gap, supergap):\n\n for i in range(len(dnaStrings)):\n guideTree[i].alignment = [dnaStrings[i]]\n\n for j in range(len(dnaStrings), len(guideTree)):\n child1 = guideTree[j].child1\n child2 = guideTree[j].child2\n\n guideTree[j].alignment = progressiveAlign(child1.alignment, child2.alignment, match, mismatch, gap, supergap)\n\n return guideTree[len(guideTree) - 1].alignment\n\n\n#main\nif __name__ == \"__main__\":\n print(\"UPGMA Test\")\n mtx = [[0, 3, 4, 3], [3, 0, 4, 5], [4, 4, 0, 2], [3, 5, 2, 0]]\n labels = [\"H\", \"C\", \"W\", \"S\"]\n tree = upgma(mtx, labels)\n\n print(\"CLUSTALW Test\")\n \n #cats = [\"USA\", \"CHN\", \"ITA\"]\n\n mtxreturn = FormattingET.readMatrixFromFile(\"Datasets/Input/Test-Example/distance.mtx\")\n mtx1 = mtxreturn[0]\n labels1 = mtxreturn[1]\n\n t = upgma(mtx1, labels1)\n\n match = 1.0\n mismatch = 1.0\n gap = 1.0\n supergap = 6.0\n \n dnaMap = FormattingET.readDNAStringsFromFile(\"Datasets/Input/Test-Example/RAW/toy-example.fasta\")\n keyvalues = FormattingET.getKeyValues(dnaMap)\n newLabels = keyvalues[0]\n newDnaStrings = keyvalues[1]\n\n dnaStrings = FormattingET.rearrangeStrings(labels1, newLabels, newDnaStrings)\n align = clustalw(t, dnaStrings, match, mismatch, gap, supergap)\n FormattingET.writeAlignmentToFile(align, labels1, 
\"Datasets/Output/Test-Example\", \"toy.aln\")\n print(align)\n ",
"step-ids": [
9,
12,
16,
17,
21
]
}
|
[
9,
12,
16,
17,
21
] |
import numpy as np
import math
import random
from numpy.linalg import inv
from scipy.optimize import minimize
from Util import to_vector
class TS_RLR:
def __init__(self, alpha):
self.d = 6
self.k = 6
self.alpha = alpha
self.batch_size = 1000
self.training_size = 1000
self.impressions = 0
self.batch_ids = list([])
self.batch_clicks = np.array([])
self.articles_1_d = np.array([])
self.article_ids = dict()
self.bad_articles = set()
self.mu = np.zeros(self.d)
self.q = self.alpha * np.ones(self.d)
def sigmoid(self, x):
return 1.0 / (1.0 + math.exp(-x))
def add_new_article(self, line):
article_id = int(line.split(" ")[0])
if article_id in self.bad_articles:
return -1
if article_id not in self.article_ids:
try:
article = to_vector(line)
except IndexError:
self.bad_articles.add(article_id)
return -1
self.article_ids[article_id] = len(self.article_ids)
self.articles_1_d = np.append(self.articles_1_d, article).reshape([len(self.article_ids), self.d])
return article_id
def to_minimize(self, w):
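		# negative log-posterior: diagonal Gaussian prior N(mu, 1/q) regulariser
		# plus the logistic log-loss over the batch (labels assumed in {-1, +1})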
return 1/2 * sum (self.q * (w - self.mu) * (w - self.mu)) + sum(np.log(1+np.exp(-self.batch_clicks * w.dot(self.batch_articles))))
def update(self, user, selected_article, click):
self.impressions += 1
self.batch_ids.append(self.article_ids[selected_article])
self.batch_clicks = np.append(self.batch_clicks, click)
if self.impressions % self.batch_size == 0:
w = np.random.normal(0, 1, self.d)
			# transpose to [d, batch_size]; a plain reshape would scramble the feature axis
			self.batch_articles = self.articles_1_d[self.batch_ids].T
res = minimize(self.to_minimize, w, method='nelder-mead', options={'xtol': 1e-8, 'disp': False})
			# store the updated posterior mean; keeping it in a separate attribute
			# would leave self.mu at zero, so select() would never learn
			self.mu = res.x
			p = 1 / (1 + np.exp(-self.mu.dot(self.batch_articles)))
			for i in np.arange(0, self.d):
				# q_i accumulates, over the batch, sum_j x_ij^2 * p_j * (1 - p_j)
				self.q[i] += sum(self.batch_articles[i] * self.batch_articles[i] * p * (1 - p))
self.batch_ids = list([])
self.batch_clicks = np.array([])
def warmup(self, file):
pass
def select(self, user, pre_selected_article, lines, total_impressions, click):
selected_article = -1
warmup = False
if self.impressions < self.training_size:
for line in lines:
self.add_new_article(line)
self.update(user, pre_selected_article, click)
selected_article = pre_selected_article
warmup = True
else:
best_value = 0
best_value_articles = list()
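			# Thompson step: sample weights from the diagonal Gaussian posterior N(mu, 1/q)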
sample_w = np.random.multivariate_normal(self.mu, np.diag(1/self.q))
for line in lines:
article_id = self.add_new_article(line)
if article_id == -1 :
continue
a_id = self.article_ids[article_id]
article = self.articles_1_d[a_id]
cur_value = self.sigmoid(sample_w.dot(article))
if cur_value > best_value:
best_value_articles = list([article_id])
best_value = cur_value
elif cur_value == best_value:
best_value_articles.append(article_id)
index = random.randint(0, len(best_value_articles)-1)
selected_article = best_value_articles[index]
return selected_article, warmup
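
# Hypothetical replay-loop sketch (not part of the original file; the
# log-reading helper and parameter values are illustrative assumptions):
#
#   agent = TS_RLR(alpha=1.0)
#   for user, pre_selected, article_lines, click in replay_log():
#       chosen, warm = agent.select(user, pre_selected, article_lines,
#                                   total_impressions=0, click=click)
#       if not warm and chosen == pre_selected:
#           agent.update(user, chosen, click)  # replay: only matched events count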
|
normal
|
{
"blob_id": "49df9db508637ce5914aa6591178a03c609b6bc7",
"index": 659,
"step-1": "<mask token>\n\n\nclass TS_RLR:\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass TS_RLR:\n\n def __init__(self, alpha):\n self.d = 6\n self.k = 6\n self.alpha = alpha\n self.batch_size = 1000\n self.training_size = 1000\n self.impressions = 0\n self.batch_ids = list([])\n self.batch_clicks = np.array([])\n self.articles_1_d = np.array([])\n self.article_ids = dict()\n self.bad_articles = set()\n self.mu = np.zeros(self.d)\n self.q = self.alpha * np.ones(self.d)\n <mask token>\n\n def add_new_article(self, line):\n article_id = int(line.split(' ')[0])\n if article_id in self.bad_articles:\n return -1\n if article_id not in self.article_ids:\n try:\n article = to_vector(line)\n except IndexError:\n self.bad_articles.add(article_id)\n return -1\n self.article_ids[article_id] = len(self.article_ids)\n self.articles_1_d = np.append(self.articles_1_d, article).reshape([\n len(self.article_ids), self.d])\n return article_id\n\n def to_minimize(self, w):\n return 1 / 2 * sum(self.q * (w - self.mu) * (w - self.mu)) + sum(np\n .log(1 + np.exp(-self.batch_clicks * w.dot(self.batch_articles))))\n\n def update(self, user, selected_article, click):\n self.impressions += 1\n self.batch_ids.append(self.article_ids[selected_article])\n self.batch_clicks = np.append(self.batch_clicks, click)\n if self.impressions % self.batch_size == 0:\n w = np.random.normal(0, 1, self.d)\n self.batch_articles = self.articles_1_d[self.batch_ids].reshape([\n self.d, self.batch_size])\n res = minimize(self.to_minimize, w, method='nelder-mead',\n options={'xtol': 1e-08, 'disp': False})\n self.m = res.x\n p = 1 / (1 + np.exp(-self.m.dot(self.batch_articles)))\n for i in np.arange(0, self.d):\n self.q[i] += sum(self.batch_articles[i] * self.\n batch_articles[i] * p[i] * (1 - p[i]))\n self.batch_ids = list([])\n self.batch_clicks = np.array([])\n <mask token>\n\n def select(self, user, pre_selected_article, lines, total_impressions,\n click):\n selected_article = -1\n warmup = False\n if self.impressions < self.training_size:\n for line in lines:\n self.add_new_article(line)\n self.update(user, pre_selected_article, click)\n selected_article = pre_selected_article\n warmup = True\n else:\n best_value = 0\n best_value_articles = list()\n sample_w = np.random.multivariate_normal(self.mu, np.diag(1 /\n self.q))\n for line in lines:\n article_id = self.add_new_article(line)\n if article_id == -1:\n continue\n a_id = self.article_ids[article_id]\n article = self.articles_1_d[a_id]\n cur_value = self.sigmoid(sample_w.dot(article))\n if cur_value > best_value:\n best_value_articles = list([article_id])\n best_value = cur_value\n elif cur_value == best_value:\n best_value_articles.append(article_id)\n index = random.randint(0, len(best_value_articles) - 1)\n selected_article = best_value_articles[index]\n return selected_article, warmup\n",
"step-3": "<mask token>\n\n\nclass TS_RLR:\n\n def __init__(self, alpha):\n self.d = 6\n self.k = 6\n self.alpha = alpha\n self.batch_size = 1000\n self.training_size = 1000\n self.impressions = 0\n self.batch_ids = list([])\n self.batch_clicks = np.array([])\n self.articles_1_d = np.array([])\n self.article_ids = dict()\n self.bad_articles = set()\n self.mu = np.zeros(self.d)\n self.q = self.alpha * np.ones(self.d)\n <mask token>\n\n def add_new_article(self, line):\n article_id = int(line.split(' ')[0])\n if article_id in self.bad_articles:\n return -1\n if article_id not in self.article_ids:\n try:\n article = to_vector(line)\n except IndexError:\n self.bad_articles.add(article_id)\n return -1\n self.article_ids[article_id] = len(self.article_ids)\n self.articles_1_d = np.append(self.articles_1_d, article).reshape([\n len(self.article_ids), self.d])\n return article_id\n\n def to_minimize(self, w):\n return 1 / 2 * sum(self.q * (w - self.mu) * (w - self.mu)) + sum(np\n .log(1 + np.exp(-self.batch_clicks * w.dot(self.batch_articles))))\n\n def update(self, user, selected_article, click):\n self.impressions += 1\n self.batch_ids.append(self.article_ids[selected_article])\n self.batch_clicks = np.append(self.batch_clicks, click)\n if self.impressions % self.batch_size == 0:\n w = np.random.normal(0, 1, self.d)\n self.batch_articles = self.articles_1_d[self.batch_ids].reshape([\n self.d, self.batch_size])\n res = minimize(self.to_minimize, w, method='nelder-mead',\n options={'xtol': 1e-08, 'disp': False})\n self.m = res.x\n p = 1 / (1 + np.exp(-self.m.dot(self.batch_articles)))\n for i in np.arange(0, self.d):\n self.q[i] += sum(self.batch_articles[i] * self.\n batch_articles[i] * p[i] * (1 - p[i]))\n self.batch_ids = list([])\n self.batch_clicks = np.array([])\n\n def warmup(self, file):\n pass\n\n def select(self, user, pre_selected_article, lines, total_impressions,\n click):\n selected_article = -1\n warmup = False\n if self.impressions < self.training_size:\n for line in lines:\n self.add_new_article(line)\n self.update(user, pre_selected_article, click)\n selected_article = pre_selected_article\n warmup = True\n else:\n best_value = 0\n best_value_articles = list()\n sample_w = np.random.multivariate_normal(self.mu, np.diag(1 /\n self.q))\n for line in lines:\n article_id = self.add_new_article(line)\n if article_id == -1:\n continue\n a_id = self.article_ids[article_id]\n article = self.articles_1_d[a_id]\n cur_value = self.sigmoid(sample_w.dot(article))\n if cur_value > best_value:\n best_value_articles = list([article_id])\n best_value = cur_value\n elif cur_value == best_value:\n best_value_articles.append(article_id)\n index = random.randint(0, len(best_value_articles) - 1)\n selected_article = best_value_articles[index]\n return selected_article, warmup\n",
"step-4": "<mask token>\n\n\nclass TS_RLR:\n\n def __init__(self, alpha):\n self.d = 6\n self.k = 6\n self.alpha = alpha\n self.batch_size = 1000\n self.training_size = 1000\n self.impressions = 0\n self.batch_ids = list([])\n self.batch_clicks = np.array([])\n self.articles_1_d = np.array([])\n self.article_ids = dict()\n self.bad_articles = set()\n self.mu = np.zeros(self.d)\n self.q = self.alpha * np.ones(self.d)\n\n def sigmoid(self, x):\n return 1.0 / (1.0 + math.exp(-x))\n\n def add_new_article(self, line):\n article_id = int(line.split(' ')[0])\n if article_id in self.bad_articles:\n return -1\n if article_id not in self.article_ids:\n try:\n article = to_vector(line)\n except IndexError:\n self.bad_articles.add(article_id)\n return -1\n self.article_ids[article_id] = len(self.article_ids)\n self.articles_1_d = np.append(self.articles_1_d, article).reshape([\n len(self.article_ids), self.d])\n return article_id\n\n def to_minimize(self, w):\n return 1 / 2 * sum(self.q * (w - self.mu) * (w - self.mu)) + sum(np\n .log(1 + np.exp(-self.batch_clicks * w.dot(self.batch_articles))))\n\n def update(self, user, selected_article, click):\n self.impressions += 1\n self.batch_ids.append(self.article_ids[selected_article])\n self.batch_clicks = np.append(self.batch_clicks, click)\n if self.impressions % self.batch_size == 0:\n w = np.random.normal(0, 1, self.d)\n self.batch_articles = self.articles_1_d[self.batch_ids].reshape([\n self.d, self.batch_size])\n res = minimize(self.to_minimize, w, method='nelder-mead',\n options={'xtol': 1e-08, 'disp': False})\n self.m = res.x\n p = 1 / (1 + np.exp(-self.m.dot(self.batch_articles)))\n for i in np.arange(0, self.d):\n self.q[i] += sum(self.batch_articles[i] * self.\n batch_articles[i] * p[i] * (1 - p[i]))\n self.batch_ids = list([])\n self.batch_clicks = np.array([])\n\n def warmup(self, file):\n pass\n\n def select(self, user, pre_selected_article, lines, total_impressions,\n click):\n selected_article = -1\n warmup = False\n if self.impressions < self.training_size:\n for line in lines:\n self.add_new_article(line)\n self.update(user, pre_selected_article, click)\n selected_article = pre_selected_article\n warmup = True\n else:\n best_value = 0\n best_value_articles = list()\n sample_w = np.random.multivariate_normal(self.mu, np.diag(1 /\n self.q))\n for line in lines:\n article_id = self.add_new_article(line)\n if article_id == -1:\n continue\n a_id = self.article_ids[article_id]\n article = self.articles_1_d[a_id]\n cur_value = self.sigmoid(sample_w.dot(article))\n if cur_value > best_value:\n best_value_articles = list([article_id])\n best_value = cur_value\n elif cur_value == best_value:\n best_value_articles.append(article_id)\n index = random.randint(0, len(best_value_articles) - 1)\n selected_article = best_value_articles[index]\n return selected_article, warmup\n",
"step-5": "import numpy as np\nimport math\nimport random\nfrom numpy.linalg import inv\nfrom scipy.optimize import minimize\n\nfrom Util import to_vector\n\nclass TS_RLR:\n\t\n\tdef __init__(self, alpha):\n\t\tself.d = 6\n\t\tself.k = 6\n\n\t\tself.alpha = alpha\n\t\tself.batch_size = 1000\n\t\tself.training_size = 1000\n\t\tself.impressions = 0\n\t\tself.batch_ids = list([])\n\t\tself.batch_clicks = np.array([])\n\n\t\tself.articles_1_d = np.array([])\n\t\tself.article_ids = dict()\n\t\tself.bad_articles = set()\n\n\t\tself.mu = np.zeros(self.d)\n\t\tself.q = self.alpha * np.ones(self.d)\n\t\t\n\tdef sigmoid(self, x):\n\t\t# print(x)\n\t\treturn 1.0 / (1.0 + math.exp(-x))\n\n\tdef add_new_article(self, line):\n\t\tarticle_id = int(line.split(\" \")[0])\n\t\t\t\n\t\tif article_id in self.bad_articles:\n\t\t\treturn -1\n\n\t\tif article_id not in self.article_ids:\n\t\t\ttry:\n\t\t\t\tarticle = to_vector(line)\n\t\t\texcept IndexError:\n\t\t\t\tself.bad_articles.add(article_id)\n\t\t\t\treturn -1\n\t\t\t\n\t\t\tself.article_ids[article_id] = len(self.article_ids)\n\t\t\tself.articles_1_d = np.append(self.articles_1_d, article).reshape([len(self.article_ids), self.d])\n\t\t\t\n\t\treturn article_id\n\n\tdef to_minimize(self, w):\n\t\treturn 1/2 * sum (self.q * (w - self.mu) * (w - self.mu)) + sum(np.log(1+np.exp(-self.batch_clicks * w.dot(self.batch_articles))))\n\t\n\n\tdef update(self, user, selected_article, click):\n\t\tself.impressions += 1\n\t\tself.batch_ids.append(self.article_ids[selected_article])\n\t\tself.batch_clicks = np.append(self.batch_clicks, click)\n\n\t\tif self.impressions % self.batch_size == 0:\n\t\t\tw = np.random.normal(0, 1, self.d)\n\t\t\tself.batch_articles = self.articles_1_d[self.batch_ids].reshape([self.d, self.batch_size])\n\n\t\t\tres = minimize(self.to_minimize, w, method='nelder-mead', options={'xtol': 1e-8, 'disp': False})\n\t\t\tself.m = res.x\n\t\t\t\n\t\t\tp = 1/(1 + np.exp(- self.m.dot(self.batch_articles)))\n\n\t\t\tfor i in np.arange(0, self.d):\n\t\t\t\t\tself.q[i] += sum(self.batch_articles[i] * self.batch_articles[i] * p[i] * (1-p[i]))\n\t\t\t\t\n\t\t\tself.batch_ids = list([])\n\t\t\tself.batch_clicks = np.array([])\n\n\t\n\tdef warmup(self, file):\n\t\tpass\n\n\tdef select(self, user, pre_selected_article, lines, total_impressions, click):\n\t\tselected_article = -1\n\t\twarmup = False\n\t\t\n\t\tif self.impressions < self.training_size:\n\t\t\tfor line in lines:\n\t\t\t\tself.add_new_article(line)\n\n\t\t\tself.update(user, pre_selected_article, click)\n\t\t\tselected_article = pre_selected_article\n\t\t\twarmup = True\n\t\t\n\t\telse:\n\t\t\tbest_value = 0\n\t\t\tbest_value_articles = list()\n\n\t\t\tsample_w = np.random.multivariate_normal(self.mu, np.diag(1/self.q))\n\t\t\tfor line in lines:\n\t\t\t\tarticle_id = self.add_new_article(line)\n\t\t\t\tif article_id == -1 : \n\t\t\t\t\tcontinue\n\t\t\t\t\n\t\t\t\ta_id = self.article_ids[article_id]\n\t\t\t\tarticle = self.articles_1_d[a_id]\n\t\t\t\t\n\t\t\t\tcur_value = self.sigmoid(sample_w.dot(article))\n\t\t\n\t\t\t\tif cur_value > best_value:\n\t\t\t\t\tbest_value_articles = list([article_id])\n\t\t\t\t\tbest_value = cur_value\n\t\t\t\telif cur_value == best_value:\n\t\t\t\t\tbest_value_articles.append(article_id)\n\n\t\t\tindex = random.randint(0, len(best_value_articles)-1)\t\n\t\t\tselected_article = best_value_articles[index]\n\n\t\treturn selected_article, warmup\n\n\n\n\n\n",
"step-ids": [
1,
6,
7,
8,
10
]
}
|
[
1,
6,
7,
8,
10
] |
# Copyright 2021 QuantumBlack Visual Analytics Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, AND
# NONINFRINGEMENT. IN NO EVENT WILL THE LICENSOR OR OTHER CONTRIBUTORS
# BE LIABLE FOR ANY CLAIM, DAMAGES, OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF, OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
# The QuantumBlack Visual Analytics Limited ("QuantumBlack") name and logo
# (either separately or in combination, "QuantumBlack Trademarks") are
# trademarks of QuantumBlack. The License does not grant you any right or
# license to the QuantumBlack Trademarks. You may not use the QuantumBlack
# Trademarks or any confusingly similar mark as a trademark for your product,
# or use the QuantumBlack Trademarks in any other manner that might cause
# confusion in the marketplace, including but not limited to in advertising,
# on websites, or on software.
#
# See the License for the specific language governing permissions and
# limitations under the License.
"""`kedro_viz.launchers.jupyter` provides line_magic to launch the viz server
from a jupyter notebook.
"""
# pragma: no cover
import logging
import multiprocessing
import socket
from contextlib import closing
from functools import partial
from time import sleep, time
from typing import Any, Callable, Dict
import requests
from IPython.core.display import HTML, display
from kedro_viz.server import run_server
_VIZ_PROCESSES: Dict[int, multiprocessing.Process] = {}
logger = logging.getLogger(__name__)
class WaitForException(Exception):
"""WaitForException: if func doesn't return expected result within the specified time"""
def _wait_for(
func: Callable,
expected_result: Any = True,
timeout: int = 10,
print_error: bool = True,
sleep_for: int = 1,
**kwargs,
) -> None:
"""
Run specified function until it returns expected result until timeout.
Args:
func (Callable): Specified function
        expected_result (Any): result that is expected. Defaults to True.
timeout (int): Time out in seconds. Defaults to 10.
print_error (boolean): whether any exceptions raised should be printed.
            Defaults to True.
sleep_for (int): Execute func every specified number of seconds.
Defaults to 1.
**kwargs: Arguments to be passed to func
Raises:
WaitForException: if func doesn't return expected result within the
specified time
"""
end = time() + timeout
while time() <= end:
try:
retval = func(**kwargs)
except Exception as err: # pylint: disable=broad-except
if print_error:
logger.error(err)
else:
if retval == expected_result:
return None
sleep(sleep_for)
raise WaitForException(
f"func: {func}, didn't return {expected_result} within specified timeout: {timeout}"
)
def _check_viz_up(port): # pragma: no cover
url = "http://127.0.0.1:{}/".format(port)
try:
response = requests.get(url)
except requests.ConnectionError:
return False
return response.status_code == 200
def _allocate_port(start_at: int, end_at: int = 65535) -> int:
acceptable_ports = range(start_at, end_at + 1)
viz_ports = _VIZ_PROCESSES.keys() & set(acceptable_ports)
if viz_ports: # reuse one of already allocated ports
return sorted(viz_ports)[0]
socket.setdefaulttimeout(2.0) # seconds
for port in acceptable_ports: # iterate through all acceptable ports
with closing(socket.socket(socket.AF_INET, socket.SOCK_STREAM)) as sock:
if sock.connect_ex(("127.0.0.1", port)) != 0: # port is available
return port
raise ValueError(
"Cannot allocate an open TCP port for Kedro-Viz in a range "
"from {} to {}".format(start_at, end_at)
)
# pylint: disable=unused-argument,missing-type-doc
def run_viz(port: int = None, line=None, local_ns=None) -> None:
"""
Line magic function to start kedro viz. It calls a kedro viz in a process and displays it in
the Jupyter notebook environment.
Args:
port: TCP port that viz will listen to. Defaults to 4141.
line: line required by line magic interface.
local_ns: Local namespace with local variables of the scope where the line magic is invoked.
For more details, please visit:
https://ipython.readthedocs.io/en/stable/config/custommagics.html
"""
port = port or 4141 # Default argument doesn't work in Jupyter line magic.
port = _allocate_port(start_at=port)
if port in _VIZ_PROCESSES and _VIZ_PROCESSES[port].is_alive():
_VIZ_PROCESSES[port].terminate()
if local_ns is not None and "project_path" in local_ns: # pragma: no cover
target = partial(run_server, project_path=local_ns["project_path"])
else:
target = run_server
viz_process = multiprocessing.Process(
target=target, daemon=True, kwargs={"port": port}
)
viz_process.start()
_VIZ_PROCESSES[port] = viz_process
_wait_for(func=_check_viz_up, port=port)
wrapper = """
<html lang="en"><head></head><body style="width:100; height:100;">
<iframe src="http://127.0.0.1:{}/" height=500 width="100%"></iframe>
</body></html>""".format(
port
)
display(HTML(wrapper))
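
# Hypothetical usage sketch (assumption, not part of the original module):
# once run_viz is registered as an IPython line magic inside a Kedro Jupyter
# session, something like
#
#   %run_viz 4141
#
# allocates a free TCP port at or above 4141, starts the viz server in a
# daemon process, and embeds it in the notebook via the iframe built above.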
|
normal
|
{
"blob_id": "0069a61127c5968d7014bdf7f8c4441f02e67df0",
"index": 6541,
"step-1": "<mask token>\n\n\nclass WaitForException(Exception):\n \"\"\"WaitForException: if func doesn't return expected result within the specified time\"\"\"\n\n\ndef _wait_for(func: Callable, expected_result: Any=True, timeout: int=10,\n print_error: bool=True, sleep_for: int=1, **kwargs) ->None:\n \"\"\"\n Run specified function until it returns expected result until timeout.\n\n Args:\n func (Callable): Specified function\n expected_result (Any): result that is expected. Defaults to None.\n timeout (int): Time out in seconds. Defaults to 10.\n print_error (boolean): whether any exceptions raised should be printed.\n Defaults to False.\n sleep_for (int): Execute func every specified number of seconds.\n Defaults to 1.\n **kwargs: Arguments to be passed to func\n\n Raises:\n WaitForException: if func doesn't return expected result within the\n specified time\n\n \"\"\"\n end = time() + timeout\n while time() <= end:\n try:\n retval = func(**kwargs)\n except Exception as err:\n if print_error:\n logger.error(err)\n else:\n if retval == expected_result:\n return None\n sleep(sleep_for)\n raise WaitForException(\n f\"func: {func}, didn't return {expected_result} within specified timeout: {timeout}\"\n )\n\n\n<mask token>\n\n\ndef run_viz(port: int=None, line=None, local_ns=None) ->None:\n \"\"\"\n Line magic function to start kedro viz. It calls a kedro viz in a process and displays it in\n the Jupyter notebook environment.\n\n Args:\n port: TCP port that viz will listen to. Defaults to 4141.\n line: line required by line magic interface.\n local_ns: Local namespace with local variables of the scope where the line magic is invoked.\n For more details, please visit:\n https://ipython.readthedocs.io/en/stable/config/custommagics.html\n\n \"\"\"\n port = port or 4141\n port = _allocate_port(start_at=port)\n if port in _VIZ_PROCESSES and _VIZ_PROCESSES[port].is_alive():\n _VIZ_PROCESSES[port].terminate()\n if local_ns is not None and 'project_path' in local_ns:\n target = partial(run_server, project_path=local_ns['project_path'])\n else:\n target = run_server\n viz_process = multiprocessing.Process(target=target, daemon=True,\n kwargs={'port': port})\n viz_process.start()\n _VIZ_PROCESSES[port] = viz_process\n _wait_for(func=_check_viz_up, port=port)\n wrapper = (\n \"\"\"\n <html lang=\"en\"><head></head><body style=\"width:100; height:100;\">\n <iframe src=\"http://127.0.0.1:{}/\" height=500 width=\"100%\"></iframe>\n </body></html>\"\"\"\n .format(port))\n display(HTML(wrapper))\n",
"step-2": "<mask token>\n\n\nclass WaitForException(Exception):\n \"\"\"WaitForException: if func doesn't return expected result within the specified time\"\"\"\n\n\ndef _wait_for(func: Callable, expected_result: Any=True, timeout: int=10,\n print_error: bool=True, sleep_for: int=1, **kwargs) ->None:\n \"\"\"\n Run specified function until it returns expected result until timeout.\n\n Args:\n func (Callable): Specified function\n expected_result (Any): result that is expected. Defaults to None.\n timeout (int): Time out in seconds. Defaults to 10.\n print_error (boolean): whether any exceptions raised should be printed.\n Defaults to False.\n sleep_for (int): Execute func every specified number of seconds.\n Defaults to 1.\n **kwargs: Arguments to be passed to func\n\n Raises:\n WaitForException: if func doesn't return expected result within the\n specified time\n\n \"\"\"\n end = time() + timeout\n while time() <= end:\n try:\n retval = func(**kwargs)\n except Exception as err:\n if print_error:\n logger.error(err)\n else:\n if retval == expected_result:\n return None\n sleep(sleep_for)\n raise WaitForException(\n f\"func: {func}, didn't return {expected_result} within specified timeout: {timeout}\"\n )\n\n\n<mask token>\n\n\ndef _allocate_port(start_at: int, end_at: int=65535) ->int:\n acceptable_ports = range(start_at, end_at + 1)\n viz_ports = _VIZ_PROCESSES.keys() & set(acceptable_ports)\n if viz_ports:\n return sorted(viz_ports)[0]\n socket.setdefaulttimeout(2.0)\n for port in acceptable_ports:\n with closing(socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n ) as sock:\n if sock.connect_ex(('127.0.0.1', port)) != 0:\n return port\n raise ValueError(\n 'Cannot allocate an open TCP port for Kedro-Viz in a range from {} to {}'\n .format(start_at, end_at))\n\n\ndef run_viz(port: int=None, line=None, local_ns=None) ->None:\n \"\"\"\n Line magic function to start kedro viz. It calls a kedro viz in a process and displays it in\n the Jupyter notebook environment.\n\n Args:\n port: TCP port that viz will listen to. Defaults to 4141.\n line: line required by line magic interface.\n local_ns: Local namespace with local variables of the scope where the line magic is invoked.\n For more details, please visit:\n https://ipython.readthedocs.io/en/stable/config/custommagics.html\n\n \"\"\"\n port = port or 4141\n port = _allocate_port(start_at=port)\n if port in _VIZ_PROCESSES and _VIZ_PROCESSES[port].is_alive():\n _VIZ_PROCESSES[port].terminate()\n if local_ns is not None and 'project_path' in local_ns:\n target = partial(run_server, project_path=local_ns['project_path'])\n else:\n target = run_server\n viz_process = multiprocessing.Process(target=target, daemon=True,\n kwargs={'port': port})\n viz_process.start()\n _VIZ_PROCESSES[port] = viz_process\n _wait_for(func=_check_viz_up, port=port)\n wrapper = (\n \"\"\"\n <html lang=\"en\"><head></head><body style=\"width:100; height:100;\">\n <iframe src=\"http://127.0.0.1:{}/\" height=500 width=\"100%\"></iframe>\n </body></html>\"\"\"\n .format(port))\n display(HTML(wrapper))\n",
"step-3": "<mask token>\n\n\nclass WaitForException(Exception):\n \"\"\"WaitForException: if func doesn't return expected result within the specified time\"\"\"\n\n\ndef _wait_for(func: Callable, expected_result: Any=True, timeout: int=10,\n print_error: bool=True, sleep_for: int=1, **kwargs) ->None:\n \"\"\"\n Run specified function until it returns expected result until timeout.\n\n Args:\n func (Callable): Specified function\n expected_result (Any): result that is expected. Defaults to None.\n timeout (int): Time out in seconds. Defaults to 10.\n print_error (boolean): whether any exceptions raised should be printed.\n Defaults to False.\n sleep_for (int): Execute func every specified number of seconds.\n Defaults to 1.\n **kwargs: Arguments to be passed to func\n\n Raises:\n WaitForException: if func doesn't return expected result within the\n specified time\n\n \"\"\"\n end = time() + timeout\n while time() <= end:\n try:\n retval = func(**kwargs)\n except Exception as err:\n if print_error:\n logger.error(err)\n else:\n if retval == expected_result:\n return None\n sleep(sleep_for)\n raise WaitForException(\n f\"func: {func}, didn't return {expected_result} within specified timeout: {timeout}\"\n )\n\n\ndef _check_viz_up(port):\n url = 'http://127.0.0.1:{}/'.format(port)\n try:\n response = requests.get(url)\n except requests.ConnectionError:\n return False\n return response.status_code == 200\n\n\ndef _allocate_port(start_at: int, end_at: int=65535) ->int:\n acceptable_ports = range(start_at, end_at + 1)\n viz_ports = _VIZ_PROCESSES.keys() & set(acceptable_ports)\n if viz_ports:\n return sorted(viz_ports)[0]\n socket.setdefaulttimeout(2.0)\n for port in acceptable_ports:\n with closing(socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n ) as sock:\n if sock.connect_ex(('127.0.0.1', port)) != 0:\n return port\n raise ValueError(\n 'Cannot allocate an open TCP port for Kedro-Viz in a range from {} to {}'\n .format(start_at, end_at))\n\n\ndef run_viz(port: int=None, line=None, local_ns=None) ->None:\n \"\"\"\n Line magic function to start kedro viz. It calls a kedro viz in a process and displays it in\n the Jupyter notebook environment.\n\n Args:\n port: TCP port that viz will listen to. Defaults to 4141.\n line: line required by line magic interface.\n local_ns: Local namespace with local variables of the scope where the line magic is invoked.\n For more details, please visit:\n https://ipython.readthedocs.io/en/stable/config/custommagics.html\n\n \"\"\"\n port = port or 4141\n port = _allocate_port(start_at=port)\n if port in _VIZ_PROCESSES and _VIZ_PROCESSES[port].is_alive():\n _VIZ_PROCESSES[port].terminate()\n if local_ns is not None and 'project_path' in local_ns:\n target = partial(run_server, project_path=local_ns['project_path'])\n else:\n target = run_server\n viz_process = multiprocessing.Process(target=target, daemon=True,\n kwargs={'port': port})\n viz_process.start()\n _VIZ_PROCESSES[port] = viz_process\n _wait_for(func=_check_viz_up, port=port)\n wrapper = (\n \"\"\"\n <html lang=\"en\"><head></head><body style=\"width:100; height:100;\">\n <iframe src=\"http://127.0.0.1:{}/\" height=500 width=\"100%\"></iframe>\n </body></html>\"\"\"\n .format(port))\n display(HTML(wrapper))\n",
"step-4": "<mask token>\n_VIZ_PROCESSES: Dict[str, int] = {}\n<mask token>\n\n\nclass WaitForException(Exception):\n \"\"\"WaitForException: if func doesn't return expected result within the specified time\"\"\"\n\n\ndef _wait_for(func: Callable, expected_result: Any=True, timeout: int=10,\n print_error: bool=True, sleep_for: int=1, **kwargs) ->None:\n \"\"\"\n Run specified function until it returns expected result until timeout.\n\n Args:\n func (Callable): Specified function\n expected_result (Any): result that is expected. Defaults to None.\n timeout (int): Time out in seconds. Defaults to 10.\n print_error (boolean): whether any exceptions raised should be printed.\n Defaults to False.\n sleep_for (int): Execute func every specified number of seconds.\n Defaults to 1.\n **kwargs: Arguments to be passed to func\n\n Raises:\n WaitForException: if func doesn't return expected result within the\n specified time\n\n \"\"\"\n end = time() + timeout\n while time() <= end:\n try:\n retval = func(**kwargs)\n except Exception as err:\n if print_error:\n logger.error(err)\n else:\n if retval == expected_result:\n return None\n sleep(sleep_for)\n raise WaitForException(\n f\"func: {func}, didn't return {expected_result} within specified timeout: {timeout}\"\n )\n\n\ndef _check_viz_up(port):\n url = 'http://127.0.0.1:{}/'.format(port)\n try:\n response = requests.get(url)\n except requests.ConnectionError:\n return False\n return response.status_code == 200\n\n\ndef _allocate_port(start_at: int, end_at: int=65535) ->int:\n acceptable_ports = range(start_at, end_at + 1)\n viz_ports = _VIZ_PROCESSES.keys() & set(acceptable_ports)\n if viz_ports:\n return sorted(viz_ports)[0]\n socket.setdefaulttimeout(2.0)\n for port in acceptable_ports:\n with closing(socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n ) as sock:\n if sock.connect_ex(('127.0.0.1', port)) != 0:\n return port\n raise ValueError(\n 'Cannot allocate an open TCP port for Kedro-Viz in a range from {} to {}'\n .format(start_at, end_at))\n\n\ndef run_viz(port: int=None, line=None, local_ns=None) ->None:\n \"\"\"\n Line magic function to start kedro viz. It calls a kedro viz in a process and displays it in\n the Jupyter notebook environment.\n\n Args:\n port: TCP port that viz will listen to. Defaults to 4141.\n line: line required by line magic interface.\n local_ns: Local namespace with local variables of the scope where the line magic is invoked.\n For more details, please visit:\n https://ipython.readthedocs.io/en/stable/config/custommagics.html\n\n \"\"\"\n port = port or 4141\n port = _allocate_port(start_at=port)\n if port in _VIZ_PROCESSES and _VIZ_PROCESSES[port].is_alive():\n _VIZ_PROCESSES[port].terminate()\n if local_ns is not None and 'project_path' in local_ns:\n target = partial(run_server, project_path=local_ns['project_path'])\n else:\n target = run_server\n viz_process = multiprocessing.Process(target=target, daemon=True,\n kwargs={'port': port})\n viz_process.start()\n _VIZ_PROCESSES[port] = viz_process\n _wait_for(func=_check_viz_up, port=port)\n wrapper = (\n \"\"\"\n <html lang=\"en\"><head></head><body style=\"width:100; height:100;\">\n <iframe src=\"http://127.0.0.1:{}/\" height=500 width=\"100%\"></iframe>\n </body></html>\"\"\"\n .format(port))\n display(HTML(wrapper))\n",
"step-5": "# Copyright 2021 QuantumBlack Visual Analytics Limited\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND,\n# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES\n# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, AND\n# NONINFRINGEMENT. IN NO EVENT WILL THE LICENSOR OR OTHER CONTRIBUTORS\n# BE LIABLE FOR ANY CLAIM, DAMAGES, OR OTHER LIABILITY, WHETHER IN AN\n# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF, OR IN\n# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n#\n# The QuantumBlack Visual Analytics Limited (\"QuantumBlack\") name and logo\n# (either separately or in combination, \"QuantumBlack Trademarks\") are\n# trademarks of QuantumBlack. The License does not grant you any right or\n# license to the QuantumBlack Trademarks. You may not use the QuantumBlack\n# Trademarks or any confusingly similar mark as a trademark for your product,\n# or use the QuantumBlack Trademarks in any other manner that might cause\n# confusion in the marketplace, including but not limited to in advertising,\n# on websites, or on software.\n#\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"`kedro_viz.launchers.jupyter` provides line_magic to launch the viz server\nfrom a jupyter notebook.\n\"\"\"\n# pragma: no cover\nimport logging\nimport multiprocessing\nimport socket\nfrom contextlib import closing\nfrom functools import partial\nfrom time import sleep, time\nfrom typing import Any, Callable, Dict\n\nimport requests\nfrom IPython.core.display import HTML, display\n\nfrom kedro_viz.server import run_server\n\n_VIZ_PROCESSES: Dict[str, int] = {}\n\n\nlogger = logging.getLogger(__name__)\n\n\nclass WaitForException(Exception):\n \"\"\"WaitForException: if func doesn't return expected result within the specified time\"\"\"\n\n\ndef _wait_for(\n func: Callable,\n expected_result: Any = True,\n timeout: int = 10,\n print_error: bool = True,\n sleep_for: int = 1,\n **kwargs,\n) -> None:\n \"\"\"\n Run specified function until it returns expected result until timeout.\n\n Args:\n func (Callable): Specified function\n expected_result (Any): result that is expected. Defaults to None.\n timeout (int): Time out in seconds. 
Defaults to 10.\n print_error (boolean): whether any exceptions raised should be printed.\n Defaults to False.\n sleep_for (int): Execute func every specified number of seconds.\n Defaults to 1.\n **kwargs: Arguments to be passed to func\n\n Raises:\n WaitForException: if func doesn't return expected result within the\n specified time\n\n \"\"\"\n end = time() + timeout\n\n while time() <= end:\n try:\n retval = func(**kwargs)\n except Exception as err: # pylint: disable=broad-except\n if print_error:\n logger.error(err)\n else:\n if retval == expected_result:\n return None\n sleep(sleep_for)\n\n raise WaitForException(\n f\"func: {func}, didn't return {expected_result} within specified timeout: {timeout}\"\n )\n\n\ndef _check_viz_up(port): # pragma: no cover\n url = \"http://127.0.0.1:{}/\".format(port)\n try:\n response = requests.get(url)\n except requests.ConnectionError:\n return False\n\n return response.status_code == 200\n\n\ndef _allocate_port(start_at: int, end_at: int = 65535) -> int:\n acceptable_ports = range(start_at, end_at + 1)\n\n viz_ports = _VIZ_PROCESSES.keys() & set(acceptable_ports)\n if viz_ports: # reuse one of already allocated ports\n return sorted(viz_ports)[0]\n\n socket.setdefaulttimeout(2.0) # seconds\n for port in acceptable_ports: # iterate through all acceptable ports\n with closing(socket.socket(socket.AF_INET, socket.SOCK_STREAM)) as sock:\n if sock.connect_ex((\"127.0.0.1\", port)) != 0: # port is available\n return port\n\n raise ValueError(\n \"Cannot allocate an open TCP port for Kedro-Viz in a range \"\n \"from {} to {}\".format(start_at, end_at)\n )\n\n\n# pylint: disable=unused-argument,missing-type-doc\ndef run_viz(port: int = None, line=None, local_ns=None) -> None:\n \"\"\"\n Line magic function to start kedro viz. It calls a kedro viz in a process and displays it in\n the Jupyter notebook environment.\n\n Args:\n port: TCP port that viz will listen to. Defaults to 4141.\n line: line required by line magic interface.\n local_ns: Local namespace with local variables of the scope where the line magic is invoked.\n For more details, please visit:\n https://ipython.readthedocs.io/en/stable/config/custommagics.html\n\n \"\"\"\n port = port or 4141 # Default argument doesn't work in Jupyter line magic.\n port = _allocate_port(start_at=port)\n\n if port in _VIZ_PROCESSES and _VIZ_PROCESSES[port].is_alive():\n _VIZ_PROCESSES[port].terminate()\n\n if local_ns is not None and \"project_path\" in local_ns: # pragma: no cover\n target = partial(run_server, project_path=local_ns[\"project_path\"])\n else:\n target = run_server\n\n viz_process = multiprocessing.Process(\n target=target, daemon=True, kwargs={\"port\": port}\n )\n\n viz_process.start()\n _VIZ_PROCESSES[port] = viz_process\n\n _wait_for(func=_check_viz_up, port=port)\n\n wrapper = \"\"\"\n <html lang=\"en\"><head></head><body style=\"width:100; height:100;\">\n <iframe src=\"http://127.0.0.1:{}/\" height=500 width=\"100%\"></iframe>\n </body></html>\"\"\".format(\n port\n )\n display(HTML(wrapper))\n",
"step-ids": [
4,
5,
6,
7,
10
]
}
|
[
4,
5,
6,
7,
10
] |
# 1 use the operators to solve for the following equation:
# (a)
number = ((30 * 39) + 300) ** 10
print(number)
# find the value of C. X + Y = C Given:
x = 0.0050
y = 0.1000
c = x + y
print(c)
"""
what is the result of the following:
(a) take the sentence:
the study or use of the systems
(especially computers and communications)
for storing, retrieving, and sending information
"""
"""
store each word in a separate variable, then print out the sentence on one line using the print function
"""
word1 = "the study or use of the systems"
word2 = "especially computers and communications"
word3 = "for storing, retrieving, and sending information"
print(word1, " " + word2, " " + word3)
# (b) what is output ?
word = "Mystery"
print(word[:4])
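
# Answer to (b): slicing with word[:4] keeps the characters at indices 0-3,
# so the printed output is "Myst".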
|
normal
|
{
"blob_id": "c2f82cf73d095979d1da346b7dd7779bcc675805",
"index": 4045,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nprint(number)\n<mask token>\nprint(c)\n<mask token>\nprint(word1, ' ' + word2, ' ' + word3)\n<mask token>\nprint(word[:4])\n",
"step-3": "number = (30 * 39 + 300) ** 10\nprint(number)\nx = 0.005\ny = 0.1\nc = x + y\nprint(c)\n<mask token>\nword1 = 'the study or use of the systems'\nword2 = 'especially computers and communications'\nword3 = 'for storing, retrieving, and sending information'\nprint(word1, ' ' + word2, ' ' + word3)\nword = 'Mystery'\nprint(word[:4])\n",
"step-4": "# 1 use the operators to solve for the following equation:\n# (a) \nnumber = ((30*39) + 300) **10\nprint(number)\n\n# find the value of C. X + Y = C Given:\nx = 0.0050\ny = 0.1000\n\nc = x + y\nprint(c)\n\n\"\"\"\n what is the result of the following:\n (a) take the sentence:\n the study or use of the systems\n (especially computers and communications)\n for storing, retrieving, and sending information\n\"\"\"\n\"\"\"\nstrore each word in a separate variable, then print out the sentence on the one line using the print function\n\"\"\"\nword1 = \"the study or use of the systems\"\nword2 = \"especially computers and communications\"\nword3 = \"for storing, retrieving, and sending information\"\nprint(word1, \" \" + word2, \" \" + word3)\n\n# (b) what is output ?\n\nword = \"Mystery\"\nprint(word[:4])\n\n\n\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
from classes.Board import Board
class Visualiser:
coordinate_map = ("a", "b", "c", "d", "e", "f", "g", "h")
__dimensions = 8
def __init__(self):
self.map = []
self.__build_map()
def __build_map(self):
"""
Creates the array of the battlefield. Should never be used for logical operations
:return:
"""
        for i in range(self.__dimensions):
            # build a fresh row on each iteration; appending one shared list
            # would make every row alias the same object
            self.map.append([[] for _ in range(self.__dimensions)])
def print(self, board: Board):
"""
Print the entire battlefield
:param board: The current board in play
:return: void
"""
# Render first horizontal alphabetical x-axis markers
row = [" "]
for x_marker in self.coordinate_map:
row.append(" " + x_marker)
print("".join(row))
# Render the rest of the cheese board
for y, y_row in enumerate(self.map):
# Render left side row numbers
row = [str((8-y)) + " "]
# Render battlefield
for x, square in enumerate(y_row):
# Check with Board if there is a piece on this coordinate
anybody = board.who_is_in(*[x, y])
# Anybody out there?
if anybody is not None:
# Oh hai
row.append(anybody.name)
else:
# Print a simple dot
row.append(" .")
# Print the entire row
print("".join(row))
@staticmethod
def to_algebraic(x, y):
        return Visualiser.coordinate_map[x] + str(abs(y - Visualiser.__dimensions))
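
# Minimal usage sketch (assumes Board() exposes who_is_in(x, y) returning a
# piece with a two-character .name, or None for an empty square):
#
#   board = Board()
#   viz = Visualiser()
#   viz.print(board)                  # renders the 8x8 grid with coordinates
#   Visualiser.to_algebraic(0, 7)     # -> "a1"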
|
normal
|
{
"blob_id": "e5e012e40a71dee9f4dbd9913590aef125b758df",
"index": 223,
"step-1": "<mask token>\n\n\nclass Visualiser:\n <mask token>\n <mask token>\n <mask token>\n\n def __build_map(self):\n \"\"\"\n Creates the array of the battlefield. Should never be used for logical operations\n :return:\n \"\"\"\n columns = []\n for i in range(self.__dimensions):\n columns.append([])\n for i in range(self.__dimensions):\n self.map.append(columns)\n\n def print(self, board: Board):\n \"\"\"\n Print the entire battlefield\n\n :param board: The current board in play\n :return: void\n \"\"\"\n row = [' ']\n for x_marker in self.coordinate_map:\n row.append(' ' + x_marker)\n print(''.join(row))\n for y, y_row in enumerate(self.map):\n row = [str(8 - y) + ' ']\n for x, square in enumerate(y_row):\n anybody = board.who_is_in(*[x, y])\n if anybody is not None:\n row.append(anybody.name)\n else:\n row.append(' .')\n print(''.join(row))\n\n @staticmethod\n def to_algebraic(x, y):\n return Visualiser.coordinate_map[x] + str(abs(y - Visualiser.\n __dimensions))\n",
"step-2": "<mask token>\n\n\nclass Visualiser:\n <mask token>\n <mask token>\n\n def __init__(self):\n self.map = []\n self.__build_map()\n\n def __build_map(self):\n \"\"\"\n Creates the array of the battlefield. Should never be used for logical operations\n :return:\n \"\"\"\n columns = []\n for i in range(self.__dimensions):\n columns.append([])\n for i in range(self.__dimensions):\n self.map.append(columns)\n\n def print(self, board: Board):\n \"\"\"\n Print the entire battlefield\n\n :param board: The current board in play\n :return: void\n \"\"\"\n row = [' ']\n for x_marker in self.coordinate_map:\n row.append(' ' + x_marker)\n print(''.join(row))\n for y, y_row in enumerate(self.map):\n row = [str(8 - y) + ' ']\n for x, square in enumerate(y_row):\n anybody = board.who_is_in(*[x, y])\n if anybody is not None:\n row.append(anybody.name)\n else:\n row.append(' .')\n print(''.join(row))\n\n @staticmethod\n def to_algebraic(x, y):\n return Visualiser.coordinate_map[x] + str(abs(y - Visualiser.\n __dimensions))\n",
"step-3": "<mask token>\n\n\nclass Visualiser:\n coordinate_map = 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h'\n __dimensions = 8\n\n def __init__(self):\n self.map = []\n self.__build_map()\n\n def __build_map(self):\n \"\"\"\n Creates the array of the battlefield. Should never be used for logical operations\n :return:\n \"\"\"\n columns = []\n for i in range(self.__dimensions):\n columns.append([])\n for i in range(self.__dimensions):\n self.map.append(columns)\n\n def print(self, board: Board):\n \"\"\"\n Print the entire battlefield\n\n :param board: The current board in play\n :return: void\n \"\"\"\n row = [' ']\n for x_marker in self.coordinate_map:\n row.append(' ' + x_marker)\n print(''.join(row))\n for y, y_row in enumerate(self.map):\n row = [str(8 - y) + ' ']\n for x, square in enumerate(y_row):\n anybody = board.who_is_in(*[x, y])\n if anybody is not None:\n row.append(anybody.name)\n else:\n row.append(' .')\n print(''.join(row))\n\n @staticmethod\n def to_algebraic(x, y):\n return Visualiser.coordinate_map[x] + str(abs(y - Visualiser.\n __dimensions))\n",
"step-4": "from classes.Board import Board\n\n\nclass Visualiser:\n coordinate_map = 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h'\n __dimensions = 8\n\n def __init__(self):\n self.map = []\n self.__build_map()\n\n def __build_map(self):\n \"\"\"\n Creates the array of the battlefield. Should never be used for logical operations\n :return:\n \"\"\"\n columns = []\n for i in range(self.__dimensions):\n columns.append([])\n for i in range(self.__dimensions):\n self.map.append(columns)\n\n def print(self, board: Board):\n \"\"\"\n Print the entire battlefield\n\n :param board: The current board in play\n :return: void\n \"\"\"\n row = [' ']\n for x_marker in self.coordinate_map:\n row.append(' ' + x_marker)\n print(''.join(row))\n for y, y_row in enumerate(self.map):\n row = [str(8 - y) + ' ']\n for x, square in enumerate(y_row):\n anybody = board.who_is_in(*[x, y])\n if anybody is not None:\n row.append(anybody.name)\n else:\n row.append(' .')\n print(''.join(row))\n\n @staticmethod\n def to_algebraic(x, y):\n return Visualiser.coordinate_map[x] + str(abs(y - Visualiser.\n __dimensions))\n",
"step-5": "from classes.Board import Board\n\n\nclass Visualiser:\n coordinate_map = (\"a\", \"b\", \"c\", \"d\", \"e\", \"f\", \"g\", \"h\")\n __dimensions = 8\n\n def __init__(self):\n self.map = []\n self.__build_map()\n\n def __build_map(self):\n \"\"\"\n Creates the array of the battlefield. Should never be used for logical operations\n :return:\n \"\"\"\n columns = []\n\n for i in range(self.__dimensions):\n columns.append([])\n\n for i in range(self.__dimensions):\n self.map.append(columns)\n\n def print(self, board: Board):\n \"\"\"\n Print the entire battlefield\n\n :param board: The current board in play\n :return: void\n \"\"\"\n # Render first horizontal alphabetical x-axis markers\n row = [\" \"]\n\n for x_marker in self.coordinate_map:\n row.append(\" \" + x_marker)\n\n print(\"\".join(row))\n\n # Render the rest of the cheese board\n for y, y_row in enumerate(self.map):\n # Render left side row numbers\n row = [str((8-y)) + \" \"]\n\n # Render battlefield\n for x, square in enumerate(y_row):\n # Check with Board if there is a piece on this coordinate\n anybody = board.who_is_in(*[x, y])\n\n # Anybody out there?\n if anybody is not None:\n # Oh hai\n row.append(anybody.name)\n else:\n # Print a simple dot\n row.append(\" .\")\n\n # Print the entire row\n print(\"\".join(row))\n\n @staticmethod\n def to_algebraic(x, y):\n return Visualiser.coordinate_map[x] + str(abs((y - Visualiser.__dimensions)))\n",
"step-ids": [
4,
5,
6,
7,
8
]
}
|
[
4,
5,
6,
7,
8
] |
from django.contrib.postgres.fields import JSONField
from django.db import models
from core.utils.time import get_now
class BaseManager(models.Manager):
"""
Our basic manager is used to order all child models of AbstractLayer
by created time (descending), therefore it creates a LIFO order,
causing the recent ones appear first in results.
"""
use_for_related_fields = True
def get_queryset(self):
        # must return the queryset; without `return` this method yields None
        return super(BaseManager, self).get_queryset().order_by('-created_time')
class AbstractLayer(models.Model):
"""
All basic abstraction is done here.
Also, we'll implement some methods which will simplify the work with models.
"""
    # let's configure managers (Django expects manager *instances*, not classes);
    # the first one declared becomes the model's default manager
    objects = BaseManager()
    all_objects = models.Manager()
# All objects in our database are gonna have time of creation and last updated time.
created_time = models.DateTimeField(default=get_now)
last_updated_time = models.DateTimeField(default=get_now)
@classmethod
def get(cls, *args, **kwargs) -> object or None:
"""
We use our custom get method to avoid errors (like Not Found).
This way we won't have to use try/except for the rest of our codebase (at least for non-existing objects).
:param args:
:param kwargs:
:return: object of model
"""
try:
return cls.objects.get(*args, **kwargs)
except cls.DoesNotExist:
# if objects does not exist, we use None
return None
@classmethod
def filter(cls, *args, **kwargs) -> models.QuerySet:
"""
Just to reduce the model.objects.filter to model.filter
:param args:
:param kwargs:
:return: QuerySet
"""
return cls.objects.filter(*args, **kwargs)
@classmethod
def all(cls):
"""
Shortcut for model.objects.all
"""
return cls.objects.all()
def save(self, *args, **kwargs) -> None:
"""
We won't be using auto_now and auto_add_now for created_time and last_updated_time,
since they might cause unintentional errors in future.
Instead we implement custom save method to update them.
:param args:
:param kwargs:
:return: None
"""
self.last_updated_time = get_now()
super(AbstractLayer, self).save(*args, **kwargs)
@classmethod
def create(cls, *args, **kwargs):
"""
Since we are not using auto fields for created_time,
we will be implementing our custom create method to take care of that.
Also, we reduce model.objects.create to model.create.
:param args:
:param kwargs:
:return: created object
"""
now = get_now()
obj = cls(
*args,
**kwargs,
created_time=now,
last_updated_time=now
)
obj.save()
return obj
class Meta:
abstract = True
class Country(AbstractLayer):
code = models.CharField(max_length=7, unique=True)
def __str__(self):
return self.code
class Meta:
db_table = 'countries'
class City(AbstractLayer):
name = models.CharField(max_length=255)
state = models.CharField(max_length=255)
country = models.ForeignKey(Country, on_delete=models.CASCADE, related_name='cities')
lon = models.CharField(max_length=31)
lat = models.CharField(max_length=31)
def __str__(self):
return self.name
class Meta:
db_table = 'cities'
class Forecast(AbstractLayer):
city = models.ForeignKey(City, on_delete=models.CASCADE, related_name='forecasts')
detailed_status = models.CharField(max_length=1023, blank=True, null=True)
data = JSONField(blank=True, null=True, help_text='Whole JSON data representing the forecast details')
time = models.DateTimeField()
class Meta:
db_table = 'forecasts'
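# --- hypothetical usage sketch (editor's addition, not part of the original) ---
# Assumes a configured Django project with migrations applied for these models:
# country = Country.create(code='UZ')      # stamps created_time/last_updated_time
# Country.get(code='XX')                   # -> None instead of raising DoesNotExist
# tashkent = City.create(name='Tashkent', state='Tashkent',
#                        country=country, lon='69.24', lat='41.31')
# City.filter(country=country)             # shorthand for City.objects.filter(...)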
|
normal
|
{
"blob_id": "5a33aeffa740a41bd0bd1d80f45796ae37377a4c",
"index": 757,
"step-1": "<mask token>\n\n\nclass AbstractLayer(models.Model):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n @classmethod\n def filter(cls, *args, **kwargs) ->models.QuerySet:\n \"\"\"\n Just to reduce the model.objects.filter to model.filter\n :param args:\n :param kwargs:\n :return: QuerySet\n \"\"\"\n return cls.objects.filter(*args, **kwargs)\n\n @classmethod\n def all(cls):\n \"\"\"\n Shortcut for model.objects.all\n \"\"\"\n return cls.objects.all()\n <mask token>\n\n @classmethod\n def create(cls, *args, **kwargs):\n \"\"\"\n Since we are not using auto fields for created_time,\n we will be implementing our custom create method to take care of that.\n Also, we reduce model.objects.create to model.create.\n :param args:\n :param kwargs:\n :return: created object\n \"\"\"\n now = get_now()\n obj = cls(*args, **kwargs, created_time=now, last_updated_time=now)\n obj.save()\n return obj\n\n\n class Meta:\n abstract = True\n\n\nclass Country(AbstractLayer):\n code = models.CharField(max_length=7, unique=True)\n\n def __str__(self):\n return self.code\n\n\n class Meta:\n db_table = 'countries'\n\n\nclass City(AbstractLayer):\n name = models.CharField(max_length=255)\n state = models.CharField(max_length=255)\n country = models.ForeignKey(Country, on_delete=models.CASCADE,\n related_name='cities')\n lon = models.CharField(max_length=31)\n lat = models.CharField(max_length=31)\n\n def __str__(self):\n return self.name\n\n\n class Meta:\n db_table = 'cities'\n\n\nclass Forecast(AbstractLayer):\n city = models.ForeignKey(City, on_delete=models.CASCADE, related_name=\n 'forecasts')\n detailed_status = models.CharField(max_length=1023, blank=True, null=True)\n data = JSONField(blank=True, null=True, help_text=\n 'Whole JSON data representing the forecast details')\n time = models.DateTimeField()\n\n\n class Meta:\n db_table = 'forecasts'\n",
"step-2": "<mask token>\n\n\nclass BaseManager(models.Manager):\n <mask token>\n <mask token>\n <mask token>\n\n\nclass AbstractLayer(models.Model):\n \"\"\"\n All basic abstraction is done here.\n Also, we'll implement some methods which will simplify the work with models.\n \"\"\"\n default_manager = BaseManager\n objects = BaseManager\n all_objects = models.Manager\n created_time = models.DateTimeField(default=get_now)\n last_updated_time = models.DateTimeField(default=get_now)\n\n @classmethod\n def get(cls, *args, **kwargs) ->(object or None):\n \"\"\"\n We use our custom get method to avoid errors (like Not Found).\n This way we won't have to use try/except for the rest of our codebase (at least for non-existing objects).\n :param args:\n :param kwargs:\n :return: object of model\n \"\"\"\n try:\n return cls.objects.get(*args, **kwargs)\n except cls.DoesNotExist:\n return None\n\n @classmethod\n def filter(cls, *args, **kwargs) ->models.QuerySet:\n \"\"\"\n Just to reduce the model.objects.filter to model.filter\n :param args:\n :param kwargs:\n :return: QuerySet\n \"\"\"\n return cls.objects.filter(*args, **kwargs)\n\n @classmethod\n def all(cls):\n \"\"\"\n Shortcut for model.objects.all\n \"\"\"\n return cls.objects.all()\n\n def save(self, *args, **kwargs) ->None:\n \"\"\"\n We won't be using auto_now and auto_add_now for created_time and last_updated_time,\n since they might cause unintentional errors in future.\n Instead we implement custom save method to update them.\n :param args:\n :param kwargs:\n :return: None\n \"\"\"\n self.last_updated_time = get_now()\n super(AbstractLayer, self).save(*args, **kwargs)\n\n @classmethod\n def create(cls, *args, **kwargs):\n \"\"\"\n Since we are not using auto fields for created_time,\n we will be implementing our custom create method to take care of that.\n Also, we reduce model.objects.create to model.create.\n :param args:\n :param kwargs:\n :return: created object\n \"\"\"\n now = get_now()\n obj = cls(*args, **kwargs, created_time=now, last_updated_time=now)\n obj.save()\n return obj\n\n\n class Meta:\n abstract = True\n\n\nclass Country(AbstractLayer):\n code = models.CharField(max_length=7, unique=True)\n\n def __str__(self):\n return self.code\n\n\n class Meta:\n db_table = 'countries'\n\n\nclass City(AbstractLayer):\n name = models.CharField(max_length=255)\n state = models.CharField(max_length=255)\n country = models.ForeignKey(Country, on_delete=models.CASCADE,\n related_name='cities')\n lon = models.CharField(max_length=31)\n lat = models.CharField(max_length=31)\n\n def __str__(self):\n return self.name\n\n\n class Meta:\n db_table = 'cities'\n\n\nclass Forecast(AbstractLayer):\n city = models.ForeignKey(City, on_delete=models.CASCADE, related_name=\n 'forecasts')\n detailed_status = models.CharField(max_length=1023, blank=True, null=True)\n data = JSONField(blank=True, null=True, help_text=\n 'Whole JSON data representing the forecast details')\n time = models.DateTimeField()\n\n\n class Meta:\n db_table = 'forecasts'\n",
"step-3": "<mask token>\n\n\nclass BaseManager(models.Manager):\n <mask token>\n <mask token>\n\n def get_queryset(self):\n super(BaseManager, self).get_queryset().order_by('-created_time')\n\n\nclass AbstractLayer(models.Model):\n \"\"\"\n All basic abstraction is done here.\n Also, we'll implement some methods which will simplify the work with models.\n \"\"\"\n default_manager = BaseManager\n objects = BaseManager\n all_objects = models.Manager\n created_time = models.DateTimeField(default=get_now)\n last_updated_time = models.DateTimeField(default=get_now)\n\n @classmethod\n def get(cls, *args, **kwargs) ->(object or None):\n \"\"\"\n We use our custom get method to avoid errors (like Not Found).\n This way we won't have to use try/except for the rest of our codebase (at least for non-existing objects).\n :param args:\n :param kwargs:\n :return: object of model\n \"\"\"\n try:\n return cls.objects.get(*args, **kwargs)\n except cls.DoesNotExist:\n return None\n\n @classmethod\n def filter(cls, *args, **kwargs) ->models.QuerySet:\n \"\"\"\n Just to reduce the model.objects.filter to model.filter\n :param args:\n :param kwargs:\n :return: QuerySet\n \"\"\"\n return cls.objects.filter(*args, **kwargs)\n\n @classmethod\n def all(cls):\n \"\"\"\n Shortcut for model.objects.all\n \"\"\"\n return cls.objects.all()\n\n def save(self, *args, **kwargs) ->None:\n \"\"\"\n We won't be using auto_now and auto_add_now for created_time and last_updated_time,\n since they might cause unintentional errors in future.\n Instead we implement custom save method to update them.\n :param args:\n :param kwargs:\n :return: None\n \"\"\"\n self.last_updated_time = get_now()\n super(AbstractLayer, self).save(*args, **kwargs)\n\n @classmethod\n def create(cls, *args, **kwargs):\n \"\"\"\n Since we are not using auto fields for created_time,\n we will be implementing our custom create method to take care of that.\n Also, we reduce model.objects.create to model.create.\n :param args:\n :param kwargs:\n :return: created object\n \"\"\"\n now = get_now()\n obj = cls(*args, **kwargs, created_time=now, last_updated_time=now)\n obj.save()\n return obj\n\n\n class Meta:\n abstract = True\n\n\nclass Country(AbstractLayer):\n code = models.CharField(max_length=7, unique=True)\n\n def __str__(self):\n return self.code\n\n\n class Meta:\n db_table = 'countries'\n\n\nclass City(AbstractLayer):\n name = models.CharField(max_length=255)\n state = models.CharField(max_length=255)\n country = models.ForeignKey(Country, on_delete=models.CASCADE,\n related_name='cities')\n lon = models.CharField(max_length=31)\n lat = models.CharField(max_length=31)\n\n def __str__(self):\n return self.name\n\n\n class Meta:\n db_table = 'cities'\n\n\nclass Forecast(AbstractLayer):\n city = models.ForeignKey(City, on_delete=models.CASCADE, related_name=\n 'forecasts')\n detailed_status = models.CharField(max_length=1023, blank=True, null=True)\n data = JSONField(blank=True, null=True, help_text=\n 'Whole JSON data representing the forecast details')\n time = models.DateTimeField()\n\n\n class Meta:\n db_table = 'forecasts'\n",
"step-4": "<mask token>\n\n\nclass BaseManager(models.Manager):\n <mask token>\n use_for_related_fields = True\n\n def get_queryset(self):\n super(BaseManager, self).get_queryset().order_by('-created_time')\n\n\nclass AbstractLayer(models.Model):\n \"\"\"\n All basic abstraction is done here.\n Also, we'll implement some methods which will simplify the work with models.\n \"\"\"\n default_manager = BaseManager\n objects = BaseManager\n all_objects = models.Manager\n created_time = models.DateTimeField(default=get_now)\n last_updated_time = models.DateTimeField(default=get_now)\n\n @classmethod\n def get(cls, *args, **kwargs) ->(object or None):\n \"\"\"\n We use our custom get method to avoid errors (like Not Found).\n This way we won't have to use try/except for the rest of our codebase (at least for non-existing objects).\n :param args:\n :param kwargs:\n :return: object of model\n \"\"\"\n try:\n return cls.objects.get(*args, **kwargs)\n except cls.DoesNotExist:\n return None\n\n @classmethod\n def filter(cls, *args, **kwargs) ->models.QuerySet:\n \"\"\"\n Just to reduce the model.objects.filter to model.filter\n :param args:\n :param kwargs:\n :return: QuerySet\n \"\"\"\n return cls.objects.filter(*args, **kwargs)\n\n @classmethod\n def all(cls):\n \"\"\"\n Shortcut for model.objects.all\n \"\"\"\n return cls.objects.all()\n\n def save(self, *args, **kwargs) ->None:\n \"\"\"\n We won't be using auto_now and auto_add_now for created_time and last_updated_time,\n since they might cause unintentional errors in future.\n Instead we implement custom save method to update them.\n :param args:\n :param kwargs:\n :return: None\n \"\"\"\n self.last_updated_time = get_now()\n super(AbstractLayer, self).save(*args, **kwargs)\n\n @classmethod\n def create(cls, *args, **kwargs):\n \"\"\"\n Since we are not using auto fields for created_time,\n we will be implementing our custom create method to take care of that.\n Also, we reduce model.objects.create to model.create.\n :param args:\n :param kwargs:\n :return: created object\n \"\"\"\n now = get_now()\n obj = cls(*args, **kwargs, created_time=now, last_updated_time=now)\n obj.save()\n return obj\n\n\n class Meta:\n abstract = True\n\n\nclass Country(AbstractLayer):\n code = models.CharField(max_length=7, unique=True)\n\n def __str__(self):\n return self.code\n\n\n class Meta:\n db_table = 'countries'\n\n\nclass City(AbstractLayer):\n name = models.CharField(max_length=255)\n state = models.CharField(max_length=255)\n country = models.ForeignKey(Country, on_delete=models.CASCADE,\n related_name='cities')\n lon = models.CharField(max_length=31)\n lat = models.CharField(max_length=31)\n\n def __str__(self):\n return self.name\n\n\n class Meta:\n db_table = 'cities'\n\n\nclass Forecast(AbstractLayer):\n city = models.ForeignKey(City, on_delete=models.CASCADE, related_name=\n 'forecasts')\n detailed_status = models.CharField(max_length=1023, blank=True, null=True)\n data = JSONField(blank=True, null=True, help_text=\n 'Whole JSON data representing the forecast details')\n time = models.DateTimeField()\n\n\n class Meta:\n db_table = 'forecasts'\n",
"step-5": "from django.contrib.postgres.fields import JSONField\nfrom django.db import models\n\nfrom core.utils.time import get_now\n\n\nclass BaseManager(models.Manager):\n \"\"\"\n Our basic manager is used to order all child models of AbstractLayer\n by created time (descending), therefore it creates a LIFO order,\n causing the recent ones appear first in results.\n \"\"\"\n use_for_related_fields = True\n\n def get_queryset(self):\n super(BaseManager, self).get_queryset().order_by('-created_time')\n\n\nclass AbstractLayer(models.Model):\n \"\"\"\n All basic abstraction is done here.\n Also, we'll implement some methods which will simplify the work with models.\n \"\"\"\n\n # let's configure managers\n default_manager = BaseManager\n objects = BaseManager\n all_objects = models.Manager\n\n # All objects in our database are gonna have time of creation and last updated time.\n created_time = models.DateTimeField(default=get_now)\n last_updated_time = models.DateTimeField(default=get_now)\n\n @classmethod\n def get(cls, *args, **kwargs) -> object or None:\n \"\"\"\n We use our custom get method to avoid errors (like Not Found).\n This way we won't have to use try/except for the rest of our codebase (at least for non-existing objects).\n :param args:\n :param kwargs:\n :return: object of model\n \"\"\"\n try:\n return cls.objects.get(*args, **kwargs)\n except cls.DoesNotExist:\n # if objects does not exist, we use None\n return None\n\n @classmethod\n def filter(cls, *args, **kwargs) -> models.QuerySet:\n \"\"\"\n Just to reduce the model.objects.filter to model.filter\n :param args:\n :param kwargs:\n :return: QuerySet\n \"\"\"\n return cls.objects.filter(*args, **kwargs)\n\n @classmethod\n def all(cls):\n \"\"\"\n Shortcut for model.objects.all\n \"\"\"\n return cls.objects.all()\n\n def save(self, *args, **kwargs) -> None:\n \"\"\"\n We won't be using auto_now and auto_add_now for created_time and last_updated_time,\n since they might cause unintentional errors in future.\n Instead we implement custom save method to update them.\n :param args:\n :param kwargs:\n :return: None\n \"\"\"\n self.last_updated_time = get_now()\n super(AbstractLayer, self).save(*args, **kwargs)\n\n @classmethod\n def create(cls, *args, **kwargs):\n \"\"\"\n Since we are not using auto fields for created_time,\n we will be implementing our custom create method to take care of that.\n Also, we reduce model.objects.create to model.create.\n :param args:\n :param kwargs:\n :return: created object\n \"\"\"\n now = get_now()\n obj = cls(\n *args,\n **kwargs,\n created_time=now,\n last_updated_time=now\n )\n obj.save()\n return obj\n\n class Meta:\n abstract = True\n\n\nclass Country(AbstractLayer):\n code = models.CharField(max_length=7, unique=True)\n\n def __str__(self):\n return self.code\n\n class Meta:\n db_table = 'countries'\n\n\nclass City(AbstractLayer):\n name = models.CharField(max_length=255)\n state = models.CharField(max_length=255)\n country = models.ForeignKey(Country, on_delete=models.CASCADE, related_name='cities')\n lon = models.CharField(max_length=31)\n lat = models.CharField(max_length=31)\n\n def __str__(self):\n return self.name\n\n class Meta:\n db_table = 'cities'\n\n\nclass Forecast(AbstractLayer):\n city = models.ForeignKey(City, on_delete=models.CASCADE, related_name='forecasts')\n detailed_status = models.CharField(max_length=1023, blank=True, null=True)\n data = JSONField(blank=True, null=True, help_text='Whole JSON data representing the forecast details')\n time = 
models.DateTimeField()\n\n class Meta:\n db_table = 'forecasts'\n",
"step-ids": [
12,
17,
18,
19,
22
]
}
|
[
12,
17,
18,
19,
22
] |
# Takes in a word and makes a list containing individual characters
def split(word):
return [char for char in word]
# Removes empty strings (and None placeholders from error tokens) from a list
def removeEmptyStrings(lst):
    while "" in lst:
        lst.remove("")
    while None in lst:
        lst.remove(None)
ints = ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9']
tokenList = []
class Token:
def __init__(self, token, value):
self.token = token
self.value = value
def display(self):
        # Reports (prints) an error if the character is unknown
if self.token == 'ERROR':
error = Error('UnknownError', 'cannot identify character', str(self.value), 1, r'C:\Users\tanne\My_Codes\Python\ease_language\SAMPLE.py')
print(error.display())
# Displays token value pairs
else:
if self.token == 'STRING' or self.token == 'INT':
pair = f'{self.token}: {self.value}'
else:
pair = f'{self.token}'
return pair
# Adds token value pairs to list
def addToList(self):
tokenList.append(self.display())
class Lexer:
def __init__(self, items):
self.items = split(items)
self.index = 0
self.item = ''
self.stringOn = False
self.stringList = ''
self.intOn = False
# Identifies correct token type
def identify(self):
ints = ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9']
tok = ''
val = self.item
if '"' == self.item or "'" == self.item:
if self.stringOn:
self.stringOn = False
tok = 'STRING'
val = self.getString()
else:
self.stringOn = True
elif self.stringOn:
self.addToString()
elif self.item in ' \n':
pass
elif '+' == self.item:
tok = 'PLUS'
elif '-' == self.item:
tok = 'MINUS'
elif '*' == self.item:
tok = 'MUL'
elif '/' == self.item:
tok = 'DIV'
elif '(' == self.item:
tok = 'LPAREN'
elif ')' == self.item:
tok = 'RPAREN'
else:
if self.item in ints:
tok = 'INT'
else:
tok = 'ERROR'
token = Token(tok, val)
return token.addToList()
# Moves forward a character
def advance(self):
self.item = self.items[self.index]
self.index += 1
# Displays list of token value pairs
def displayAll(self):
removeEmptyStrings(tokenList)
return tokenList
# Adds an item to a string
def addToString(self):
self.stringList += self.item
# Returns string and clears it
def getString(self):
temp = self.stringList
self.stringList = ''
return temp
class Error:
def __init__(self, error, detail, code, line, fileName):
self.error = error
self.detail = detail
self.code = code
self.line = line
self.file = fileName
# Displays errors
def display(self):
return f'File "{self.file}", line {str(self.line)}\n {self.code}\n{self.error}: {self.detail}'
# Runs needed methods
def run(text):
wordList = split(text)
l1 = Lexer(text)
for word in wordList:
l1.advance()
l1.identify()
print(l1.displayAll())
tokenList.clear()
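# --- hypothetical usage sketch (editor's addition, not part of the original) ---
# Each call tokenizes one line character by character and prints the pairs:
# run('(1 + 2) * 3')
# # prints: ['LPAREN', 'INT: 1', 'PLUS', 'INT: 2', 'RPAREN', 'MUL', 'INT: 3']
# run('"hi" + "yo"')
# # prints: ['STRING: hi', 'PLUS', 'STRING: yo']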
|
normal
|
{
"blob_id": "8d5b75dc945844d48f52159be08fc1e6aa51fdf5",
"index": 497,
"step-1": "<mask token>\n\n\nclass Lexer:\n\n def __init__(self, items):\n self.items = split(items)\n self.index = 0\n self.item = ''\n self.stringOn = False\n self.stringList = ''\n self.intOn = False\n <mask token>\n\n def advance(self):\n self.item = self.items[self.index]\n self.index += 1\n\n def displayAll(self):\n removeEmptyStrings(tokenList)\n return tokenList\n <mask token>\n\n def getString(self):\n temp = self.stringList\n self.stringList = ''\n return temp\n\n\nclass Error:\n\n def __init__(self, error, detail, code, line, fileName):\n self.error = error\n self.detail = detail\n self.code = code\n self.line = line\n self.file = fileName\n\n def display(self):\n return f\"\"\"File \"{self.file}\", line {str(self.line)}\n {self.code}\n{self.error}: {self.detail}\"\"\"\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass Lexer:\n\n def __init__(self, items):\n self.items = split(items)\n self.index = 0\n self.item = ''\n self.stringOn = False\n self.stringList = ''\n self.intOn = False\n\n def identify(self):\n ints = ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9']\n tok = ''\n val = self.item\n if '\"' == self.item or \"'\" == self.item:\n if self.stringOn:\n self.stringOn = False\n tok = 'STRING'\n val = self.getString()\n else:\n self.stringOn = True\n elif self.stringOn:\n self.addToString()\n elif self.item in ' \\n':\n pass\n elif '+' == self.item:\n tok = 'PLUS'\n elif '-' == self.item:\n tok = 'MINUS'\n elif '*' == self.item:\n tok = 'MUL'\n elif '/' == self.item:\n tok = 'DIV'\n elif '(' == self.item:\n tok = 'LPAREN'\n elif ')' == self.item:\n tok = 'RPAREN'\n elif self.item in ints:\n tok = 'INT'\n else:\n tok = 'ERROR'\n token = Token(tok, val)\n return token.addToList()\n\n def advance(self):\n self.item = self.items[self.index]\n self.index += 1\n\n def displayAll(self):\n removeEmptyStrings(tokenList)\n return tokenList\n\n def addToString(self):\n self.stringList += self.item\n\n def getString(self):\n temp = self.stringList\n self.stringList = ''\n return temp\n\n\nclass Error:\n\n def __init__(self, error, detail, code, line, fileName):\n self.error = error\n self.detail = detail\n self.code = code\n self.line = line\n self.file = fileName\n\n def display(self):\n return f\"\"\"File \"{self.file}\", line {str(self.line)}\n {self.code}\n{self.error}: {self.detail}\"\"\"\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass Token:\n\n def __init__(self, token, value):\n self.token = token\n self.value = value\n\n def display(self):\n if self.token == 'ERROR':\n error = Error('UnknownError', 'cannot identify character', str(\n self.value), 1,\n 'C:\\\\Users\\\\tanne\\\\My_Codes\\\\Python\\\\ease_language\\\\SAMPLE.py')\n print(error.display())\n else:\n if self.token == 'STRING' or self.token == 'INT':\n pair = f'{self.token}: {self.value}'\n else:\n pair = f'{self.token}'\n return pair\n\n def addToList(self):\n tokenList.append(self.display())\n\n\nclass Lexer:\n\n def __init__(self, items):\n self.items = split(items)\n self.index = 0\n self.item = ''\n self.stringOn = False\n self.stringList = ''\n self.intOn = False\n\n def identify(self):\n ints = ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9']\n tok = ''\n val = self.item\n if '\"' == self.item or \"'\" == self.item:\n if self.stringOn:\n self.stringOn = False\n tok = 'STRING'\n val = self.getString()\n else:\n self.stringOn = True\n elif self.stringOn:\n self.addToString()\n elif self.item in ' \\n':\n pass\n elif '+' == self.item:\n tok = 'PLUS'\n elif '-' == self.item:\n tok = 'MINUS'\n elif '*' == self.item:\n tok = 'MUL'\n elif '/' == self.item:\n tok = 'DIV'\n elif '(' == self.item:\n tok = 'LPAREN'\n elif ')' == self.item:\n tok = 'RPAREN'\n elif self.item in ints:\n tok = 'INT'\n else:\n tok = 'ERROR'\n token = Token(tok, val)\n return token.addToList()\n\n def advance(self):\n self.item = self.items[self.index]\n self.index += 1\n\n def displayAll(self):\n removeEmptyStrings(tokenList)\n return tokenList\n\n def addToString(self):\n self.stringList += self.item\n\n def getString(self):\n temp = self.stringList\n self.stringList = ''\n return temp\n\n\nclass Error:\n\n def __init__(self, error, detail, code, line, fileName):\n self.error = error\n self.detail = detail\n self.code = code\n self.line = line\n self.file = fileName\n\n def display(self):\n return f\"\"\"File \"{self.file}\", line {str(self.line)}\n {self.code}\n{self.error}: {self.detail}\"\"\"\n\n\n<mask token>\n",
"step-4": "def split(word):\n return [char for char in word]\n\n\ndef removeEmptyStrings(lst):\n while '' in lst:\n lst.remove('')\n\n\nints = ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9']\ntokenList = []\n\n\nclass Token:\n\n def __init__(self, token, value):\n self.token = token\n self.value = value\n\n def display(self):\n if self.token == 'ERROR':\n error = Error('UnknownError', 'cannot identify character', str(\n self.value), 1,\n 'C:\\\\Users\\\\tanne\\\\My_Codes\\\\Python\\\\ease_language\\\\SAMPLE.py')\n print(error.display())\n else:\n if self.token == 'STRING' or self.token == 'INT':\n pair = f'{self.token}: {self.value}'\n else:\n pair = f'{self.token}'\n return pair\n\n def addToList(self):\n tokenList.append(self.display())\n\n\nclass Lexer:\n\n def __init__(self, items):\n self.items = split(items)\n self.index = 0\n self.item = ''\n self.stringOn = False\n self.stringList = ''\n self.intOn = False\n\n def identify(self):\n ints = ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9']\n tok = ''\n val = self.item\n if '\"' == self.item or \"'\" == self.item:\n if self.stringOn:\n self.stringOn = False\n tok = 'STRING'\n val = self.getString()\n else:\n self.stringOn = True\n elif self.stringOn:\n self.addToString()\n elif self.item in ' \\n':\n pass\n elif '+' == self.item:\n tok = 'PLUS'\n elif '-' == self.item:\n tok = 'MINUS'\n elif '*' == self.item:\n tok = 'MUL'\n elif '/' == self.item:\n tok = 'DIV'\n elif '(' == self.item:\n tok = 'LPAREN'\n elif ')' == self.item:\n tok = 'RPAREN'\n elif self.item in ints:\n tok = 'INT'\n else:\n tok = 'ERROR'\n token = Token(tok, val)\n return token.addToList()\n\n def advance(self):\n self.item = self.items[self.index]\n self.index += 1\n\n def displayAll(self):\n removeEmptyStrings(tokenList)\n return tokenList\n\n def addToString(self):\n self.stringList += self.item\n\n def getString(self):\n temp = self.stringList\n self.stringList = ''\n return temp\n\n\nclass Error:\n\n def __init__(self, error, detail, code, line, fileName):\n self.error = error\n self.detail = detail\n self.code = code\n self.line = line\n self.file = fileName\n\n def display(self):\n return f\"\"\"File \"{self.file}\", line {str(self.line)}\n {self.code}\n{self.error}: {self.detail}\"\"\"\n\n\ndef run(text):\n wordList = split(text)\n l1 = Lexer(text)\n for word in wordList:\n l1.advance()\n l1.identify()\n print(l1.displayAll())\n tokenList.clear()\n",
"step-5": "# Takes in a word and makes a list containing individual characters\ndef split(word): \n return [char for char in word]\n\n# Removes empty strings from a list\ndef removeEmptyStrings(lst):\n while \"\" in lst: \n lst.remove(\"\") \n\nints = ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9']\ntokenList = []\n\nclass Token:\n\n def __init__(self, token, value):\n self.token = token\n self.value = value\n \n def display(self):\n\n # Throws error if character is unknown\n if self.token == 'ERROR':\n error = Error('UnknownError', 'cannot identify character', str(self.value), 1, r'C:\\Users\\tanne\\My_Codes\\Python\\ease_language\\SAMPLE.py')\n print(error.display())\n\n # Displays token value pairs\n else:\n if self.token == 'STRING' or self.token == 'INT':\n pair = f'{self.token}: {self.value}'\n else:\n pair = f'{self.token}'\n return pair\n \n # Adds token value pairs to list\n def addToList(self):\n tokenList.append(self.display())\n\n\nclass Lexer:\n\n def __init__(self, items):\n self.items = split(items)\n self.index = 0\n self.item = ''\n self.stringOn = False\n self.stringList = ''\n self.intOn = False\n\n # Identifies correct token type\n def identify(self):\n ints = ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9']\n\n tok = ''\n val = self.item\n\n if '\"' == self.item or \"'\" == self.item:\n if self.stringOn:\n self.stringOn = False\n tok = 'STRING'\n val = self.getString()\n else:\n self.stringOn = True\n elif self.stringOn:\n self.addToString()\n elif self.item in ' \\n':\n pass\n elif '+' == self.item:\n tok = 'PLUS'\n elif '-' == self.item:\n tok = 'MINUS'\n elif '*' == self.item:\n tok = 'MUL'\n elif '/' == self.item:\n tok = 'DIV'\n elif '(' == self.item:\n tok = 'LPAREN'\n elif ')' == self.item:\n tok = 'RPAREN'\n else:\n if self.item in ints:\n tok = 'INT'\n else:\n tok = 'ERROR'\n \n token = Token(tok, val)\n return token.addToList()\n \n # Moves forward a character\n def advance(self):\n self.item = self.items[self.index]\n self.index += 1\n \n # Displays list of token value pairs\n def displayAll(self):\n removeEmptyStrings(tokenList)\n return tokenList\n \n # Adds an item to a string\n def addToString(self):\n self.stringList += self.item\n \n # Returns string and clears it\n def getString(self):\n temp = self.stringList\n self.stringList = ''\n return temp\n\n\nclass Error:\n\n def __init__(self, error, detail, code, line, fileName):\n self.error = error\n self.detail = detail\n self.code = code\n self.line = line\n self.file = fileName\n \n # Displays errors\n def display(self):\n return f'File \"{self.file}\", line {str(self.line)}\\n {self.code}\\n{self.error}: {self.detail}'\n\n# Runs needed methods\ndef run(text):\n wordList = split(text)\n l1 = Lexer(text)\n\n for word in wordList:\n l1.advance()\n l1.identify()\n \n print(l1.displayAll())\n tokenList.clear()",
"step-ids": [
8,
10,
14,
18,
19
]
}
|
[
8,
10,
14,
18,
19
] |
import sys
sys.stdin = open('input.txt', 'rt')
BLOCK_0 = 1
BLOCK_1 = 2
BLOCK_2 = 3
N = int(input())
X, Y = 10, 10
# x: row, y: column
GRN = 0
BLU = 1
maps = [[0]*Y for _ in range(X)]
dx = [1, 0]
dy = [0, 1]
def outMaps(x, y):
global X, Y
if 0<=x<X and 0<=y<Y: return False
else: return True
def meetBlock(x, y, maps):
if maps[x][y] == 1: return True
else: return False
def onlyUpdate(n_blocks, xs, ys, maps):
for i in range(n_blocks):
maps[xs[i]][ys[i]] = 1
def oneLineFull(maps, CLR):
for i in range(4, 10):
for j in range(4):
if CLR == GRN and maps[i][j] == 0:
break
elif CLR == BLU and maps[j][i] == 0:
break
        else:  # every cell was 1, so this line is completely full
return True, i
return False, 0
def pullAndUpdate(olf_idx, maps, CLR):
for i in range(olf_idx, 3, -1):
for j in range(4):
if CLR == GRN:
if olf_idx == 4:
maps[i][j] = 0
else:
maps[i][j] = maps[i-1][j]
maps[i-1][j] = 0
elif CLR == BLU:
if olf_idx == 4:
maps[j][i] = 0
else:
maps[j][i] = maps[j][i-1]
maps[j][i-1] = 0
def pushAndPullUpdate(n_inBorder, maps, CLR):
for i in range(10-1-n_inBorder, 3, -1):
for j in range(4):
if CLR == GRN:
maps[i+n_inBorder][j] = maps[i][j]
maps[i][j] = 0
elif CLR == BLU:
maps[j][i+n_inBorder] = maps[j][i]
maps[j][i] = 0
def print_maps(maps):
global X, Y
for i in range(X):
for j in range(Y):
print(maps[i][j], end=' ')
print()
print()
def isBlockInBorder(maps, CLR):
cnt = 0
for i in range(4, 6):
for j in range(4):
if (CLR == GRN and maps[i][j] == 1) or (CLR == BLU and maps[j][i] == 1):
cnt += 1
break
return cnt
def Mover(n_blocks, xs_ori, ys_ori, maps, CLR):
xs = xs_ori.copy()
ys = ys_ori.copy()
score = 0
STOP_FLAG = False
while not STOP_FLAG:
for i in range(n_blocks):
xt, yt = xs[i] + dx[CLR], ys[i] + dy[CLR]
if outMaps(xt, yt):
STOP_FLAG = True
break
if meetBlock(xt, yt, maps):
STOP_FLAG = True
break
else:
            # the for-loop finished without break, so advance every cell
for i in range(n_blocks):
xs[i], ys[i] = xs[i] + dx[CLR], ys[i] + dy[CLR]
    # We exited because STOP_FLAG became True, i.e. this block cannot move
    # any further; commit it to maps, then check for completely filled lines.
onlyUpdate(n_blocks, xs, ys, maps)
    # While some line is completely full, score it and pull the field down
OLF_FLAG = True
while OLF_FLAG:
OLF_FLAG, olf_idx = oneLineFull(maps, CLR)
if OLF_FLAG:
score += 1
pullAndUpdate(olf_idx, maps, CLR)
    # If blocks remain inside the border area (rows/cols 4..5), shift the field
n_inBorder = isBlockInBorder(maps, CLR)
if n_inBorder:
pushAndPullUpdate(n_inBorder, maps, CLR)
return score
def Area_score(maps, CLR):
score = 0
for i in range(4, 10):
for j in range(4):
if CLR == GRN: score += maps[i][j]
elif CLR == BLU: score += maps[j][i]
return score
total_score = 0
for i in range(N):
t, x, y = map(int, input().split())
xs, ys = [x], [y]
if t == BLOCK_0:
n_blocks = 1
elif t == BLOCK_1:
n_blocks = 2
xs.append(x)
ys.append(y+1)
elif t == BLOCK_2:
n_blocks = 2
xs.append(x+1)
ys.append(y)
total_score += Mover(n_blocks, xs, ys, maps, GRN)
total_score += Mover(n_blocks, xs, ys, maps, BLU)
#print_maps(maps)
grn_score = Area_score(maps, GRN)
blu_score = Area_score(maps, BLU)
print(total_score)
print(grn_score+blu_score)
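# --- hypothetical input.txt for local testing (made-up values) ---
# First line: N (number of falling blocks); then N lines of "t x y" where
# t=1 is a 1x1 block, t=2 occupies (x, y)-(x, y+1), t=3 occupies (x, y)-(x+1, y):
# 2
# 1 1 1
# 2 2 0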
|
normal
|
{
"blob_id": "937d01eaa82cbfe07b20fae9320c554a0960d7b1",
"index": 571,
"step-1": "<mask token>\n\n\ndef meetBlock(x, y, maps):\n if maps[x][y] == 1:\n return True\n else:\n return False\n\n\ndef onlyUpdate(n_blocks, xs, ys, maps):\n for i in range(n_blocks):\n maps[xs[i]][ys[i]] = 1\n\n\ndef oneLineFull(maps, CLR):\n for i in range(4, 10):\n for j in range(4):\n if CLR == GRN and maps[i][j] == 0:\n break\n elif CLR == BLU and maps[j][i] == 0:\n break\n else:\n return True, i\n return False, 0\n\n\ndef pullAndUpdate(olf_idx, maps, CLR):\n for i in range(olf_idx, 3, -1):\n for j in range(4):\n if CLR == GRN:\n if olf_idx == 4:\n maps[i][j] = 0\n else:\n maps[i][j] = maps[i - 1][j]\n maps[i - 1][j] = 0\n elif CLR == BLU:\n if olf_idx == 4:\n maps[j][i] = 0\n else:\n maps[j][i] = maps[j][i - 1]\n maps[j][i - 1] = 0\n\n\ndef pushAndPullUpdate(n_inBorder, maps, CLR):\n for i in range(10 - 1 - n_inBorder, 3, -1):\n for j in range(4):\n if CLR == GRN:\n maps[i + n_inBorder][j] = maps[i][j]\n maps[i][j] = 0\n elif CLR == BLU:\n maps[j][i + n_inBorder] = maps[j][i]\n maps[j][i] = 0\n\n\ndef print_maps(maps):\n global X, Y\n for i in range(X):\n for j in range(Y):\n print(maps[i][j], end=' ')\n print()\n print()\n\n\ndef isBlockInBorder(maps, CLR):\n cnt = 0\n for i in range(4, 6):\n for j in range(4):\n if CLR == GRN and maps[i][j] == 1 or CLR == BLU and maps[j][i\n ] == 1:\n cnt += 1\n break\n return cnt\n\n\ndef Mover(n_blocks, xs_ori, ys_ori, maps, CLR):\n xs = xs_ori.copy()\n ys = ys_ori.copy()\n score = 0\n STOP_FLAG = False\n while not STOP_FLAG:\n for i in range(n_blocks):\n xt, yt = xs[i] + dx[CLR], ys[i] + dy[CLR]\n if outMaps(xt, yt):\n STOP_FLAG = True\n break\n if meetBlock(xt, yt, maps):\n STOP_FLAG = True\n break\n else:\n for i in range(n_blocks):\n xs[i], ys[i] = xs[i] + dx[CLR], ys[i] + dy[CLR]\n onlyUpdate(n_blocks, xs, ys, maps)\n OLF_FLAG = True\n while OLF_FLAG:\n OLF_FLAG, olf_idx = oneLineFull(maps, CLR)\n if OLF_FLAG:\n score += 1\n pullAndUpdate(olf_idx, maps, CLR)\n n_inBorder = isBlockInBorder(maps, CLR)\n if n_inBorder:\n pushAndPullUpdate(n_inBorder, maps, CLR)\n return score\n\n\ndef Area_score(maps, CLR):\n score = 0\n for i in range(4, 10):\n for j in range(4):\n if CLR == GRN:\n score += maps[i][j]\n elif CLR == BLU:\n score += maps[j][i]\n return score\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef outMaps(x, y):\n global X, Y\n if 0 <= x < X and 0 <= y < Y:\n return False\n else:\n return True\n\n\ndef meetBlock(x, y, maps):\n if maps[x][y] == 1:\n return True\n else:\n return False\n\n\ndef onlyUpdate(n_blocks, xs, ys, maps):\n for i in range(n_blocks):\n maps[xs[i]][ys[i]] = 1\n\n\ndef oneLineFull(maps, CLR):\n for i in range(4, 10):\n for j in range(4):\n if CLR == GRN and maps[i][j] == 0:\n break\n elif CLR == BLU and maps[j][i] == 0:\n break\n else:\n return True, i\n return False, 0\n\n\ndef pullAndUpdate(olf_idx, maps, CLR):\n for i in range(olf_idx, 3, -1):\n for j in range(4):\n if CLR == GRN:\n if olf_idx == 4:\n maps[i][j] = 0\n else:\n maps[i][j] = maps[i - 1][j]\n maps[i - 1][j] = 0\n elif CLR == BLU:\n if olf_idx == 4:\n maps[j][i] = 0\n else:\n maps[j][i] = maps[j][i - 1]\n maps[j][i - 1] = 0\n\n\ndef pushAndPullUpdate(n_inBorder, maps, CLR):\n for i in range(10 - 1 - n_inBorder, 3, -1):\n for j in range(4):\n if CLR == GRN:\n maps[i + n_inBorder][j] = maps[i][j]\n maps[i][j] = 0\n elif CLR == BLU:\n maps[j][i + n_inBorder] = maps[j][i]\n maps[j][i] = 0\n\n\ndef print_maps(maps):\n global X, Y\n for i in range(X):\n for j in range(Y):\n print(maps[i][j], end=' ')\n print()\n print()\n\n\ndef isBlockInBorder(maps, CLR):\n cnt = 0\n for i in range(4, 6):\n for j in range(4):\n if CLR == GRN and maps[i][j] == 1 or CLR == BLU and maps[j][i\n ] == 1:\n cnt += 1\n break\n return cnt\n\n\ndef Mover(n_blocks, xs_ori, ys_ori, maps, CLR):\n xs = xs_ori.copy()\n ys = ys_ori.copy()\n score = 0\n STOP_FLAG = False\n while not STOP_FLAG:\n for i in range(n_blocks):\n xt, yt = xs[i] + dx[CLR], ys[i] + dy[CLR]\n if outMaps(xt, yt):\n STOP_FLAG = True\n break\n if meetBlock(xt, yt, maps):\n STOP_FLAG = True\n break\n else:\n for i in range(n_blocks):\n xs[i], ys[i] = xs[i] + dx[CLR], ys[i] + dy[CLR]\n onlyUpdate(n_blocks, xs, ys, maps)\n OLF_FLAG = True\n while OLF_FLAG:\n OLF_FLAG, olf_idx = oneLineFull(maps, CLR)\n if OLF_FLAG:\n score += 1\n pullAndUpdate(olf_idx, maps, CLR)\n n_inBorder = isBlockInBorder(maps, CLR)\n if n_inBorder:\n pushAndPullUpdate(n_inBorder, maps, CLR)\n return score\n\n\ndef Area_score(maps, CLR):\n score = 0\n for i in range(4, 10):\n for j in range(4):\n if CLR == GRN:\n score += maps[i][j]\n elif CLR == BLU:\n score += maps[j][i]\n return score\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef outMaps(x, y):\n global X, Y\n if 0 <= x < X and 0 <= y < Y:\n return False\n else:\n return True\n\n\ndef meetBlock(x, y, maps):\n if maps[x][y] == 1:\n return True\n else:\n return False\n\n\ndef onlyUpdate(n_blocks, xs, ys, maps):\n for i in range(n_blocks):\n maps[xs[i]][ys[i]] = 1\n\n\ndef oneLineFull(maps, CLR):\n for i in range(4, 10):\n for j in range(4):\n if CLR == GRN and maps[i][j] == 0:\n break\n elif CLR == BLU and maps[j][i] == 0:\n break\n else:\n return True, i\n return False, 0\n\n\ndef pullAndUpdate(olf_idx, maps, CLR):\n for i in range(olf_idx, 3, -1):\n for j in range(4):\n if CLR == GRN:\n if olf_idx == 4:\n maps[i][j] = 0\n else:\n maps[i][j] = maps[i - 1][j]\n maps[i - 1][j] = 0\n elif CLR == BLU:\n if olf_idx == 4:\n maps[j][i] = 0\n else:\n maps[j][i] = maps[j][i - 1]\n maps[j][i - 1] = 0\n\n\ndef pushAndPullUpdate(n_inBorder, maps, CLR):\n for i in range(10 - 1 - n_inBorder, 3, -1):\n for j in range(4):\n if CLR == GRN:\n maps[i + n_inBorder][j] = maps[i][j]\n maps[i][j] = 0\n elif CLR == BLU:\n maps[j][i + n_inBorder] = maps[j][i]\n maps[j][i] = 0\n\n\ndef print_maps(maps):\n global X, Y\n for i in range(X):\n for j in range(Y):\n print(maps[i][j], end=' ')\n print()\n print()\n\n\ndef isBlockInBorder(maps, CLR):\n cnt = 0\n for i in range(4, 6):\n for j in range(4):\n if CLR == GRN and maps[i][j] == 1 or CLR == BLU and maps[j][i\n ] == 1:\n cnt += 1\n break\n return cnt\n\n\ndef Mover(n_blocks, xs_ori, ys_ori, maps, CLR):\n xs = xs_ori.copy()\n ys = ys_ori.copy()\n score = 0\n STOP_FLAG = False\n while not STOP_FLAG:\n for i in range(n_blocks):\n xt, yt = xs[i] + dx[CLR], ys[i] + dy[CLR]\n if outMaps(xt, yt):\n STOP_FLAG = True\n break\n if meetBlock(xt, yt, maps):\n STOP_FLAG = True\n break\n else:\n for i in range(n_blocks):\n xs[i], ys[i] = xs[i] + dx[CLR], ys[i] + dy[CLR]\n onlyUpdate(n_blocks, xs, ys, maps)\n OLF_FLAG = True\n while OLF_FLAG:\n OLF_FLAG, olf_idx = oneLineFull(maps, CLR)\n if OLF_FLAG:\n score += 1\n pullAndUpdate(olf_idx, maps, CLR)\n n_inBorder = isBlockInBorder(maps, CLR)\n if n_inBorder:\n pushAndPullUpdate(n_inBorder, maps, CLR)\n return score\n\n\ndef Area_score(maps, CLR):\n score = 0\n for i in range(4, 10):\n for j in range(4):\n if CLR == GRN:\n score += maps[i][j]\n elif CLR == BLU:\n score += maps[j][i]\n return score\n\n\n<mask token>\nfor i in range(N):\n t, x, y = map(int, input().split())\n xs, ys = [x], [y]\n if t == BLOCK_0:\n n_blocks = 1\n elif t == BLOCK_1:\n n_blocks = 2\n xs.append(x)\n ys.append(y + 1)\n elif t == BLOCK_2:\n n_blocks = 2\n xs.append(x + 1)\n ys.append(y)\n total_score += Mover(n_blocks, xs, ys, maps, GRN)\n total_score += Mover(n_blocks, xs, ys, maps, BLU)\n<mask token>\nprint(total_score)\nprint(grn_score + blu_score)\n",
"step-4": "<mask token>\nsys.stdin = open('input.txt', 'rt')\nBLOCK_0 = 1\nBLOCK_1 = 2\nBLOCK_2 = 3\nN = int(input())\nX, Y = 10, 10\nGRN = 0\nBLU = 1\nmaps = [([0] * Y) for _ in range(X)]\ndx = [1, 0]\ndy = [0, 1]\n\n\ndef outMaps(x, y):\n global X, Y\n if 0 <= x < X and 0 <= y < Y:\n return False\n else:\n return True\n\n\ndef meetBlock(x, y, maps):\n if maps[x][y] == 1:\n return True\n else:\n return False\n\n\ndef onlyUpdate(n_blocks, xs, ys, maps):\n for i in range(n_blocks):\n maps[xs[i]][ys[i]] = 1\n\n\ndef oneLineFull(maps, CLR):\n for i in range(4, 10):\n for j in range(4):\n if CLR == GRN and maps[i][j] == 0:\n break\n elif CLR == BLU and maps[j][i] == 0:\n break\n else:\n return True, i\n return False, 0\n\n\ndef pullAndUpdate(olf_idx, maps, CLR):\n for i in range(olf_idx, 3, -1):\n for j in range(4):\n if CLR == GRN:\n if olf_idx == 4:\n maps[i][j] = 0\n else:\n maps[i][j] = maps[i - 1][j]\n maps[i - 1][j] = 0\n elif CLR == BLU:\n if olf_idx == 4:\n maps[j][i] = 0\n else:\n maps[j][i] = maps[j][i - 1]\n maps[j][i - 1] = 0\n\n\ndef pushAndPullUpdate(n_inBorder, maps, CLR):\n for i in range(10 - 1 - n_inBorder, 3, -1):\n for j in range(4):\n if CLR == GRN:\n maps[i + n_inBorder][j] = maps[i][j]\n maps[i][j] = 0\n elif CLR == BLU:\n maps[j][i + n_inBorder] = maps[j][i]\n maps[j][i] = 0\n\n\ndef print_maps(maps):\n global X, Y\n for i in range(X):\n for j in range(Y):\n print(maps[i][j], end=' ')\n print()\n print()\n\n\ndef isBlockInBorder(maps, CLR):\n cnt = 0\n for i in range(4, 6):\n for j in range(4):\n if CLR == GRN and maps[i][j] == 1 or CLR == BLU and maps[j][i\n ] == 1:\n cnt += 1\n break\n return cnt\n\n\ndef Mover(n_blocks, xs_ori, ys_ori, maps, CLR):\n xs = xs_ori.copy()\n ys = ys_ori.copy()\n score = 0\n STOP_FLAG = False\n while not STOP_FLAG:\n for i in range(n_blocks):\n xt, yt = xs[i] + dx[CLR], ys[i] + dy[CLR]\n if outMaps(xt, yt):\n STOP_FLAG = True\n break\n if meetBlock(xt, yt, maps):\n STOP_FLAG = True\n break\n else:\n for i in range(n_blocks):\n xs[i], ys[i] = xs[i] + dx[CLR], ys[i] + dy[CLR]\n onlyUpdate(n_blocks, xs, ys, maps)\n OLF_FLAG = True\n while OLF_FLAG:\n OLF_FLAG, olf_idx = oneLineFull(maps, CLR)\n if OLF_FLAG:\n score += 1\n pullAndUpdate(olf_idx, maps, CLR)\n n_inBorder = isBlockInBorder(maps, CLR)\n if n_inBorder:\n pushAndPullUpdate(n_inBorder, maps, CLR)\n return score\n\n\ndef Area_score(maps, CLR):\n score = 0\n for i in range(4, 10):\n for j in range(4):\n if CLR == GRN:\n score += maps[i][j]\n elif CLR == BLU:\n score += maps[j][i]\n return score\n\n\ntotal_score = 0\nfor i in range(N):\n t, x, y = map(int, input().split())\n xs, ys = [x], [y]\n if t == BLOCK_0:\n n_blocks = 1\n elif t == BLOCK_1:\n n_blocks = 2\n xs.append(x)\n ys.append(y + 1)\n elif t == BLOCK_2:\n n_blocks = 2\n xs.append(x + 1)\n ys.append(y)\n total_score += Mover(n_blocks, xs, ys, maps, GRN)\n total_score += Mover(n_blocks, xs, ys, maps, BLU)\ngrn_score = Area_score(maps, GRN)\nblu_score = Area_score(maps, BLU)\nprint(total_score)\nprint(grn_score + blu_score)\n",
"step-5": "import sys\nsys.stdin = open('input.txt', 'rt')\nBLOCK_0 = 1\nBLOCK_1 = 2\nBLOCK_2 = 3\nN = int(input())\nX, Y = 10, 10\n# x: 행 , y: 열A\nGRN = 0\nBLU = 1\nmaps = [[0]*Y for _ in range(X)]\ndx = [1, 0]\ndy = [0, 1]\n\ndef outMaps(x, y):\n global X, Y\n if 0<=x<X and 0<=y<Y: return False\n else: return True\n\ndef meetBlock(x, y, maps):\n if maps[x][y] == 1: return True\n else: return False\n\ndef onlyUpdate(n_blocks, xs, ys, maps):\n for i in range(n_blocks):\n maps[xs[i]][ys[i]] = 1\n\ndef oneLineFull(maps, CLR):\n for i in range(4, 10):\n for j in range(4):\n if CLR == GRN and maps[i][j] == 0:\n break\n elif CLR == BLU and maps[j][i] == 0:\n break\n else: # 전부 1이여서 full line일 때\n return True, i\n return False, 0\n\ndef pullAndUpdate(olf_idx, maps, CLR):\n #for olf in list_olf:\n for i in range(olf_idx, 3, -1):\n for j in range(4):\n if CLR == GRN:\n if olf_idx == 4:\n maps[i][j] = 0\n else:\n maps[i][j] = maps[i-1][j]\n maps[i-1][j] = 0\n elif CLR == BLU:\n if olf_idx == 4:\n maps[j][i] = 0\n else:\n maps[j][i] = maps[j][i-1]\n maps[j][i-1] = 0\n\ndef pushAndPullUpdate(n_inBorder, maps, CLR):\n for i in range(10-1-n_inBorder, 3, -1):\n for j in range(4):\n if CLR == GRN:\n maps[i+n_inBorder][j] = maps[i][j]\n maps[i][j] = 0\n elif CLR == BLU:\n maps[j][i+n_inBorder] = maps[j][i]\n maps[j][i] = 0\n\n\ndef print_maps(maps):\n global X, Y\n for i in range(X):\n for j in range(Y):\n print(maps[i][j], end=' ')\n print()\n print()\n\ndef isBlockInBorder(maps, CLR):\n cnt = 0\n for i in range(4, 6):\n for j in range(4):\n if (CLR == GRN and maps[i][j] == 1) or (CLR == BLU and maps[j][i] == 1):\n cnt += 1\n break\n return cnt\n\ndef Mover(n_blocks, xs_ori, ys_ori, maps, CLR):\n xs = xs_ori.copy()\n ys = ys_ori.copy()\n score = 0\n STOP_FLAG = False\n while not STOP_FLAG:\n for i in range(n_blocks):\n xt, yt = xs[i] + dx[CLR], ys[i] + dy[CLR]\n if outMaps(xt, yt):\n STOP_FLAG = True\n break\n if meetBlock(xt, yt, maps):\n STOP_FLAG = True\n break\n else:\n # break 걸리지 않고 넘어왔으므로, update\n for i in range(n_blocks):\n xs[i], ys[i] = xs[i] + dx[CLR], ys[i] + dy[CLR]\n # 만약 STOP_FLAG == True 로 탈출했다면\n # 해당 상자의 이동이 끝난 것 이므로 한 줄이 전부 차있는 것이 있는지 check\n # maps에 업데이트\n onlyUpdate(n_blocks, xs, ys, maps)\n \n # 만약 one line full 인 라인이 있다면\n OLF_FLAG = True\n while OLF_FLAG:\n OLF_FLAG, olf_idx = oneLineFull(maps, CLR)\n if OLF_FLAG:\n score += 1\n pullAndUpdate(olf_idx, maps, CLR)\n\n # 만약 경계안에 block이 존재한다면\n n_inBorder = isBlockInBorder(maps, CLR)\n if n_inBorder:\n pushAndPullUpdate(n_inBorder, maps, CLR)\n return score\n\ndef Area_score(maps, CLR):\n score = 0\n for i in range(4, 10):\n for j in range(4):\n if CLR == GRN: score += maps[i][j]\n elif CLR == BLU: score += maps[j][i]\n return score\n\n\ntotal_score = 0\nfor i in range(N):\n t, x, y = map(int, input().split())\n xs, ys = [x], [y]\n if t == BLOCK_0:\n n_blocks = 1\n elif t == BLOCK_1:\n n_blocks = 2\n xs.append(x)\n ys.append(y+1)\n elif t == BLOCK_2:\n n_blocks = 2\n xs.append(x+1)\n ys.append(y)\n\n total_score += Mover(n_blocks, xs, ys, maps, GRN)\n total_score += Mover(n_blocks, xs, ys, maps, BLU)\n #print_maps(maps)\n\ngrn_score = Area_score(maps, GRN)\nblu_score = Area_score(maps, BLU)\n\nprint(total_score)\nprint(grn_score+blu_score)\n\n\n\n\n",
"step-ids": [
9,
10,
11,
12,
14
]
}
|
[
9,
10,
11,
12,
14
] |
class Solution:
    def toGoatLatin(self, S: str) -> str:
        def exchange(str2):
            # Vowel-initial words keep their letters; consonant-initial
            # words rotate the first letter to the end. Both get "ma".
            if str2[0] in "aeiouAEIOU":
                return str2 + "ma"
            return str2[1:] + str2[0] + "ma"
        res = []
        for i, word in enumerate(S.split(" "), start=1):
            # The i-th word (1-indexed) also gets i trailing 'a' characters.
            res.append(exchange(word) + "a" * i)
        return " ".join(res)
if __name__ == "__main__":
    s = Solution()
    str2 = "I speak Goat Latin"
    print(s.toGoatLatin(str2))  # -> Imaa peaksmaaa oatGmaaaa atinLmaaaaa
|
normal
|
{
"blob_id": "398c28265e61831ba65b4ae2a785e57c0fa5b6d2",
"index": 8311,
"step-1": "\n\n\nclass Solution:\n def toGoatLatin(self, S: str) -> str:\n \n def exchange(str2):\n if str2[0] in \"aeiou\":\n str2 = str2+\"ma\"\n else:\n str2 = str2[1:]+str2[0]+\"ma\"\n\n list2 = S.split(\" \")\n\n for i in list2:\n res.append(exchange(i))\n\n\n for i in res:\n \n\nif __name__ == \"__main__\":\n s = Solution()\n str2 = \"I speak Goat Latin\"\n print(s.toGoatLatin(str2))\n\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
# -*- coding: utf-8 -*-
from django import forms
from django.utils.translation import ugettext_lazy as _
# import models
from apps.qa.models.coupon import Coupon
from apps.qa.models.coupon_type import CouponType
COUPONTYPE_CHOICES = (
('text', _("text")),
('url', _("url")),
('questionnaire', _("questionnaire")),
)
class CouponForm(forms.ModelForm):
name = forms.CharField(max_length=64, label=_("Name"), required=True)
type = forms.ChoiceField(choices=COUPONTYPE_CHOICES)
    # these fields are JSON containers populated by custom business logic
data = forms.CharField(max_length=64, required=False)
style = forms.CharField(max_length=64, required=False)
valid_from = forms.DateTimeField(widget=forms.DateTimeInput(attrs={'class': 'datepicker'}, format='%Y-%m-%d %H:%M'),
input_formats=['%Y-%m-%d %H:%M', ],
label=_("Valid From"),
required=True)
valid_until = forms.DateTimeField(widget=forms.DateTimeInput(attrs={'class': 'datepicker'}, format='%Y-%m-%d %H:%M'),
input_formats=['%Y-%m-%d %H:%M', ],
label=_("Valid Until"),
required=True)
def clean(self):
cleaned_type = self.cleaned_data.get('type')
real_type = CouponType.objects.filter(name=cleaned_type).first()
if not real_type:
raise forms.ValidationError(_("Sorry, that coupon type cannot be found."))
else:
self.cleaned_data['type'] = real_type
return self.cleaned_data
class Meta:
model = Coupon
fields = ('name', 'type', 'data', 'style', 'valid_from', 'valid_until')
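# --- hypothetical usage sketch (editor's addition, not part of the original) ---
# Assumes a CouponType row named 'text' exists; clean() swaps the submitted
# string for that CouponType instance before the ModelForm saves:
# form = CouponForm(data={'name': 'Spring promo', 'type': 'text',
#                         'valid_from': '2020-03-01 00:00',
#                         'valid_until': '2020-03-31 23:59'})
# coupon = form.save() if form.is_valid() else None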
|
normal
|
{
"blob_id": "a0f83f0a2c6ddaa2fc641bd4fa48a6f50fd1d978",
"index": 1755,
"step-1": "<mask token>\n\n\nclass CouponForm(forms.ModelForm):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def clean(self):\n cleaned_type = self.cleaned_data.get('type')\n real_type = CouponType.objects.filter(name=cleaned_type).first()\n if not real_type:\n raise forms.ValidationError(_(\n 'Sorry, that coupon type cannot be found.'))\n else:\n self.cleaned_data['type'] = real_type\n return self.cleaned_data\n\n\n class Meta:\n model = Coupon\n fields = 'name', 'type', 'data', 'style', 'valid_from', 'valid_until'\n",
"step-2": "<mask token>\n\n\nclass CouponForm(forms.ModelForm):\n name = forms.CharField(max_length=64, label=_('Name'), required=True)\n type = forms.ChoiceField(choices=COUPONTYPE_CHOICES)\n data = forms.CharField(max_length=64, required=False)\n style = forms.CharField(max_length=64, required=False)\n valid_from = forms.DateTimeField(widget=forms.DateTimeInput(attrs={\n 'class': 'datepicker'}, format='%Y-%m-%d %H:%M'), input_formats=[\n '%Y-%m-%d %H:%M'], label=_('Valid From'), required=True)\n valid_until = forms.DateTimeField(widget=forms.DateTimeInput(attrs={\n 'class': 'datepicker'}, format='%Y-%m-%d %H:%M'), input_formats=[\n '%Y-%m-%d %H:%M'], label=_('Valid Until'), required=True)\n\n def clean(self):\n cleaned_type = self.cleaned_data.get('type')\n real_type = CouponType.objects.filter(name=cleaned_type).first()\n if not real_type:\n raise forms.ValidationError(_(\n 'Sorry, that coupon type cannot be found.'))\n else:\n self.cleaned_data['type'] = real_type\n return self.cleaned_data\n\n\n class Meta:\n model = Coupon\n fields = 'name', 'type', 'data', 'style', 'valid_from', 'valid_until'\n",
"step-3": "<mask token>\nCOUPONTYPE_CHOICES = ('text', _('text')), ('url', _('url')), ('questionnaire',\n _('questionnaire'))\n\n\nclass CouponForm(forms.ModelForm):\n name = forms.CharField(max_length=64, label=_('Name'), required=True)\n type = forms.ChoiceField(choices=COUPONTYPE_CHOICES)\n data = forms.CharField(max_length=64, required=False)\n style = forms.CharField(max_length=64, required=False)\n valid_from = forms.DateTimeField(widget=forms.DateTimeInput(attrs={\n 'class': 'datepicker'}, format='%Y-%m-%d %H:%M'), input_formats=[\n '%Y-%m-%d %H:%M'], label=_('Valid From'), required=True)\n valid_until = forms.DateTimeField(widget=forms.DateTimeInput(attrs={\n 'class': 'datepicker'}, format='%Y-%m-%d %H:%M'), input_formats=[\n '%Y-%m-%d %H:%M'], label=_('Valid Until'), required=True)\n\n def clean(self):\n cleaned_type = self.cleaned_data.get('type')\n real_type = CouponType.objects.filter(name=cleaned_type).first()\n if not real_type:\n raise forms.ValidationError(_(\n 'Sorry, that coupon type cannot be found.'))\n else:\n self.cleaned_data['type'] = real_type\n return self.cleaned_data\n\n\n class Meta:\n model = Coupon\n fields = 'name', 'type', 'data', 'style', 'valid_from', 'valid_until'\n",
"step-4": "from django import forms\nfrom django.utils.translation import ugettext_lazy as _\nfrom apps.qa.models.coupon import Coupon\nfrom apps.qa.models.coupon_type import CouponType\nCOUPONTYPE_CHOICES = ('text', _('text')), ('url', _('url')), ('questionnaire',\n _('questionnaire'))\n\n\nclass CouponForm(forms.ModelForm):\n name = forms.CharField(max_length=64, label=_('Name'), required=True)\n type = forms.ChoiceField(choices=COUPONTYPE_CHOICES)\n data = forms.CharField(max_length=64, required=False)\n style = forms.CharField(max_length=64, required=False)\n valid_from = forms.DateTimeField(widget=forms.DateTimeInput(attrs={\n 'class': 'datepicker'}, format='%Y-%m-%d %H:%M'), input_formats=[\n '%Y-%m-%d %H:%M'], label=_('Valid From'), required=True)\n valid_until = forms.DateTimeField(widget=forms.DateTimeInput(attrs={\n 'class': 'datepicker'}, format='%Y-%m-%d %H:%M'), input_formats=[\n '%Y-%m-%d %H:%M'], label=_('Valid Until'), required=True)\n\n def clean(self):\n cleaned_type = self.cleaned_data.get('type')\n real_type = CouponType.objects.filter(name=cleaned_type).first()\n if not real_type:\n raise forms.ValidationError(_(\n 'Sorry, that coupon type cannot be found.'))\n else:\n self.cleaned_data['type'] = real_type\n return self.cleaned_data\n\n\n class Meta:\n model = Coupon\n fields = 'name', 'type', 'data', 'style', 'valid_from', 'valid_until'\n",
"step-5": "# -*- coding: utf-8 -*-\nfrom django import forms\nfrom django.utils.translation import ugettext_lazy as _\n\n# import models\nfrom apps.qa.models.coupon import Coupon\nfrom apps.qa.models.coupon_type import CouponType\n\n\nCOUPONTYPE_CHOICES = (\n ('text', _(\"text\")),\n ('url', _(\"url\")),\n ('questionnaire', _(\"questionnaire\")),\n)\n\nclass CouponForm(forms.ModelForm):\n\n name = forms.CharField(max_length=64, label=_(\"Name\"), required=True)\n type = forms.ChoiceField(choices=COUPONTYPE_CHOICES)\n\n # these fields are JSON containers populated by custom BL\n data = forms.CharField(max_length=64, required=False)\n style = forms.CharField(max_length=64, required=False)\n\n valid_from = forms.DateTimeField(widget=forms.DateTimeInput(attrs={'class': 'datepicker'}, format='%Y-%m-%d %H:%M'),\n input_formats=['%Y-%m-%d %H:%M', ],\n label=_(\"Valid From\"),\n required=True)\n valid_until = forms.DateTimeField(widget=forms.DateTimeInput(attrs={'class': 'datepicker'}, format='%Y-%m-%d %H:%M'),\n input_formats=['%Y-%m-%d %H:%M', ],\n label=_(\"Valid Until\"),\n required=True)\n\n def clean(self):\n cleaned_type = self.cleaned_data.get('type')\n real_type = CouponType.objects.filter(name=cleaned_type).first()\n if not real_type:\n raise forms.ValidationError(_(\"Sorry, that coupon type cannot be found.\"))\n else:\n self.cleaned_data['type'] = real_type\n return self.cleaned_data\n\n class Meta:\n model = Coupon\n fields = ('name', 'type', 'data', 'style', 'valid_from', 'valid_until')\n",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
import numpy as np
import dl_style_transfer.workspace.data_helpers
import os
here = os.path.dirname(os.path.abspath(__file__))
sents = list(open(os.path.join(here, 'yelp_sentences.txt'))) + list(open(os.path.join(here, 'shake_sentences.txt')))
thresh = 5
col = dict()
word_to_ind = dict()
ind_to_word = dict()
def __line_into_col__(line):
tokens = dl_style_transfer.workspace.data_helpers.clean_str(line).split(" ")
for wor in tokens:
if col.get(wor) is None:
col[wor] = 1
else:
col[wor] = col[wor] + 1
for l in sents:
__line_into_col__(l)
lis = list(col.items())
lis.sort(key=lambda count: count[1], reverse=True)
for i, word in enumerate(lis):
word_to_ind[word[0]] = i
ind_to_word[i] = word[0]
voc_len = len(word_to_ind)
shape = (len(sents), voc_len)
def get_small_bag():
bag = []
for sent in sents:
sbag =[]
for wor in dl_style_transfer.workspace.data_helpers.clean_str(sent).split(" "):
sbag.append(word_to_ind[wor])
bag.append(sbag)
return bag
def get_bag():
bag = np.zeros(shape)
for j,sent in enumerate(sents):
for wor in dl_style_transfer.workspace.data_helpers.clean_str(sent).split(" "):
bag[j, word_to_ind[wor]] = bag[j, word_to_ind[wor]] + 1
    # keepdims=True keeps the per-row maxima broadcastable against the matrix
    return np.log(1 + bag) / np.max(np.log(1 + bag), axis=1, keepdims=True)
def string_to_vec(string):
tokens = dl_style_transfer.workspace.data_helpers.clean_str(string).split(" ")
vec = np.zeros(voc_len)
for wor in tokens:
vec[word_to_ind[wor]] = vec[word_to_ind[wor]] + 1
return vec
def get_ryans_strange_input():
vec = []
for l in sents:
vec.append(dl_style_transfer.workspace.data_helpers.clean_str(l))
return np.array([word_to_ind[i] for l in vec for i in l.split(" ")])
def vocab_length():
return voc_len
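# --- hypothetical usage sketch (editor's addition, not part of the original) ---
# Assumes yelp_sentences.txt and shake_sentences.txt sit next to this module:
# bow = get_bag()                      # shape: (len(sents), vocab_length())
# vec = string_to_vec(sents[0])        # raw counts for one known sentence
# ids = get_small_bag()[0]             # word indices for the first sentence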
|
normal
|
{
"blob_id": "2317a2fff493588ad6cc3a4ac2b600fbf1c5583c",
"index": 8594,
"step-1": "<mask token>\n\n\ndef __line_into_col__(line):\n tokens = dl_style_transfer.workspace.data_helpers.clean_str(line).split(' '\n )\n for wor in tokens:\n if col.get(wor) is None:\n col[wor] = 1\n else:\n col[wor] = col[wor] + 1\n\n\n<mask token>\n\n\ndef vocab_length():\n return voc_len\n",
"step-2": "<mask token>\n\n\ndef __line_into_col__(line):\n tokens = dl_style_transfer.workspace.data_helpers.clean_str(line).split(' '\n )\n for wor in tokens:\n if col.get(wor) is None:\n col[wor] = 1\n else:\n col[wor] = col[wor] + 1\n\n\n<mask token>\n\n\ndef get_small_bag():\n bag = []\n for sent in sents:\n sbag = []\n for wor in dl_style_transfer.workspace.data_helpers.clean_str(sent\n ).split(' '):\n sbag.append(word_to_ind[wor])\n bag.append(sbag)\n return bag\n\n\n<mask token>\n\n\ndef get_ryans_strange_input():\n vec = []\n for l in sents:\n vec.append(dl_style_transfer.workspace.data_helpers.clean_str(l))\n return np.array([word_to_ind[i] for l in vec for i in l.split(' ')])\n\n\ndef vocab_length():\n return voc_len\n",
"step-3": "<mask token>\n\n\ndef __line_into_col__(line):\n tokens = dl_style_transfer.workspace.data_helpers.clean_str(line).split(' '\n )\n for wor in tokens:\n if col.get(wor) is None:\n col[wor] = 1\n else:\n col[wor] = col[wor] + 1\n\n\n<mask token>\n\n\ndef get_small_bag():\n bag = []\n for sent in sents:\n sbag = []\n for wor in dl_style_transfer.workspace.data_helpers.clean_str(sent\n ).split(' '):\n sbag.append(word_to_ind[wor])\n bag.append(sbag)\n return bag\n\n\ndef get_bag():\n bag = np.zeros(shape)\n for j, sent in enumerate(sents):\n for wor in dl_style_transfer.workspace.data_helpers.clean_str(sent\n ).split(' '):\n bag[j, word_to_ind[wor]] = bag[j, word_to_ind[wor]] + 1\n return np.log(1 + bag) / np.max(np.log(1 + bag), axis=1)\n\n\ndef string_to_vec(string):\n tokens = dl_style_transfer.workspace.data_helpers.clean_str(string).split(\n ' ')\n vec = np.zeros(voc_len)\n for wor in tokens:\n vec[word_to_ind[wor]] = vec[word_to_ind[wor]] + 1\n return vec\n\n\ndef get_ryans_strange_input():\n vec = []\n for l in sents:\n vec.append(dl_style_transfer.workspace.data_helpers.clean_str(l))\n return np.array([word_to_ind[i] for l in vec for i in l.split(' ')])\n\n\ndef vocab_length():\n return voc_len\n",
"step-4": "<mask token>\nhere = os.path.dirname(os.path.abspath(__file__))\nsents = list(open(os.path.join(here, 'yelp_sentences.txt'))) + list(open(os\n .path.join(here, 'shake_sentences.txt')))\nthresh = 5\ncol = dict()\nword_to_ind = dict()\nind_to_word = dict()\n\n\ndef __line_into_col__(line):\n tokens = dl_style_transfer.workspace.data_helpers.clean_str(line).split(' '\n )\n for wor in tokens:\n if col.get(wor) is None:\n col[wor] = 1\n else:\n col[wor] = col[wor] + 1\n\n\nfor l in sents:\n __line_into_col__(l)\nlis = list(col.items())\nlis.sort(key=lambda count: count[1], reverse=True)\nfor i, word in enumerate(lis):\n word_to_ind[word[0]] = i\n ind_to_word[i] = word[0]\nvoc_len = len(word_to_ind)\nshape = len(sents), voc_len\n\n\ndef get_small_bag():\n bag = []\n for sent in sents:\n sbag = []\n for wor in dl_style_transfer.workspace.data_helpers.clean_str(sent\n ).split(' '):\n sbag.append(word_to_ind[wor])\n bag.append(sbag)\n return bag\n\n\ndef get_bag():\n bag = np.zeros(shape)\n for j, sent in enumerate(sents):\n for wor in dl_style_transfer.workspace.data_helpers.clean_str(sent\n ).split(' '):\n bag[j, word_to_ind[wor]] = bag[j, word_to_ind[wor]] + 1\n return np.log(1 + bag) / np.max(np.log(1 + bag), axis=1)\n\n\ndef string_to_vec(string):\n tokens = dl_style_transfer.workspace.data_helpers.clean_str(string).split(\n ' ')\n vec = np.zeros(voc_len)\n for wor in tokens:\n vec[word_to_ind[wor]] = vec[word_to_ind[wor]] + 1\n return vec\n\n\ndef get_ryans_strange_input():\n vec = []\n for l in sents:\n vec.append(dl_style_transfer.workspace.data_helpers.clean_str(l))\n return np.array([word_to_ind[i] for l in vec for i in l.split(' ')])\n\n\ndef vocab_length():\n return voc_len\n",
"step-5": "import numpy as np\nimport dl_style_transfer.workspace.data_helpers\nimport os\n\n\nhere = os.path.dirname(os.path.abspath(__file__))\n\nsents = list(open(os.path.join(here, 'yelp_sentences.txt'))) + list(open(os.path.join(here, 'shake_sentences.txt')))\n\nthresh = 5\n\ncol = dict()\nword_to_ind = dict()\nind_to_word = dict()\n\n\ndef __line_into_col__(line):\n tokens = dl_style_transfer.workspace.data_helpers.clean_str(line).split(\" \")\n for wor in tokens:\n if col.get(wor) is None:\n col[wor] = 1\n else:\n col[wor] = col[wor] + 1\n\n\nfor l in sents:\n __line_into_col__(l)\n\nlis = list(col.items())\nlis.sort(key=lambda count: count[1], reverse=True)\nfor i, word in enumerate(lis):\n word_to_ind[word[0]] = i\n ind_to_word[i] = word[0]\n\nvoc_len = len(word_to_ind)\n\nshape = (len(sents), voc_len)\n\ndef get_small_bag():\n\tbag = []\n\tfor sent in sents:\n\t\tsbag =[]\n\t\tfor wor in dl_style_transfer.workspace.data_helpers.clean_str(sent).split(\" \"):\n\t\t\tsbag.append(word_to_ind[wor])\n\t\tbag.append(sbag)\n\treturn bag\n\t\t\t\n\t\t\t\n\ndef get_bag():\n bag = np.zeros(shape)\n for j,sent in enumerate(sents):\n for wor in dl_style_transfer.workspace.data_helpers.clean_str(sent).split(\" \"):\n bag[j, word_to_ind[wor]] = bag[j, word_to_ind[wor]] + 1\n return np.log(1 + bag) / np.max(np.log(1 + bag), axis=1)\n\n\ndef string_to_vec(string):\n tokens = dl_style_transfer.workspace.data_helpers.clean_str(string).split(\" \")\n vec = np.zeros(voc_len)\n for wor in tokens:\n vec[word_to_ind[wor]] = vec[word_to_ind[wor]] + 1\n return vec\n\n\ndef get_ryans_strange_input():\n vec = []\n for l in sents:\n vec.append(dl_style_transfer.workspace.data_helpers.clean_str(l))\n return np.array([word_to_ind[i] for l in vec for i in l.split(\" \")])\n\n\ndef vocab_length():\n return voc_len\n",
"step-ids": [
2,
4,
6,
8,
10
]
}
|
[
2,
4,
6,
8,
10
] |
def adder(x, y):
return x + y
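# "+" is polymorphic: the same function concatenates strings, concatenates lists, and adds numbers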
adder('one', 'two')
adder([3, 4], [9, 0, 33])
adder(4.3, 3.5)
|
normal
|
{
"blob_id": "1ee5139cb1613977f1c85619404b3dcc6e996382",
"index": 5364,
"step-1": "<mask token>\n",
"step-2": "def adder(x, y):\n return x + y\n\n\n<mask token>\n",
"step-3": "def adder(x, y):\n return x + y\n\n\nadder('one', 'two')\nadder([3, 4], [9, 0, 33])\nadder(4.3, 3.5)\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
""""""
import random
import nbformat
from textwrap import dedent
from pybryt.preprocessors import IntermediateVariablePreprocessor
def test_preprocessor():
"""
"""
nb = nbformat.v4.new_notebook()
nb.cells.append(nbformat.v4.new_code_cell(dedent("""\
a = True
b = False
f = lambda x: not x
g = f(a) + f(b)
if f(a) and f(b):
print("hi")
if f(a) or f(b):
print("hi")
if a or b:
print("bye")
l = [f(i) for i in [a, b]]
f = lambda x: [not i for i in l]
l = [a, b]
if all(f(l)):
print("ok")
else:
l = any(f(l))
""")))
ivp = IntermediateVariablePreprocessor()
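    # Fix the seed so the preprocessor's generated variable names (e.g. var_HBRPOI) are reproducible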
random.seed(42)
nb = ivp.preprocess(nb)
print(nb.cells[0].source)
assert len(nb.cells) == 1
assert nb.cells[0].source.strip() == dedent("""\
a = True
b = False
f = (lambda x: (not x))
var_HBRPOI = f(a)
var_G8F1CB = f(b)
g = (var_HBRPOI + var_G8F1CB)
var_FNO6B9 = f(a)
if (var_FNO6B9):
var_M80O2R = f(b)
if (var_FNO6B9 and var_M80O2R):
var_AK1VRJ = print('hi')
var_AK1VRJ
var_NVGFYG = f(a)
if (not (var_NVGFYG)):
var_WWQC38 = f(b)
if (var_NVGFYG or var_WWQC38):
var_HYF9SX = print('hi')
var_HYF9SX
if (a or b):
var_MECOSF = print('bye')
var_MECOSF
l = [f(i) for i in [a, b]]
f = (lambda x: [(not i) for i in l])
l = [a, b]
var_KXWNRE = f(l)
var_K8PK3Y = all(var_KXWNRE)
if var_K8PK3Y:
var_R9OUDO = print('ok')
var_R9OUDO
else:
var_CUZREN = f(l)
l = any(var_CUZREN)
""").strip()
|
normal
|
{
"blob_id": "d9f08e770dacaa86a03d553afd78fdcd725efb62",
"index": 5204,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef test_preprocessor():\n \"\"\"\n \"\"\"\n nb = nbformat.v4.new_notebook()\n nb.cells.append(nbformat.v4.new_code_cell(dedent(\n \"\"\" a = True\n b = False\n f = lambda x: not x\n\n g = f(a) + f(b)\n\n if f(a) and f(b):\n print(\"hi\")\n\n if f(a) or f(b):\n print(\"hi\")\n \n if a or b:\n print(\"bye\")\n\n l = [f(i) for i in [a, b]]\n\n f = lambda x: [not i for i in l]\n l = [a, b]\n if all(f(l)):\n print(\"ok\")\n else:\n l = any(f(l))\n \"\"\"\n )))\n ivp = IntermediateVariablePreprocessor()\n random.seed(42)\n nb = ivp.preprocess(nb)\n print(nb.cells[0].source)\n assert len(nb.cells) == 1\n assert nb.cells[0].source.strip() == dedent(\n \"\"\" a = True\n b = False\n f = (lambda x: (not x))\n var_HBRPOI = f(a)\n var_G8F1CB = f(b)\n g = (var_HBRPOI + var_G8F1CB)\n var_FNO6B9 = f(a)\n if (var_FNO6B9):\n var_M80O2R = f(b)\n if (var_FNO6B9 and var_M80O2R):\n var_AK1VRJ = print('hi')\n var_AK1VRJ\n var_NVGFYG = f(a)\n if (not (var_NVGFYG)):\n var_WWQC38 = f(b)\n if (var_NVGFYG or var_WWQC38):\n var_HYF9SX = print('hi')\n var_HYF9SX\n if (a or b):\n var_MECOSF = print('bye')\n var_MECOSF\n l = [f(i) for i in [a, b]]\n f = (lambda x: [(not i) for i in l])\n l = [a, b]\n var_KXWNRE = f(l)\n var_K8PK3Y = all(var_KXWNRE)\n if var_K8PK3Y:\n var_R9OUDO = print('ok')\n var_R9OUDO\n else:\n var_CUZREN = f(l)\n l = any(var_CUZREN)\n \"\"\"\n ).strip()\n",
"step-3": "<mask token>\nimport random\nimport nbformat\nfrom textwrap import dedent\nfrom pybryt.preprocessors import IntermediateVariablePreprocessor\n\n\ndef test_preprocessor():\n \"\"\"\n \"\"\"\n nb = nbformat.v4.new_notebook()\n nb.cells.append(nbformat.v4.new_code_cell(dedent(\n \"\"\" a = True\n b = False\n f = lambda x: not x\n\n g = f(a) + f(b)\n\n if f(a) and f(b):\n print(\"hi\")\n\n if f(a) or f(b):\n print(\"hi\")\n \n if a or b:\n print(\"bye\")\n\n l = [f(i) for i in [a, b]]\n\n f = lambda x: [not i for i in l]\n l = [a, b]\n if all(f(l)):\n print(\"ok\")\n else:\n l = any(f(l))\n \"\"\"\n )))\n ivp = IntermediateVariablePreprocessor()\n random.seed(42)\n nb = ivp.preprocess(nb)\n print(nb.cells[0].source)\n assert len(nb.cells) == 1\n assert nb.cells[0].source.strip() == dedent(\n \"\"\" a = True\n b = False\n f = (lambda x: (not x))\n var_HBRPOI = f(a)\n var_G8F1CB = f(b)\n g = (var_HBRPOI + var_G8F1CB)\n var_FNO6B9 = f(a)\n if (var_FNO6B9):\n var_M80O2R = f(b)\n if (var_FNO6B9 and var_M80O2R):\n var_AK1VRJ = print('hi')\n var_AK1VRJ\n var_NVGFYG = f(a)\n if (not (var_NVGFYG)):\n var_WWQC38 = f(b)\n if (var_NVGFYG or var_WWQC38):\n var_HYF9SX = print('hi')\n var_HYF9SX\n if (a or b):\n var_MECOSF = print('bye')\n var_MECOSF\n l = [f(i) for i in [a, b]]\n f = (lambda x: [(not i) for i in l])\n l = [a, b]\n var_KXWNRE = f(l)\n var_K8PK3Y = all(var_KXWNRE)\n if var_K8PK3Y:\n var_R9OUDO = print('ok')\n var_R9OUDO\n else:\n var_CUZREN = f(l)\n l = any(var_CUZREN)\n \"\"\"\n ).strip()\n",
"step-4": "\"\"\"\"\"\"\n\nimport random\nimport nbformat\n\nfrom textwrap import dedent\n\nfrom pybryt.preprocessors import IntermediateVariablePreprocessor\n\n\ndef test_preprocessor():\n \"\"\"\n \"\"\"\n nb = nbformat.v4.new_notebook()\n nb.cells.append(nbformat.v4.new_code_cell(dedent(\"\"\"\\\n a = True\n b = False\n f = lambda x: not x\n\n g = f(a) + f(b)\n\n if f(a) and f(b):\n print(\"hi\")\n\n if f(a) or f(b):\n print(\"hi\")\n \n if a or b:\n print(\"bye\")\n\n l = [f(i) for i in [a, b]]\n\n f = lambda x: [not i for i in l]\n l = [a, b]\n if all(f(l)):\n print(\"ok\")\n else:\n l = any(f(l))\n \"\"\")))\n\n ivp = IntermediateVariablePreprocessor()\n\n random.seed(42)\n nb = ivp.preprocess(nb)\n print(nb.cells[0].source)\n assert len(nb.cells) == 1\n assert nb.cells[0].source.strip() == dedent(\"\"\"\\\n a = True\n b = False\n f = (lambda x: (not x))\n var_HBRPOI = f(a)\n var_G8F1CB = f(b)\n g = (var_HBRPOI + var_G8F1CB)\n var_FNO6B9 = f(a)\n if (var_FNO6B9):\n var_M80O2R = f(b)\n if (var_FNO6B9 and var_M80O2R):\n var_AK1VRJ = print('hi')\n var_AK1VRJ\n var_NVGFYG = f(a)\n if (not (var_NVGFYG)):\n var_WWQC38 = f(b)\n if (var_NVGFYG or var_WWQC38):\n var_HYF9SX = print('hi')\n var_HYF9SX\n if (a or b):\n var_MECOSF = print('bye')\n var_MECOSF\n l = [f(i) for i in [a, b]]\n f = (lambda x: [(not i) for i in l])\n l = [a, b]\n var_KXWNRE = f(l)\n var_K8PK3Y = all(var_KXWNRE)\n if var_K8PK3Y:\n var_R9OUDO = print('ok')\n var_R9OUDO\n else:\n var_CUZREN = f(l)\n l = any(var_CUZREN)\n \"\"\").strip()\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
# Copyright (C) 2014 Abhay Vardhan. All Rights Reserved.
"""
Author: [email protected]
We have not yet added tests which exercise the HTTP GET directly.
"""
__author__ = 'abhay'
from nose.tools import *
import test_data
import search_index
class TestClass:
def setUp(self):
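        # Rebuild the search index from the sample fixture before each test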
search_index.buildIndex(test_data.sample_food_trucks_data)
def tearDown(self):
pass
def test_case_query_index(self):
assert_equals(search_index.query_index, test_data.sample_query_index)
def test_case_lat_index(self):
assert_equals(search_index.sorted_latitudes, test_data.sample_latitude_index)
def test_case_lng_index(self):
assert_equals(search_index.sorted_longitudes, test_data.sample_longitude_index)
def test_case_search_query(self):
assert_equals(search_index.searchQuery('cold'), set([2, 3]))
def test_case_search_query_case(self):
assert_equals(search_index.searchQuery('Cold'), set([2, 3]))
def test_case_search_find_le(self):
assert_equals(search_index.find_le([10, 20, 30, 40], 20), 1)
assert_equals(search_index.find_le([10, 20, 30, 40], 20.1), 1)
assert_equals(search_index.find_le([10, 20, 30, 40], 30), 2)
def test_case_search_find_ge(self):
assert_equals(search_index.find_ge([10, 20, 30, 40], 20), 1)
assert_equals(search_index.find_ge([10, 20, 30, 40], 30), 2)
assert_equals(search_index.find_ge([10, 20, 30, 40], 20.1), 2)
def test_case_search_lat(self):
assert_equals(search_index.find_array_range_matching([10, 20, 30, 40], 20, 30), set([1, 2]))
assert_equals(search_index.find_array_range_matching([10, 20, 30, 40], 19, 35), set([1, 2]))
assert_equals(search_index.find_array_range_matching([10, 20, 30, 40], 9, 50), set([0, 1, 2, 3]))
def test_case_search1(self):
all_objectids = [x['objectid'] for x in search_index.all_results]
results = search_index.search('', 37.7860914634251, -122.398658184604, 37.7901490737255, -122.3934729318)
assert_equals([x['objectid'] for x in results],
all_objectids)
def test_case_search2(self):
all_objectids = [x['objectid'] for x in search_index.all_results[0:3]]
results = search_index.search('', 37.7879000978181, -122.398658184604, 37.7901490737255, -122.394594036205)
assert_equals([x['objectid'] for x in results],
all_objectids)
def test_case_search3(self):
all_objectids = [x['objectid'] for x in search_index.all_results[0:3]]
results = search_index.search('', 37.787, -122.398658184604, 37.7901490737255, -122.394)
assert_equals([x['objectid'] for x in results],
all_objectids)
def test_case_search4(self):
all_objectids = [x['objectid'] for x in search_index.all_results[2:4]]
results = search_index.search('cold', 37.7860914634251, -122.398658184604, 37.7901490737255, -122.3934729318)
assert_equals([x['objectid'] for x in results],
all_objectids)
def test_case_search5(self):
all_objectids = [x['objectid'] for x in search_index.all_results[1:2]]
results = search_index.search('cheese', 37.7860914634251, -122.398658184604, 37.7901490737255, -122.3934729318)
assert_equals([x['objectid'] for x in results],
all_objectids)
|
normal
|
{
"blob_id": "a9c0251b3422457b2c0089b70308a70b09cfa0e0",
"index": 7276,
"step-1": "<mask token>\n\n\nclass TestClass:\n\n def setUp(self):\n search_index.buildIndex(test_data.sample_food_trucks_data)\n\n def tearDown(self):\n pass\n\n def test_case_query_index(self):\n assert_equals(search_index.query_index, test_data.sample_query_index)\n <mask token>\n\n def test_case_lng_index(self):\n assert_equals(search_index.sorted_longitudes, test_data.\n sample_longitude_index)\n\n def test_case_search_query(self):\n assert_equals(search_index.searchQuery('cold'), set([2, 3]))\n\n def test_case_search_query_case(self):\n assert_equals(search_index.searchQuery('Cold'), set([2, 3]))\n\n def test_case_search_find_le(self):\n assert_equals(search_index.find_le([10, 20, 30, 40], 20), 1)\n assert_equals(search_index.find_le([10, 20, 30, 40], 20.1), 1)\n assert_equals(search_index.find_le([10, 20, 30, 40], 30), 2)\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def test_case_search3(self):\n all_objectids = [x['objectid'] for x in search_index.all_results[0:3]]\n results = search_index.search('', 37.787, -122.398658184604, \n 37.7901490737255, -122.394)\n assert_equals([x['objectid'] for x in results], all_objectids)\n\n def test_case_search4(self):\n all_objectids = [x['objectid'] for x in search_index.all_results[2:4]]\n results = search_index.search('cold', 37.7860914634251, -\n 122.398658184604, 37.7901490737255, -122.3934729318)\n assert_equals([x['objectid'] for x in results], all_objectids)\n\n def test_case_search5(self):\n all_objectids = [x['objectid'] for x in search_index.all_results[1:2]]\n results = search_index.search('cheese', 37.7860914634251, -\n 122.398658184604, 37.7901490737255, -122.3934729318)\n assert_equals([x['objectid'] for x in results], all_objectids)\n",
"step-2": "<mask token>\n\n\nclass TestClass:\n\n def setUp(self):\n search_index.buildIndex(test_data.sample_food_trucks_data)\n\n def tearDown(self):\n pass\n\n def test_case_query_index(self):\n assert_equals(search_index.query_index, test_data.sample_query_index)\n\n def test_case_lat_index(self):\n assert_equals(search_index.sorted_latitudes, test_data.\n sample_latitude_index)\n\n def test_case_lng_index(self):\n assert_equals(search_index.sorted_longitudes, test_data.\n sample_longitude_index)\n\n def test_case_search_query(self):\n assert_equals(search_index.searchQuery('cold'), set([2, 3]))\n\n def test_case_search_query_case(self):\n assert_equals(search_index.searchQuery('Cold'), set([2, 3]))\n\n def test_case_search_find_le(self):\n assert_equals(search_index.find_le([10, 20, 30, 40], 20), 1)\n assert_equals(search_index.find_le([10, 20, 30, 40], 20.1), 1)\n assert_equals(search_index.find_le([10, 20, 30, 40], 30), 2)\n <mask token>\n <mask token>\n\n def test_case_search1(self):\n all_objectids = [x['objectid'] for x in search_index.all_results]\n results = search_index.search('', 37.7860914634251, -\n 122.398658184604, 37.7901490737255, -122.3934729318)\n assert_equals([x['objectid'] for x in results], all_objectids)\n <mask token>\n\n def test_case_search3(self):\n all_objectids = [x['objectid'] for x in search_index.all_results[0:3]]\n results = search_index.search('', 37.787, -122.398658184604, \n 37.7901490737255, -122.394)\n assert_equals([x['objectid'] for x in results], all_objectids)\n\n def test_case_search4(self):\n all_objectids = [x['objectid'] for x in search_index.all_results[2:4]]\n results = search_index.search('cold', 37.7860914634251, -\n 122.398658184604, 37.7901490737255, -122.3934729318)\n assert_equals([x['objectid'] for x in results], all_objectids)\n\n def test_case_search5(self):\n all_objectids = [x['objectid'] for x in search_index.all_results[1:2]]\n results = search_index.search('cheese', 37.7860914634251, -\n 122.398658184604, 37.7901490737255, -122.3934729318)\n assert_equals([x['objectid'] for x in results], all_objectids)\n",
"step-3": "<mask token>\n\n\nclass TestClass:\n\n def setUp(self):\n search_index.buildIndex(test_data.sample_food_trucks_data)\n\n def tearDown(self):\n pass\n\n def test_case_query_index(self):\n assert_equals(search_index.query_index, test_data.sample_query_index)\n\n def test_case_lat_index(self):\n assert_equals(search_index.sorted_latitudes, test_data.\n sample_latitude_index)\n\n def test_case_lng_index(self):\n assert_equals(search_index.sorted_longitudes, test_data.\n sample_longitude_index)\n\n def test_case_search_query(self):\n assert_equals(search_index.searchQuery('cold'), set([2, 3]))\n\n def test_case_search_query_case(self):\n assert_equals(search_index.searchQuery('Cold'), set([2, 3]))\n\n def test_case_search_find_le(self):\n assert_equals(search_index.find_le([10, 20, 30, 40], 20), 1)\n assert_equals(search_index.find_le([10, 20, 30, 40], 20.1), 1)\n assert_equals(search_index.find_le([10, 20, 30, 40], 30), 2)\n\n def test_case_search_find_ge(self):\n assert_equals(search_index.find_ge([10, 20, 30, 40], 20), 1)\n assert_equals(search_index.find_ge([10, 20, 30, 40], 30), 2)\n assert_equals(search_index.find_ge([10, 20, 30, 40], 20.1), 2)\n <mask token>\n\n def test_case_search1(self):\n all_objectids = [x['objectid'] for x in search_index.all_results]\n results = search_index.search('', 37.7860914634251, -\n 122.398658184604, 37.7901490737255, -122.3934729318)\n assert_equals([x['objectid'] for x in results], all_objectids)\n <mask token>\n\n def test_case_search3(self):\n all_objectids = [x['objectid'] for x in search_index.all_results[0:3]]\n results = search_index.search('', 37.787, -122.398658184604, \n 37.7901490737255, -122.394)\n assert_equals([x['objectid'] for x in results], all_objectids)\n\n def test_case_search4(self):\n all_objectids = [x['objectid'] for x in search_index.all_results[2:4]]\n results = search_index.search('cold', 37.7860914634251, -\n 122.398658184604, 37.7901490737255, -122.3934729318)\n assert_equals([x['objectid'] for x in results], all_objectids)\n\n def test_case_search5(self):\n all_objectids = [x['objectid'] for x in search_index.all_results[1:2]]\n results = search_index.search('cheese', 37.7860914634251, -\n 122.398658184604, 37.7901490737255, -122.3934729318)\n assert_equals([x['objectid'] for x in results], all_objectids)\n",
"step-4": "<mask token>\n__author__ = 'abhay'\nfrom nose.tools import *\nimport test_data\nimport search_index\n\n\nclass TestClass:\n\n def setUp(self):\n search_index.buildIndex(test_data.sample_food_trucks_data)\n\n def tearDown(self):\n pass\n\n def test_case_query_index(self):\n assert_equals(search_index.query_index, test_data.sample_query_index)\n\n def test_case_lat_index(self):\n assert_equals(search_index.sorted_latitudes, test_data.\n sample_latitude_index)\n\n def test_case_lng_index(self):\n assert_equals(search_index.sorted_longitudes, test_data.\n sample_longitude_index)\n\n def test_case_search_query(self):\n assert_equals(search_index.searchQuery('cold'), set([2, 3]))\n\n def test_case_search_query_case(self):\n assert_equals(search_index.searchQuery('Cold'), set([2, 3]))\n\n def test_case_search_find_le(self):\n assert_equals(search_index.find_le([10, 20, 30, 40], 20), 1)\n assert_equals(search_index.find_le([10, 20, 30, 40], 20.1), 1)\n assert_equals(search_index.find_le([10, 20, 30, 40], 30), 2)\n\n def test_case_search_find_ge(self):\n assert_equals(search_index.find_ge([10, 20, 30, 40], 20), 1)\n assert_equals(search_index.find_ge([10, 20, 30, 40], 30), 2)\n assert_equals(search_index.find_ge([10, 20, 30, 40], 20.1), 2)\n\n def test_case_search_lat(self):\n assert_equals(search_index.find_array_range_matching([10, 20, 30, \n 40], 20, 30), set([1, 2]))\n assert_equals(search_index.find_array_range_matching([10, 20, 30, \n 40], 19, 35), set([1, 2]))\n assert_equals(search_index.find_array_range_matching([10, 20, 30, \n 40], 9, 50), set([0, 1, 2, 3]))\n\n def test_case_search1(self):\n all_objectids = [x['objectid'] for x in search_index.all_results]\n results = search_index.search('', 37.7860914634251, -\n 122.398658184604, 37.7901490737255, -122.3934729318)\n assert_equals([x['objectid'] for x in results], all_objectids)\n\n def test_case_search2(self):\n all_objectids = [x['objectid'] for x in search_index.all_results[0:3]]\n results = search_index.search('', 37.7879000978181, -\n 122.398658184604, 37.7901490737255, -122.394594036205)\n assert_equals([x['objectid'] for x in results], all_objectids)\n\n def test_case_search3(self):\n all_objectids = [x['objectid'] for x in search_index.all_results[0:3]]\n results = search_index.search('', 37.787, -122.398658184604, \n 37.7901490737255, -122.394)\n assert_equals([x['objectid'] for x in results], all_objectids)\n\n def test_case_search4(self):\n all_objectids = [x['objectid'] for x in search_index.all_results[2:4]]\n results = search_index.search('cold', 37.7860914634251, -\n 122.398658184604, 37.7901490737255, -122.3934729318)\n assert_equals([x['objectid'] for x in results], all_objectids)\n\n def test_case_search5(self):\n all_objectids = [x['objectid'] for x in search_index.all_results[1:2]]\n results = search_index.search('cheese', 37.7860914634251, -\n 122.398658184604, 37.7901490737255, -122.3934729318)\n assert_equals([x['objectid'] for x in results], all_objectids)\n",
"step-5": "# Copyright (C) 2014 Abhay Vardhan. All Rights Reserved.\n\"\"\"\nAuthor: [email protected]\n\nWe have not yet added tests which exercise the HTTP GET directly.\n\"\"\"\n__author__ = 'abhay'\n\nfrom nose.tools import *\n\nimport test_data\nimport search_index\n\nclass TestClass:\n def setUp(self):\n search_index.buildIndex(test_data.sample_food_trucks_data)\n\n def tearDown(self):\n pass\n\n def test_case_query_index(self):\n assert_equals(search_index.query_index, test_data.sample_query_index)\n\n def test_case_lat_index(self):\n assert_equals(search_index.sorted_latitudes, test_data.sample_latitude_index)\n\n def test_case_lng_index(self):\n assert_equals(search_index.sorted_longitudes, test_data.sample_longitude_index)\n\n def test_case_search_query(self):\n assert_equals(search_index.searchQuery('cold'), set([2, 3]))\n\n def test_case_search_query_case(self):\n assert_equals(search_index.searchQuery('Cold'), set([2, 3]))\n\n def test_case_search_find_le(self):\n assert_equals(search_index.find_le([10, 20, 30, 40], 20), 1)\n assert_equals(search_index.find_le([10, 20, 30, 40], 20.1), 1)\n assert_equals(search_index.find_le([10, 20, 30, 40], 30), 2)\n\n def test_case_search_find_ge(self):\n assert_equals(search_index.find_ge([10, 20, 30, 40], 20), 1)\n assert_equals(search_index.find_ge([10, 20, 30, 40], 30), 2)\n assert_equals(search_index.find_ge([10, 20, 30, 40], 20.1), 2)\n\n def test_case_search_lat(self):\n assert_equals(search_index.find_array_range_matching([10, 20, 30, 40], 20, 30), set([1, 2]))\n assert_equals(search_index.find_array_range_matching([10, 20, 30, 40], 19, 35), set([1, 2]))\n assert_equals(search_index.find_array_range_matching([10, 20, 30, 40], 9, 50), set([0, 1, 2, 3]))\n\n def test_case_search1(self):\n all_objectids = [x['objectid'] for x in search_index.all_results]\n results = search_index.search('', 37.7860914634251, -122.398658184604, 37.7901490737255, -122.3934729318)\n assert_equals([x['objectid'] for x in results],\n all_objectids)\n\n def test_case_search2(self):\n all_objectids = [x['objectid'] for x in search_index.all_results[0:3]]\n results = search_index.search('', 37.7879000978181, -122.398658184604, 37.7901490737255, -122.394594036205)\n assert_equals([x['objectid'] for x in results],\n all_objectids)\n\n def test_case_search3(self):\n all_objectids = [x['objectid'] for x in search_index.all_results[0:3]]\n results = search_index.search('', 37.787, -122.398658184604, 37.7901490737255, -122.394)\n assert_equals([x['objectid'] for x in results],\n all_objectids)\n\n def test_case_search4(self):\n all_objectids = [x['objectid'] for x in search_index.all_results[2:4]]\n results = search_index.search('cold', 37.7860914634251, -122.398658184604, 37.7901490737255, -122.3934729318)\n assert_equals([x['objectid'] for x in results],\n all_objectids)\n\n def test_case_search5(self):\n all_objectids = [x['objectid'] for x in search_index.all_results[1:2]]\n results = search_index.search('cheese', 37.7860914634251, -122.398658184604, 37.7901490737255, -122.3934729318)\n assert_equals([x['objectid'] for x in results],\n all_objectids)\n\n\n",
"step-ids": [
11,
13,
14,
18,
19
]
}
|
[
11,
13,
14,
18,
19
] |
# -*- coding: utf-8 -*-
import base64
import logging
from decimal import Decimal
import requests
from django import forms
from django.conf import settings
from django.utils.translation import ugettext_lazy as _
from currencies.currencies import decimal_round
from payments.systems import base
from payments.systems.bankusd import display_amount_usd
from payments.systems.base import CommissionCalculationResult
name = _("Neteller")
logo = "neteller.png"
slug = __name__.rsplit(".", 1)[-1]
currencies = ["USD"]
mt4_payment_slug = "NETELLER"
transfer_details = {
"deposit": {
"fee": "3.5% min $1",
"time": _("Within day"),
"min_amount": display_amount_usd(10),
},
"withdraw": {
"fee": _("2.5% min $1 max $30"),
"time": _("Within day"),
"min_amount": display_amount_usd(10),
}
}
templates = {
"deposit": "payments/forms/deposit/neteller.html",
"withdraw": "payments/forms/withdraw/electronic.html",
}
log = logging.getLogger(__name__)
class DepositForm(base.DepositForm):
purse = forms.CharField(max_length=100, label=_("Net account"),
help_text=_("Your Neteller's 12-digit Account ID or email address that is "
"associated with their NETELLER account"))
secure_id = forms.IntegerField(label=_("Secure ID"), help_text=_("Your Neteller's 6-digit Secure ID"))
bill_address = "https://api.neteller.com/v1/transferIn"
get_token_url = "https://api.neteller.com/v1/oauth2/token?grant_type=client_credentials"
commission_rate = Decimal("0.035")
MIN_AMOUNT = (10, 'USD')
@classmethod
def is_automatic(cls, instance):
return True
def get_neteller_token(self):
"""
:return: tuple. ('accessToken', 'Auth method'). Example: ("0.AQAAAU3in", "Bearer")
or None if can't get token.
"""
headers = {'Content-Type': 'application/json',
'Cache-Control': 'no-cache',
'Authorization': 'Basic ' + base64.b64encode(
settings.NETELLER_MERCHANT_ID + ':' + settings.NETELLER_SECRET_KEY)}
result = requests.post(self.get_token_url, headers = headers)
if result.status_code == 200:
result = result.json()
else:
return None
if result.get("accessToken"):
return result.get("accessToken"), result.get("tokenType")
else:
return None
def make_request(self):
import json
currency = {
"RUR": "RUB"
}.get(self.instance.currency, self.instance.currency)
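        # Convert the rounded amount to an integer count of minor currency units (cents)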
amount = int(decimal_round(self.instance.amount) * 100)
token_tuple = self.get_neteller_token()
if not token_tuple:
return "Can't get the token."
data = {
"paymentMethod": {
"type": "neteller",
"value": self.instance.purse
},
"transaction": {
"merchantRefId": unicode(self.instance.pk),
"amount": amount,
"currency": currency
},
"verificationCode": unicode(self.instance.params["secure_id"]),
}
headers = {'Content-Type': 'application/json', 'Authorization': token_tuple[1] + " " + token_tuple[0]}
request = requests.post(self.bill_address, data=json.dumps(data), headers=headers)
request = request.json()
if request.get("transaction") and request.get("transaction").get("status") == "accepted":
self.instance.refresh_state()
self.instance.is_payed = True
self.instance.params["transaction"] = request.get("transaction").get("id")
self.instance.save()
return None
else:
error_message = request.get("error").get("message") if request.get("error") else \
"Automatic payment failed."
self.instance.is_committed = False
self.instance.is_payed = False
self.instance.public_comment = error_message
self.instance.save()
return error_message
@classmethod
def generate_mt4_comment(cls, payment_request):
return "{NETELLER}[%s]" % payment_request.pk
def clean(self):
from platforms.converter import convert_currency
amount = self.cleaned_data["amount"]
currency = self.cleaned_data["currency"]
return super(DepositForm, self).clean()
def confirmed_response_data(self, request):
error = self.make_request()
if error:
return {'detail': "Error: %s" % error}, 400
else:
return {"success": True}, None
@classmethod
def _calculate_commission(cls, request, full_commission=False):
commission = request.amount * cls.commission_rate
min_comm = Decimal("1")
commission = max(min_comm, commission)
return CommissionCalculationResult(
amount=request.amount,
commission=commission,
currency=request.currency
)
class DetailsForm(base.DetailsForm):
def __init__(self, *args, **kwargs):
super(DetailsForm, self).__init__(*args, **kwargs)
self.fields["purse"].label = _("Net account")
self.fields["purse"].help_text = _("Your Neteller's 12-digit Account ID or email address that is "
"associated with their NETELLER account")
class WithdrawForm(base.WithdrawForm):
MIN_AMOUNT = (10, 'USD')
commission_rate = Decimal("0.025")
@classmethod
def _calculate_commission(cls, request, full_commission=False):
commission = request.amount * cls.commission_rate
min_comm = Decimal("1")
max_comm = Decimal("30")
commission = min(max_comm, max(min_comm, commission))
return CommissionCalculationResult(
amount=request.amount,
commission=commission,
currency=request.currency
)
|
normal
|
{
"blob_id": "15c1db535beb115c45aeba433a946255f70fa86e",
"index": 7845,
"step-1": "<mask token>\n\n\nclass DepositForm(base.DepositForm):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n @classmethod\n def is_automatic(cls, instance):\n return True\n <mask token>\n <mask token>\n <mask token>\n\n def clean(self):\n from platforms.converter import convert_currency\n amount = self.cleaned_data['amount']\n currency = self.cleaned_data['currency']\n return super(DepositForm, self).clean()\n <mask token>\n\n @classmethod\n def _calculate_commission(cls, request, full_commission=False):\n commission = request.amount * cls.commission_rate\n min_comm = Decimal('1')\n commission = max(min_comm, commission)\n return CommissionCalculationResult(amount=request.amount,\n commission=commission, currency=request.currency)\n\n\nclass DetailsForm(base.DetailsForm):\n\n def __init__(self, *args, **kwargs):\n super(DetailsForm, self).__init__(*args, **kwargs)\n self.fields['purse'].label = _('Net account')\n self.fields['purse'].help_text = _(\n \"Your Neteller's 12-digit Account ID or email address that is associated with their NETELLER account\"\n )\n\n\nclass WithdrawForm(base.WithdrawForm):\n MIN_AMOUNT = 10, 'USD'\n commission_rate = Decimal('0.025')\n\n @classmethod\n def _calculate_commission(cls, request, full_commission=False):\n commission = request.amount * cls.commission_rate\n min_comm = Decimal('1')\n max_comm = Decimal('30')\n commission = min(max_comm, max(min_comm, commission))\n return CommissionCalculationResult(amount=request.amount,\n commission=commission, currency=request.currency)\n",
"step-2": "<mask token>\n\n\nclass DepositForm(base.DepositForm):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n @classmethod\n def is_automatic(cls, instance):\n return True\n <mask token>\n\n def make_request(self):\n import json\n currency = {'RUR': 'RUB'}.get(self.instance.currency, self.instance\n .currency)\n amount = int(decimal_round(self.instance.amount) * 100)\n token_tuple = self.get_neteller_token()\n if not token_tuple:\n return \"Can't get the token.\"\n data = {'paymentMethod': {'type': 'neteller', 'value': self.\n instance.purse}, 'transaction': {'merchantRefId': unicode(self.\n instance.pk), 'amount': amount, 'currency': currency},\n 'verificationCode': unicode(self.instance.params['secure_id'])}\n headers = {'Content-Type': 'application/json', 'Authorization': \n token_tuple[1] + ' ' + token_tuple[0]}\n request = requests.post(self.bill_address, data=json.dumps(data),\n headers=headers)\n request = request.json()\n if request.get('transaction') and request.get('transaction').get(\n 'status') == 'accepted':\n self.instance.refresh_state()\n self.instance.is_payed = True\n self.instance.params['transaction'] = request.get('transaction'\n ).get('id')\n self.instance.save()\n return None\n else:\n error_message = request.get('error').get('message') if request.get(\n 'error') else 'Automatic payment failed.'\n self.instance.is_committed = False\n self.instance.is_payed = False\n self.instance.public_comment = error_message\n self.instance.save()\n return error_message\n <mask token>\n\n def clean(self):\n from platforms.converter import convert_currency\n amount = self.cleaned_data['amount']\n currency = self.cleaned_data['currency']\n return super(DepositForm, self).clean()\n <mask token>\n\n @classmethod\n def _calculate_commission(cls, request, full_commission=False):\n commission = request.amount * cls.commission_rate\n min_comm = Decimal('1')\n commission = max(min_comm, commission)\n return CommissionCalculationResult(amount=request.amount,\n commission=commission, currency=request.currency)\n\n\nclass DetailsForm(base.DetailsForm):\n\n def __init__(self, *args, **kwargs):\n super(DetailsForm, self).__init__(*args, **kwargs)\n self.fields['purse'].label = _('Net account')\n self.fields['purse'].help_text = _(\n \"Your Neteller's 12-digit Account ID or email address that is associated with their NETELLER account\"\n )\n\n\nclass WithdrawForm(base.WithdrawForm):\n MIN_AMOUNT = 10, 'USD'\n commission_rate = Decimal('0.025')\n\n @classmethod\n def _calculate_commission(cls, request, full_commission=False):\n commission = request.amount * cls.commission_rate\n min_comm = Decimal('1')\n max_comm = Decimal('30')\n commission = min(max_comm, max(min_comm, commission))\n return CommissionCalculationResult(amount=request.amount,\n commission=commission, currency=request.currency)\n",
"step-3": "<mask token>\n\n\nclass DepositForm(base.DepositForm):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n @classmethod\n def is_automatic(cls, instance):\n return True\n <mask token>\n\n def make_request(self):\n import json\n currency = {'RUR': 'RUB'}.get(self.instance.currency, self.instance\n .currency)\n amount = int(decimal_round(self.instance.amount) * 100)\n token_tuple = self.get_neteller_token()\n if not token_tuple:\n return \"Can't get the token.\"\n data = {'paymentMethod': {'type': 'neteller', 'value': self.\n instance.purse}, 'transaction': {'merchantRefId': unicode(self.\n instance.pk), 'amount': amount, 'currency': currency},\n 'verificationCode': unicode(self.instance.params['secure_id'])}\n headers = {'Content-Type': 'application/json', 'Authorization': \n token_tuple[1] + ' ' + token_tuple[0]}\n request = requests.post(self.bill_address, data=json.dumps(data),\n headers=headers)\n request = request.json()\n if request.get('transaction') and request.get('transaction').get(\n 'status') == 'accepted':\n self.instance.refresh_state()\n self.instance.is_payed = True\n self.instance.params['transaction'] = request.get('transaction'\n ).get('id')\n self.instance.save()\n return None\n else:\n error_message = request.get('error').get('message') if request.get(\n 'error') else 'Automatic payment failed.'\n self.instance.is_committed = False\n self.instance.is_payed = False\n self.instance.public_comment = error_message\n self.instance.save()\n return error_message\n\n @classmethod\n def generate_mt4_comment(cls, payment_request):\n return '{NETELLER}[%s]' % payment_request.pk\n\n def clean(self):\n from platforms.converter import convert_currency\n amount = self.cleaned_data['amount']\n currency = self.cleaned_data['currency']\n return super(DepositForm, self).clean()\n <mask token>\n\n @classmethod\n def _calculate_commission(cls, request, full_commission=False):\n commission = request.amount * cls.commission_rate\n min_comm = Decimal('1')\n commission = max(min_comm, commission)\n return CommissionCalculationResult(amount=request.amount,\n commission=commission, currency=request.currency)\n\n\nclass DetailsForm(base.DetailsForm):\n\n def __init__(self, *args, **kwargs):\n super(DetailsForm, self).__init__(*args, **kwargs)\n self.fields['purse'].label = _('Net account')\n self.fields['purse'].help_text = _(\n \"Your Neteller's 12-digit Account ID or email address that is associated with their NETELLER account\"\n )\n\n\nclass WithdrawForm(base.WithdrawForm):\n MIN_AMOUNT = 10, 'USD'\n commission_rate = Decimal('0.025')\n\n @classmethod\n def _calculate_commission(cls, request, full_commission=False):\n commission = request.amount * cls.commission_rate\n min_comm = Decimal('1')\n max_comm = Decimal('30')\n commission = min(max_comm, max(min_comm, commission))\n return CommissionCalculationResult(amount=request.amount,\n commission=commission, currency=request.currency)\n",
"step-4": "<mask token>\n\n\nclass DepositForm(base.DepositForm):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n @classmethod\n def is_automatic(cls, instance):\n return True\n\n def get_neteller_token(self):\n \"\"\"\n :return: tuple. ('accessToken', 'Auth method'). Example: (\"0.AQAAAU3in\", \"Bearer\")\n or None if can't get token.\n \"\"\"\n headers = {'Content-Type': 'application/json', 'Cache-Control':\n 'no-cache', 'Authorization': 'Basic ' + base64.b64encode(\n settings.NETELLER_MERCHANT_ID + ':' + settings.NETELLER_SECRET_KEY)\n }\n result = requests.post(self.get_token_url, headers=headers)\n if result.status_code == 200:\n result = result.json()\n else:\n return None\n if result.get('accessToken'):\n return result.get('accessToken'), result.get('tokenType')\n else:\n return None\n\n def make_request(self):\n import json\n currency = {'RUR': 'RUB'}.get(self.instance.currency, self.instance\n .currency)\n amount = int(decimal_round(self.instance.amount) * 100)\n token_tuple = self.get_neteller_token()\n if not token_tuple:\n return \"Can't get the token.\"\n data = {'paymentMethod': {'type': 'neteller', 'value': self.\n instance.purse}, 'transaction': {'merchantRefId': unicode(self.\n instance.pk), 'amount': amount, 'currency': currency},\n 'verificationCode': unicode(self.instance.params['secure_id'])}\n headers = {'Content-Type': 'application/json', 'Authorization': \n token_tuple[1] + ' ' + token_tuple[0]}\n request = requests.post(self.bill_address, data=json.dumps(data),\n headers=headers)\n request = request.json()\n if request.get('transaction') and request.get('transaction').get(\n 'status') == 'accepted':\n self.instance.refresh_state()\n self.instance.is_payed = True\n self.instance.params['transaction'] = request.get('transaction'\n ).get('id')\n self.instance.save()\n return None\n else:\n error_message = request.get('error').get('message') if request.get(\n 'error') else 'Automatic payment failed.'\n self.instance.is_committed = False\n self.instance.is_payed = False\n self.instance.public_comment = error_message\n self.instance.save()\n return error_message\n\n @classmethod\n def generate_mt4_comment(cls, payment_request):\n return '{NETELLER}[%s]' % payment_request.pk\n\n def clean(self):\n from platforms.converter import convert_currency\n amount = self.cleaned_data['amount']\n currency = self.cleaned_data['currency']\n return super(DepositForm, self).clean()\n\n def confirmed_response_data(self, request):\n error = self.make_request()\n if error:\n return {'detail': 'Error: %s' % error}, 400\n else:\n return {'success': True}, None\n\n @classmethod\n def _calculate_commission(cls, request, full_commission=False):\n commission = request.amount * cls.commission_rate\n min_comm = Decimal('1')\n commission = max(min_comm, commission)\n return CommissionCalculationResult(amount=request.amount,\n commission=commission, currency=request.currency)\n\n\nclass DetailsForm(base.DetailsForm):\n\n def __init__(self, *args, **kwargs):\n super(DetailsForm, self).__init__(*args, **kwargs)\n self.fields['purse'].label = _('Net account')\n self.fields['purse'].help_text = _(\n \"Your Neteller's 12-digit Account ID or email address that is associated with their NETELLER account\"\n )\n\n\nclass WithdrawForm(base.WithdrawForm):\n MIN_AMOUNT = 10, 'USD'\n commission_rate = Decimal('0.025')\n\n @classmethod\n def _calculate_commission(cls, request, full_commission=False):\n commission = request.amount * cls.commission_rate\n min_comm = 
Decimal('1')\n max_comm = Decimal('30')\n commission = min(max_comm, max(min_comm, commission))\n return CommissionCalculationResult(amount=request.amount,\n commission=commission, currency=request.currency)\n",
"step-5": "# -*- coding: utf-8 -*-\nimport base64\nimport logging\nfrom decimal import Decimal\n\nimport requests\nfrom django import forms\nfrom django.conf import settings\nfrom django.utils.translation import ugettext_lazy as _\n\nfrom currencies.currencies import decimal_round\nfrom payments.systems import base\nfrom payments.systems.bankusd import display_amount_usd\nfrom payments.systems.base import CommissionCalculationResult\n\nname = _(\"Neteller\")\nlogo = \"neteller.png\"\nslug = __name__.rsplit(\".\", 1)[-1]\ncurrencies = [\"USD\"]\nmt4_payment_slug = \"NETELLER\"\n\ntransfer_details = {\n \"deposit\": {\n \"fee\": \"3.5% min $1\",\n \"time\": _(\"Within day\"),\n \"min_amount\": display_amount_usd(10),\n },\n \"withdraw\": {\n \"fee\": _(\"2.5% min $1 max $30\"),\n \"time\": _(\"Within day\"),\n \"min_amount\": display_amount_usd(10),\n }\n}\n\ntemplates = {\n \"deposit\": \"payments/forms/deposit/neteller.html\",\n \"withdraw\": \"payments/forms/withdraw/electronic.html\",\n}\n\nlog = logging.getLogger(__name__)\n\n\nclass DepositForm(base.DepositForm):\n\n purse = forms.CharField(max_length=100, label=_(\"Net account\"),\n help_text=_(\"Your Neteller's 12-digit Account ID or email address that is \"\n \"associated with their NETELLER account\"))\n secure_id = forms.IntegerField(label=_(\"Secure ID\"), help_text=_(\"Your Neteller's 6-digit Secure ID\"))\n\n bill_address = \"https://api.neteller.com/v1/transferIn\"\n get_token_url = \"https://api.neteller.com/v1/oauth2/token?grant_type=client_credentials\"\n commission_rate = Decimal(\"0.035\")\n MIN_AMOUNT = (10, 'USD')\n\n @classmethod\n def is_automatic(cls, instance):\n return True\n\n def get_neteller_token(self):\n \"\"\"\n :return: tuple. ('accessToken', 'Auth method'). Example: (\"0.AQAAAU3in\", \"Bearer\")\n or None if can't get token.\n \"\"\"\n\n headers = {'Content-Type': 'application/json',\n 'Cache-Control': 'no-cache',\n 'Authorization': 'Basic ' + base64.b64encode(\n settings.NETELLER_MERCHANT_ID + ':' + settings.NETELLER_SECRET_KEY)}\n\n\n result = requests.post(self.get_token_url, headers = headers)\n\n if result.status_code == 200:\n result = result.json()\n else:\n return None\n\n if result.get(\"accessToken\"):\n return result.get(\"accessToken\"), result.get(\"tokenType\")\n else:\n return None\n\n def make_request(self):\n import json\n\n currency = {\n \"RUR\": \"RUB\"\n }.get(self.instance.currency, self.instance.currency)\n amount = int(decimal_round(self.instance.amount) * 100)\n token_tuple = self.get_neteller_token()\n\n if not token_tuple:\n return \"Can't get the token.\"\n\n data = {\n \"paymentMethod\": {\n \"type\": \"neteller\",\n \"value\": self.instance.purse\n },\n \"transaction\": {\n \"merchantRefId\": unicode(self.instance.pk),\n \"amount\": amount,\n \"currency\": currency\n },\n \"verificationCode\": unicode(self.instance.params[\"secure_id\"]),\n }\n\n headers = {'Content-Type': 'application/json', 'Authorization': token_tuple[1] + \" \" + token_tuple[0]}\n\n request = requests.post(self.bill_address, data=json.dumps(data), headers=headers)\n\n request = request.json()\n\n if request.get(\"transaction\") and request.get(\"transaction\").get(\"status\") == \"accepted\":\n self.instance.refresh_state()\n self.instance.is_payed = True\n self.instance.params[\"transaction\"] = request.get(\"transaction\").get(\"id\")\n self.instance.save()\n return None\n else:\n error_message = request.get(\"error\").get(\"message\") if request.get(\"error\") else \\\n \"Automatic payment failed.\"\n 
self.instance.is_committed = False\n self.instance.is_payed = False\n self.instance.public_comment = error_message\n self.instance.save()\n return error_message\n\n @classmethod\n def generate_mt4_comment(cls, payment_request):\n return \"{NETELLER}[%s]\" % payment_request.pk\n\n def clean(self):\n from platforms.converter import convert_currency\n amount = self.cleaned_data[\"amount\"]\n currency = self.cleaned_data[\"currency\"]\n return super(DepositForm, self).clean()\n\n def confirmed_response_data(self, request):\n error = self.make_request()\n if error:\n return {'detail': \"Error: %s\" % error}, 400\n else:\n return {\"success\": True}, None\n\n @classmethod\n def _calculate_commission(cls, request, full_commission=False):\n commission = request.amount * cls.commission_rate\n min_comm = Decimal(\"1\")\n commission = max(min_comm, commission)\n return CommissionCalculationResult(\n amount=request.amount,\n commission=commission,\n currency=request.currency\n )\n\nclass DetailsForm(base.DetailsForm):\n\n def __init__(self, *args, **kwargs):\n super(DetailsForm, self).__init__(*args, **kwargs)\n self.fields[\"purse\"].label = _(\"Net account\")\n self.fields[\"purse\"].help_text = _(\"Your Neteller's 12-digit Account ID or email address that is \"\n \"associated with their NETELLER account\")\n\n\nclass WithdrawForm(base.WithdrawForm):\n MIN_AMOUNT = (10, 'USD')\n commission_rate = Decimal(\"0.025\")\n\n @classmethod\n def _calculate_commission(cls, request, full_commission=False):\n commission = request.amount * cls.commission_rate\n min_comm = Decimal(\"1\")\n max_comm = Decimal(\"30\")\n commission = min(max_comm, max(min_comm, commission))\n return CommissionCalculationResult(\n amount=request.amount,\n commission=commission,\n currency=request.currency\n )\n",
"step-ids": [
9,
10,
11,
13,
17
]
}
|
[
9,
10,
11,
13,
17
] |
from django.shortcuts import render
from django.contrib import messages
from django.contrib.auth import logout
from django.contrib.auth.decorators import login_required
# Create your views here.
# def login(request):
# return render(request, 'login.html')
# def validar_login(request):
# usuario= request.POST['username']
# password= request.POST['password']
# usuarios = Login.objects.filter(usuario = usuario, password = password)
# print(usuarios)
# if usuarios is None:
# print('entroo')
# print(usuario)
# messages.add_message(request, messages.INFO, 'EL USUARIO NO ESTA REGISTRADO')
# else:
# return render(request, 'menu.html')
@login_required
def menu(request):
return render(request, 'menu.html')
# def Logout_view(request):
# logout(request)
# return render(request, 'login.html')
@login_required
def Lojanisima(request):
return render(request, 'lojanisima/ruta_lojanisima.html')
@login_required
def Identiarte(request):
return render(request, 'identiarte/ruta_identiarte.html')
@login_required
def Raices(request):
return render(request, 'raice/ruta_raices.html')
@login_required
def Lojatur(request):
return render(request, 'lojatur/lojatur.html')
|
normal
|
{
"blob_id": "08712e050bd90408ed9d22bba9f62fafacd64d99",
"index": 9671,
"step-1": "<mask token>\n\n\n@login_required\ndef Lojanisima(request):\n return render(request, 'lojanisima/ruta_lojanisima.html')\n\n\n@login_required\ndef Identiarte(request):\n return render(request, 'identiarte/ruta_identiarte.html')\n\n\n@login_required\ndef Raices(request):\n return render(request, 'raice/ruta_raices.html')\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\n@login_required\ndef menu(request):\n return render(request, 'menu.html')\n\n\n@login_required\ndef Lojanisima(request):\n return render(request, 'lojanisima/ruta_lojanisima.html')\n\n\n@login_required\ndef Identiarte(request):\n return render(request, 'identiarte/ruta_identiarte.html')\n\n\n@login_required\ndef Raices(request):\n return render(request, 'raice/ruta_raices.html')\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\n@login_required\ndef menu(request):\n return render(request, 'menu.html')\n\n\n@login_required\ndef Lojanisima(request):\n return render(request, 'lojanisima/ruta_lojanisima.html')\n\n\n@login_required\ndef Identiarte(request):\n return render(request, 'identiarte/ruta_identiarte.html')\n\n\n@login_required\ndef Raices(request):\n return render(request, 'raice/ruta_raices.html')\n\n\n@login_required\ndef Lojatur(request):\n return render(request, 'lojatur/lojatur.html')\n",
"step-4": "from django.shortcuts import render\nfrom django.contrib import messages\nfrom django.contrib.auth import logout\nfrom django.contrib.auth.decorators import login_required\n\n\n@login_required\ndef menu(request):\n return render(request, 'menu.html')\n\n\n@login_required\ndef Lojanisima(request):\n return render(request, 'lojanisima/ruta_lojanisima.html')\n\n\n@login_required\ndef Identiarte(request):\n return render(request, 'identiarte/ruta_identiarte.html')\n\n\n@login_required\ndef Raices(request):\n return render(request, 'raice/ruta_raices.html')\n\n\n@login_required\ndef Lojatur(request):\n return render(request, 'lojatur/lojatur.html')\n",
"step-5": "from django.shortcuts import render\nfrom django.contrib import messages\nfrom django.contrib.auth import logout\nfrom django.contrib.auth.decorators import login_required\n# Create your views here.\n# def login(request):\n# return render(request, 'login.html')\n\n# def validar_login(request):\n# usuario= request.POST['username']\n# password= request.POST['password']\n# usuarios = Login.objects.filter(usuario = usuario, password = password)\n# print(usuarios)\n# if usuarios is None:\n# print('entroo')\n# print(usuario)\n# messages.add_message(request, messages.INFO, 'EL USUARIO NO ESTA REGISTRADO')\n# else:\n# return render(request, 'menu.html')\n@login_required\ndef menu(request):\n return render(request, 'menu.html')\n\n# def Logout_view(request):\n# logout(request)\n# return render(request, 'login.html')\n@login_required\ndef Lojanisima(request):\n return render(request, 'lojanisima/ruta_lojanisima.html')\n\n@login_required\ndef Identiarte(request):\n return render(request, 'identiarte/ruta_identiarte.html')\n\n@login_required\ndef Raices(request):\n return render(request, 'raice/ruta_raices.html')\n\n@login_required\ndef Lojatur(request):\n return render(request, 'lojatur/lojatur.html')",
"step-ids": [
3,
4,
5,
6,
7
]
}
|
[
3,
4,
5,
6,
7
] |
# -*- coding: utf-8 -*-
from PyQt5 import QtCore, QtGui, QtWidgets
from PyQt5.QtGui import *
from PyQt5.QtCore import *
class Ui_MainWindow(object):
def setupUi(self, MainWindow):
MainWindow.setObjectName("MainWindow")
self.centralwidget = QtWidgets.QWidget(MainWindow)
self.centralwidget.setObjectName("centralwidget")
MainWindow.setCentralWidget(self.centralwidget)
MainWindow.setWindowIcon(QIcon('data/nn.png'))
MainWindow.resize(800, 800)
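        # "OK" confirmation button: icon plus rounded blue styling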
self.OK = QtWidgets.QPushButton(self.centralwidget)
self.OK.setStyleSheet("background-color:#18BDFF; border-radius: 5px;");
self.OK.setIcon(QIcon("data/ok.png"))
self.OK.setIconSize(QSize(40, 40))
self.OK.setGeometry(QtCore.QRect(375, 820, 150, 45))
font = QtGui.QFont()
font.setPointSize(10)
self.OK.setFont(font)
self.OK.setAutoFillBackground(True)
self.OK.setObjectName("OK")
self.retranslateUi(MainWindow)
QtCore.QMetaObject.connectSlotsByName(MainWindow)
def retranslateUi(self, MainWindow):
_translate = QtCore.QCoreApplication.translate
MainWindow.setWindowTitle(_translate("MainWindow", "Drawing digits"))
self.OK.setText(_translate("MainWindow", " OK"))
|
normal
|
{
"blob_id": "65264f52f641b67c707b6a827ecfe1bf417748e8",
"index": 2379,
"step-1": "<mask token>\n\n\nclass Ui_MainWindow(object):\n <mask token>\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass Ui_MainWindow(object):\n\n def setupUi(self, MainWindow):\n MainWindow.setObjectName('MainWindow')\n self.centralwidget = QtWidgets.QWidget(MainWindow)\n self.centralwidget.setObjectName('centralwidget')\n MainWindow.setCentralWidget(self.centralwidget)\n MainWindow.setWindowIcon(QIcon('data/nn.png'))\n MainWindow.resize(800, 800)\n self.OK = QtWidgets.QPushButton(self.centralwidget)\n self.OK.setStyleSheet('background-color:#18BDFF; border-radius: 5px;')\n self.OK.setIcon(QIcon('data/ok.png'))\n self.OK.setIconSize(QSize(40, 40))\n self.OK.setGeometry(QtCore.QRect(375, 820, 150, 45))\n font = QtGui.QFont()\n font.setPointSize(10)\n self.OK.setFont(font)\n self.OK.setAutoFillBackground(True)\n self.OK.setObjectName('OK')\n self.retranslateUi(MainWindow)\n QtCore.QMetaObject.connectSlotsByName(MainWindow)\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass Ui_MainWindow(object):\n\n def setupUi(self, MainWindow):\n MainWindow.setObjectName('MainWindow')\n self.centralwidget = QtWidgets.QWidget(MainWindow)\n self.centralwidget.setObjectName('centralwidget')\n MainWindow.setCentralWidget(self.centralwidget)\n MainWindow.setWindowIcon(QIcon('data/nn.png'))\n MainWindow.resize(800, 800)\n self.OK = QtWidgets.QPushButton(self.centralwidget)\n self.OK.setStyleSheet('background-color:#18BDFF; border-radius: 5px;')\n self.OK.setIcon(QIcon('data/ok.png'))\n self.OK.setIconSize(QSize(40, 40))\n self.OK.setGeometry(QtCore.QRect(375, 820, 150, 45))\n font = QtGui.QFont()\n font.setPointSize(10)\n self.OK.setFont(font)\n self.OK.setAutoFillBackground(True)\n self.OK.setObjectName('OK')\n self.retranslateUi(MainWindow)\n QtCore.QMetaObject.connectSlotsByName(MainWindow)\n\n def retranslateUi(self, MainWindow):\n _translate = QtCore.QCoreApplication.translate\n MainWindow.setWindowTitle(_translate('MainWindow', 'Drawing digits'))\n self.OK.setText(_translate('MainWindow', ' OK'))\n",
"step-4": "from PyQt5 import QtCore, QtGui, QtWidgets\nfrom PyQt5.QtGui import *\nfrom PyQt5.QtCore import *\n\n\nclass Ui_MainWindow(object):\n\n def setupUi(self, MainWindow):\n MainWindow.setObjectName('MainWindow')\n self.centralwidget = QtWidgets.QWidget(MainWindow)\n self.centralwidget.setObjectName('centralwidget')\n MainWindow.setCentralWidget(self.centralwidget)\n MainWindow.setWindowIcon(QIcon('data/nn.png'))\n MainWindow.resize(800, 800)\n self.OK = QtWidgets.QPushButton(self.centralwidget)\n self.OK.setStyleSheet('background-color:#18BDFF; border-radius: 5px;')\n self.OK.setIcon(QIcon('data/ok.png'))\n self.OK.setIconSize(QSize(40, 40))\n self.OK.setGeometry(QtCore.QRect(375, 820, 150, 45))\n font = QtGui.QFont()\n font.setPointSize(10)\n self.OK.setFont(font)\n self.OK.setAutoFillBackground(True)\n self.OK.setObjectName('OK')\n self.retranslateUi(MainWindow)\n QtCore.QMetaObject.connectSlotsByName(MainWindow)\n\n def retranslateUi(self, MainWindow):\n _translate = QtCore.QCoreApplication.translate\n MainWindow.setWindowTitle(_translate('MainWindow', 'Drawing digits'))\n self.OK.setText(_translate('MainWindow', ' OK'))\n",
"step-5": "# -*- coding: utf-8 -*-\n\nfrom PyQt5 import QtCore, QtGui, QtWidgets\nfrom PyQt5.QtGui import *\nfrom PyQt5.QtCore import *\n\nclass Ui_MainWindow(object):\n def setupUi(self, MainWindow):\n MainWindow.setObjectName(\"MainWindow\")\n self.centralwidget = QtWidgets.QWidget(MainWindow)\n self.centralwidget.setObjectName(\"centralwidget\")\n MainWindow.setCentralWidget(self.centralwidget)\n MainWindow.setWindowIcon(QIcon('data/nn.png'))\n MainWindow.resize(800, 800)\n \n \n self.OK = QtWidgets.QPushButton(self.centralwidget)\n self.OK.setStyleSheet(\"background-color:#18BDFF; border-radius: 5px;\");\n self.OK.setIcon(QIcon(\"data/ok.png\"))\n self.OK.setIconSize(QSize(40, 40)) \n self.OK.setGeometry(QtCore.QRect(375, 820, 150, 45))\n font = QtGui.QFont()\n font.setPointSize(10)\n self.OK.setFont(font)\n self.OK.setAutoFillBackground(True)\n self.OK.setObjectName(\"OK\")\n \n \n self.retranslateUi(MainWindow)\n QtCore.QMetaObject.connectSlotsByName(MainWindow)\n \n \n \n def retranslateUi(self, MainWindow):\n _translate = QtCore.QCoreApplication.translate\n MainWindow.setWindowTitle(_translate(\"MainWindow\", \"Drawing digits\"))\n self.OK.setText(_translate(\"MainWindow\", \" OK\"))\n\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
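
The PyQt5 record above defines a Designer-style UI class. A minimal sketch, assuming Ui_MainWindow is importable from that module, of the conventional wiring used to display it; the event-loop boilerplate is standard PyQt5 and none of it comes from the record itself:

import sys
from PyQt5 import QtWidgets
from mainwindow_ui import Ui_MainWindow   # module name assumed

if __name__ == '__main__':
    app = QtWidgets.QApplication(sys.argv)   # one QApplication per process
    window = QtWidgets.QMainWindow()         # plain main window to decorate
    ui = Ui_MainWindow()
    ui.setupUi(window)                       # builds the central widget and the OK button
    window.show()
    sys.exit(app.exec_())                    # hand control to the Qt event loop
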
line_numbers = input().split(", ")
print("Positive:", ", ".join(list(filter((lambda x: int(x) > -1), line_numbers))))
print("Negative:", ", ".join((list(filter((lambda x: int(x) < 0), line_numbers)))))
print("Even:", ", ".join((list(filter((lambda x: int(x) % 2 == 0), line_numbers)))))
print("Odd:", ", ".join((list(filter((lambda x: int(x) % 2 != 0), line_numbers)))))
# # INPUT 1
# 1, -2, 0, 5, 3, 4, -100, -20, 12, 19, -33
# # OUTPUT 1
# Positive: 1, 0, 5, 3, 4, 12, 19
# Negative: -2, -100, -20, -33
# Even: -2, 0, 4, -100, -20, 12
# Odd: 1, 5, 3, 19, -33
|
normal
|
{
"blob_id": "e4845e5aa949ec523515efc4d7996d647fddabdb",
"index": 7060,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nprint('Positive:', ', '.join(list(filter(lambda x: int(x) > -1, line_numbers)))\n )\nprint('Negative:', ', '.join(list(filter(lambda x: int(x) < 0, line_numbers))))\nprint('Even:', ', '.join(list(filter(lambda x: int(x) % 2 == 0, line_numbers)))\n )\nprint('Odd:', ', '.join(list(filter(lambda x: int(x) % 2 != 0, line_numbers))))\n",
"step-3": "line_numbers = input().split(', ')\nprint('Positive:', ', '.join(list(filter(lambda x: int(x) > -1, line_numbers)))\n )\nprint('Negative:', ', '.join(list(filter(lambda x: int(x) < 0, line_numbers))))\nprint('Even:', ', '.join(list(filter(lambda x: int(x) % 2 == 0, line_numbers)))\n )\nprint('Odd:', ', '.join(list(filter(lambda x: int(x) % 2 != 0, line_numbers))))\n",
"step-4": "line_numbers = input().split(\", \")\nprint(\"Positive:\", \", \".join(list(filter((lambda x: int(x) > -1), line_numbers))))\nprint(\"Negative:\", \", \".join((list(filter((lambda x: int(x) < 0), line_numbers)))))\nprint(\"Even:\", \", \".join((list(filter((lambda x: int(x) % 2 == 0), line_numbers)))))\nprint(\"Odd:\", \", \".join((list(filter((lambda x: int(x) % 2 != 0), line_numbers)))))\n# # INPUT 1\n# 1, -2, 0, 5, 3, 4, -100, -20, 12, 19, -33\n# # OUTPUT 1\n# Positive: 1, 0, 5, 3, 4, 12, 19\n# Negative: -2, -100, -20, -33\n# Even: -2, 0, 4, -100, -20, 12\n# Odd: 1, 5, 3, 19, -33\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
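
The filter/lambda record above calls int() on each string several times. A one-pass alternative sketch (an illustration, not part of the record) that converts once and buckets each value, preserving the original rule that 0 counts as positive:

nums = [int(x) for x in input().split(', ')]
buckets = {'Positive': [], 'Negative': [], 'Even': [], 'Odd': []}
for n in nums:
    buckets['Positive' if n > -1 else 'Negative'].append(str(n))
    buckets['Even' if n % 2 == 0 else 'Odd'].append(str(n))
for label in ('Positive', 'Negative', 'Even', 'Odd'):
    print(f'{label}:', ', '.join(buckets[label]))
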
import gc
import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split
from tqdm import tqdm
import cv2
import torch
from torch.utils.data import DataLoader
from torch import optim
from torch.optim import lr_scheduler
from dataset.car_dataset import CarDataset
from nn.network import MyUNet
from utils.utils import coords2str, extract_coords
from utils.evaluate_map import compute_map
from utils.utils import visualize
from efficientnet_pytorch import EfficientNet
camera_matrix = np.array([[2304.5479, 0, 1686.2379],
[0, 2305.8757, 1354.9849],
[0, 0, 1]], dtype=np.float32)
device = torch.device("cuda")
IMG_WIDTH = 1024
IMG_HEIGHT = IMG_WIDTH // 16 * 5
MODEL_SCALE = 8
if __name__ == "__main__":
ROOT_PATH = "/media/andreis/storage/datasets/pku-autonomous-driving/"
df = pd.read_csv(ROOT_PATH + "train.csv")
df_test = pd.read_csv(ROOT_PATH + "sample_submission.csv")
train_images_dir = ROOT_PATH + "train_images/"
test_images_dir = ROOT_PATH + "test_images/"
df_train, df_val = train_test_split(df, test_size=0.01, random_state=72)
df_val_gt = df_val.copy()
# create dataset objects
train_dataset = CarDataset(df_train, train_images_dir, camera_matrix)
val_dataset = CarDataset(df_val, train_images_dir, camera_matrix)
test_dataset = CarDataset(df_test, test_images_dir, camera_matrix)
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model = MyUNet(10).to(device)
model.load_state_dict(torch.load("model.pth"))
model.eval()
val_loader = DataLoader(dataset=val_dataset, batch_size=1, shuffle=False, num_workers=4)
#img, mask, regr = val_dataset[0]
#output = model(torch.tensor(img[None]).to(device))
#output = output.data.cpu().numpy()
predictions = []
for img, _, _, img0 in tqdm(val_loader):
img_np = np.moveaxis(torch.squeeze(img).numpy(), 0, 2)
img0 = torch.squeeze(img0).numpy()#p.moveaxis(torch.squeeze(img).numpy(), 0, 2)
#print(img_np.shape)
with torch.no_grad():
#output = model(torch.tensor(img[None]).to(device))
output = model(img.to(device))
output = output.data.cpu().numpy()
# looping over batch items
for out in output:
coords = extract_coords(out)
print(coords)
# s = coords2str(coords)
#predictions.append(s)
q_img = visualize(img0, coords, camera_matrix)
print(q_img.shape)
q_img = cv2.resize(q_img, (int(q_img.shape[1]*0.25), int(q_img.shape[0]*0.25) ))
# show predictions on image
cv2.imshow("Prediction", q_img)
cv2.waitKey()
# cv2.imshow("Predictions", visualize(img_np, coords, camera_matrix))
# cv2.waitKey()
#df_val['PredictionString'] = predictions
#df_test.to_csv('predictions.csv', index=False)
#print(df_val.head())
#def sigmoid(x):
# return 1 / (1 + np.exp(-x))
#map = compute_map(df_val_gt, df_val)
#print(map)
#logits = output[0,0].data.cpu().numpy()
#sigmoids = np.apply_along_axis(sigmoid, -1, logits)
#print(output.shape)
#print(logits.shape)
#print(sigmoids.shape)
#print(sigmoids)
#print(np.max(sigmoids))
#points = np.argwhere(logits > 0)
#print(points)
#preds = extract_coords(output)
#img = np.rollaxis(img, 0, 3)
#print(type(img))
#cv2.imshow("imagine", img)
#cv2.imshow("mask", mask)
#cv2.imshow("regr", regr[:,:,-1])
#cv2.imshow("predictions", sigmoids)
#cv2.waitKey(0)
#cv2.destroyAllWindows()
|
normal
|
{
"blob_id": "1861c394fb02643d2e6ac8362f3340f512ef6d72",
"index": 6402,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nif __name__ == '__main__':\n ROOT_PATH = '/media/andreis/storage/datasets/pku-autonomous-driving/'\n df = pd.read_csv(ROOT_PATH + 'train.csv')\n df_test = pd.read_csv(ROOT_PATH + 'sample_submission.csv')\n train_images_dir = ROOT_PATH + 'train_images/'\n test_images_dir = ROOT_PATH + 'test_images/'\n df_train, df_val = train_test_split(df, test_size=0.01, random_state=72)\n df_val_gt = df_val.copy()\n train_dataset = CarDataset(df_train, train_images_dir, camera_matrix)\n val_dataset = CarDataset(df_val, train_images_dir, camera_matrix)\n test_dataset = CarDataset(df_test, test_images_dir, camera_matrix)\n device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n model = MyUNet(10).to(device)\n model.load_state_dict(torch.load('model.pth'))\n model.eval()\n val_loader = DataLoader(dataset=val_dataset, batch_size=1, shuffle=\n False, num_workers=4)\n predictions = []\n for img, _, _, img0 in tqdm(val_loader):\n img_np = np.moveaxis(torch.squeeze(img).numpy(), 0, 2)\n img0 = torch.squeeze(img0).numpy()\n with torch.no_grad():\n output = model(img.to(device))\n output = output.data.cpu().numpy()\n for out in output:\n coords = extract_coords(out)\n print(coords)\n q_img = visualize(img0, coords, camera_matrix)\n print(q_img.shape)\n q_img = cv2.resize(q_img, (int(q_img.shape[1] * 0.25), int(\n q_img.shape[0] * 0.25)))\n cv2.imshow('Prediction', q_img)\n cv2.waitKey()\n",
"step-3": "<mask token>\ncamera_matrix = np.array([[2304.5479, 0, 1686.2379], [0, 2305.8757, \n 1354.9849], [0, 0, 1]], dtype=np.float32)\ndevice = torch.device('cuda')\nIMG_WIDTH = 1024\nIMG_HEIGHT = IMG_WIDTH // 16 * 5\nMODEL_SCALE = 8\nif __name__ == '__main__':\n ROOT_PATH = '/media/andreis/storage/datasets/pku-autonomous-driving/'\n df = pd.read_csv(ROOT_PATH + 'train.csv')\n df_test = pd.read_csv(ROOT_PATH + 'sample_submission.csv')\n train_images_dir = ROOT_PATH + 'train_images/'\n test_images_dir = ROOT_PATH + 'test_images/'\n df_train, df_val = train_test_split(df, test_size=0.01, random_state=72)\n df_val_gt = df_val.copy()\n train_dataset = CarDataset(df_train, train_images_dir, camera_matrix)\n val_dataset = CarDataset(df_val, train_images_dir, camera_matrix)\n test_dataset = CarDataset(df_test, test_images_dir, camera_matrix)\n device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n model = MyUNet(10).to(device)\n model.load_state_dict(torch.load('model.pth'))\n model.eval()\n val_loader = DataLoader(dataset=val_dataset, batch_size=1, shuffle=\n False, num_workers=4)\n predictions = []\n for img, _, _, img0 in tqdm(val_loader):\n img_np = np.moveaxis(torch.squeeze(img).numpy(), 0, 2)\n img0 = torch.squeeze(img0).numpy()\n with torch.no_grad():\n output = model(img.to(device))\n output = output.data.cpu().numpy()\n for out in output:\n coords = extract_coords(out)\n print(coords)\n q_img = visualize(img0, coords, camera_matrix)\n print(q_img.shape)\n q_img = cv2.resize(q_img, (int(q_img.shape[1] * 0.25), int(\n q_img.shape[0] * 0.25)))\n cv2.imshow('Prediction', q_img)\n cv2.waitKey()\n",
"step-4": "import gc\nimport numpy as np\nimport pandas as pd\nfrom sklearn.model_selection import train_test_split\nfrom tqdm import tqdm\nimport cv2\nimport torch\nfrom torch.utils.data import DataLoader\nfrom torch import optim\nfrom torch.optim import lr_scheduler\nfrom dataset.car_dataset import CarDataset\nfrom nn.network import MyUNet\nfrom utils.utils import coords2str, extract_coords\nfrom utils.evaluate_map import compute_map\nfrom utils.utils import visualize\nfrom efficientnet_pytorch import EfficientNet\ncamera_matrix = np.array([[2304.5479, 0, 1686.2379], [0, 2305.8757, \n 1354.9849], [0, 0, 1]], dtype=np.float32)\ndevice = torch.device('cuda')\nIMG_WIDTH = 1024\nIMG_HEIGHT = IMG_WIDTH // 16 * 5\nMODEL_SCALE = 8\nif __name__ == '__main__':\n ROOT_PATH = '/media/andreis/storage/datasets/pku-autonomous-driving/'\n df = pd.read_csv(ROOT_PATH + 'train.csv')\n df_test = pd.read_csv(ROOT_PATH + 'sample_submission.csv')\n train_images_dir = ROOT_PATH + 'train_images/'\n test_images_dir = ROOT_PATH + 'test_images/'\n df_train, df_val = train_test_split(df, test_size=0.01, random_state=72)\n df_val_gt = df_val.copy()\n train_dataset = CarDataset(df_train, train_images_dir, camera_matrix)\n val_dataset = CarDataset(df_val, train_images_dir, camera_matrix)\n test_dataset = CarDataset(df_test, test_images_dir, camera_matrix)\n device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n model = MyUNet(10).to(device)\n model.load_state_dict(torch.load('model.pth'))\n model.eval()\n val_loader = DataLoader(dataset=val_dataset, batch_size=1, shuffle=\n False, num_workers=4)\n predictions = []\n for img, _, _, img0 in tqdm(val_loader):\n img_np = np.moveaxis(torch.squeeze(img).numpy(), 0, 2)\n img0 = torch.squeeze(img0).numpy()\n with torch.no_grad():\n output = model(img.to(device))\n output = output.data.cpu().numpy()\n for out in output:\n coords = extract_coords(out)\n print(coords)\n q_img = visualize(img0, coords, camera_matrix)\n print(q_img.shape)\n q_img = cv2.resize(q_img, (int(q_img.shape[1] * 0.25), int(\n q_img.shape[0] * 0.25)))\n cv2.imshow('Prediction', q_img)\n cv2.waitKey()\n",
"step-5": "import gc\r\nimport numpy as np\r\nimport pandas as pd\r\nfrom sklearn.model_selection import train_test_split\r\nfrom tqdm import tqdm\r\nimport cv2 \r\n\r\nimport torch\r\nfrom torch.utils.data import DataLoader\r\nfrom torch import optim\r\nfrom torch.optim import lr_scheduler\r\n\r\nfrom dataset.car_dataset import CarDataset\r\nfrom nn.network import MyUNet\r\nfrom utils.utils import coords2str, extract_coords\r\nfrom utils.evaluate_map import compute_map\r\nfrom utils.utils import visualize\r\n\r\nfrom efficientnet_pytorch import EfficientNet\r\n\r\ncamera_matrix = np.array([[2304.5479, 0, 1686.2379],\r\n [0, 2305.8757, 1354.9849],\r\n [0, 0, 1]], dtype=np.float32)\r\n\r\ndevice = torch.device(\"cuda\")\r\n\r\nIMG_WIDTH = 1024\r\nIMG_HEIGHT = IMG_WIDTH // 16 * 5\r\nMODEL_SCALE = 8\r\n\r\n\r\nif __name__ == \"__main__\":\r\n\r\n ROOT_PATH = \"/media/andreis/storage/datasets/pku-autonomous-driving/\"\r\n df = pd.read_csv(ROOT_PATH + \"train.csv\")\r\n df_test = pd.read_csv(ROOT_PATH + \"sample_submission.csv\")\r\n\r\n train_images_dir = ROOT_PATH + \"train_images/\"\r\n test_images_dir = ROOT_PATH + \"test_images/\"\r\n\r\n df_train, df_val = train_test_split(df, test_size=0.01, random_state=72)\r\n df_val_gt = df_val.copy()\r\n\r\n # create dataset objects\r\n train_dataset = CarDataset(df_train, train_images_dir, camera_matrix)\r\n val_dataset = CarDataset(df_val, train_images_dir, camera_matrix)\r\n test_dataset = CarDataset(df_test, test_images_dir, camera_matrix)\r\n\r\n device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\r\n model = MyUNet(10).to(device)\r\n\r\n model.load_state_dict(torch.load(\"model.pth\"))\r\n model.eval()\r\n\r\n val_loader = DataLoader(dataset=val_dataset, batch_size=1, shuffle=False, num_workers=4)\r\n\r\n #img, mask, regr = val_dataset[0]\r\n\r\n #output = model(torch.tensor(img[None]).to(device))\r\n\r\n #output = output.data.cpu().numpy()\r\n \r\n predictions = []\r\n for img, _, _, img0 in tqdm(val_loader):\r\n img_np = np.moveaxis(torch.squeeze(img).numpy(), 0, 2)\r\n img0 = torch.squeeze(img0).numpy()#p.moveaxis(torch.squeeze(img).numpy(), 0, 2)\r\n #print(img_np.shape)\r\n with torch.no_grad():\r\n #output = model(torch.tensor(img[None]).to(device))\r\n output = model(img.to(device))\r\n output = output.data.cpu().numpy()\r\n # looping over batch items\r\n for out in output:\r\n coords = extract_coords(out)\r\n print(coords)\r\n # s = coords2str(coords)\r\n \r\n #predictions.append(s)\r\n q_img = visualize(img0, coords, camera_matrix)\r\n print(q_img.shape)\r\n q_img = cv2.resize(q_img, (int(q_img.shape[1]*0.25), int(q_img.shape[0]*0.25) ))\r\n # show predictions on image\r\n cv2.imshow(\"Prediction\", q_img)\r\n cv2.waitKey()\r\n # cv2.imshow(\"Predictions\", visualize(img_np, coords, camera_matrix))\r\n # cv2.waitKey()\r\n\r\n\r\n #df_val['PredictionString'] = predictions\r\n #df_test.to_csv('predictions.csv', index=False)\r\n #print(df_val.head())\r\n\r\n #def sigmoid(x):\r\n # return 1 / (1 + np.exp(-x))\r\n\r\n #map = compute_map(df_val_gt, df_val)\r\n #print(map)\r\n\r\n #logits = output[0,0].data.cpu().numpy()\r\n #sigmoids = np.apply_along_axis(sigmoid, -1, logits)\r\n #print(output.shape)\r\n #print(logits.shape)\r\n #print(sigmoids.shape)\r\n #print(sigmoids)\r\n #print(np.max(sigmoids))\r\n\r\n #points = np.argwhere(logits > 0)\r\n #print(points)\r\n #preds = extract_coords(output)\r\n\r\n\r\n #img = np.rollaxis(img, 0, 3)\r\n #print(type(img))\r\n\r\n #cv2.imshow(\"imagine\", img)\r\n 
#cv2.imshow(\"mask\", mask)\r\n #cv2.imshow(\"regr\", regr[:,:,-1])\r\n #cv2.imshow(\"predictions\", sigmoids)\r\n #cv2.waitKey(0)\r\n #cv2.destroyAllWindows()\r\n\r\n\r\n\r\n\r\n \r\n\r\n\r\n\r\n ",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
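
The inference record above hands a 3x3 intrinsic matrix to its visualize() helper. A sketch of the pinhole projection such a camera_matrix implies (standard computer-vision math; the function name project_points is hypothetical, not part of the record):

import numpy as np

def project_points(points_3d, K):
    # points_3d: (N, 3) coordinates in the camera frame; K: 3x3 intrinsic matrix
    uvw = points_3d @ K.T             # homogeneous image coordinates
    return uvw[:, :2] / uvw[:, 2:]    # divide by depth z to recover pixel (u, v)
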
from flask import Flask, request, render_template, redirect
from pymongo import MongoClient
from envparse import env
from flask_httpauth import HTTPDigestAuth
import os.path
# Get env vars stored either in an env file or on the machine
def get_env(name):
if (os.path.exists('./env')):
env.read_envfile('./env')
return env(name)
app = Flask(__name__)
app.config['SECRET_KEY'] = get_env('SECRET_KEY')
users = {
"admin": get_env('ADMIN_PASS')
}
auth = HTTPDigestAuth()
@auth.get_password
def get_pw(username):
if username in users:
return users.get(username)
return None
# Utility method for mongo connections
def mongo_login():
mongo_uri=get_env('MONGO_URI')
client = MongoClient(mongo_uri)
return client['rescuebnb']
# Home page with host form
@app.route('/')
def show_home():
return render_template('index.html')
# Post endpoint for committing host to db
@app.route('/addhost', methods = ['GET', 'POST'])
def hosts():
if request.method == 'POST':
db = mongo_login()
hosts_collection = db.hosts
host = request.form.to_dict()
hosts_collection.insert_one(host) # should probably check for completed insert
return redirect('/')
return render_template('addhosts.html')
# Post endpoint for committing people who need shelter to db
@app.route('/requestshelter', methods = ['GET', 'POST'])
def guests():
if request.method == 'POST':
db = mongo_login()
guest_collection = db.guests
guest = request.form.to_dict()
guest_collection.insert_one(guest) # should probably check for completed insert
return redirect('/')
return render_template('request_shelter.html')
# Get involved page
@app.route('/getinvolved')
def get_involved():
return render_template('get_involved.html')
# Get involved page
@app.route('/volunteer')
def volunteer():
return render_template('volunteer.html')
# "Secured" endpoint for viewing registered hosts
@app.route('/hosts')
@auth.login_required
def viewhosts():
db = mongo_login()
hosts_collection = db.hosts
guests_collection = db.guests
return render_template('viewhosts.html', hosts=list(hosts_collection.find()),
guests=list(guests_collection.find()))
# accept POST so that request.form carries the submitted fields
@app.route('/ussd', methods=['POST'])
def ussd():
db = mongo_login()
ussd_collection = db.ussd
ussd = request.form.to_dict()
ussd_collection.insert_one(ussd)
return render_template('index.html')
if __name__ == '__main__':
app.run()
#app.run(debug=True)
|
normal
|
{
"blob_id": "ad813216ba8162a7089340c677e47c3e656f7c95",
"index": 6198,
"step-1": "<mask token>\n\n\[email protected]('/')\ndef show_home():\n return render_template('index.html')\n\n\[email protected]('/addhost', methods=['GET', 'POST'])\ndef hosts():\n if request.method == 'POST':\n db = mongo_login()\n hosts_collection = db.hosts\n host = request.form.to_dict()\n hosts_collection.insert_one(host)\n return redirect('/')\n return render_template('addhosts.html')\n\n\n<mask token>\n\n\[email protected]('/getinvolved')\ndef get_involved():\n return render_template('get_involved.html')\n\n\[email protected]('/volunteer')\ndef volunteer():\n return render_template('volunteer.html')\n\n\[email protected]('/hosts')\[email protected]_required\ndef viewhosts():\n db = mongo_login()\n hosts_collection = db.hosts\n guests_collection = db.guests\n return render_template('viewhosts.html', hosts=list(hosts_collection.\n find()), guests=list(guests_collection.find()))\n\n\[email protected]('/ussd')\ndef ussd():\n db = mongo_login()\n ussd_collection = db.ussd\n ussd = request.form.to_dict()\n ussd_collection.insert_one(ussd)\n return render_template('index.html')\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\[email protected]_password\ndef get_pw(username):\n if username in users:\n return users.get(username)\n return None\n\n\n<mask token>\n\n\[email protected]('/')\ndef show_home():\n return render_template('index.html')\n\n\[email protected]('/addhost', methods=['GET', 'POST'])\ndef hosts():\n if request.method == 'POST':\n db = mongo_login()\n hosts_collection = db.hosts\n host = request.form.to_dict()\n hosts_collection.insert_one(host)\n return redirect('/')\n return render_template('addhosts.html')\n\n\n<mask token>\n\n\[email protected]('/getinvolved')\ndef get_involved():\n return render_template('get_involved.html')\n\n\[email protected]('/volunteer')\ndef volunteer():\n return render_template('volunteer.html')\n\n\[email protected]('/hosts')\[email protected]_required\ndef viewhosts():\n db = mongo_login()\n hosts_collection = db.hosts\n guests_collection = db.guests\n return render_template('viewhosts.html', hosts=list(hosts_collection.\n find()), guests=list(guests_collection.find()))\n\n\[email protected]('/ussd')\ndef ussd():\n db = mongo_login()\n ussd_collection = db.ussd\n ussd = request.form.to_dict()\n ussd_collection.insert_one(ussd)\n return render_template('index.html')\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef get_env(name):\n if os.path.exists('./env'):\n env.read_envfile('./env')\n return env(name)\n\n\n<mask token>\n\n\[email protected]_password\ndef get_pw(username):\n if username in users:\n return users.get(username)\n return None\n\n\ndef mongo_login():\n mongo_uri = get_env('MONGO_URI')\n client = MongoClient(mongo_uri)\n return client['rescuebnb']\n\n\[email protected]('/')\ndef show_home():\n return render_template('index.html')\n\n\[email protected]('/addhost', methods=['GET', 'POST'])\ndef hosts():\n if request.method == 'POST':\n db = mongo_login()\n hosts_collection = db.hosts\n host = request.form.to_dict()\n hosts_collection.insert_one(host)\n return redirect('/')\n return render_template('addhosts.html')\n\n\[email protected]('/requestshelter', methods=['GET', 'POST'])\ndef guests():\n if request.method == 'POST':\n db = mongo_login()\n guest_collection = db.guests\n guest = request.form.to_dict()\n guest_collection.insert_one(guest)\n return redirect('/')\n return render_template('request_shelter.html')\n\n\[email protected]('/getinvolved')\ndef get_involved():\n return render_template('get_involved.html')\n\n\[email protected]('/volunteer')\ndef volunteer():\n return render_template('volunteer.html')\n\n\[email protected]('/hosts')\[email protected]_required\ndef viewhosts():\n db = mongo_login()\n hosts_collection = db.hosts\n guests_collection = db.guests\n return render_template('viewhosts.html', hosts=list(hosts_collection.\n find()), guests=list(guests_collection.find()))\n\n\[email protected]('/ussd')\ndef ussd():\n db = mongo_login()\n ussd_collection = db.ussd\n ussd = request.form.to_dict()\n ussd_collection.insert_one(ussd)\n return render_template('index.html')\n\n\n<mask token>\n",
"step-4": "from flask import Flask, request, render_template, redirect\nfrom pymongo import MongoClient\nfrom envparse import env\nfrom flask_httpauth import HTTPDigestAuth\nimport os.path\n\n\ndef get_env(name):\n if os.path.exists('./env'):\n env.read_envfile('./env')\n return env(name)\n\n\napp = Flask(__name__)\napp.config['SECRET_KEY'] = get_env('SECRET_KEY')\nusers = users = {'admin': get_env('ADMIN_PASS')}\nauth = HTTPDigestAuth()\n\n\[email protected]_password\ndef get_pw(username):\n if username in users:\n return users.get(username)\n return None\n\n\ndef mongo_login():\n mongo_uri = get_env('MONGO_URI')\n client = MongoClient(mongo_uri)\n return client['rescuebnb']\n\n\[email protected]('/')\ndef show_home():\n return render_template('index.html')\n\n\[email protected]('/addhost', methods=['GET', 'POST'])\ndef hosts():\n if request.method == 'POST':\n db = mongo_login()\n hosts_collection = db.hosts\n host = request.form.to_dict()\n hosts_collection.insert_one(host)\n return redirect('/')\n return render_template('addhosts.html')\n\n\[email protected]('/requestshelter', methods=['GET', 'POST'])\ndef guests():\n if request.method == 'POST':\n db = mongo_login()\n guest_collection = db.guests\n guest = request.form.to_dict()\n guest_collection.insert_one(guest)\n return redirect('/')\n return render_template('request_shelter.html')\n\n\[email protected]('/getinvolved')\ndef get_involved():\n return render_template('get_involved.html')\n\n\[email protected]('/volunteer')\ndef volunteer():\n return render_template('volunteer.html')\n\n\[email protected]('/hosts')\[email protected]_required\ndef viewhosts():\n db = mongo_login()\n hosts_collection = db.hosts\n guests_collection = db.guests\n return render_template('viewhosts.html', hosts=list(hosts_collection.\n find()), guests=list(guests_collection.find()))\n\n\[email protected]('/ussd')\ndef ussd():\n db = mongo_login()\n ussd_collection = db.ussd\n ussd = request.form.to_dict()\n ussd_collection.insert_one(ussd)\n return render_template('index.html')\n\n\nif __name__ == '__main__':\n app.run()\n",
"step-5": "from flask import Flask, request, render_template, redirect\nfrom pymongo import MongoClient\nfrom envparse import env\nfrom flask_httpauth import HTTPDigestAuth\nimport os.path\n\n\n# Get env vars stored either in an env file or on the machine\ndef get_env(name):\n\tif (os.path.exists('./env')):\n\t\tenv.read_envfile('./env')\n\treturn env(name)\n\napp = Flask(__name__)\napp.config['SECRET_KEY'] = get_env('SECRET_KEY')\nusers = users = {\n \"admin\": get_env('ADMIN_PASS')\n}\nauth = HTTPDigestAuth()\n\[email protected]_password\ndef get_pw(username):\n if username in users:\n return users.get(username)\n return None\n\n\n# Utility method for mongo connections\ndef mongo_login():\n\tmongo_uri=get_env('MONGO_URI')\n\tclient = MongoClient(mongo_uri)\n\treturn client['rescuebnb']\n\n# Home page with host form\[email protected]('/')\ndef show_home():\n\treturn render_template('index.html')\n\n# Post endpoint for committing host to db\[email protected]('/addhost', methods = ['GET', 'POST'])\ndef hosts():\n\tif request.method == 'POST':\n\t\tdb = mongo_login()\n\t\thosts_collection = db.hosts\n\t\thost = request.form.to_dict()\n\t\thosts_collection.insert_one(host) # should probably check for completed insert\n\t\treturn redirect('/')\n\treturn render_template('addhosts.html')\n\n# Post endpoint for committing people who need shelter to db\[email protected]('/requestshelter', methods = ['GET', 'POST'])\ndef guests():\n\tif request.method == 'POST':\n\t\tdb = mongo_login()\n\t\tguest_collection = db.guests\n\t\tguest = request.form.to_dict()\n\t\tguest_collection.insert_one(guest) # should probably check for completed insert\n\t\treturn redirect('/')\n\treturn render_template('request_shelter.html')\n\n# Get involved page\[email protected]('/getinvolved')\ndef get_involved():\n\treturn render_template('get_involved.html')\n\n# Get involved page\[email protected]('/volunteer')\ndef volunteer():\n\treturn render_template('volunteer.html')\n\n# \"Secured\" endpoint for viewing registered hosts\[email protected]('/hosts')\[email protected]_required\ndef viewhosts():\n\tdb = mongo_login()\n\thosts_collection = db.hosts\n\tguests_collection = db.guests\n\t\n\treturn render_template('viewhosts.html', hosts=list(hosts_collection.find()),\n\t\tguests=list(guests_collection.find()))\n\[email protected]('/ussd')\ndef ussd():\n\tdb = mongo_login()\n\tussd_collection = db.ussd\n\tussd = request.form.to_dict()\n\tussd_collection.insert_one(ussd)\n\treturn render_template('index.html')\n\nif __name__ == '__main__':\n\tapp.run()\n #app.run(debug=True)",
"step-ids": [
6,
7,
10,
13,
14
]
}
|
[
6,
7,
10,
13,
14
] |
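
The Flask record above can be exercised without a browser. A hedged sketch using Flask's built-in test client; the module name app and the presence of the env vars the module reads at import time are both assumptions:

from app import app   # module name assumed

def test_home_renders():
    client = app.test_client()
    response = client.get('/')
    assert response.status_code == 200   # index.html rendered without error
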
import requests
import json
class Parser:
init_url = r'https://www.joom.com/tokens/init'
    products_url = r'https://api.joom.com/1.1/search/products?language=ru-RU&currency=RUB'
def __init__(self, links_list):
self.links = links_list
self.product_info_dict = {}
access_token = json.loads(requests.post(self.init_url).text)['accessToken']
count = int(links_list[0])
for url in self.links[1:]:
id_link = url[33:]
headers = {'Authorization': 'Bearer ' + access_token}
data = {
'count': count,
'filters': [{
'id': 'categoryId',
'value': {
'type': 'categories',
'items': [
{'id': id_link}
]
}
}
]
}
res = requests.post(self.products_url, json=data, headers=headers)
for product in json.loads(res.text)['payload']['items']:
content = requests.get(product['mainImage']['images'][1]['url']).content
self.product_info_dict.update({
product['id']:
dict(
price=product['price'],
name=product['name'],
image=content,
description=self.get_description(product['id'], headers)
)
})
def get_description(self, id_str, headers):
        link = 'https://api.joom.com/1.1/products/' + id_str + '?language=ru-RU&currency=RUB'
res = requests.get(link, headers=headers)
return json.loads(res.text)['payload']['description']
|
normal
|
{
"blob_id": "00c6899b9d49cbbd0f1980eada77ad91562211a0",
"index": 4471,
"step-1": "<mask token>\n\n\nclass Parser:\n <mask token>\n <mask token>\n <mask token>\n\n def get_description(self, id_str, headers):\n link = ('https://api.joom.com/1.1/products/' + id_str +\n '?language=ru-RU¤cy=RUB')\n res = requests.get(link, headers=headers)\n return json.loads(res.text)['payload']['description']\n",
"step-2": "<mask token>\n\n\nclass Parser:\n <mask token>\n <mask token>\n\n def __init__(self, links_list):\n self.links = links_list\n self.product_info_dict = {}\n access_token = json.loads(requests.post(self.init_url).text)[\n 'accessToken']\n count = int(links_list[0])\n for url in self.links[1:]:\n id_link = url[33:]\n headers = {'Authorization': 'Bearer ' + access_token}\n data = {'count': count, 'filters': [{'id': 'categoryId',\n 'value': {'type': 'categories', 'items': [{'id': id_link}]}}]}\n res = requests.post(self.products_url, json=data, headers=headers)\n for product in json.loads(res.text)['payload']['items']:\n content = requests.get(product['mainImage']['images'][1]['url']\n ).content\n self.product_info_dict.update({product['id']: dict(price=\n product['price'], name=product['name'], image=content,\n description=self.get_description(product['id'], headers))})\n\n def get_description(self, id_str, headers):\n link = ('https://api.joom.com/1.1/products/' + id_str +\n '?language=ru-RU¤cy=RUB')\n res = requests.get(link, headers=headers)\n return json.loads(res.text)['payload']['description']\n",
"step-3": "<mask token>\n\n\nclass Parser:\n init_url = 'https://www.joom.com/tokens/init'\n products_url = (\n 'https://api.joom.com/1.1/search/products?language=ru-RU¤cy=RUB')\n\n def __init__(self, links_list):\n self.links = links_list\n self.product_info_dict = {}\n access_token = json.loads(requests.post(self.init_url).text)[\n 'accessToken']\n count = int(links_list[0])\n for url in self.links[1:]:\n id_link = url[33:]\n headers = {'Authorization': 'Bearer ' + access_token}\n data = {'count': count, 'filters': [{'id': 'categoryId',\n 'value': {'type': 'categories', 'items': [{'id': id_link}]}}]}\n res = requests.post(self.products_url, json=data, headers=headers)\n for product in json.loads(res.text)['payload']['items']:\n content = requests.get(product['mainImage']['images'][1]['url']\n ).content\n self.product_info_dict.update({product['id']: dict(price=\n product['price'], name=product['name'], image=content,\n description=self.get_description(product['id'], headers))})\n\n def get_description(self, id_str, headers):\n link = ('https://api.joom.com/1.1/products/' + id_str +\n '?language=ru-RU¤cy=RUB')\n res = requests.get(link, headers=headers)\n return json.loads(res.text)['payload']['description']\n",
"step-4": "import requests\nimport json\n\n\nclass Parser:\n init_url = 'https://www.joom.com/tokens/init'\n products_url = (\n 'https://api.joom.com/1.1/search/products?language=ru-RU¤cy=RUB')\n\n def __init__(self, links_list):\n self.links = links_list\n self.product_info_dict = {}\n access_token = json.loads(requests.post(self.init_url).text)[\n 'accessToken']\n count = int(links_list[0])\n for url in self.links[1:]:\n id_link = url[33:]\n headers = {'Authorization': 'Bearer ' + access_token}\n data = {'count': count, 'filters': [{'id': 'categoryId',\n 'value': {'type': 'categories', 'items': [{'id': id_link}]}}]}\n res = requests.post(self.products_url, json=data, headers=headers)\n for product in json.loads(res.text)['payload']['items']:\n content = requests.get(product['mainImage']['images'][1]['url']\n ).content\n self.product_info_dict.update({product['id']: dict(price=\n product['price'], name=product['name'], image=content,\n description=self.get_description(product['id'], headers))})\n\n def get_description(self, id_str, headers):\n link = ('https://api.joom.com/1.1/products/' + id_str +\n '?language=ru-RU¤cy=RUB')\n res = requests.get(link, headers=headers)\n return json.loads(res.text)['payload']['description']\n",
"step-5": "import requests\nimport json\n\n\nclass Parser:\n init_url = r'https://www.joom.com/tokens/init'\n products_url = r'https://api.joom.com/1.1/search/products?language=ru-RU¤cy=RUB'\n\n def __init__(self, links_list):\n self.links = links_list\n self.product_info_dict = {}\n access_token = json.loads(requests.post(self.init_url).text)['accessToken']\n count = int(links_list[0])\n for url in self.links[1:]:\n id_link = url[33:]\n\n headers = {'Authorization': 'Bearer ' + access_token}\n data = {\n 'count': count,\n 'filters': [{\n 'id': 'categoryId',\n 'value': {\n 'type': 'categories',\n 'items': [\n {'id': id_link}\n ]\n }\n }\n ]\n }\n\n res = requests.post(self.products_url, json=data, headers=headers)\n for product in json.loads(res.text)['payload']['items']:\n content = requests.get(product['mainImage']['images'][1]['url']).content\n self.product_info_dict.update({\n product['id']:\n dict(\n price=product['price'],\n name=product['name'],\n image=content,\n description=self.get_description(product['id'], headers)\n )\n })\n\n def get_description(self, id_str, headers):\n link = 'https://api.joom.com/1.1/products/' + id_str + '?language=ru-RU¤cy=RUB'\n res = requests.get(link, headers=headers)\n return json.loads(res.text)['payload']['description']\n",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
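
Hypothetical usage of the Parser class above. The category URL shape is invented for illustration (the class keeps only the characters after index 33 as the category id), the leading list element is the per-category product count, and constructing the object performs live HTTP requests:

links = ['5', 'https://www.joom.com/en/categories/<category-id>']  # URL shape assumed
parser = Parser(links)
for product_id, info in parser.product_info_dict.items():
    print(product_id, info['price'], info['name'])
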
__doc__
def fizz_buzz(num1, num2, end_range):
if not (
isinstance(num1, int) and isinstance(num2, int) and isinstance(end_range, int)
) or (num1 < 0 or num2 < 0 or end_range < 0):
return "Input should be a positive integer"
# I'm storing the result to test the returned value aka a list of outputs
result = []
for i in range(1, end_range):
output = i
if i % num1 == 0 and i % num2 == 0:
output = "FizzBuzz"
elif i % num1 == 0:
output = "Fizz"
elif i % num2 == 0:
output = "Buzz"
result.append(output)
print(output)
return result
def test_answer():
import sys
answer1 = None
answer2 = None
answer3 = None
try:
answer1 = fizz_buzz(3, 5, 16)
answer2 = fizz_buzz(2, 7, 20)
answer3 = fizz_buzz(100)
except:
print("An error occurred:", sys.exc_info()[1])
assert answer1 == [
1,
2,
"Fizz",
4,
"Buzz",
"Fizz",
7,
8,
"Fizz",
"Buzz",
11,
"Fizz",
13,
14,
"FizzBuzz",
]
assert answer2 == [
1,
"Fizz",
3,
"Fizz",
5,
"Fizz",
"Buzz",
"Fizz",
9,
"Fizz",
11,
"Fizz",
13,
"FizzBuzz",
15,
"Fizz",
17,
"Fizz",
19,
]
assert answer3 == None
|
normal
|
{
"blob_id": "d00873c3ee72b55cb5b74f78a98de61a25b3cc21",
"index": 7227,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef test_answer():\n import sys\n answer1 = None\n answer2 = None\n answer3 = None\n try:\n answer1 = fizz_buzz(3, 5, 16)\n answer2 = fizz_buzz(2, 7, 20)\n answer3 = fizz_buzz(100)\n except:\n print('An error occurred:', sys.exc_info()[1])\n assert answer1 == [1, 2, 'Fizz', 4, 'Buzz', 'Fizz', 7, 8, 'Fizz',\n 'Buzz', 11, 'Fizz', 13, 14, 'FizzBuzz']\n assert answer2 == [1, 'Fizz', 3, 'Fizz', 5, 'Fizz', 'Buzz', 'Fizz', 9,\n 'Fizz', 11, 'Fizz', 13, 'FizzBuzz', 15, 'Fizz', 17, 'Fizz', 19]\n assert answer3 == None\n",
"step-3": "<mask token>\n\n\ndef fizz_buzz(num1, num2, end_range):\n if not (isinstance(num1, int) and isinstance(num2, int) and isinstance(\n end_range, int)) or (num1 < 0 or num2 < 0 or end_range < 0):\n return 'Input should be a positive integer'\n result = []\n for i in range(1, end_range):\n output = i\n if i % num1 == 0 and i % num2 == 0:\n output = 'FizzBuzz'\n elif i % num1 == 0:\n output = 'Fizz'\n elif i % num2 == 0:\n output = 'Buzz'\n result.append(output)\n print(output)\n return result\n\n\ndef test_answer():\n import sys\n answer1 = None\n answer2 = None\n answer3 = None\n try:\n answer1 = fizz_buzz(3, 5, 16)\n answer2 = fizz_buzz(2, 7, 20)\n answer3 = fizz_buzz(100)\n except:\n print('An error occurred:', sys.exc_info()[1])\n assert answer1 == [1, 2, 'Fizz', 4, 'Buzz', 'Fizz', 7, 8, 'Fizz',\n 'Buzz', 11, 'Fizz', 13, 14, 'FizzBuzz']\n assert answer2 == [1, 'Fizz', 3, 'Fizz', 5, 'Fizz', 'Buzz', 'Fizz', 9,\n 'Fizz', 11, 'Fizz', 13, 'FizzBuzz', 15, 'Fizz', 17, 'Fizz', 19]\n assert answer3 == None\n",
"step-4": "__doc__\n\n\ndef fizz_buzz(num1, num2, end_range):\n if not (isinstance(num1, int) and isinstance(num2, int) and isinstance(\n end_range, int)) or (num1 < 0 or num2 < 0 or end_range < 0):\n return 'Input should be a positive integer'\n result = []\n for i in range(1, end_range):\n output = i\n if i % num1 == 0 and i % num2 == 0:\n output = 'FizzBuzz'\n elif i % num1 == 0:\n output = 'Fizz'\n elif i % num2 == 0:\n output = 'Buzz'\n result.append(output)\n print(output)\n return result\n\n\ndef test_answer():\n import sys\n answer1 = None\n answer2 = None\n answer3 = None\n try:\n answer1 = fizz_buzz(3, 5, 16)\n answer2 = fizz_buzz(2, 7, 20)\n answer3 = fizz_buzz(100)\n except:\n print('An error occurred:', sys.exc_info()[1])\n assert answer1 == [1, 2, 'Fizz', 4, 'Buzz', 'Fizz', 7, 8, 'Fizz',\n 'Buzz', 11, 'Fizz', 13, 14, 'FizzBuzz']\n assert answer2 == [1, 'Fizz', 3, 'Fizz', 5, 'Fizz', 'Buzz', 'Fizz', 9,\n 'Fizz', 11, 'Fizz', 13, 'FizzBuzz', 15, 'Fizz', 17, 'Fizz', 19]\n assert answer3 == None\n",
"step-5": "__doc__\n\n\ndef fizz_buzz(num1, num2, end_range):\n if not (\n isinstance(num1, int) and isinstance(num2, int) and isinstance(end_range, int)\n ) or (num1 < 0 or num2 < 0 or end_range < 0):\n return \"Input should be a positive integer\"\n\n # I'm storing the result to test the returned value aka a list of outputs\n result = []\n\n for i in range(1, end_range):\n output = i\n if i % num1 == 0 and i % num2 == 0:\n output = \"FizzBuzz\"\n elif i % num1 == 0:\n output = \"Fizz\"\n elif i % num2 == 0:\n output = \"Buzz\"\n result.append(output)\n print(output)\n\n return result\n\n\ndef test_answer():\n import sys\n\n answer1 = None\n answer2 = None\n answer3 = None\n try:\n answer1 = fizz_buzz(3, 5, 16)\n answer2 = fizz_buzz(2, 7, 20)\n answer3 = fizz_buzz(100)\n except:\n print(\"An error occurred:\", sys.exc_info()[1])\n\n assert answer1 == [\n 1,\n 2,\n \"Fizz\",\n 4,\n \"Buzz\",\n \"Fizz\",\n 7,\n 8,\n \"Fizz\",\n \"Buzz\",\n 11,\n \"Fizz\",\n 13,\n 14,\n \"FizzBuzz\",\n ]\n assert answer2 == [\n 1,\n \"Fizz\",\n 3,\n \"Fizz\",\n 5,\n \"Fizz\",\n \"Buzz\",\n \"Fizz\",\n 9,\n \"Fizz\",\n 11,\n \"Fizz\",\n 13,\n \"FizzBuzz\",\n 15,\n \"Fizz\",\n 17,\n \"Fizz\",\n 19,\n ]\n assert answer3 == None\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
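
Because test_answer in the record above follows pytest naming conventions, the file can be collected and run by pytest directly; it is also a plain function, so a minimal runner sketch (file name assumed) is:

# pytest fizz_buzz_test.py -q
if __name__ == '__main__':
    test_answer()
    print('all assertions passed')
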
from django.apps import AppConfig
class FosAppConfig(AppConfig):
name = 'fos_app'
|
normal
|
{
"blob_id": "d83f2d9bb25a46bc7344b420ce65bf729165e6b9",
"index": 278,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass FosAppConfig(AppConfig):\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass FosAppConfig(AppConfig):\n name = 'fos_app'\n",
"step-4": "from django.apps import AppConfig\n\n\nclass FosAppConfig(AppConfig):\n name = 'fos_app'\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
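
An AppConfig subclass like FosAppConfig above only takes effect once the app is registered in the project settings. A minimal settings.py fragment (the dotted path assumes the class lives in fos_app/apps.py, the Django default location):

INSTALLED_APPS = [
    'django.contrib.contenttypes',
    'fos_app.apps.FosAppConfig',   # explicit config class; plain 'fos_app' also resolves
]
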
import unittest
from textwrap import dedent
from simplesat import InstallRequirement, Repository
from simplesat.test_utils import packages_from_definition
from ..compute_dependencies import (compute_dependencies,
compute_leaf_packages,
compute_reverse_dependencies)
PACKAGE_DEF_0 = dedent("""\
A 0.0.0-1; depends (B ^= 0.0.0)
B 0.0.0-1; depends (D == 0.0.0-2)
B 0.0.0-2; depends (D ^= 0.0.0)
C 0.0.0-1; depends (E >= 1.0.0)
""")
PACKAGE_DEF_1 = dedent("""\
D 0.0.0-2
E 0.0.0-1
E 1.0.0-1
E 1.0.1-1
""")
PACKAGE_DEF_2 = dedent("""\
B 0.0.0-1; depends (D == 0.0.0-2)
C 0.0.0-1; depends (E >= 1.0.0)
""")
class TestComputeDependencies(unittest.TestCase):
def setUp(self):
repo_0 = Repository(packages_from_definition(PACKAGE_DEF_0))
repo_1 = Repository(packages_from_definition(PACKAGE_DEF_1))
self.repos = [repo_0, repo_1]
def test_no_dependency(self):
requirement = InstallRequirement._from_string('D == 0.0.0-2')
expected_deps = set()
deps = compute_dependencies(self.repos, requirement)
self.assertEqual(deps, expected_deps)
def test_simple_dependency(self):
requirement = InstallRequirement._from_string('C *')
expected_deps = packages_from_definition(
"""E 1.0.0-1
E 1.0.1-1""")
deps = compute_dependencies(self.repos, requirement)
self.assertEqual(deps, set(expected_deps))
def test_chained_requirements(self):
requirement = InstallRequirement._from_string('A ^= 0.0.0')
expected_deps = packages_from_definition(
"""B 0.0.0-1; depends (D == 0.0.0-2)
B 0.0.0-2; depends (D ^= 0.0.0) """
)
deps = compute_dependencies(self.repos, requirement)
self.assertEqual(deps, set(expected_deps))
def test_chained_requirements_transitive(self):
requirement = InstallRequirement._from_string('A ^= 0.0.0')
expected_deps = packages_from_definition(
"""B 0.0.0-1; depends (D == 0.0.0-2)
B 0.0.0-2; depends (D ^= 0.0.0)
D 0.0.0-2 """
)
deps = compute_dependencies(self.repos, requirement, transitive=True)
self.assertEqual(deps, set(expected_deps))
class TestComputeReverseDependencies(unittest.TestCase):
def setUp(self):
repo_0 = Repository(packages_from_definition(PACKAGE_DEF_0))
repo_1 = Repository(packages_from_definition(PACKAGE_DEF_1))
self.repos = [repo_0, repo_1]
def test_no_dependency(self):
requirement = InstallRequirement._from_string('A ^= 0.0.0')
deps = compute_reverse_dependencies(self.repos, requirement)
self.assertEqual(deps, set())
def test_simple_dependency(self):
requirement = InstallRequirement._from_string('E *')
expected_deps = packages_from_definition(
'C 0.0.0-1; depends (E >= 1.0.0)'
)
deps = compute_reverse_dependencies(self.repos, requirement)
self.assertEqual(deps, set(expected_deps))
def test_chained_dependencies(self):
requirement = InstallRequirement._from_string('D ^= 0.0.0')
expected_deps = packages_from_definition(
"""B 0.0.0-1; depends (D == 0.0.0-2)
B 0.0.0-2; depends (D ^= 0.0.0)"""
)
deps = compute_reverse_dependencies(self.repos, requirement)
self.assertEqual(deps, set(expected_deps))
def test_chained_dependencies_transitive(self):
requirement = InstallRequirement._from_string('D ^= 0.0.0')
expected_deps = packages_from_definition(
"""A 0.0.0-1; depends (B ^= 0.0.0)
B 0.0.0-1; depends (D == 0.0.0-2)
B 0.0.0-2; depends (D ^= 0.0.0)"""
)
deps = compute_reverse_dependencies(self.repos, requirement,
transitive=True)
self.assertEqual(deps, set(expected_deps))
class TestComputeLeafPackages(unittest.TestCase):
def setUp(self):
repo_0 = Repository(packages_from_definition(PACKAGE_DEF_0))
repo_1 = Repository(packages_from_definition(PACKAGE_DEF_1))
repo_2 = Repository(packages_from_definition(PACKAGE_DEF_2))
self.repos = [repo_0, repo_1, repo_2]
def test_simple(self):
expected_leaf_packages = packages_from_definition(
"""A 0.0.0-1; depends (B ^= 0.0.0)
C 0.0.0-1; depends (E >= 1.0.0)
E 0.0.0-1 """
)
leaf_packages = compute_leaf_packages(self.repos)
self.assertEqual(leaf_packages, set(expected_leaf_packages))
|
normal
|
{
"blob_id": "fcf19c49bb161305eaa5ba8bc26e276a8e8db8ea",
"index": 3925,
"step-1": "<mask token>\n\n\nclass TestComputeReverseDependencies(unittest.TestCase):\n\n def setUp(self):\n repo_0 = Repository(packages_from_definition(PACKAGE_DEF_0))\n repo_1 = Repository(packages_from_definition(PACKAGE_DEF_1))\n self.repos = [repo_0, repo_1]\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n\nclass TestComputeLeafPackages(unittest.TestCase):\n\n def setUp(self):\n repo_0 = Repository(packages_from_definition(PACKAGE_DEF_0))\n repo_1 = Repository(packages_from_definition(PACKAGE_DEF_1))\n repo_2 = Repository(packages_from_definition(PACKAGE_DEF_2))\n self.repos = [repo_0, repo_1, repo_2]\n\n def test_simple(self):\n expected_leaf_packages = packages_from_definition(\n \"\"\"A 0.0.0-1; depends (B ^= 0.0.0)\n C 0.0.0-1; depends (E >= 1.0.0)\n E 0.0.0-1 \"\"\"\n )\n leaf_packages = compute_leaf_packages(self.repos)\n self.assertEqual(leaf_packages, set(expected_leaf_packages))\n",
"step-2": "<mask token>\n\n\nclass TestComputeDependencies(unittest.TestCase):\n <mask token>\n <mask token>\n <mask token>\n\n def test_chained_requirements(self):\n requirement = InstallRequirement._from_string('A ^= 0.0.0')\n expected_deps = packages_from_definition(\n \"\"\"B 0.0.0-1; depends (D == 0.0.0-2)\n B 0.0.0-2; depends (D ^= 0.0.0) \"\"\"\n )\n deps = compute_dependencies(self.repos, requirement)\n self.assertEqual(deps, set(expected_deps))\n\n def test_chained_requirements_transitive(self):\n requirement = InstallRequirement._from_string('A ^= 0.0.0')\n expected_deps = packages_from_definition(\n \"\"\"B 0.0.0-1; depends (D == 0.0.0-2)\n B 0.0.0-2; depends (D ^= 0.0.0)\n D 0.0.0-2 \"\"\"\n )\n deps = compute_dependencies(self.repos, requirement, transitive=True)\n self.assertEqual(deps, set(expected_deps))\n\n\nclass TestComputeReverseDependencies(unittest.TestCase):\n\n def setUp(self):\n repo_0 = Repository(packages_from_definition(PACKAGE_DEF_0))\n repo_1 = Repository(packages_from_definition(PACKAGE_DEF_1))\n self.repos = [repo_0, repo_1]\n\n def test_no_dependency(self):\n requirement = InstallRequirement._from_string('A ^= 0.0.0')\n deps = compute_reverse_dependencies(self.repos, requirement)\n self.assertEqual(deps, set())\n\n def test_simple_dependency(self):\n requirement = InstallRequirement._from_string('E *')\n expected_deps = packages_from_definition(\n 'C 0.0.0-1; depends (E >= 1.0.0)')\n deps = compute_reverse_dependencies(self.repos, requirement)\n self.assertEqual(deps, set(expected_deps))\n\n def test_chained_dependencies(self):\n requirement = InstallRequirement._from_string('D ^= 0.0.0')\n expected_deps = packages_from_definition(\n \"\"\"B 0.0.0-1; depends (D == 0.0.0-2)\n B 0.0.0-2; depends (D ^= 0.0.0)\"\"\"\n )\n deps = compute_reverse_dependencies(self.repos, requirement)\n self.assertEqual(deps, set(expected_deps))\n\n def test_chained_dependencies_transitive(self):\n requirement = InstallRequirement._from_string('D ^= 0.0.0')\n expected_deps = packages_from_definition(\n \"\"\"A 0.0.0-1; depends (B ^= 0.0.0)\n B 0.0.0-1; depends (D == 0.0.0-2)\n B 0.0.0-2; depends (D ^= 0.0.0)\"\"\"\n )\n deps = compute_reverse_dependencies(self.repos, requirement,\n transitive=True)\n self.assertEqual(deps, set(expected_deps))\n\n\nclass TestComputeLeafPackages(unittest.TestCase):\n\n def setUp(self):\n repo_0 = Repository(packages_from_definition(PACKAGE_DEF_0))\n repo_1 = Repository(packages_from_definition(PACKAGE_DEF_1))\n repo_2 = Repository(packages_from_definition(PACKAGE_DEF_2))\n self.repos = [repo_0, repo_1, repo_2]\n\n def test_simple(self):\n expected_leaf_packages = packages_from_definition(\n \"\"\"A 0.0.0-1; depends (B ^= 0.0.0)\n C 0.0.0-1; depends (E >= 1.0.0)\n E 0.0.0-1 \"\"\"\n )\n leaf_packages = compute_leaf_packages(self.repos)\n self.assertEqual(leaf_packages, set(expected_leaf_packages))\n",
"step-3": "<mask token>\n\n\nclass TestComputeDependencies(unittest.TestCase):\n <mask token>\n\n def test_no_dependency(self):\n requirement = InstallRequirement._from_string('D == 0.0.0-2')\n expected_deps = set()\n deps = compute_dependencies(self.repos, requirement)\n self.assertEqual(deps, expected_deps)\n <mask token>\n\n def test_chained_requirements(self):\n requirement = InstallRequirement._from_string('A ^= 0.0.0')\n expected_deps = packages_from_definition(\n \"\"\"B 0.0.0-1; depends (D == 0.0.0-2)\n B 0.0.0-2; depends (D ^= 0.0.0) \"\"\"\n )\n deps = compute_dependencies(self.repos, requirement)\n self.assertEqual(deps, set(expected_deps))\n\n def test_chained_requirements_transitive(self):\n requirement = InstallRequirement._from_string('A ^= 0.0.0')\n expected_deps = packages_from_definition(\n \"\"\"B 0.0.0-1; depends (D == 0.0.0-2)\n B 0.0.0-2; depends (D ^= 0.0.0)\n D 0.0.0-2 \"\"\"\n )\n deps = compute_dependencies(self.repos, requirement, transitive=True)\n self.assertEqual(deps, set(expected_deps))\n\n\nclass TestComputeReverseDependencies(unittest.TestCase):\n\n def setUp(self):\n repo_0 = Repository(packages_from_definition(PACKAGE_DEF_0))\n repo_1 = Repository(packages_from_definition(PACKAGE_DEF_1))\n self.repos = [repo_0, repo_1]\n\n def test_no_dependency(self):\n requirement = InstallRequirement._from_string('A ^= 0.0.0')\n deps = compute_reverse_dependencies(self.repos, requirement)\n self.assertEqual(deps, set())\n\n def test_simple_dependency(self):\n requirement = InstallRequirement._from_string('E *')\n expected_deps = packages_from_definition(\n 'C 0.0.0-1; depends (E >= 1.0.0)')\n deps = compute_reverse_dependencies(self.repos, requirement)\n self.assertEqual(deps, set(expected_deps))\n\n def test_chained_dependencies(self):\n requirement = InstallRequirement._from_string('D ^= 0.0.0')\n expected_deps = packages_from_definition(\n \"\"\"B 0.0.0-1; depends (D == 0.0.0-2)\n B 0.0.0-2; depends (D ^= 0.0.0)\"\"\"\n )\n deps = compute_reverse_dependencies(self.repos, requirement)\n self.assertEqual(deps, set(expected_deps))\n\n def test_chained_dependencies_transitive(self):\n requirement = InstallRequirement._from_string('D ^= 0.0.0')\n expected_deps = packages_from_definition(\n \"\"\"A 0.0.0-1; depends (B ^= 0.0.0)\n B 0.0.0-1; depends (D == 0.0.0-2)\n B 0.0.0-2; depends (D ^= 0.0.0)\"\"\"\n )\n deps = compute_reverse_dependencies(self.repos, requirement,\n transitive=True)\n self.assertEqual(deps, set(expected_deps))\n\n\nclass TestComputeLeafPackages(unittest.TestCase):\n\n def setUp(self):\n repo_0 = Repository(packages_from_definition(PACKAGE_DEF_0))\n repo_1 = Repository(packages_from_definition(PACKAGE_DEF_1))\n repo_2 = Repository(packages_from_definition(PACKAGE_DEF_2))\n self.repos = [repo_0, repo_1, repo_2]\n\n def test_simple(self):\n expected_leaf_packages = packages_from_definition(\n \"\"\"A 0.0.0-1; depends (B ^= 0.0.0)\n C 0.0.0-1; depends (E >= 1.0.0)\n E 0.0.0-1 \"\"\"\n )\n leaf_packages = compute_leaf_packages(self.repos)\n self.assertEqual(leaf_packages, set(expected_leaf_packages))\n",
"step-4": "<mask token>\n\n\nclass TestComputeDependencies(unittest.TestCase):\n <mask token>\n\n def test_no_dependency(self):\n requirement = InstallRequirement._from_string('D == 0.0.0-2')\n expected_deps = set()\n deps = compute_dependencies(self.repos, requirement)\n self.assertEqual(deps, expected_deps)\n\n def test_simple_dependency(self):\n requirement = InstallRequirement._from_string('C *')\n expected_deps = packages_from_definition(\n \"\"\"E 1.0.0-1\n E 1.0.1-1\"\"\")\n deps = compute_dependencies(self.repos, requirement)\n self.assertEqual(deps, set(expected_deps))\n\n def test_chained_requirements(self):\n requirement = InstallRequirement._from_string('A ^= 0.0.0')\n expected_deps = packages_from_definition(\n \"\"\"B 0.0.0-1; depends (D == 0.0.0-2)\n B 0.0.0-2; depends (D ^= 0.0.0) \"\"\"\n )\n deps = compute_dependencies(self.repos, requirement)\n self.assertEqual(deps, set(expected_deps))\n\n def test_chained_requirements_transitive(self):\n requirement = InstallRequirement._from_string('A ^= 0.0.0')\n expected_deps = packages_from_definition(\n \"\"\"B 0.0.0-1; depends (D == 0.0.0-2)\n B 0.0.0-2; depends (D ^= 0.0.0)\n D 0.0.0-2 \"\"\"\n )\n deps = compute_dependencies(self.repos, requirement, transitive=True)\n self.assertEqual(deps, set(expected_deps))\n\n\nclass TestComputeReverseDependencies(unittest.TestCase):\n\n def setUp(self):\n repo_0 = Repository(packages_from_definition(PACKAGE_DEF_0))\n repo_1 = Repository(packages_from_definition(PACKAGE_DEF_1))\n self.repos = [repo_0, repo_1]\n\n def test_no_dependency(self):\n requirement = InstallRequirement._from_string('A ^= 0.0.0')\n deps = compute_reverse_dependencies(self.repos, requirement)\n self.assertEqual(deps, set())\n\n def test_simple_dependency(self):\n requirement = InstallRequirement._from_string('E *')\n expected_deps = packages_from_definition(\n 'C 0.0.0-1; depends (E >= 1.0.0)')\n deps = compute_reverse_dependencies(self.repos, requirement)\n self.assertEqual(deps, set(expected_deps))\n\n def test_chained_dependencies(self):\n requirement = InstallRequirement._from_string('D ^= 0.0.0')\n expected_deps = packages_from_definition(\n \"\"\"B 0.0.0-1; depends (D == 0.0.0-2)\n B 0.0.0-2; depends (D ^= 0.0.0)\"\"\"\n )\n deps = compute_reverse_dependencies(self.repos, requirement)\n self.assertEqual(deps, set(expected_deps))\n\n def test_chained_dependencies_transitive(self):\n requirement = InstallRequirement._from_string('D ^= 0.0.0')\n expected_deps = packages_from_definition(\n \"\"\"A 0.0.0-1; depends (B ^= 0.0.0)\n B 0.0.0-1; depends (D == 0.0.0-2)\n B 0.0.0-2; depends (D ^= 0.0.0)\"\"\"\n )\n deps = compute_reverse_dependencies(self.repos, requirement,\n transitive=True)\n self.assertEqual(deps, set(expected_deps))\n\n\nclass TestComputeLeafPackages(unittest.TestCase):\n\n def setUp(self):\n repo_0 = Repository(packages_from_definition(PACKAGE_DEF_0))\n repo_1 = Repository(packages_from_definition(PACKAGE_DEF_1))\n repo_2 = Repository(packages_from_definition(PACKAGE_DEF_2))\n self.repos = [repo_0, repo_1, repo_2]\n\n def test_simple(self):\n expected_leaf_packages = packages_from_definition(\n \"\"\"A 0.0.0-1; depends (B ^= 0.0.0)\n C 0.0.0-1; depends (E >= 1.0.0)\n E 0.0.0-1 \"\"\"\n )\n leaf_packages = compute_leaf_packages(self.repos)\n self.assertEqual(leaf_packages, set(expected_leaf_packages))\n",
"step-5": "import unittest\nfrom textwrap import dedent\n\nfrom simplesat import InstallRequirement, Repository\nfrom simplesat.test_utils import packages_from_definition\n\nfrom ..compute_dependencies import (compute_dependencies,\n compute_leaf_packages,\n compute_reverse_dependencies)\n\n\nPACKAGE_DEF_0 = dedent(\"\"\"\\\n A 0.0.0-1; depends (B ^= 0.0.0)\n B 0.0.0-1; depends (D == 0.0.0-2)\n B 0.0.0-2; depends (D ^= 0.0.0)\n C 0.0.0-1; depends (E >= 1.0.0)\n\"\"\")\n\n\nPACKAGE_DEF_1 = dedent(\"\"\"\\\n D 0.0.0-2\n E 0.0.0-1\n E 1.0.0-1\n E 1.0.1-1\n\"\"\")\n\nPACKAGE_DEF_2 = dedent(\"\"\"\\\n B 0.0.0-1; depends (D == 0.0.0-2)\n C 0.0.0-1; depends (E >= 1.0.0)\n\"\"\")\n\n\nclass TestComputeDependencies(unittest.TestCase):\n\n def setUp(self):\n repo_0 = Repository(packages_from_definition(PACKAGE_DEF_0))\n repo_1 = Repository(packages_from_definition(PACKAGE_DEF_1))\n self.repos = [repo_0, repo_1]\n\n def test_no_dependency(self):\n requirement = InstallRequirement._from_string('D == 0.0.0-2')\n expected_deps = set()\n deps = compute_dependencies(self.repos, requirement)\n self.assertEqual(deps, expected_deps)\n\n def test_simple_dependency(self):\n requirement = InstallRequirement._from_string('C *')\n expected_deps = packages_from_definition(\n \"\"\"E 1.0.0-1\n E 1.0.1-1\"\"\")\n\n deps = compute_dependencies(self.repos, requirement)\n self.assertEqual(deps, set(expected_deps))\n\n def test_chained_requirements(self):\n requirement = InstallRequirement._from_string('A ^= 0.0.0')\n expected_deps = packages_from_definition(\n \"\"\"B 0.0.0-1; depends (D == 0.0.0-2)\n B 0.0.0-2; depends (D ^= 0.0.0) \"\"\"\n )\n\n deps = compute_dependencies(self.repos, requirement)\n self.assertEqual(deps, set(expected_deps))\n\n def test_chained_requirements_transitive(self):\n requirement = InstallRequirement._from_string('A ^= 0.0.0')\n expected_deps = packages_from_definition(\n \"\"\"B 0.0.0-1; depends (D == 0.0.0-2)\n B 0.0.0-2; depends (D ^= 0.0.0)\n D 0.0.0-2 \"\"\"\n )\n\n deps = compute_dependencies(self.repos, requirement, transitive=True)\n self.assertEqual(deps, set(expected_deps))\n\n\nclass TestComputeReverseDependencies(unittest.TestCase):\n\n def setUp(self):\n repo_0 = Repository(packages_from_definition(PACKAGE_DEF_0))\n repo_1 = Repository(packages_from_definition(PACKAGE_DEF_1))\n self.repos = [repo_0, repo_1]\n\n def test_no_dependency(self):\n requirement = InstallRequirement._from_string('A ^= 0.0.0')\n\n deps = compute_reverse_dependencies(self.repos, requirement)\n self.assertEqual(deps, set())\n\n def test_simple_dependency(self):\n requirement = InstallRequirement._from_string('E *')\n expected_deps = packages_from_definition(\n 'C 0.0.0-1; depends (E >= 1.0.0)'\n )\n\n deps = compute_reverse_dependencies(self.repos, requirement)\n self.assertEqual(deps, set(expected_deps))\n\n def test_chained_dependencies(self):\n requirement = InstallRequirement._from_string('D ^= 0.0.0')\n expected_deps = packages_from_definition(\n \"\"\"B 0.0.0-1; depends (D == 0.0.0-2)\n B 0.0.0-2; depends (D ^= 0.0.0)\"\"\"\n )\n deps = compute_reverse_dependencies(self.repos, requirement)\n self.assertEqual(deps, set(expected_deps))\n\n def test_chained_dependencies_transitive(self):\n requirement = InstallRequirement._from_string('D ^= 0.0.0')\n expected_deps = packages_from_definition(\n \"\"\"A 0.0.0-1; depends (B ^= 0.0.0)\n B 0.0.0-1; depends (D == 0.0.0-2)\n B 0.0.0-2; depends (D ^= 0.0.0)\"\"\"\n )\n deps = compute_reverse_dependencies(self.repos, requirement,\n transitive=True)\n 
self.assertEqual(deps, set(expected_deps))\n\n\nclass TestComputeLeafPackages(unittest.TestCase):\n\n def setUp(self):\n repo_0 = Repository(packages_from_definition(PACKAGE_DEF_0))\n repo_1 = Repository(packages_from_definition(PACKAGE_DEF_1))\n repo_2 = Repository(packages_from_definition(PACKAGE_DEF_2))\n self.repos = [repo_0, repo_1, repo_2]\n\n def test_simple(self):\n expected_leaf_packages = packages_from_definition(\n \"\"\"A 0.0.0-1; depends (B ^= 0.0.0)\n C 0.0.0-1; depends (E >= 1.0.0)\n E 0.0.0-1 \"\"\"\n )\n leaf_packages = compute_leaf_packages(self.repos)\n\n self.assertEqual(leaf_packages, set(expected_leaf_packages))\n",
"step-ids": [
5,
12,
13,
14,
18
]
}
|
[
5,
12,
13,
14,
18
] |
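
The test classes above are plain unittest.TestCase subclasses, so the standard discovery entry point runs them; a usage sketch (package layout assumed):

# python -m unittest discover -v
if __name__ == '__main__':
    unittest.main()
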
import pandas as pd
df = pd.DataFrame({'col1':[1,2,3,4],'col2':[444,555,666,444],'col3':['abc','def','ghi','xyz']})
print(df.head())
#print(df['col2'].unique())
#print(df['col1'] > 2)
newdf = df[(df['col1']>0) & (df['col2'] == 444)]
print("========================")
print(newdf)
def times2(x):
return x*2
print("========================")
print(df['col1'].apply(times2))
print("========================")
print(df.sort_values(by='col2'))
print("========================")
print(df)
|
normal
|
{
"blob_id": "422a4945ebf453d3e09e9e7e76dd32b30488680e",
"index": 3011,
"step-1": "<mask token>\n\n\ndef times2(x):\n return x * 2\n\n\n<mask token>\n",
"step-2": "<mask token>\nprint(df.head())\n<mask token>\nprint('========================')\nprint(newdf)\n\n\ndef times2(x):\n return x * 2\n\n\nprint('========================')\nprint(df['col1'].apply(times2))\nprint('========================')\nprint(df.sort_values(by='col2'))\nprint('========================')\nprint(df)\n",
"step-3": "<mask token>\ndf = pd.DataFrame({'col1': [1, 2, 3, 4], 'col2': [444, 555, 666, 444],\n 'col3': ['abc', 'def', 'ghi', 'xyz']})\nprint(df.head())\nnewdf = df[(df['col1'] > 0) & (df['col2'] == 444)]\nprint('========================')\nprint(newdf)\n\n\ndef times2(x):\n return x * 2\n\n\nprint('========================')\nprint(df['col1'].apply(times2))\nprint('========================')\nprint(df.sort_values(by='col2'))\nprint('========================')\nprint(df)\n",
"step-4": "import pandas as pd\ndf = pd.DataFrame({'col1': [1, 2, 3, 4], 'col2': [444, 555, 666, 444],\n 'col3': ['abc', 'def', 'ghi', 'xyz']})\nprint(df.head())\nnewdf = df[(df['col1'] > 0) & (df['col2'] == 444)]\nprint('========================')\nprint(newdf)\n\n\ndef times2(x):\n return x * 2\n\n\nprint('========================')\nprint(df['col1'].apply(times2))\nprint('========================')\nprint(df.sort_values(by='col2'))\nprint('========================')\nprint(df)\n",
"step-5": "import pandas as pd\ndf = pd.DataFrame({'col1':[1,2,3,4],'col2':[444,555,666,444],'col3':['abc','def','ghi','xyz']})\nprint(df.head())\n#print(df['col2'].unique())\n#print(df['col1'] > 2)\nnewdf = df[(df['col1']>0) & (df['col2'] == 444)]\nprint(\"========================\")\nprint(newdf)\n\ndef times2(x):\n return x*2\n\nprint(\"========================\")\nprint(df['col1'].apply(times2))\n\nprint(\"========================\")\nprint(df.sort_values(by='col2'))\nprint(\"========================\")\nprint(df)",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
import sys
def solution(values, extra_values=None):
    # NOTE: the original took a single list but was called with two; the second
    # list is accepted (and ignored) here so the script runs -- an assumption.
    # Greedy rank count: k - 1 is the largest m such that m of the values can be
    # assigned distinct ranks 1..m with each value >= its rank.
    k = 1
    for v in sorted(values):
        if v >= k:
            k += 1
    return k - 1
testcase = sys.stdin.readline()
for i in range(int(testcase)):
sys.stdin.readline()
line1 = sys.stdin.readline().rstrip('\n')
line2 = sys.stdin.readline().rstrip('\n')
ans = solution(
[ int(x) for x in line1.split(' ') ],
[ int(x) for x in line2.split(' ') ],
)
print("Case #{}: {}".format(i+1, ans))
|
normal
|
{
"blob_id": "a89724be31b4ccc1a3d83305509d9624da364a0c",
"index": 6004,
"step-1": "<mask token>\n\n\ndef solution(input):\n k = 1\n for v in sorted(input):\n if v >= k:\n k += 1\n return k - 1\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef solution(input):\n k = 1\n for v in sorted(input):\n if v >= k:\n k += 1\n return k - 1\n\n\n<mask token>\nfor i in range(int(testcase)):\n sys.stdin.readline()\n line1 = sys.stdin.readline().rstrip('\\n')\n line2 = sys.stdin.readline().rstrip('\\n')\n ans = solution([int(x) for x in line1.split(' ')], [int(x) for x in\n line2.split(' ')])\n print('Case #{}: {}'.format(i + 1, ans))\n",
"step-3": "<mask token>\n\n\ndef solution(input):\n k = 1\n for v in sorted(input):\n if v >= k:\n k += 1\n return k - 1\n\n\ntestcase = sys.stdin.readline()\nfor i in range(int(testcase)):\n sys.stdin.readline()\n line1 = sys.stdin.readline().rstrip('\\n')\n line2 = sys.stdin.readline().rstrip('\\n')\n ans = solution([int(x) for x in line1.split(' ')], [int(x) for x in\n line2.split(' ')])\n print('Case #{}: {}'.format(i + 1, ans))\n",
"step-4": "import sys\n\n\ndef solution(input):\n k = 1\n for v in sorted(input):\n if v >= k:\n k += 1\n return k - 1\n\n\ntestcase = sys.stdin.readline()\nfor i in range(int(testcase)):\n sys.stdin.readline()\n line1 = sys.stdin.readline().rstrip('\\n')\n line2 = sys.stdin.readline().rstrip('\\n')\n ans = solution([int(x) for x in line1.split(' ')], [int(x) for x in\n line2.split(' ')])\n print('Case #{}: {}'.format(i + 1, ans))\n",
"step-5": "import sys\n\ndef solution(input):\n k = 1\n for v in sorted(input):\n if v >= k:\n k += 1\n return k - 1\n\ntestcase = sys.stdin.readline()\nfor i in range(int(testcase)):\n sys.stdin.readline()\n line1 = sys.stdin.readline().rstrip('\\n')\n line2 = sys.stdin.readline().rstrip('\\n')\n ans = solution(\n [ int(x) for x in line1.split(' ') ],\n [ int(x) for x in line2.split(' ') ],\n )\n print(\"Case #{}: {}\".format(i+1, ans))\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
def main():
    """ main entry point for module execution """
argument_spec = dict(src=dict(type='path'), replace_src=dict(), lines=dict(aliases=['commands'], type='list'), parents=dict(type='list'), before=dict(type='list'), after=dict(type='list'), match=dict(default='line', choices=['line', 'strict', 'exact', 'none']), replace=dict(default='line', choices=['line', 'block', 'config']), running_config=dict(aliases=['config']), intended_config=dict(), defaults=dict(type='bool', default=False), backup=dict(type='bool', default=False), save_when=dict(choices=['always', 'never', 'modified'], default='never'), diff_against=dict(choices=['running', 'startup', 'intended']), diff_ignore_lines=dict(type='list'), save=dict(default=False, type='bool', removed_in_version='2.4'), force=dict(default=False, type='bool', removed_in_version='2.2'))
argument_spec.update(nxos_argument_spec)
mutually_exclusive = [('lines', 'src', 'replace_src'), ('parents', 'src'), ('save', 'save_when')]
required_if = [('match', 'strict', ['lines']), ('match', 'exact', ['lines']), ('replace', 'block', ['lines']), ('replace', 'config', ['replace_src']), ('diff_against', 'intended', ['intended_config'])]
module = AnsibleModule(argument_spec=argument_spec, mutually_exclusive=mutually_exclusive, required_if=required_if, supports_check_mode=True)
warnings = list()
nxos_check_args(module, warnings)
result = {
'changed': False,
'warnings': warnings,
}
config = None
    info = get_capabilities(module).get('device_info', {})
os_platform = info.get('network_os_platform', '')
if (module.params['replace'] == 'config'):
if ('9K' not in os_platform):
module.fail_json(msg='replace: config is supported only for Nexus 9K series switches')
if module.params['replace_src']:
if (module.params['replace'] != 'config'):
module.fail_json(msg='replace: config is required with replace_src')
if (module.params['backup'] or (module._diff and (module.params['diff_against'] == 'running'))):
contents = get_config(module)
config = NetworkConfig(indent=2, contents=contents)
if module.params['backup']:
result['__backup__'] = contents
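    # build the candidate config and push only the computed difference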
if any((module.params['src'], module.params['lines'], module.params['replace_src'])):
match = module.params['match']
replace = module.params['replace']
candidate = get_candidate(module)
if ((match != 'none') and (replace != 'config')):
config = get_running_config(module, config)
path = module.params['parents']
configobjs = candidate.difference(config, match=match, replace=replace, path=path)
else:
configobjs = candidate.items
if configobjs:
commands = dumps(configobjs, 'commands').split('\n')
if module.params['before']:
commands[:0] = module.params['before']
if module.params['after']:
commands.extend(module.params['after'])
result['commands'] = commands
result['updates'] = commands
if (not module.check_mode):
load_config(module, commands)
result['changed'] = True
running_config = None
startup_config = None
diff_ignore_lines = module.params['diff_ignore_lines']
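    # the deprecated 'save' flag maps to save_when=always; copy running-config to startup-config when required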
if module.params['save']:
module.params['save_when'] = 'always'
if (module.params['save_when'] != 'never'):
output = execute_show_commands(module, ['show running-config', 'show startup-config'])
running_config = NetworkConfig(indent=1, contents=output[0], ignore_lines=diff_ignore_lines)
startup_config = NetworkConfig(indent=1, contents=output[1], ignore_lines=diff_ignore_lines)
if ((running_config.sha1 != startup_config.sha1) or (module.params['save_when'] == 'always')):
result['changed'] = True
if (not module.check_mode):
cmd = {
'command': 'copy running-config startup-config',
'output': 'text',
}
run_commands(module, [cmd])
else:
module.warn('Skipping command `copy running-config startup-config` due to check_mode. Configuration not copied to non-volatile storage')
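    # diff reporting: compare the running config against the requested baseline via SHA1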
if module._diff:
if (not running_config):
output = execute_show_commands(module, 'show running-config')
contents = output[0]
else:
contents = running_config.config_text
running_config = NetworkConfig(indent=1, contents=contents, ignore_lines=diff_ignore_lines)
if (module.params['diff_against'] == 'running'):
if module.check_mode:
module.warn('unable to perform diff against running-config due to check mode')
contents = None
else:
contents = config.config_text
elif (module.params['diff_against'] == 'startup'):
if (not startup_config):
output = execute_show_commands(module, 'show startup-config')
contents = output[0]
            else:
                contents = startup_config.config_text
elif (module.params['diff_against'] == 'intended'):
contents = module.params['intended_config']
if (contents is not None):
base_config = NetworkConfig(indent=1, contents=contents, ignore_lines=diff_ignore_lines)
if (running_config.sha1 != base_config.sha1):
if (module.params['diff_against'] == 'intended'):
before = running_config
after = base_config
elif (module.params['diff_against'] in ('startup', 'running')):
before = base_config
after = running_config
result.update({
'changed': True,
'diff': {
'before': str(before),
'after': str(after),
},
})
module.exit_json(**result)
|
normal
|
{
"blob_id": "99b5ac74da95dff399c31d58e19bac65e538a34b",
"index": 8012,
"step-1": "<mask token>\n",
"step-2": "def main():\n \"\"\" main entry point for module execution\n \"\"\"\n argument_spec = dict(src=dict(type='path'), replace_src=dict(), lines=\n dict(aliases=['commands'], type='list'), parents=dict(type='list'),\n before=dict(type='list'), after=dict(type='list'), match=dict(\n default='line', choices=['line', 'strict', 'exact', 'none']),\n replace=dict(default='line', choices=['line', 'block', 'config']),\n running_config=dict(aliases=['config']), intended_config=dict(),\n defaults=dict(type='bool', default=False), backup=dict(type='bool',\n default=False), save_when=dict(choices=['always', 'never',\n 'modified'], default='never'), diff_against=dict(choices=['running',\n 'startup', 'intended']), diff_ignore_lines=dict(type='list'), save=\n dict(default=False, type='bool', removed_in_version='2.4'), force=\n dict(default=False, type='bool', removed_in_version='2.2'))\n argument_spec.update(nxos_argument_spec)\n mutually_exclusive = [('lines', 'src', 'replace_src'), ('parents',\n 'src'), ('save', 'save_when')]\n required_if = [('match', 'strict', ['lines']), ('match', 'exact', [\n 'lines']), ('replace', 'block', ['lines']), ('replace', 'config', [\n 'replace_src']), ('diff_against', 'intended', ['intended_config'])]\n module = AnsibleModule(argument_spec=argument_spec, mutually_exclusive=\n mutually_exclusive, required_if=required_if, supports_check_mode=True)\n warnings = list()\n nxos_check_args(module, warnings)\n result = {'changed': False, 'warnings': warnings}\n config = None\n info = get_capabilities(module).get('device_info', {})\n os_platform = info.get('network_os_platform', '')\n if module.params['replace'] == 'config':\n if '9K' not in os_platform:\n module.fail_json(msg=\n 'replace: config is supported only for Nexus 9K series switches'\n )\n if module.params['replace_src']:\n if module.params['replace'] != 'config':\n module.fail_json(msg='replace: config is required with replace_src'\n )\n if module.params['backup'] or module._diff and module.params['diff_against'\n ] == 'running':\n contents = get_config(module)\n config = NetworkConfig(indent=2, contents=contents)\n if module.params['backup']:\n result['__backup__'] = contents\n if any((module.params['src'], module.params['lines'], module.params[\n 'replace_src'])):\n match = module.params['match']\n replace = module.params['replace']\n candidate = get_candidate(module)\n if match != 'none' and replace != 'config':\n config = get_running_config(module, config)\n path = module.params['parents']\n configobjs = candidate.difference(config, match=match, replace=\n replace, path=path)\n else:\n configobjs = candidate.items\n if configobjs:\n commands = dumps(configobjs, 'commands').split('\\n')\n if module.params['before']:\n commands[:0] = module.params['before']\n if module.params['after']:\n commands.extend(module.params['after'])\n result['commands'] = commands\n result['updates'] = commands\n if not module.check_mode:\n load_config(module, commands)\n result['changed'] = True\n running_config = None\n startup_config = None\n diff_ignore_lines = module.params['diff_ignore_lines']\n if module.params['save']:\n module.params['save_when'] = 'always'\n if module.params['save_when'] != 'never':\n output = execute_show_commands(module, ['show running-config',\n 'show startup-config'])\n running_config = NetworkConfig(indent=1, contents=output[0],\n ignore_lines=diff_ignore_lines)\n startup_config = NetworkConfig(indent=1, contents=output[1],\n ignore_lines=diff_ignore_lines)\n if running_config.sha1 != 
startup_config.sha1 or module.params[\n 'save_when'] == 'always':\n result['changed'] = True\n if not module.check_mode:\n cmd = {'command': 'copy running-config startup-config',\n 'output': 'text'}\n run_commands(module, [cmd])\n else:\n module.warn(\n 'Skipping command `copy running-config startup-config` due to check_mode. Configuration not copied to non-volatile storage'\n )\n if module._diff:\n if not running_config:\n output = execute_show_commands(module, 'show running-config')\n contents = output[0]\n else:\n contents = running_config.config_text\n running_config = NetworkConfig(indent=1, contents=contents,\n ignore_lines=diff_ignore_lines)\n if module.params['diff_against'] == 'running':\n if module.check_mode:\n module.warn(\n 'unable to perform diff against running-config due to check mode'\n )\n contents = None\n else:\n contents = config.config_text\n elif module.params['diff_against'] == 'startup':\n if not startup_config:\n output = execute_show_commands(module, 'show startup-config')\n contents = output[0]\n else:\n contents = output[0]\n contents = startup_config.config_text\n elif module.params['diff_against'] == 'intended':\n contents = module.params['intended_config']\n if contents is not None:\n base_config = NetworkConfig(indent=1, contents=contents,\n ignore_lines=diff_ignore_lines)\n if running_config.sha1 != base_config.sha1:\n if module.params['diff_against'] == 'intended':\n before = running_config\n after = base_config\n elif module.params['diff_against'] in ('startup', 'running'):\n before = base_config\n after = running_config\n result.update({'changed': True, 'diff': {'before': str(\n before), 'after': str(after)}})\n module.exit_json(**result)\n",
"step-3": "def main():\n ' main entry point for module execution\\n '\n argument_spec = dict(src=dict(type='path'), replace_src=dict(), lines=dict(aliases=['commands'], type='list'), parents=dict(type='list'), before=dict(type='list'), after=dict(type='list'), match=dict(default='line', choices=['line', 'strict', 'exact', 'none']), replace=dict(default='line', choices=['line', 'block', 'config']), running_config=dict(aliases=['config']), intended_config=dict(), defaults=dict(type='bool', default=False), backup=dict(type='bool', default=False), save_when=dict(choices=['always', 'never', 'modified'], default='never'), diff_against=dict(choices=['running', 'startup', 'intended']), diff_ignore_lines=dict(type='list'), save=dict(default=False, type='bool', removed_in_version='2.4'), force=dict(default=False, type='bool', removed_in_version='2.2'))\n argument_spec.update(nxos_argument_spec)\n mutually_exclusive = [('lines', 'src', 'replace_src'), ('parents', 'src'), ('save', 'save_when')]\n required_if = [('match', 'strict', ['lines']), ('match', 'exact', ['lines']), ('replace', 'block', ['lines']), ('replace', 'config', ['replace_src']), ('diff_against', 'intended', ['intended_config'])]\n module = AnsibleModule(argument_spec=argument_spec, mutually_exclusive=mutually_exclusive, required_if=required_if, supports_check_mode=True)\n warnings = list()\n nxos_check_args(module, warnings)\n result = {\n 'changed': False,\n 'warnings': warnings,\n }\n config = None\n info = get_capabilities(module).get('device_info', {\n \n })\n os_platform = info.get('network_os_platform', '')\n if (module.params['replace'] == 'config'):\n if ('9K' not in os_platform):\n module.fail_json(msg='replace: config is supported only for Nexus 9K series switches')\n if module.params['replace_src']:\n if (module.params['replace'] != 'config'):\n module.fail_json(msg='replace: config is required with replace_src')\n if (module.params['backup'] or (module._diff and (module.params['diff_against'] == 'running'))):\n contents = get_config(module)\n config = NetworkConfig(indent=2, contents=contents)\n if module.params['backup']:\n result['__backup__'] = contents\n if any((module.params['src'], module.params['lines'], module.params['replace_src'])):\n match = module.params['match']\n replace = module.params['replace']\n candidate = get_candidate(module)\n if ((match != 'none') and (replace != 'config')):\n config = get_running_config(module, config)\n path = module.params['parents']\n configobjs = candidate.difference(config, match=match, replace=replace, path=path)\n else:\n configobjs = candidate.items\n if configobjs:\n commands = dumps(configobjs, 'commands').split('\\n')\n if module.params['before']:\n commands[:0] = module.params['before']\n if module.params['after']:\n commands.extend(module.params['after'])\n result['commands'] = commands\n result['updates'] = commands\n if (not module.check_mode):\n load_config(module, commands)\n result['changed'] = True\n running_config = None\n startup_config = None\n diff_ignore_lines = module.params['diff_ignore_lines']\n if module.params['save']:\n module.params['save_when'] = 'always'\n if (module.params['save_when'] != 'never'):\n output = execute_show_commands(module, ['show running-config', 'show startup-config'])\n running_config = NetworkConfig(indent=1, contents=output[0], ignore_lines=diff_ignore_lines)\n startup_config = NetworkConfig(indent=1, contents=output[1], ignore_lines=diff_ignore_lines)\n if ((running_config.sha1 != startup_config.sha1) or 
(module.params['save_when'] == 'always')):\n result['changed'] = True\n if (not module.check_mode):\n cmd = {\n 'command': 'copy running-config startup-config',\n 'output': 'text',\n }\n run_commands(module, [cmd])\n else:\n module.warn('Skipping command `copy running-config startup-config` due to check_mode. Configuration not copied to non-volatile storage')\n if module._diff:\n if (not running_config):\n output = execute_show_commands(module, 'show running-config')\n contents = output[0]\n else:\n contents = running_config.config_text\n running_config = NetworkConfig(indent=1, contents=contents, ignore_lines=diff_ignore_lines)\n if (module.params['diff_against'] == 'running'):\n if module.check_mode:\n module.warn('unable to perform diff against running-config due to check mode')\n contents = None\n else:\n contents = config.config_text\n elif (module.params['diff_against'] == 'startup'):\n if (not startup_config):\n output = execute_show_commands(module, 'show startup-config')\n contents = output[0]\n else:\n contents = output[0]\n contents = startup_config.config_text\n elif (module.params['diff_against'] == 'intended'):\n contents = module.params['intended_config']\n if (contents is not None):\n base_config = NetworkConfig(indent=1, contents=contents, ignore_lines=diff_ignore_lines)\n if (running_config.sha1 != base_config.sha1):\n if (module.params['diff_against'] == 'intended'):\n before = running_config\n after = base_config\n elif (module.params['diff_against'] in ('startup', 'running')):\n before = base_config\n after = running_config\n result.update({\n 'changed': True,\n 'diff': {\n 'before': str(before),\n 'after': str(after),\n },\n })\n module.exit_json(**result)",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
#!/usr/bin/python
# coding: utf-8
__author__='julia sayapina'
### Use db_reset.py to drop the db and recreate it, then use 'migrate' --> 'createsuperuser' --> 'makemigrations' --> 'migrate' as usual.
### This will create the DB structure exactly as Django expects it.
### Then use test_db_fullfill.py to fill the db with test data. If you don't need to create tables manually, don't use db_create().
from warnings import filterwarnings
import MySQLdb as db
import os
import shutil
import sys
from subprocess import Popen, PIPE, STDOUT
import uuid
from decimal import *
from datetime import date
from random import randint
# Create or open the database and create the schema
filterwarnings('ignore', category = db.Warning)
db_name = 'ved3'
def db_create(): # creates tables manually (doesn't create AO and AB tables)
cur.execute("""
create table if not exists Offshores_asset (
id INTEGER PRIMARY KEY AUTO_INCREMENT,
asset_name VARCHAR(100),
asset_link VARCHAR(200),
slug CHAR(200),
uuid CHAR(36)
);
""")
cur.execute("""
create table if not exists Offshores_offshore (
id INTEGER PRIMARY KEY AUTO_INCREMENT,
off_name VARCHAR(50),
off_jurisdiction VARCHAR(50),
file VARCHAR(100),
image VARCHAR(100),
off_parent VARCHAR(50),
off_link VARCHAR(300),
slug VARCHAR(200),
uuid CHAR(36)
);
""")
cur.execute("""
create table if not exists Offshores_beneficiary (
id INTEGER PRIMARY KEY AUTO_INCREMENT,
ben_name VARCHAR(50),
ben_lastname VARCHAR(100),
ben_midname VARCHAR(30),
ben_holding VARCHAR(70),
ben_link VARCHAR(300),
slug VARCHAR(200),
uuid CHAR(36)
);
""")
cur.execute("""
create table if not exists Offshores_beneficiariesoffshores (
id INTEGER PRIMARY KEY AUTO_INCREMENT,
share DECIMAL,
rel_date DATE,
source VARCHAR(150),
link VARCHAR(200),
beneficiary_id INT,
offshore_id INT,
uuid CHAR(36)
);
""")
conn.commit()
print('tables created')
def db_insert(numrows):
# inserts test data into tables
for x in xrange(0,numrows): #creates test data for tables
num = str(x)
a_name = 'Asset' + num
a_link = 'http://somelink/'+a_name
a_uuid = uuid.uuid4().hex
a_slug = a_name + '-' + str(a_uuid)
o_name = 'Offshore' + num
o_jur = 'Cyprus'
o_file = 'offshores/favicon.xcf'
o_image = 'offshores/favicon.png'
o_prnt = 'parent' + num
o_link = 'http://' + o_name + '-' + num + '.com'
o_uuid = uuid.uuid4().hex
o_slug = o_name + str(o_uuid)
b_name = 'Michael' + num
b_lname = 'Prohorov' + num
b_mname = 'Dmitrievich' + num
b_holding = 'Onexim' + num
b_link = 'http://onexim.ru/' + b_name + b_lname + '-' + num + '.com'
b_uuid = uuid.uuid4().hex
b_slug = b_lname + str(b_uuid)
        try: #inserts test data into the tables via SQL; still produces weird errors for Beneficiariesoffshores, cause unknown
cur.execute("""INSERT INTO Offshores_asset (asset_name, asset_link, slug, uuid) VALUES (%s,%s,%s,%s)""",(a_name, a_link, a_slug, a_uuid))
cur.execute("""INSERT INTO Offshores_offshore (off_name, off_jurisdiction, file, image, off_parent, off_link, slug, uuid) VALUES (%s,%s,%s,%s,%s,%s,%s,%s)""",(o_name, o_jur, o_file, o_image, o_prnt, o_link, o_slug, o_uuid))
cur.execute("""INSERT INTO Offshores_beneficiary (ben_name, ben_lastname, ben_midname, ben_holding, ben_link, slug, uuid) VALUES (%s,%s,%s,%s,%s,%s,%s)""",(b_name, b_lname, b_mname, b_holding, b_link, b_slug, b_uuid))
conn.commit()
except Exception as e:
print ("Exception 1:", type(e), e)
def db_insert_linktables(numrows):
    # inserts test data into the linking tables; must be called after db_insert(),
    # because the base tables have to exist before links between them can be generated with random ids
for x in xrange(0,numrows): #creates test data for tables
num = str(x)
bo_share = Decimal(x)
bo_date = date(2016, randint(1, 12), randint(1, 28))
bo_source = 'source' + num
bo_link = 'http://bo.ru/' + bo_source + '-' + num
bo_ben = randint(1, numrows)
bo_off = randint(1, numrows)
bo_uuid = uuid.uuid4().hex
oa_uuid = uuid.uuid4().hex
oa_share = Decimal(x)
oa_date = date(2016, randint(1, 12), randint(1, 28))
oa_source = 'source' + num
oa_link = 'http://oa.ru/' + oa_source + '-' + num
oa_asset = randint(1, numrows)
oa_off = randint(1, numrows)
ab_uuid = uuid.uuid4().hex
ab_share = Decimal(x)
ab_date = date(2016, randint(1, 12), randint(1, 28))
ab_source = 'source' + num
ab_link = 'http://ab.ru/' + oa_source + '-' + num
ab_asset = randint(1, numrows)
ab_ben = randint(1, numrows)
        try: #inserts test data into the tables via SQL; still produces weird errors for Beneficiariesoffshores, cause unknown
cur.execute("""INSERT INTO Offshores_beneficiariesoffshores (share, rel_date, source, link, beneficiary_id, offshore_id, uuid) VALUES (%s,%s,%s,%s,%s,%s,%s)""",(bo_share, bo_date, bo_source, bo_link, bo_ben, bo_off, bo_uuid))
cur.execute("""INSERT INTO Offshores_offshoresassets (uuid, share, rel_date, source, link, asset_id, offshore_id) VALUES (%s,%s,%s,%s,%s,%s,%s)""",(oa_uuid, oa_share, oa_date, oa_source, oa_link, oa_asset, oa_off))
cur.execute("""INSERT INTO Offshores_assetsbeneficiaries (uuid, share, rel_date, source, link, asset_id, beneficiary_id) VALUES (%s,%s,%s,%s,%s,%s,%s)""",(ab_uuid, ab_share, ab_date, ab_source, ab_link, ab_asset, ab_ben))
conn.commit()
except Exception as e:
print ("Exception 1:", type(e), e)
numrows = 20
try:
conn = db.connect("localhost","root","0013Tau","ved2" )
cur = conn.cursor()
# db_create() #<-- to create tables manually uncomment this
db_insert(numrows)
db_insert_linktables(numrows) # IMPORTANT! has to be called ONLY after db_insert()!
except Exception as e:
print ("Exception 0:", type(e), e)
except: conn.rollback()
conn.commit()
conn.close()
print ('DB populated')
# def main():
# if len(sys.argv) != 2:
# print('usage: python3 db_fullfill.py [numrows]')
# sys.exit(1)
# if len(sys.argv) == 2:
# numrows = sys.argv[1]
# else:
# numrows = 15
# print (numrows)
# return numrows
# sys.exit(1)
# if __name__ == '__main__':
# main()
|
normal
|
{
"blob_id": "3240310653930662dcc4d79646b1a75c2994cda7",
"index": 9063,
"step-1": "<mask token>\n\n\ndef db_create():\n cur.execute(\n \"\"\"\n create table if not exists Offshores_asset (\n id INTEGER PRIMARY KEY AUTO_INCREMENT,\n asset_name VARCHAR(100),\n asset_link VARCHAR(200),\n slug CHAR(200),\n uuid CHAR(36)\n );\n \"\"\"\n )\n cur.execute(\n \"\"\"\n create table if not exists Offshores_offshore (\n id INTEGER PRIMARY KEY AUTO_INCREMENT,\n off_name VARCHAR(50),\n off_jurisdiction VARCHAR(50),\n file VARCHAR(100),\n image VARCHAR(100),\n off_parent VARCHAR(50),\n off_link VARCHAR(300),\n slug VARCHAR(200),\n uuid CHAR(36)\n );\n \"\"\"\n )\n cur.execute(\n \"\"\"\n create table if not exists Offshores_beneficiary (\n id INTEGER PRIMARY KEY AUTO_INCREMENT,\n ben_name VARCHAR(50),\n ben_lastname VARCHAR(100),\n ben_midname VARCHAR(30),\n ben_holding VARCHAR(70),\n ben_link VARCHAR(300),\n slug VARCHAR(200),\n uuid CHAR(36)\n );\n \"\"\"\n )\n cur.execute(\n \"\"\"\n create table if not exists Offshores_beneficiariesoffshores (\n id INTEGER PRIMARY KEY AUTO_INCREMENT,\n share DECIMAL,\n rel_date DATE,\n source VARCHAR(150),\n link VARCHAR(200),\n beneficiary_id INT,\n offshore_id INT,\n uuid CHAR(36)\n );\n \"\"\"\n )\n conn.commit()\n print('tables created')\n\n\n<mask token>\n\n\ndef db_insert_linktables(numrows):\n for x in xrange(0, numrows):\n num = str(x)\n bo_share = Decimal(x)\n bo_date = date(2016, randint(1, 12), randint(1, 28))\n bo_source = 'source' + num\n bo_link = 'http://bo.ru/' + bo_source + '-' + num\n bo_ben = randint(1, numrows)\n bo_off = randint(1, numrows)\n bo_uuid = uuid.uuid4().hex\n oa_uuid = uuid.uuid4().hex\n oa_share = Decimal(x)\n oa_date = date(2016, randint(1, 12), randint(1, 28))\n oa_source = 'source' + num\n oa_link = 'http://oa.ru/' + oa_source + '-' + num\n oa_asset = randint(1, numrows)\n oa_off = randint(1, numrows)\n ab_uuid = uuid.uuid4().hex\n ab_share = Decimal(x)\n ab_date = date(2016, randint(1, 12), randint(1, 28))\n ab_source = 'source' + num\n ab_link = 'http://ab.ru/' + oa_source + '-' + num\n ab_asset = randint(1, numrows)\n ab_ben = randint(1, numrows)\n try:\n cur.execute(\n 'INSERT INTO Offshores_beneficiariesoffshores (share, rel_date, source, link, beneficiary_id, offshore_id, uuid) VALUES (%s,%s,%s,%s,%s,%s,%s)'\n , (bo_share, bo_date, bo_source, bo_link, bo_ben, bo_off,\n bo_uuid))\n cur.execute(\n 'INSERT INTO Offshores_offshoresassets (uuid, share, rel_date, source, link, asset_id, offshore_id) VALUES (%s,%s,%s,%s,%s,%s,%s)'\n , (oa_uuid, oa_share, oa_date, oa_source, oa_link, oa_asset,\n oa_off))\n cur.execute(\n 'INSERT INTO Offshores_assetsbeneficiaries (uuid, share, rel_date, source, link, asset_id, beneficiary_id) VALUES (%s,%s,%s,%s,%s,%s,%s)'\n , (ab_uuid, ab_share, ab_date, ab_source, ab_link, ab_asset,\n ab_ben))\n conn.commit()\n except Exception as e:\n print('Exception 1:', type(e), e)\n\n\n<mask token>\n",
"step-2": "<mask token>\nfilterwarnings('ignore', category=db.Warning)\n<mask token>\n\n\ndef db_create():\n cur.execute(\n \"\"\"\n create table if not exists Offshores_asset (\n id INTEGER PRIMARY KEY AUTO_INCREMENT,\n asset_name VARCHAR(100),\n asset_link VARCHAR(200),\n slug CHAR(200),\n uuid CHAR(36)\n );\n \"\"\"\n )\n cur.execute(\n \"\"\"\n create table if not exists Offshores_offshore (\n id INTEGER PRIMARY KEY AUTO_INCREMENT,\n off_name VARCHAR(50),\n off_jurisdiction VARCHAR(50),\n file VARCHAR(100),\n image VARCHAR(100),\n off_parent VARCHAR(50),\n off_link VARCHAR(300),\n slug VARCHAR(200),\n uuid CHAR(36)\n );\n \"\"\"\n )\n cur.execute(\n \"\"\"\n create table if not exists Offshores_beneficiary (\n id INTEGER PRIMARY KEY AUTO_INCREMENT,\n ben_name VARCHAR(50),\n ben_lastname VARCHAR(100),\n ben_midname VARCHAR(30),\n ben_holding VARCHAR(70),\n ben_link VARCHAR(300),\n slug VARCHAR(200),\n uuid CHAR(36)\n );\n \"\"\"\n )\n cur.execute(\n \"\"\"\n create table if not exists Offshores_beneficiariesoffshores (\n id INTEGER PRIMARY KEY AUTO_INCREMENT,\n share DECIMAL,\n rel_date DATE,\n source VARCHAR(150),\n link VARCHAR(200),\n beneficiary_id INT,\n offshore_id INT,\n uuid CHAR(36)\n );\n \"\"\"\n )\n conn.commit()\n print('tables created')\n\n\ndef db_insert(numrows):\n for x in xrange(0, numrows):\n num = str(x)\n a_name = 'Asset' + num\n a_link = 'http://somelink/' + a_name\n a_uuid = uuid.uuid4().hex\n a_slug = a_name + '-' + str(a_uuid)\n o_name = 'Offshore' + num\n o_jur = 'Cyprus'\n o_file = 'offshores/favicon.xcf'\n o_image = 'offshores/favicon.png'\n o_prnt = 'parent' + num\n o_link = 'http://' + o_name + '-' + num + '.com'\n o_uuid = uuid.uuid4().hex\n o_slug = o_name + str(o_uuid)\n b_name = 'Michael' + num\n b_lname = 'Prohorov' + num\n b_mname = 'Dmitrievich' + num\n b_holding = 'Onexim' + num\n b_link = 'http://onexim.ru/' + b_name + b_lname + '-' + num + '.com'\n b_uuid = uuid.uuid4().hex\n b_slug = b_lname + str(b_uuid)\n try:\n cur.execute(\n 'INSERT INTO Offshores_asset (asset_name, asset_link, slug, uuid) VALUES (%s,%s,%s,%s)'\n , (a_name, a_link, a_slug, a_uuid))\n cur.execute(\n 'INSERT INTO Offshores_offshore (off_name, off_jurisdiction, file, image, off_parent, off_link, slug, uuid) VALUES (%s,%s,%s,%s,%s,%s,%s,%s)'\n , (o_name, o_jur, o_file, o_image, o_prnt, o_link, o_slug,\n o_uuid))\n cur.execute(\n 'INSERT INTO Offshores_beneficiary (ben_name, ben_lastname, ben_midname, ben_holding, ben_link, slug, uuid) VALUES (%s,%s,%s,%s,%s,%s,%s)'\n , (b_name, b_lname, b_mname, b_holding, b_link, b_slug, b_uuid)\n )\n conn.commit()\n except Exception as e:\n print('Exception 1:', type(e), e)\n\n\ndef db_insert_linktables(numrows):\n for x in xrange(0, numrows):\n num = str(x)\n bo_share = Decimal(x)\n bo_date = date(2016, randint(1, 12), randint(1, 28))\n bo_source = 'source' + num\n bo_link = 'http://bo.ru/' + bo_source + '-' + num\n bo_ben = randint(1, numrows)\n bo_off = randint(1, numrows)\n bo_uuid = uuid.uuid4().hex\n oa_uuid = uuid.uuid4().hex\n oa_share = Decimal(x)\n oa_date = date(2016, randint(1, 12), randint(1, 28))\n oa_source = 'source' + num\n oa_link = 'http://oa.ru/' + oa_source + '-' + num\n oa_asset = randint(1, numrows)\n oa_off = randint(1, numrows)\n ab_uuid = uuid.uuid4().hex\n ab_share = Decimal(x)\n ab_date = date(2016, randint(1, 12), randint(1, 28))\n ab_source = 'source' + num\n ab_link = 'http://ab.ru/' + oa_source + '-' + num\n ab_asset = randint(1, numrows)\n ab_ben = randint(1, numrows)\n try:\n cur.execute(\n 'INSERT INTO 
Offshores_beneficiariesoffshores (share, rel_date, source, link, beneficiary_id, offshore_id, uuid) VALUES (%s,%s,%s,%s,%s,%s,%s)'\n , (bo_share, bo_date, bo_source, bo_link, bo_ben, bo_off,\n bo_uuid))\n cur.execute(\n 'INSERT INTO Offshores_offshoresassets (uuid, share, rel_date, source, link, asset_id, offshore_id) VALUES (%s,%s,%s,%s,%s,%s,%s)'\n , (oa_uuid, oa_share, oa_date, oa_source, oa_link, oa_asset,\n oa_off))\n cur.execute(\n 'INSERT INTO Offshores_assetsbeneficiaries (uuid, share, rel_date, source, link, asset_id, beneficiary_id) VALUES (%s,%s,%s,%s,%s,%s,%s)'\n , (ab_uuid, ab_share, ab_date, ab_source, ab_link, ab_asset,\n ab_ben))\n conn.commit()\n except Exception as e:\n print('Exception 1:', type(e), e)\n\n\n<mask token>\ntry:\n conn = db.connect('localhost', 'root', '0013Tau', 'ved2')\n cur = conn.cursor()\n db_insert(numrows)\n db_insert_linktables(numrows)\nexcept Exception as e:\n print('Exception 0:', type(e), e)\nexcept:\n db.rollback()\nconn.commit()\nconn.close()\nprint('DB fullfilled')\n",
"step-3": "__author__ = 'julia sayapina'\n<mask token>\nfilterwarnings('ignore', category=db.Warning)\ndb_name = 'ved3'\n\n\ndef db_create():\n cur.execute(\n \"\"\"\n create table if not exists Offshores_asset (\n id INTEGER PRIMARY KEY AUTO_INCREMENT,\n asset_name VARCHAR(100),\n asset_link VARCHAR(200),\n slug CHAR(200),\n uuid CHAR(36)\n );\n \"\"\"\n )\n cur.execute(\n \"\"\"\n create table if not exists Offshores_offshore (\n id INTEGER PRIMARY KEY AUTO_INCREMENT,\n off_name VARCHAR(50),\n off_jurisdiction VARCHAR(50),\n file VARCHAR(100),\n image VARCHAR(100),\n off_parent VARCHAR(50),\n off_link VARCHAR(300),\n slug VARCHAR(200),\n uuid CHAR(36)\n );\n \"\"\"\n )\n cur.execute(\n \"\"\"\n create table if not exists Offshores_beneficiary (\n id INTEGER PRIMARY KEY AUTO_INCREMENT,\n ben_name VARCHAR(50),\n ben_lastname VARCHAR(100),\n ben_midname VARCHAR(30),\n ben_holding VARCHAR(70),\n ben_link VARCHAR(300),\n slug VARCHAR(200),\n uuid CHAR(36)\n );\n \"\"\"\n )\n cur.execute(\n \"\"\"\n create table if not exists Offshores_beneficiariesoffshores (\n id INTEGER PRIMARY KEY AUTO_INCREMENT,\n share DECIMAL,\n rel_date DATE,\n source VARCHAR(150),\n link VARCHAR(200),\n beneficiary_id INT,\n offshore_id INT,\n uuid CHAR(36)\n );\n \"\"\"\n )\n conn.commit()\n print('tables created')\n\n\ndef db_insert(numrows):\n for x in xrange(0, numrows):\n num = str(x)\n a_name = 'Asset' + num\n a_link = 'http://somelink/' + a_name\n a_uuid = uuid.uuid4().hex\n a_slug = a_name + '-' + str(a_uuid)\n o_name = 'Offshore' + num\n o_jur = 'Cyprus'\n o_file = 'offshores/favicon.xcf'\n o_image = 'offshores/favicon.png'\n o_prnt = 'parent' + num\n o_link = 'http://' + o_name + '-' + num + '.com'\n o_uuid = uuid.uuid4().hex\n o_slug = o_name + str(o_uuid)\n b_name = 'Michael' + num\n b_lname = 'Prohorov' + num\n b_mname = 'Dmitrievich' + num\n b_holding = 'Onexim' + num\n b_link = 'http://onexim.ru/' + b_name + b_lname + '-' + num + '.com'\n b_uuid = uuid.uuid4().hex\n b_slug = b_lname + str(b_uuid)\n try:\n cur.execute(\n 'INSERT INTO Offshores_asset (asset_name, asset_link, slug, uuid) VALUES (%s,%s,%s,%s)'\n , (a_name, a_link, a_slug, a_uuid))\n cur.execute(\n 'INSERT INTO Offshores_offshore (off_name, off_jurisdiction, file, image, off_parent, off_link, slug, uuid) VALUES (%s,%s,%s,%s,%s,%s,%s,%s)'\n , (o_name, o_jur, o_file, o_image, o_prnt, o_link, o_slug,\n o_uuid))\n cur.execute(\n 'INSERT INTO Offshores_beneficiary (ben_name, ben_lastname, ben_midname, ben_holding, ben_link, slug, uuid) VALUES (%s,%s,%s,%s,%s,%s,%s)'\n , (b_name, b_lname, b_mname, b_holding, b_link, b_slug, b_uuid)\n )\n conn.commit()\n except Exception as e:\n print('Exception 1:', type(e), e)\n\n\ndef db_insert_linktables(numrows):\n for x in xrange(0, numrows):\n num = str(x)\n bo_share = Decimal(x)\n bo_date = date(2016, randint(1, 12), randint(1, 28))\n bo_source = 'source' + num\n bo_link = 'http://bo.ru/' + bo_source + '-' + num\n bo_ben = randint(1, numrows)\n bo_off = randint(1, numrows)\n bo_uuid = uuid.uuid4().hex\n oa_uuid = uuid.uuid4().hex\n oa_share = Decimal(x)\n oa_date = date(2016, randint(1, 12), randint(1, 28))\n oa_source = 'source' + num\n oa_link = 'http://oa.ru/' + oa_source + '-' + num\n oa_asset = randint(1, numrows)\n oa_off = randint(1, numrows)\n ab_uuid = uuid.uuid4().hex\n ab_share = Decimal(x)\n ab_date = date(2016, randint(1, 12), randint(1, 28))\n ab_source = 'source' + num\n ab_link = 'http://ab.ru/' + oa_source + '-' + num\n ab_asset = randint(1, numrows)\n ab_ben = randint(1, numrows)\n try:\n 
cur.execute(\n 'INSERT INTO Offshores_beneficiariesoffshores (share, rel_date, source, link, beneficiary_id, offshore_id, uuid) VALUES (%s,%s,%s,%s,%s,%s,%s)'\n , (bo_share, bo_date, bo_source, bo_link, bo_ben, bo_off,\n bo_uuid))\n cur.execute(\n 'INSERT INTO Offshores_offshoresassets (uuid, share, rel_date, source, link, asset_id, offshore_id) VALUES (%s,%s,%s,%s,%s,%s,%s)'\n , (oa_uuid, oa_share, oa_date, oa_source, oa_link, oa_asset,\n oa_off))\n cur.execute(\n 'INSERT INTO Offshores_assetsbeneficiaries (uuid, share, rel_date, source, link, asset_id, beneficiary_id) VALUES (%s,%s,%s,%s,%s,%s,%s)'\n , (ab_uuid, ab_share, ab_date, ab_source, ab_link, ab_asset,\n ab_ben))\n conn.commit()\n except Exception as e:\n print('Exception 1:', type(e), e)\n\n\nnumrows = 20\ntry:\n conn = db.connect('localhost', 'root', '0013Tau', 'ved2')\n cur = conn.cursor()\n db_insert(numrows)\n db_insert_linktables(numrows)\nexcept Exception as e:\n print('Exception 0:', type(e), e)\nexcept:\n db.rollback()\nconn.commit()\nconn.close()\nprint('DB fullfilled')\n",
"step-4": "__author__ = 'julia sayapina'\nfrom warnings import filterwarnings\nimport MySQLdb as db\nimport os\nimport shutil\nimport os\nimport sys\nfrom subprocess import Popen, PIPE, STDOUT\nimport uuid\nfrom decimal import *\nfrom datetime import date\nfrom random import randint\nfilterwarnings('ignore', category=db.Warning)\ndb_name = 'ved3'\n\n\ndef db_create():\n cur.execute(\n \"\"\"\n create table if not exists Offshores_asset (\n id INTEGER PRIMARY KEY AUTO_INCREMENT,\n asset_name VARCHAR(100),\n asset_link VARCHAR(200),\n slug CHAR(200),\n uuid CHAR(36)\n );\n \"\"\"\n )\n cur.execute(\n \"\"\"\n create table if not exists Offshores_offshore (\n id INTEGER PRIMARY KEY AUTO_INCREMENT,\n off_name VARCHAR(50),\n off_jurisdiction VARCHAR(50),\n file VARCHAR(100),\n image VARCHAR(100),\n off_parent VARCHAR(50),\n off_link VARCHAR(300),\n slug VARCHAR(200),\n uuid CHAR(36)\n );\n \"\"\"\n )\n cur.execute(\n \"\"\"\n create table if not exists Offshores_beneficiary (\n id INTEGER PRIMARY KEY AUTO_INCREMENT,\n ben_name VARCHAR(50),\n ben_lastname VARCHAR(100),\n ben_midname VARCHAR(30),\n ben_holding VARCHAR(70),\n ben_link VARCHAR(300),\n slug VARCHAR(200),\n uuid CHAR(36)\n );\n \"\"\"\n )\n cur.execute(\n \"\"\"\n create table if not exists Offshores_beneficiariesoffshores (\n id INTEGER PRIMARY KEY AUTO_INCREMENT,\n share DECIMAL,\n rel_date DATE,\n source VARCHAR(150),\n link VARCHAR(200),\n beneficiary_id INT,\n offshore_id INT,\n uuid CHAR(36)\n );\n \"\"\"\n )\n conn.commit()\n print('tables created')\n\n\ndef db_insert(numrows):\n for x in xrange(0, numrows):\n num = str(x)\n a_name = 'Asset' + num\n a_link = 'http://somelink/' + a_name\n a_uuid = uuid.uuid4().hex\n a_slug = a_name + '-' + str(a_uuid)\n o_name = 'Offshore' + num\n o_jur = 'Cyprus'\n o_file = 'offshores/favicon.xcf'\n o_image = 'offshores/favicon.png'\n o_prnt = 'parent' + num\n o_link = 'http://' + o_name + '-' + num + '.com'\n o_uuid = uuid.uuid4().hex\n o_slug = o_name + str(o_uuid)\n b_name = 'Michael' + num\n b_lname = 'Prohorov' + num\n b_mname = 'Dmitrievich' + num\n b_holding = 'Onexim' + num\n b_link = 'http://onexim.ru/' + b_name + b_lname + '-' + num + '.com'\n b_uuid = uuid.uuid4().hex\n b_slug = b_lname + str(b_uuid)\n try:\n cur.execute(\n 'INSERT INTO Offshores_asset (asset_name, asset_link, slug, uuid) VALUES (%s,%s,%s,%s)'\n , (a_name, a_link, a_slug, a_uuid))\n cur.execute(\n 'INSERT INTO Offshores_offshore (off_name, off_jurisdiction, file, image, off_parent, off_link, slug, uuid) VALUES (%s,%s,%s,%s,%s,%s,%s,%s)'\n , (o_name, o_jur, o_file, o_image, o_prnt, o_link, o_slug,\n o_uuid))\n cur.execute(\n 'INSERT INTO Offshores_beneficiary (ben_name, ben_lastname, ben_midname, ben_holding, ben_link, slug, uuid) VALUES (%s,%s,%s,%s,%s,%s,%s)'\n , (b_name, b_lname, b_mname, b_holding, b_link, b_slug, b_uuid)\n )\n conn.commit()\n except Exception as e:\n print('Exception 1:', type(e), e)\n\n\ndef db_insert_linktables(numrows):\n for x in xrange(0, numrows):\n num = str(x)\n bo_share = Decimal(x)\n bo_date = date(2016, randint(1, 12), randint(1, 28))\n bo_source = 'source' + num\n bo_link = 'http://bo.ru/' + bo_source + '-' + num\n bo_ben = randint(1, numrows)\n bo_off = randint(1, numrows)\n bo_uuid = uuid.uuid4().hex\n oa_uuid = uuid.uuid4().hex\n oa_share = Decimal(x)\n oa_date = date(2016, randint(1, 12), randint(1, 28))\n oa_source = 'source' + num\n oa_link = 'http://oa.ru/' + oa_source + '-' + num\n oa_asset = randint(1, numrows)\n oa_off = randint(1, numrows)\n ab_uuid = uuid.uuid4().hex\n 
ab_share = Decimal(x)\n ab_date = date(2016, randint(1, 12), randint(1, 28))\n ab_source = 'source' + num\n ab_link = 'http://ab.ru/' + oa_source + '-' + num\n ab_asset = randint(1, numrows)\n ab_ben = randint(1, numrows)\n try:\n cur.execute(\n 'INSERT INTO Offshores_beneficiariesoffshores (share, rel_date, source, link, beneficiary_id, offshore_id, uuid) VALUES (%s,%s,%s,%s,%s,%s,%s)'\n , (bo_share, bo_date, bo_source, bo_link, bo_ben, bo_off,\n bo_uuid))\n cur.execute(\n 'INSERT INTO Offshores_offshoresassets (uuid, share, rel_date, source, link, asset_id, offshore_id) VALUES (%s,%s,%s,%s,%s,%s,%s)'\n , (oa_uuid, oa_share, oa_date, oa_source, oa_link, oa_asset,\n oa_off))\n cur.execute(\n 'INSERT INTO Offshores_assetsbeneficiaries (uuid, share, rel_date, source, link, asset_id, beneficiary_id) VALUES (%s,%s,%s,%s,%s,%s,%s)'\n , (ab_uuid, ab_share, ab_date, ab_source, ab_link, ab_asset,\n ab_ben))\n conn.commit()\n except Exception as e:\n print('Exception 1:', type(e), e)\n\n\nnumrows = 20\ntry:\n conn = db.connect('localhost', 'root', '0013Tau', 'ved2')\n cur = conn.cursor()\n db_insert(numrows)\n db_insert_linktables(numrows)\nexcept Exception as e:\n print('Exception 0:', type(e), e)\nexcept:\n db.rollback()\nconn.commit()\nconn.close()\nprint('DB fullfilled')\n",
"step-5": "#coding: utf-8\n#/usr/bin/python\n__author__='julia sayapina'\n\n### Use db_reset.py to drop the db and recreate it, then use 'migrate' --> 'createsuperuser' --> 'makemigrations' --> 'migrate' as usual.\n### This will create the DB structure as it has to be from django\n### Then use test_db_fullfill.py to fullfill the db with test data. if you don't need to create tables manually don't use db_create()\n\nfrom warnings import filterwarnings\nimport MySQLdb as db\nimport os\nimport shutil\nimport os\nimport sys \nfrom subprocess import Popen, PIPE, STDOUT\nimport uuid\nfrom decimal import *\nfrom datetime import date\nfrom random import randint\n\n\n# Создание или открытие файла базы данных и создание схемы\nfilterwarnings('ignore', category = db.Warning)\ndb_name = 'ved3'\n\ndef db_create(): # creates tables manually (doesn't create AO and AB tables)\n cur.execute(\"\"\"\n create table if not exists Offshores_asset (\n id INTEGER PRIMARY KEY AUTO_INCREMENT,\n asset_name VARCHAR(100),\n asset_link VARCHAR(200),\n slug CHAR(200),\n uuid CHAR(36)\n );\n \"\"\")\n cur.execute(\"\"\"\n create table if not exists Offshores_offshore (\n id INTEGER PRIMARY KEY AUTO_INCREMENT,\n off_name VARCHAR(50),\n off_jurisdiction VARCHAR(50),\n file VARCHAR(100),\n image VARCHAR(100),\n off_parent VARCHAR(50),\n off_link VARCHAR(300),\n slug VARCHAR(200),\n uuid CHAR(36)\n );\n \"\"\")\n cur.execute(\"\"\"\n create table if not exists Offshores_beneficiary (\n id INTEGER PRIMARY KEY AUTO_INCREMENT,\n ben_name VARCHAR(50),\n ben_lastname VARCHAR(100),\n ben_midname VARCHAR(30),\n ben_holding VARCHAR(70),\n ben_link VARCHAR(300),\n slug VARCHAR(200),\n uuid CHAR(36)\n );\n \"\"\")\n cur.execute(\"\"\"\n create table if not exists Offshores_beneficiariesoffshores (\n id INTEGER PRIMARY KEY AUTO_INCREMENT,\n share DECIMAL,\n rel_date DATE,\n source VARCHAR(150),\n link VARCHAR(200),\n beneficiary_id INT,\n offshore_id INT,\n uuid CHAR(36)\n );\n \"\"\")\n conn.commit()\n print('tables created')\n\n\ndef db_insert(numrows):\n # inserts test data into tables\n for x in xrange(0,numrows): #creates test data for tables\n num = str(x)\n a_name = 'Asset' + num\n a_link = 'http://somelink/'+a_name\n a_uuid = uuid.uuid4().hex\n a_slug = a_name + '-' + str(a_uuid)\n o_name = 'Offshore' + num\n o_jur = 'Cyprus'\n o_file = 'offshores/favicon.xcf'\n o_image = 'offshores/favicon.png'\n o_prnt = 'parent' + num\n o_link = 'http://' + o_name + '-' + num + '.com'\n o_uuid = uuid.uuid4().hex\n o_slug = o_name + str(o_uuid)\n b_name = 'Michael' + num\n b_lname = 'Prohorov' + num\n b_mname = 'Dmitrievich' + num\n b_holding = 'Onexim' + num\n b_link = 'http://onexim.ru/' + b_name + b_lname + '-' + num + '.com'\n b_uuid = uuid.uuid4().hex\n b_slug = b_lname + str(b_uuid)\n\n try: #inserts test data to tables via SQL; still produces wierd errors for Beneficiariesoffshores idk why\n cur.execute(\"\"\"INSERT INTO Offshores_asset (asset_name, asset_link, slug, uuid) VALUES (%s,%s,%s,%s)\"\"\",(a_name, a_link, a_slug, a_uuid))\n cur.execute(\"\"\"INSERT INTO Offshores_offshore (off_name, off_jurisdiction, file, image, off_parent, off_link, slug, uuid) VALUES (%s,%s,%s,%s,%s,%s,%s,%s)\"\"\",(o_name, o_jur, o_file, o_image, o_prnt, o_link, o_slug, o_uuid))\n cur.execute(\"\"\"INSERT INTO Offshores_beneficiary (ben_name, ben_lastname, ben_midname, ben_holding, ben_link, slug, uuid) VALUES (%s,%s,%s,%s,%s,%s,%s)\"\"\",(b_name, b_lname, b_mname, b_holding, b_link, b_slug, b_uuid))\n conn.commit()\n except Exception as e:\n print 
(\"Exception 1:\", type(e), e)\n\ndef db_insert_linktables(numrows):\n # inserts test data into linking tables; has to be called after db_insert(), as first basic tables need to be generated to produce links between\n # them using random numbers\n for x in xrange(0,numrows): #creates test data for tables\n num = str(x)\n bo_share = Decimal(x)\n bo_date = date(2016, randint(1, 12), randint(1, 28))\n bo_source = 'source' + num\n bo_link = 'http://bo.ru/' + bo_source + '-' + num\n bo_ben = randint(1, numrows)\n bo_off = randint(1, numrows)\n bo_uuid = uuid.uuid4().hex\n oa_uuid = uuid.uuid4().hex\n oa_share = Decimal(x)\n oa_date = date(2016, randint(1, 12), randint(1, 28))\n oa_source = 'source' + num\n oa_link = 'http://oa.ru/' + oa_source + '-' + num\n oa_asset = randint(1, numrows)\n oa_off = randint(1, numrows)\n ab_uuid = uuid.uuid4().hex\n ab_share = Decimal(x)\n ab_date = date(2016, randint(1, 12), randint(1, 28))\n ab_source = 'source' + num\n ab_link = 'http://ab.ru/' + oa_source + '-' + num\n ab_asset = randint(1, numrows)\n ab_ben = randint(1, numrows)\n\n try: #inserts test data to tables via SQL; still produces wierd errors for Beneficiariesoffshores idk why\n cur.execute(\"\"\"INSERT INTO Offshores_beneficiariesoffshores (share, rel_date, source, link, beneficiary_id, offshore_id, uuid) VALUES (%s,%s,%s,%s,%s,%s,%s)\"\"\",(bo_share, bo_date, bo_source, bo_link, bo_ben, bo_off, bo_uuid))\n cur.execute(\"\"\"INSERT INTO Offshores_offshoresassets (uuid, share, rel_date, source, link, asset_id, offshore_id) VALUES (%s,%s,%s,%s,%s,%s,%s)\"\"\",(oa_uuid, oa_share, oa_date, oa_source, oa_link, oa_asset, oa_off))\n cur.execute(\"\"\"INSERT INTO Offshores_assetsbeneficiaries (uuid, share, rel_date, source, link, asset_id, beneficiary_id) VALUES (%s,%s,%s,%s,%s,%s,%s)\"\"\",(ab_uuid, ab_share, ab_date, ab_source, ab_link, ab_asset, ab_ben))\n conn.commit()\n except Exception as e:\n print (\"Exception 1:\", type(e), e)\n\nnumrows = 20\ntry:\n conn = db.connect(\"localhost\",\"root\",\"0013Tau\",\"ved2\" )\n cur = conn.cursor()\n # db_create() #<-- to create tables manually uncomment this\n db_insert(numrows)\n db_insert_linktables(numrows) # IMPORTANT! has to be called ONLY after db_insert()!\n\nexcept Exception as e:\n print (\"Exception 0:\", type(e), e)\n\nexcept: db.rollback() \n\n\nconn.commit()\nconn.close()\nprint ('DB fullfilled')\n\n\n# def main():\n# if len(sys.argv) != 2:\n# print('usage: python3 db_fullfill.py [numrows]')\n# sys.exit(1)\n\n# if len(sys.argv) == 2: \n# numrows = sys.argv[1]\n\n# else:\n# numrows = 15\n# print (numrows)\n\n# return numrows\n# sys.exit(1)\n\n# if __name__ == '__main__':\n# main()\n\n",
"step-ids": [
2,
4,
5,
6,
7
]
}
|
[
2,
4,
5,
6,
7
] |
from django.apps import AppConfig
class ProjectrolesConfig(AppConfig):
name = 'projectroles'
|
normal
|
{
"blob_id": "6a4585e0e2f5ebbd0f9a7fa203f76bb88ff9c2a0",
"index": 2920,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass ProjectrolesConfig(AppConfig):\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass ProjectrolesConfig(AppConfig):\n name = 'projectroles'\n",
"step-4": "from django.apps import AppConfig\n\n\nclass ProjectrolesConfig(AppConfig):\n name = 'projectroles'\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
from Decks.Virtual_World.vw_sets import *
from tools import *
hand_3playable_hts = ["Nibiru, the Primal Being", "Effect Veiler", "Fantastical Dragon Phantazmay", "Dragon Buster Destruction Sword", "Dragon Buster Destruction Sword"]
hand_2playable_hts = ["Nibiru, the Primal Being", "Nibiru, the Primal Being", "Fantastical Dragon Phantazmay", "Fantastical Dragon Phantazmay", "Dragon Buster Destruction Sword"]
hand_3lvl3vw = ["Virtual World Mai-Hime - Lulu", "Virtual World Xiezhi - Jiji", "Virtual World Xiezhi - Jiji", "Virtual World Kirin - Lili", "Virtual World Roshi - Laolao"]
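
# five-card opening hands: the first two exercise hand-trap counting, the third Virtual World level-3 counting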
def test_playable_hts_in_hand():
assert playable_hts_in_hand(hand_3playable_hts) == 3
assert playable_hts_in_hand(hand_2playable_hts) == 2
def test_cards_of_set_in_hand():
assert cards_of_set_in_hand(hand_3lvl3vw, vw_lvl3) == 3
|
normal
|
{
"blob_id": "43179b8b096836758271a791b4aacb7bbe398ea9",
"index": 1807,
"step-1": "<mask token>\n\n\ndef test_playable_hts_in_hand():\n assert playable_hts_in_hand(hand_3playable_hts) == 3\n assert playable_hts_in_hand(hand_2playable_hts) == 2\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef test_playable_hts_in_hand():\n assert playable_hts_in_hand(hand_3playable_hts) == 3\n assert playable_hts_in_hand(hand_2playable_hts) == 2\n\n\ndef test_cards_of_set_in_hand():\n assert cards_of_set_in_hand(hand_3lvl3vw, vw_lvl3) == 3\n",
"step-3": "<mask token>\nhand_3playable_hts = ['Nibiru, the Primal Being', 'Effect Veiler',\n 'Fantastical Dragon Phantazmay', 'Dragon Buster Destruction Sword',\n 'Dragon Buster Destruction Sword']\nhand_2playable_hts = ['Nibiru, the Primal Being',\n 'Nibiru, the Primal Being', 'Fantastical Dragon Phantazmay',\n 'Fantastical Dragon Phantazmay', 'Dragon Buster Destruction Sword']\nhand_3lvl3vw = ['Virtual World Mai-Hime - Lulu',\n 'Virtual World Xiezhi - Jiji', 'Virtual World Xiezhi - Jiji',\n 'Virtual World Kirin - Lili', 'Virtual World Roshi - Laolao']\n\n\ndef test_playable_hts_in_hand():\n assert playable_hts_in_hand(hand_3playable_hts) == 3\n assert playable_hts_in_hand(hand_2playable_hts) == 2\n\n\ndef test_cards_of_set_in_hand():\n assert cards_of_set_in_hand(hand_3lvl3vw, vw_lvl3) == 3\n",
"step-4": "import sys\nfrom Decks.Virtual_World.vw_sets import *\nfrom tools import *\nhand_3playable_hts = ['Nibiru, the Primal Being', 'Effect Veiler',\n 'Fantastical Dragon Phantazmay', 'Dragon Buster Destruction Sword',\n 'Dragon Buster Destruction Sword']\nhand_2playable_hts = ['Nibiru, the Primal Being',\n 'Nibiru, the Primal Being', 'Fantastical Dragon Phantazmay',\n 'Fantastical Dragon Phantazmay', 'Dragon Buster Destruction Sword']\nhand_3lvl3vw = ['Virtual World Mai-Hime - Lulu',\n 'Virtual World Xiezhi - Jiji', 'Virtual World Xiezhi - Jiji',\n 'Virtual World Kirin - Lili', 'Virtual World Roshi - Laolao']\n\n\ndef test_playable_hts_in_hand():\n assert playable_hts_in_hand(hand_3playable_hts) == 3\n assert playable_hts_in_hand(hand_2playable_hts) == 2\n\n\ndef test_cards_of_set_in_hand():\n assert cards_of_set_in_hand(hand_3lvl3vw, vw_lvl3) == 3\n",
"step-5": "import sys\nfrom Decks.Virtual_World.vw_sets import *\nfrom tools import *\n\nhand_3playable_hts = [\"Nibiru, the Primal Being\", \"Effect Veiler\", \"Fantastical Dragon Phantazmay\", \"Dragon Buster Destruction Sword\", \"Dragon Buster Destruction Sword\"]\nhand_2playable_hts = [\"Nibiru, the Primal Being\", \"Nibiru, the Primal Being\", \"Fantastical Dragon Phantazmay\", \"Fantastical Dragon Phantazmay\", \"Dragon Buster Destruction Sword\"]\nhand_3lvl3vw = [\"Virtual World Mai-Hime - Lulu\", \"Virtual World Xiezhi - Jiji\", \"Virtual World Xiezhi - Jiji\", \"Virtual World Kirin - Lili\", \"Virtual World Roshi - Laolao\"]\ndef test_playable_hts_in_hand():\n assert playable_hts_in_hand(hand_3playable_hts) == 3\n assert playable_hts_in_hand(hand_2playable_hts) == 2\n\ndef test_cards_of_set_in_hand():\n assert cards_of_set_in_hand(hand_3lvl3vw, vw_lvl3) == 3\n\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
def pixels_generator(w, h):
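    """Yield (row, col) pairs for every pixel of a w x h image, row-major."""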
i = 0
while i < (w * h):
yield divmod(i, w)
i = i + 1
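
# example: list(pixels_generator(2, 2)) == [(0, 0), (0, 1), (1, 0), (1, 1)]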
|
normal
|
{
"blob_id": "bb481fa038835abc6d61a4985b1e30c7c00bff96",
"index": 158,
"step-1": "<mask token>\n",
"step-2": "def pixels_generator(w, h):\n i = 0\n while i < w * h:\n yield divmod(i, w)\n i = i + 1\n",
"step-3": "def pixels_generator(w, h):\n i = 0\n while i < (w * h):\n yield divmod(i, w)\n i = i + 1\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
#!/usr/bin/env python
import rospy
import cv2
import numpy as np
from cv_bridge import CvBridge
from matplotlib import pyplot as plt
from sensor_msgs.msg import Image
from drone_app_msgs.msg import BBox, Drone, DroneArray
from rospy.numpy_msg import numpy_msg
# ---------------------------------------
# This is an implementation of a simple CV
# algorithm that can be used for testing
# --- Global variables initialization ---
pub = None
# ---------------------------------------
def processFrame(image_message):
# --- Convert from ROS to OpenCV
frame = CvBridge().imgmsg_to_cv2(image_message)
# --- Threshold the image and find a mask
frame_hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
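    # keep only very dark pixels (HSV value <= 30), assumed to be the dark drone body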
mask = cv2.inRange(frame_hsv, (0, 0, 0, 0), (180, 255, 30, 0))
mask = cv2.dilate(mask, None, iterations=1)
    # --- Find contours in the mask and take the largest one
    cnts = cv2.findContours(mask.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)[-2]
center = None
c = max(cnts, key=cv2.contourArea)
x,y,w,h = cv2.boundingRect(c)
# --- Pack in the message
msg = DroneArray()
drone = Drone()
drone.id = -1
drone.name = 'parrot_bebop2'
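    # express the bounding box as percentages of the 640x480 frame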
drone.box.t.linear.x = x * 100 / 640
drone.box.t.linear.y = y * 100 / 480
drone.box.w = w * 100 / 640
drone.box.h = h * 100 / 480
msg.drones.append(drone)
pub.publish(msg)
if __name__ == '__main__' :
# --- Topics
rospy.init_node('gazeboTracking', anonymous=True)
rospy.Subscriber('camera_img', Image, processFrame)
pub = rospy.Publisher('fixed_drones', DroneArray, queue_size=10)
rospy.spin()
|
normal
|
{
"blob_id": "e864dad3f46fc9c6c472823bd06ce74fb5cb3f41",
"index": 462,
"step-1": "<mask token>\n\n\ndef processFrame(image_message):\n frame = CvBridge().imgmsg_to_cv2(image_message)\n frame_hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)\n mask = cv2.inRange(frame_hsv, (0, 0, 0, 0), (180, 255, 30, 0))\n mask = cv2.dilate(mask, None, iterations=1)\n cnts = cv2.findContours(mask.copy(), cv2.RETR_EXTERNAL, cv2.\n CHAIN_APPROX_SIMPLE)[-2]\n center = None\n c = max(cnts, key=cv2.contourArea)\n x, y, w, h = cv2.boundingRect(c)\n msg = DroneArray()\n drone = Drone()\n drone.id = -1\n drone.name = 'parrot_bebop2'\n drone.box.t.linear.x = x * 100 / 640\n drone.box.t.linear.y = y * 100 / 480\n drone.box.w = w * 100 / 640\n drone.box.h = h * 100 / 480\n msg.drones.append(drone)\n pub.publish(msg)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef processFrame(image_message):\n frame = CvBridge().imgmsg_to_cv2(image_message)\n frame_hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)\n mask = cv2.inRange(frame_hsv, (0, 0, 0, 0), (180, 255, 30, 0))\n mask = cv2.dilate(mask, None, iterations=1)\n cnts = cv2.findContours(mask.copy(), cv2.RETR_EXTERNAL, cv2.\n CHAIN_APPROX_SIMPLE)[-2]\n center = None\n c = max(cnts, key=cv2.contourArea)\n x, y, w, h = cv2.boundingRect(c)\n msg = DroneArray()\n drone = Drone()\n drone.id = -1\n drone.name = 'parrot_bebop2'\n drone.box.t.linear.x = x * 100 / 640\n drone.box.t.linear.y = y * 100 / 480\n drone.box.w = w * 100 / 640\n drone.box.h = h * 100 / 480\n msg.drones.append(drone)\n pub.publish(msg)\n\n\nif __name__ == '__main__':\n rospy.init_node('gazeboTracking', anonymous=True)\n rospy.Subscriber('camera_img', Image, processFrame)\n pub = rospy.Publisher('fixed_drones', DroneArray, queue_size=10)\n rospy.spin()\n",
"step-3": "<mask token>\npub = None\n\n\ndef processFrame(image_message):\n frame = CvBridge().imgmsg_to_cv2(image_message)\n frame_hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)\n mask = cv2.inRange(frame_hsv, (0, 0, 0, 0), (180, 255, 30, 0))\n mask = cv2.dilate(mask, None, iterations=1)\n cnts = cv2.findContours(mask.copy(), cv2.RETR_EXTERNAL, cv2.\n CHAIN_APPROX_SIMPLE)[-2]\n center = None\n c = max(cnts, key=cv2.contourArea)\n x, y, w, h = cv2.boundingRect(c)\n msg = DroneArray()\n drone = Drone()\n drone.id = -1\n drone.name = 'parrot_bebop2'\n drone.box.t.linear.x = x * 100 / 640\n drone.box.t.linear.y = y * 100 / 480\n drone.box.w = w * 100 / 640\n drone.box.h = h * 100 / 480\n msg.drones.append(drone)\n pub.publish(msg)\n\n\nif __name__ == '__main__':\n rospy.init_node('gazeboTracking', anonymous=True)\n rospy.Subscriber('camera_img', Image, processFrame)\n pub = rospy.Publisher('fixed_drones', DroneArray, queue_size=10)\n rospy.spin()\n",
"step-4": "import rospy\nimport cv2\nimport numpy as np\nfrom cv_bridge import CvBridge\nfrom matplotlib import pyplot as plt\nfrom sensor_msgs.msg import Image\nfrom drone_app_msgs.msg import BBox, Drone, DroneArray\nfrom rospy.numpy_msg import numpy_msg\npub = None\n\n\ndef processFrame(image_message):\n frame = CvBridge().imgmsg_to_cv2(image_message)\n frame_hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)\n mask = cv2.inRange(frame_hsv, (0, 0, 0, 0), (180, 255, 30, 0))\n mask = cv2.dilate(mask, None, iterations=1)\n cnts = cv2.findContours(mask.copy(), cv2.RETR_EXTERNAL, cv2.\n CHAIN_APPROX_SIMPLE)[-2]\n center = None\n c = max(cnts, key=cv2.contourArea)\n x, y, w, h = cv2.boundingRect(c)\n msg = DroneArray()\n drone = Drone()\n drone.id = -1\n drone.name = 'parrot_bebop2'\n drone.box.t.linear.x = x * 100 / 640\n drone.box.t.linear.y = y * 100 / 480\n drone.box.w = w * 100 / 640\n drone.box.h = h * 100 / 480\n msg.drones.append(drone)\n pub.publish(msg)\n\n\nif __name__ == '__main__':\n rospy.init_node('gazeboTracking', anonymous=True)\n rospy.Subscriber('camera_img', Image, processFrame)\n pub = rospy.Publisher('fixed_drones', DroneArray, queue_size=10)\n rospy.spin()\n",
"step-5": "#!/usr/bin/env python\nimport rospy\nimport cv2\nimport numpy as np\nfrom cv_bridge import CvBridge\nfrom matplotlib import pyplot as plt\nfrom sensor_msgs.msg import Image\nfrom drone_app_msgs.msg import BBox, Drone, DroneArray\nfrom rospy.numpy_msg import numpy_msg\n\n# ---------------------------------------\n# This is an implementation of a simple CV\n# algorithm that can be used for testing\n# --- Global variables initialization ---\npub = None\n# ---------------------------------------\n\ndef processFrame(image_message):\n # --- Convert from ROS to OpenCV\n frame = CvBridge().imgmsg_to_cv2(image_message)\n\n # --- Threshold the image and find a mask\n frame_hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)\n mask = cv2.inRange(frame_hsv, (0, 0, 0, 0), (180, 255, 30, 0))\n mask = cv2.dilate(mask, None, iterations=1)\n\n # --- Find contours in the mask and initialize the current\n cnts = cv2.findContours(mask.copy(), cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_SIMPLE)[-2]\n center = None\n c = max(cnts, key=cv2.contourArea)\n x,y,w,h = cv2.boundingRect(c)\n\n # --- Pack in the message\n msg = DroneArray()\n drone = Drone()\n\n drone.id = -1\n drone.name = 'parrot_bebop2'\n drone.box.t.linear.x = x * 100 / 640\n drone.box.t.linear.y = y * 100 / 480\n drone.box.w = w * 100 / 640\n drone.box.h = h * 100 / 480\n\n msg.drones.append(drone)\n pub.publish(msg)\n\nif __name__ == '__main__' :\n # --- Topics\n rospy.init_node('gazeboTracking', anonymous=True)\n rospy.Subscriber('camera_img', Image, processFrame)\n pub = rospy.Publisher('fixed_drones', DroneArray, queue_size=10)\n \n rospy.spin()\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
import argparse
import pickle
import pandas as pd
from pyspark.sql.session import SparkSession
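# Accept one or more Reddit submission (RS) dump files on the command line.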
parser = argparse.ArgumentParser()
parser.add_argument('--rs', type=str, nargs='+')
args = parser.parse_args()
ss = SparkSession.builder.getOrCreate()
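# Read each dump and union the id/subreddit/title columns into one DataFrame.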
post_df = None
for f in args.rs:
df = ss.read.json(f).select('id', 'subreddit', 'subreddit_id', 'title')
post_df = df if post_df is None else post_df.union(df)
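# 'subreddit_ids' is assumed to be a pickled collection of whitelisted
# subreddit ids produced by an earlier step; keep only matching posts and
# collapse to a single partition before writing.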
subreddit_ids = pickle.load(open('subreddit_ids', 'rb'))
ret = post_df.filter(post_df.subreddit_id.isin(*subreddit_ids)).coalesce(1)
ret.write.orc('RS.orc', mode='overwrite')
ret.write.json('RS.json', mode='overwrite')
|
normal
|
{
"blob_id": "e6b3def6ed6f2523d88912832a876caf2742b786",
"index": 7572,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nparser.add_argument('--rs', type=str, nargs='+')\n<mask token>\nfor f in args.rs:\n df = ss.read.json(f).select('id', 'subreddit', 'subreddit_id', 'title')\n post_df = df if post_df is None else post_df.union(df)\n<mask token>\nret.write.orc('RS.orc', mode='overwrite')\nret.write.json('RS.json', mode='overwrite')\n",
"step-3": "<mask token>\nparser = argparse.ArgumentParser()\nparser.add_argument('--rs', type=str, nargs='+')\nargs = parser.parse_args()\nss = SparkSession.builder.getOrCreate()\npost_df = None\nfor f in args.rs:\n df = ss.read.json(f).select('id', 'subreddit', 'subreddit_id', 'title')\n post_df = df if post_df is None else post_df.union(df)\nsubreddit_ids = pickle.load(open('subreddit_ids', 'rb'))\nret = post_df.filter(post_df.subreddit_id.isin(*subreddit_ids)).coalesce(1)\nret.write.orc('RS.orc', mode='overwrite')\nret.write.json('RS.json', mode='overwrite')\n",
"step-4": "import argparse\nimport pickle\nimport pandas as pd\nfrom pyspark.sql.session import SparkSession\nparser = argparse.ArgumentParser()\nparser.add_argument('--rs', type=str, nargs='+')\nargs = parser.parse_args()\nss = SparkSession.builder.getOrCreate()\npost_df = None\nfor f in args.rs:\n df = ss.read.json(f).select('id', 'subreddit', 'subreddit_id', 'title')\n post_df = df if post_df is None else post_df.union(df)\nsubreddit_ids = pickle.load(open('subreddit_ids', 'rb'))\nret = post_df.filter(post_df.subreddit_id.isin(*subreddit_ids)).coalesce(1)\nret.write.orc('RS.orc', mode='overwrite')\nret.write.json('RS.json', mode='overwrite')\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
#!/usr/bin/env python
# USAGE: day_22_01.py
# Michael Chambers, 2017
class Grid:
def __init__(self, startFile):
# Load initial infected sites
# Origin is top-left of input file
self.infected = set()
posx = 0
with open(startFile, 'r') as fo:
for i, line in enumerate(fo):
line = line.rstrip()
posx = int((len(line) -1) / 2)
for j, char in enumerate(line):
if char == "#":
self.infected.add((i, j))
# Set initial position to middle of start grid
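		# Note: posx is the middle column and posy the middle row, while
		# self.infected uses (row, col) keys; this only lines up because
		# the puzzle input grid is square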
posy = int((sum(1 for line in open(startFile)) - 1) / 2)
self.pos = (posx, posy)
self.vec = (-1,0)
self.infectionEvents = 0
def update(self):
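		# Part 1 rule: an infected node is cleaned and the carrier turns
		# right; a clean node is infected and the carrier turns left;
		# then the carrier moves forward one step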
if self.pos in self.infected:
self.infected.remove(self.pos)
self.turnRight()
else:
self.infectionEvents += 1
self.infected.add(self.pos)
self.turnLeft()
self.pos = (self.pos[0] + self.vec[0], self.pos[1] + self.vec[1])
def turnLeft(self):
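		# self.vec is (d_row, d_col): (-1, 0) = up, (0, -1) = left,
		# (1, 0) = down, (0, 1) = right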
if self.vec == (-1, 0):
self.vec = (0, -1)
elif self.vec == (0, -1):
self.vec = (1,0)
elif self.vec == (1, 0):
self.vec = (0, 1)
else:
self.vec = (-1, 0)
def turnRight(self):
if self.vec == (-1, 0):
self.vec = (0, 1)
elif self.vec == (0, 1):
self.vec = (1, 0)
elif self.vec == (1, 0):
self.vec = (0, -1)
else:
self.vec = (-1, 0)
class ComplexGrid:
# clean : 0
# weakened : 1
# infected : 2
# flagged : 3
def __init__(self, startFile):
# Load initial infected sites
# Origin is top-left of input file
self.weakened = set()
self.infected = set()
self.flagged = set()
posx = 0
with open(startFile, 'r') as fo:
for i, line in enumerate(fo):
line = line.rstrip()
posx = int((len(line) -1) / 2)
for j, char in enumerate(line):
if char == "#":
self.infected.add((i, j))
# Set initial position to middle of start grid
posy = int((sum(1 for line in open(startFile)) - 1) / 2)
self.pos = (posx, posy)
self.vec = (-1,0)
self.infectionEvents = 0
def update(self):
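		# Part 2 rule: clean -> weakened (turn left), weakened -> infected
		# (no turn), infected -> flagged (turn right), flagged -> clean
		# (reverse direction); then the carrier moves forward one step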
if self.pos in self.weakened:
self.weakened.remove(self.pos)
self.infected.add(self.pos)
self.infectionEvents += 1
elif self.pos in self.infected:
self.infected.remove(self.pos)
self.flagged.add(self.pos)
self.turnRight()
elif self.pos in self.flagged:
self.flagged.remove(self.pos)
self.reverse()
else:
self.weakened.add(self.pos)
self.turnLeft()
self.pos = (self.pos[0] + self.vec[0], self.pos[1] + self.vec[1])
def turnLeft(self):
if self.vec == (-1, 0):
self.vec = (0, -1)
elif self.vec == (0, -1):
self.vec = (1,0)
elif self.vec == (1, 0):
self.vec = (0, 1)
else:
self.vec = (-1, 0)
def turnRight(self):
if self.vec == (-1, 0):
self.vec = (0, 1)
elif self.vec == (0, 1):
self.vec = (1, 0)
elif self.vec == (1, 0):
self.vec = (0, -1)
else:
self.vec = (-1, 0)
def reverse(self):
self.vec = tuple(-x for x in self.vec)
def main():
file = "day_22_input.txt"
# file = "day_22_test.txt"
g = Grid(file)
# print(g.infected)
# print("Pos {} Vec {}".format(g.pos, g.vec))
for i in range(10000):
g.update()
# print(g.infected)
# print("Pos {} Vec {}".format(g.pos, g.vec))
print("Part 1: {}".format(g.infectionEvents))
cg = ComplexGrid(file)
for i in range(10000000):
if i % 500000 == 0:
print(i)
cg.update()
print("Part 2: {}".format(cg.infectionEvents))
if __name__ == "__main__":
main()
|
normal
|
{
"blob_id": "f840624ec11679d576fbb80f8e753c59663a7ee2",
"index": 9168,
"step-1": "<mask token>\n\n\nclass ComplexGrid:\n\n def __init__(self, startFile):\n self.weakened = set()\n self.infected = set()\n self.flagged = set()\n posx = 0\n with open(startFile, 'r') as fo:\n for i, line in enumerate(fo):\n line = line.rstrip()\n posx = int((len(line) - 1) / 2)\n for j, char in enumerate(line):\n if char == '#':\n self.infected.add((i, j))\n posy = int((sum(1 for line in open(startFile)) - 1) / 2)\n self.pos = posx, posy\n self.vec = -1, 0\n self.infectionEvents = 0\n\n def update(self):\n if self.pos in self.weakened:\n self.weakened.remove(self.pos)\n self.infected.add(self.pos)\n self.infectionEvents += 1\n elif self.pos in self.infected:\n self.infected.remove(self.pos)\n self.flagged.add(self.pos)\n self.turnRight()\n elif self.pos in self.flagged:\n self.flagged.remove(self.pos)\n self.reverse()\n else:\n self.weakened.add(self.pos)\n self.turnLeft()\n self.pos = self.pos[0] + self.vec[0], self.pos[1] + self.vec[1]\n\n def turnLeft(self):\n if self.vec == (-1, 0):\n self.vec = 0, -1\n elif self.vec == (0, -1):\n self.vec = 1, 0\n elif self.vec == (1, 0):\n self.vec = 0, 1\n else:\n self.vec = -1, 0\n\n def turnRight(self):\n if self.vec == (-1, 0):\n self.vec = 0, 1\n elif self.vec == (0, 1):\n self.vec = 1, 0\n elif self.vec == (1, 0):\n self.vec = 0, -1\n else:\n self.vec = -1, 0\n\n def reverse(self):\n self.vec = tuple(-x for x in self.vec)\n\n\n<mask token>\n",
"step-2": "class Grid:\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n\nclass ComplexGrid:\n\n def __init__(self, startFile):\n self.weakened = set()\n self.infected = set()\n self.flagged = set()\n posx = 0\n with open(startFile, 'r') as fo:\n for i, line in enumerate(fo):\n line = line.rstrip()\n posx = int((len(line) - 1) / 2)\n for j, char in enumerate(line):\n if char == '#':\n self.infected.add((i, j))\n posy = int((sum(1 for line in open(startFile)) - 1) / 2)\n self.pos = posx, posy\n self.vec = -1, 0\n self.infectionEvents = 0\n\n def update(self):\n if self.pos in self.weakened:\n self.weakened.remove(self.pos)\n self.infected.add(self.pos)\n self.infectionEvents += 1\n elif self.pos in self.infected:\n self.infected.remove(self.pos)\n self.flagged.add(self.pos)\n self.turnRight()\n elif self.pos in self.flagged:\n self.flagged.remove(self.pos)\n self.reverse()\n else:\n self.weakened.add(self.pos)\n self.turnLeft()\n self.pos = self.pos[0] + self.vec[0], self.pos[1] + self.vec[1]\n\n def turnLeft(self):\n if self.vec == (-1, 0):\n self.vec = 0, -1\n elif self.vec == (0, -1):\n self.vec = 1, 0\n elif self.vec == (1, 0):\n self.vec = 0, 1\n else:\n self.vec = -1, 0\n\n def turnRight(self):\n if self.vec == (-1, 0):\n self.vec = 0, 1\n elif self.vec == (0, 1):\n self.vec = 1, 0\n elif self.vec == (1, 0):\n self.vec = 0, -1\n else:\n self.vec = -1, 0\n\n def reverse(self):\n self.vec = tuple(-x for x in self.vec)\n\n\n<mask token>\n",
"step-3": "class Grid:\n\n def __init__(self, startFile):\n self.infected = set()\n posx = 0\n with open(startFile, 'r') as fo:\n for i, line in enumerate(fo):\n line = line.rstrip()\n posx = int((len(line) - 1) / 2)\n for j, char in enumerate(line):\n if char == '#':\n self.infected.add((i, j))\n posy = int((sum(1 for line in open(startFile)) - 1) / 2)\n self.pos = posx, posy\n self.vec = -1, 0\n self.infectionEvents = 0\n\n def update(self):\n if self.pos in self.infected:\n self.infected.remove(self.pos)\n self.turnRight()\n else:\n self.infectionEvents += 1\n self.infected.add(self.pos)\n self.turnLeft()\n self.pos = self.pos[0] + self.vec[0], self.pos[1] + self.vec[1]\n\n def turnLeft(self):\n if self.vec == (-1, 0):\n self.vec = 0, -1\n elif self.vec == (0, -1):\n self.vec = 1, 0\n elif self.vec == (1, 0):\n self.vec = 0, 1\n else:\n self.vec = -1, 0\n\n def turnRight(self):\n if self.vec == (-1, 0):\n self.vec = 0, 1\n elif self.vec == (0, 1):\n self.vec = 1, 0\n elif self.vec == (1, 0):\n self.vec = 0, -1\n else:\n self.vec = -1, 0\n\n\nclass ComplexGrid:\n\n def __init__(self, startFile):\n self.weakened = set()\n self.infected = set()\n self.flagged = set()\n posx = 0\n with open(startFile, 'r') as fo:\n for i, line in enumerate(fo):\n line = line.rstrip()\n posx = int((len(line) - 1) / 2)\n for j, char in enumerate(line):\n if char == '#':\n self.infected.add((i, j))\n posy = int((sum(1 for line in open(startFile)) - 1) / 2)\n self.pos = posx, posy\n self.vec = -1, 0\n self.infectionEvents = 0\n\n def update(self):\n if self.pos in self.weakened:\n self.weakened.remove(self.pos)\n self.infected.add(self.pos)\n self.infectionEvents += 1\n elif self.pos in self.infected:\n self.infected.remove(self.pos)\n self.flagged.add(self.pos)\n self.turnRight()\n elif self.pos in self.flagged:\n self.flagged.remove(self.pos)\n self.reverse()\n else:\n self.weakened.add(self.pos)\n self.turnLeft()\n self.pos = self.pos[0] + self.vec[0], self.pos[1] + self.vec[1]\n\n def turnLeft(self):\n if self.vec == (-1, 0):\n self.vec = 0, -1\n elif self.vec == (0, -1):\n self.vec = 1, 0\n elif self.vec == (1, 0):\n self.vec = 0, 1\n else:\n self.vec = -1, 0\n\n def turnRight(self):\n if self.vec == (-1, 0):\n self.vec = 0, 1\n elif self.vec == (0, 1):\n self.vec = 1, 0\n elif self.vec == (1, 0):\n self.vec = 0, -1\n else:\n self.vec = -1, 0\n\n def reverse(self):\n self.vec = tuple(-x for x in self.vec)\n\n\n<mask token>\n",
"step-4": "class Grid:\n\n def __init__(self, startFile):\n self.infected = set()\n posx = 0\n with open(startFile, 'r') as fo:\n for i, line in enumerate(fo):\n line = line.rstrip()\n posx = int((len(line) - 1) / 2)\n for j, char in enumerate(line):\n if char == '#':\n self.infected.add((i, j))\n posy = int((sum(1 for line in open(startFile)) - 1) / 2)\n self.pos = posx, posy\n self.vec = -1, 0\n self.infectionEvents = 0\n\n def update(self):\n if self.pos in self.infected:\n self.infected.remove(self.pos)\n self.turnRight()\n else:\n self.infectionEvents += 1\n self.infected.add(self.pos)\n self.turnLeft()\n self.pos = self.pos[0] + self.vec[0], self.pos[1] + self.vec[1]\n\n def turnLeft(self):\n if self.vec == (-1, 0):\n self.vec = 0, -1\n elif self.vec == (0, -1):\n self.vec = 1, 0\n elif self.vec == (1, 0):\n self.vec = 0, 1\n else:\n self.vec = -1, 0\n\n def turnRight(self):\n if self.vec == (-1, 0):\n self.vec = 0, 1\n elif self.vec == (0, 1):\n self.vec = 1, 0\n elif self.vec == (1, 0):\n self.vec = 0, -1\n else:\n self.vec = -1, 0\n\n\nclass ComplexGrid:\n\n def __init__(self, startFile):\n self.weakened = set()\n self.infected = set()\n self.flagged = set()\n posx = 0\n with open(startFile, 'r') as fo:\n for i, line in enumerate(fo):\n line = line.rstrip()\n posx = int((len(line) - 1) / 2)\n for j, char in enumerate(line):\n if char == '#':\n self.infected.add((i, j))\n posy = int((sum(1 for line in open(startFile)) - 1) / 2)\n self.pos = posx, posy\n self.vec = -1, 0\n self.infectionEvents = 0\n\n def update(self):\n if self.pos in self.weakened:\n self.weakened.remove(self.pos)\n self.infected.add(self.pos)\n self.infectionEvents += 1\n elif self.pos in self.infected:\n self.infected.remove(self.pos)\n self.flagged.add(self.pos)\n self.turnRight()\n elif self.pos in self.flagged:\n self.flagged.remove(self.pos)\n self.reverse()\n else:\n self.weakened.add(self.pos)\n self.turnLeft()\n self.pos = self.pos[0] + self.vec[0], self.pos[1] + self.vec[1]\n\n def turnLeft(self):\n if self.vec == (-1, 0):\n self.vec = 0, -1\n elif self.vec == (0, -1):\n self.vec = 1, 0\n elif self.vec == (1, 0):\n self.vec = 0, 1\n else:\n self.vec = -1, 0\n\n def turnRight(self):\n if self.vec == (-1, 0):\n self.vec = 0, 1\n elif self.vec == (0, 1):\n self.vec = 1, 0\n elif self.vec == (1, 0):\n self.vec = 0, -1\n else:\n self.vec = -1, 0\n\n def reverse(self):\n self.vec = tuple(-x for x in self.vec)\n\n\ndef main():\n file = 'day_22_input.txt'\n g = Grid(file)\n for i in range(10000):\n g.update()\n print('Part 1: {}'.format(g.infectionEvents))\n cg = ComplexGrid(file)\n for i in range(10000000):\n if i % 500000 == 0:\n print(i)\n cg.update()\n print('Part 2: {}'.format(cg.infectionEvents))\n\n\nif __name__ == '__main__':\n main()\n",
"step-5": "#!/usr/bin/env python\n\n# USAGE: day_22_01.py\n# Michael Chambers, 2017\n\nclass Grid:\n\tdef __init__(self, startFile):\n\t\t# Load initial infected sites\n\t\t# Origin is top-left of input file\n\t\tself.infected = set()\n\t\tposx = 0\n\t\twith open(startFile, 'r') as fo:\n\t\t\tfor i, line in enumerate(fo):\n\t\t\t\tline = line.rstrip()\n\t\t\t\tposx = int((len(line) -1) / 2)\n\t\t\t\tfor j, char in enumerate(line):\n\t\t\t\t\tif char == \"#\":\n\t\t\t\t\t\tself.infected.add((i, j))\n\n\t\t# Set initial position to middle of start grid\n\t\tposy = int((sum(1 for line in open(startFile)) - 1) / 2)\n\t\tself.pos = (posx, posy)\n\t\tself.vec = (-1,0)\n\t\tself.infectionEvents = 0\n\n\tdef update(self):\n\t\tif self.pos in self.infected:\n\t\t\tself.infected.remove(self.pos)\n\t\t\tself.turnRight()\n\t\telse:\n\t\t\tself.infectionEvents += 1\n\t\t\tself.infected.add(self.pos)\n\t\t\tself.turnLeft()\n\t\tself.pos = (self.pos[0] + self.vec[0], self.pos[1] + self.vec[1])\n\n\tdef turnLeft(self):\n\t\tif self.vec == (-1, 0):\n\t\t\tself.vec = (0, -1)\n\t\telif self.vec == (0, -1):\n\t\t\tself.vec = (1,0)\n\t\telif self.vec == (1, 0):\n\t\t\tself.vec = (0, 1)\n\t\telse:\n\t\t\tself.vec = (-1, 0)\n\n\tdef turnRight(self):\n\t\tif self.vec == (-1, 0):\n\t\t\tself.vec = (0, 1)\n\t\telif self.vec == (0, 1):\n\t\t\tself.vec = (1, 0)\n\t\telif self.vec == (1, 0):\n\t\t\tself.vec = (0, -1)\n\t\telse:\n\t\t\tself.vec = (-1, 0)\n\n\nclass ComplexGrid:\n\t# clean : 0\n\t# weakened : 1\n\t# infected : 2\n\t# flagged : 3\n\n\tdef __init__(self, startFile):\n\t\t# Load initial infected sites\n\t\t# Origin is top-left of input file\n\t\tself.weakened = set()\n\t\tself.infected = set()\n\t\tself.flagged = set()\n\t\tposx = 0\n\t\twith open(startFile, 'r') as fo:\n\t\t\tfor i, line in enumerate(fo):\n\t\t\t\tline = line.rstrip()\n\t\t\t\tposx = int((len(line) -1) / 2)\n\t\t\t\tfor j, char in enumerate(line):\n\t\t\t\t\tif char == \"#\":\n\t\t\t\t\t\tself.infected.add((i, j))\n\n\t\t# Set initial position to middle of start grid\n\t\tposy = int((sum(1 for line in open(startFile)) - 1) / 2)\n\t\tself.pos = (posx, posy)\n\t\tself.vec = (-1,0)\n\t\tself.infectionEvents = 0\n\n\tdef update(self):\n\t\tif self.pos in self.weakened:\n\t\t\tself.weakened.remove(self.pos)\n\t\t\tself.infected.add(self.pos)\n\t\t\tself.infectionEvents += 1\n\t\telif self.pos in self.infected:\n\t\t\tself.infected.remove(self.pos)\n\t\t\tself.flagged.add(self.pos)\n\t\t\tself.turnRight()\n\t\telif self.pos in self.flagged:\n\t\t\tself.flagged.remove(self.pos)\n\t\t\tself.reverse()\n\t\telse:\n\t\t\tself.weakened.add(self.pos)\n\t\t\tself.turnLeft()\n\t\tself.pos = (self.pos[0] + self.vec[0], self.pos[1] + self.vec[1])\n\n\tdef turnLeft(self):\n\t\tif self.vec == (-1, 0):\n\t\t\tself.vec = (0, -1)\n\t\telif self.vec == (0, -1):\n\t\t\tself.vec = (1,0)\n\t\telif self.vec == (1, 0):\n\t\t\tself.vec = (0, 1)\n\t\telse:\n\t\t\tself.vec = (-1, 0)\n\n\tdef turnRight(self):\n\t\tif self.vec == (-1, 0):\n\t\t\tself.vec = (0, 1)\n\t\telif self.vec == (0, 1):\n\t\t\tself.vec = (1, 0)\n\t\telif self.vec == (1, 0):\n\t\t\tself.vec = (0, -1)\n\t\telse:\n\t\t\tself.vec = (-1, 0)\t\n\n\tdef reverse(self):\n\t\tself.vec = tuple(-x for x in self.vec)\t\n\ndef main():\n\tfile = \"day_22_input.txt\"\n\t# file = \"day_22_test.txt\"\n\tg = Grid(file)\n\t# print(g.infected)\n\t# print(\"Pos {} Vec {}\".format(g.pos, g.vec))\n\tfor i in range(10000):\n\t\tg.update()\n\t\t# print(g.infected)\n\t\t# print(\"Pos {} Vec {}\".format(g.pos, 
g.vec))\n\tprint(\"Part 1: {}\".format(g.infectionEvents))\n\n\tcg = ComplexGrid(file)\n\tfor i in range(10000000):\n\t\tif i % 500000 == 0:\n\t\t\tprint(i)\n\t\tcg.update()\n\tprint(\"Part 2: {}\".format(cg.infectionEvents))\n\n\n\nif __name__ == \"__main__\":\n\tmain()\n\n",
"step-ids": [
6,
7,
11,
13,
14
]
}
|
[
6,
7,
11,
13,
14
] |
import gc
import unittest
import numpy as np
from pydrake.autodiffutils import AutoDiffXd
from pydrake.common import RandomDistribution, RandomGenerator
from pydrake.common.test_utilities import numpy_compare
from pydrake.common.test_utilities.deprecation import catch_drake_warnings
from pydrake.common.value import Value
from pydrake.symbolic import Expression, Variable
from pydrake.systems.framework import (
BasicVector,
DiagramBuilder,
DiagramBuilder_,
InputPort,
TriggerType,
VectorBase,
)
from pydrake.systems.test.test_util import (
MyVector2,
)
from pydrake.systems.primitives import (
Adder, Adder_,
AddRandomInputs,
AffineSystem, AffineSystem_,
ConstantValueSource, ConstantValueSource_,
ConstantVectorSource, ConstantVectorSource_,
ControllabilityMatrix,
Demultiplexer, Demultiplexer_,
DiscreteDerivative, DiscreteDerivative_,
DiscreteTimeDelay, DiscreteTimeDelay_,
FirstOrderLowPassFilter,
FirstOrderTaylorApproximation,
Gain, Gain_,
Integrator, Integrator_,
IsControllable,
IsDetectable,
IsObservable,
IsStabilizable,
Linearize,
LinearSystem, LinearSystem_,
LinearTransformDensity, LinearTransformDensity_,
LogVectorOutput,
MatrixGain,
Multiplexer, Multiplexer_,
MultilayerPerceptron, MultilayerPerceptron_,
ObservabilityMatrix,
PassThrough, PassThrough_,
PerceptronActivationType,
PortSwitch, PortSwitch_,
RandomSource,
Saturation, Saturation_,
SharedPointerSystem, SharedPointerSystem_,
Sine, Sine_,
StateInterpolatorWithDiscreteDerivative,
StateInterpolatorWithDiscreteDerivative_,
SymbolicVectorSystem, SymbolicVectorSystem_,
TrajectoryAffineSystem, TrajectoryAffineSystem_,
TrajectoryLinearSystem, TrajectoryLinearSystem_,
TrajectorySource, TrajectorySource_,
VectorLog, VectorLogSink, VectorLogSink_,
WrapToSystem, WrapToSystem_,
ZeroOrderHold, ZeroOrderHold_,
)
from pydrake.trajectories import PiecewisePolynomial
def compare_value(test, a, b):
# Compares a vector or abstract value.
if isinstance(a, VectorBase):
test.assertTrue(np.allclose(a.get_value(), b.get_value()))
else:
test.assertEqual(type(a.get_value()), type(b.get_value()))
test.assertEqual(a.get_value(), b.get_value())
class TestGeneral(unittest.TestCase):
def _check_instantiations(self, template, supports_symbolic=True):
default_cls = template[None]
self.assertTrue(template[float] is default_cls)
self.assertTrue(template[AutoDiffXd] is not default_cls)
if supports_symbolic:
self.assertTrue(template[Expression] is not default_cls)
def test_instantiations(self):
# TODO(eric.cousineau): Refine tests once NumPy functionality is
# resolved for dtype=object, or dtype=custom is used.
self._check_instantiations(Adder_)
self._check_instantiations(AffineSystem_)
self._check_instantiations(ConstantValueSource_)
self._check_instantiations(ConstantVectorSource_)
self._check_instantiations(Demultiplexer_)
self._check_instantiations(DiscreteDerivative_)
self._check_instantiations(DiscreteTimeDelay_)
self._check_instantiations(Gain_)
self._check_instantiations(Integrator_)
self._check_instantiations(LinearSystem_)
self._check_instantiations(LinearTransformDensity_,
supports_symbolic=False)
self._check_instantiations(Multiplexer_)
self._check_instantiations(MultilayerPerceptron_)
self._check_instantiations(PassThrough_)
self._check_instantiations(PortSwitch_)
self._check_instantiations(Saturation_)
self._check_instantiations(SharedPointerSystem_)
self._check_instantiations(Sine_)
self._check_instantiations(StateInterpolatorWithDiscreteDerivative_)
self._check_instantiations(SymbolicVectorSystem_)
self._check_instantiations(TrajectoryAffineSystem_,
supports_symbolic=False)
self._check_instantiations(TrajectoryLinearSystem_,
supports_symbolic=False)
self._check_instantiations(TrajectorySource_)
self._check_instantiations(VectorLogSink_)
self._check_instantiations(WrapToSystem_)
self._check_instantiations(ZeroOrderHold_)
def test_linear_affine_system(self):
# Just make sure linear system is spelled correctly.
A = np.identity(2)
B = np.array([[0], [1]])
f0 = np.array([[0], [0]])
C = np.array([[0, 1]])
D = [1]
y0 = [0]
system = LinearSystem(A, B, C, D)
context = system.CreateDefaultContext()
self.assertEqual(system.get_input_port(0).size(), 1)
        self.assertEqual(
            context.get_mutable_continuous_state_vector().size(), 2)
self.assertEqual(system.get_output_port(0).size(), 1)
self.assertTrue((system.A() == A).all())
self.assertTrue((system.B() == B).all())
self.assertTrue((system.f0() == f0).all())
self.assertTrue((system.C() == C).all())
self.assertEqual(system.D(), D)
self.assertEqual(system.y0(), y0)
self.assertEqual(system.time_period(), 0.)
x0 = np.array([1, 2])
system.configure_default_state(x0=x0)
system.SetDefaultContext(context)
np.testing.assert_equal(
context.get_continuous_state_vector().CopyToVector(), x0)
generator = RandomGenerator()
system.SetRandomContext(context, generator)
np.testing.assert_equal(
context.get_continuous_state_vector().CopyToVector(), x0)
system.configure_random_state(covariance=np.eye(2))
system.SetRandomContext(context, generator)
self.assertNotEqual(
context.get_continuous_state_vector().CopyToVector()[1], x0[1])
Co = ControllabilityMatrix(system)
self.assertEqual(Co.shape, (2, 2))
self.assertFalse(IsControllable(system))
self.assertFalse(IsControllable(system, 1e-6))
self.assertFalse(IsStabilizable(sys=system))
self.assertFalse(IsStabilizable(sys=system, threshold=1e-6))
Ob = ObservabilityMatrix(system)
self.assertEqual(Ob.shape, (2, 2))
self.assertFalse(IsObservable(system))
self.assertFalse(IsDetectable(sys=system))
self.assertFalse(IsDetectable(sys=system, threshold=1e-6))
system = AffineSystem(A, B, f0, C, D, y0, .1)
self.assertEqual(system.get_input_port(0), system.get_input_port())
self.assertEqual(system.get_output_port(0), system.get_output_port())
context = system.CreateDefaultContext()
self.assertEqual(system.get_input_port(0).size(), 1)
self.assertEqual(context.get_discrete_state_vector().size(), 2)
self.assertEqual(system.get_output_port(0).size(), 1)
self.assertTrue((system.A() == A).all())
self.assertTrue((system.B() == B).all())
self.assertTrue((system.f0() == f0).all())
self.assertTrue((system.C() == C).all())
self.assertEqual(system.D(), D)
self.assertEqual(system.y0(), y0)
self.assertEqual(system.time_period(), .1)
system.get_input_port(0).FixValue(context, 0)
linearized = Linearize(system, context)
self.assertTrue((linearized.A() == A).all())
taylor = FirstOrderTaylorApproximation(system, context)
self.assertTrue((taylor.y0() == y0).all())
new_A = np.array([[1, 2], [3, 4]])
new_B = np.array([[5], [6]])
new_f0 = np.array([[7], [8]])
new_C = np.array([[9, 10]])
new_D = np.array([[11]])
new_y0 = np.array([12])
system.UpdateCoefficients(
A=new_A, B=new_B, f0=new_f0, C=new_C, D=new_D, y0=new_y0
)
np.testing.assert_equal(new_A, system.A())
np.testing.assert_equal(new_B, system.B())
np.testing.assert_equal(new_f0.flatten(), system.f0())
np.testing.assert_equal(new_C, system.C())
np.testing.assert_equal(new_D, system.D())
np.testing.assert_equal(new_y0, system.y0())
system = MatrixGain(D=A)
self.assertTrue((system.D() == A).all())
system = TrajectoryAffineSystem(
PiecewisePolynomial(A),
PiecewisePolynomial(B),
PiecewisePolynomial(f0),
PiecewisePolynomial(C),
PiecewisePolynomial(D),
PiecewisePolynomial(y0),
.1)
self.assertEqual(system.get_input_port(0), system.get_input_port())
self.assertEqual(system.get_output_port(0), system.get_output_port())
context = system.CreateDefaultContext()
self.assertEqual(system.get_input_port(0).size(), 1)
self.assertEqual(context.get_discrete_state_vector().size(), 2)
self.assertEqual(system.get_output_port(0).size(), 1)
for t in np.linspace(0., 1., 5):
self.assertTrue((system.A(t) == A).all())
self.assertTrue((system.B(t) == B).all())
self.assertTrue((system.f0(t) == f0).all())
self.assertTrue((system.C(t) == C).all())
self.assertEqual(system.D(t), D)
self.assertEqual(system.y0(t), y0)
self.assertEqual(system.time_period(), .1)
x0 = np.array([1, 2])
system.configure_default_state(x0=x0)
system.SetDefaultContext(context)
np.testing.assert_equal(
context.get_discrete_state_vector().CopyToVector(), x0)
generator = RandomGenerator()
system.SetRandomContext(context, generator)
np.testing.assert_equal(
context.get_discrete_state_vector().CopyToVector(), x0)
system.configure_random_state(covariance=np.eye(2))
system.SetRandomContext(context, generator)
self.assertNotEqual(
context.get_discrete_state_vector().CopyToVector()[1], x0[1])
system = TrajectoryLinearSystem(
A=PiecewisePolynomial(A),
B=PiecewisePolynomial(B),
C=PiecewisePolynomial(C),
D=PiecewisePolynomial(D),
time_period=0.1)
self.assertEqual(system.time_period(), .1)
system.configure_default_state(x0=np.array([1, 2]))
system.configure_random_state(covariance=np.eye(2))
def test_linear_affine_system_empty_matrices(self):
# Confirm the default values for the system matrices in the
# constructor.
def CheckSizes(system, num_states, num_inputs, num_outputs):
self.assertEqual(system.num_continuous_states(), num_states)
self.assertEqual(system.num_inputs(), num_inputs)
self.assertEqual(system.num_outputs(), num_outputs)
# A constant vector system.
system = AffineSystem(y0=[2, 1])
CheckSizes(system, num_states=0, num_inputs=0, num_outputs=2)
# A matrix gain.
system = AffineSystem(D=np.eye(2))
CheckSizes(system, num_states=0, num_inputs=2, num_outputs=2)
system = LinearSystem(D=np.eye(2))
CheckSizes(system, num_states=0, num_inputs=2, num_outputs=2)
# Add an offset.
system = AffineSystem(D=np.eye(2), y0=[1, 2])
CheckSizes(system, num_states=0, num_inputs=2, num_outputs=2)
# An integrator.
system = LinearSystem(B=np.eye(2))
CheckSizes(system, num_states=2, num_inputs=2, num_outputs=0)
def test_linear_system_zero_size(self):
# Explicitly test #12633.
num_x = 0
num_y = 2
num_u = 2
A = np.zeros((num_x, num_x))
B = np.zeros((num_x, num_u))
C = np.zeros((num_y, num_x))
D = np.zeros((num_y, num_u))
self.assertIsNotNone(LinearSystem(A, B, C, D))
@numpy_compare.check_nonsymbolic_types
def test_linear_transform_density(self, T):
dut = LinearTransformDensity_[T](
distribution=RandomDistribution.kGaussian,
input_size=3,
output_size=3)
w_in = np.array([T(0.5), T(0.1), T(1.5)])
context = dut.CreateDefaultContext()
dut.get_input_port_w_in().FixValue(context, w_in)
self.assertEqual(dut.get_input_port_A().size(), 9)
self.assertEqual(dut.get_input_port_b().size(), 3)
self.assertEqual(dut.get_distribution(), RandomDistribution.kGaussian)
A = np.array([
[T(0.5), T(1), T(2)], [T(1), T(2), T(3)], [T(3), T(4), T(5)]])
dut.FixConstantA(context=context, A=A)
b = np.array([T(1), T(2), T(3)])
dut.FixConstantB(context=context, b=b)
dut.CalcDensity(context=context)
self.assertEqual(dut.get_output_port_w_out().size(), 3)
self.assertEqual(dut.get_output_port_w_out_density().size(), 1)
def test_vector_pass_through(self):
model_value = BasicVector([1., 2, 3])
system = PassThrough(vector_size=model_value.size())
context = system.CreateDefaultContext()
system.get_input_port(0).FixValue(context, model_value)
output = system.AllocateOutput()
input_eval = system.EvalVectorInput(context, 0)
compare_value(self, input_eval, model_value)
system.CalcOutput(context, output)
output_value = output.get_vector_data(0)
compare_value(self, output_value, model_value)
def test_default_vector_pass_through(self):
model_value = [1., 2, 3]
system = PassThrough(value=model_value)
context = system.CreateDefaultContext()
np.testing.assert_array_equal(
model_value, system.get_output_port().Eval(context))
def test_abstract_pass_through(self):
model_value = Value("Hello world")
system = PassThrough(abstract_model_value=model_value)
context = system.CreateDefaultContext()
system.get_input_port(0).FixValue(context, model_value)
output = system.AllocateOutput()
input_eval = system.EvalAbstractInput(context, 0)
compare_value(self, input_eval, model_value)
system.CalcOutput(context, output)
output_value = output.get_data(0)
compare_value(self, output_value, model_value)
def test_port_switch(self):
system = PortSwitch(vector_size=2)
a = system.DeclareInputPort(name="a")
system.DeclareInputPort(name="b")
context = system.CreateDefaultContext()
self.assertIsInstance(a, InputPort)
system.get_port_selector_input_port().FixValue(context, a.get_index())
def test_first_order_low_pass_filter(self):
filter1 = FirstOrderLowPassFilter(time_constant=3.0, size=4)
self.assertEqual(filter1.get_time_constant(), 3.0)
alpha = np.array([1, 2, 3])
filter2 = FirstOrderLowPassFilter(time_constants=alpha)
np.testing.assert_array_equal(filter2.get_time_constants_vector(),
alpha)
context = filter2.CreateDefaultContext()
filter2.set_initial_output_value(context, [0., -0.2, 0.4])
def test_gain(self):
k = 42.
input_size = 10
systems = [Gain(k=k, size=input_size),
Gain(k=k*np.ones(input_size))]
for system in systems:
context = system.CreateDefaultContext()
output = system.AllocateOutput()
def mytest(input, expected):
system.get_input_port(0).FixValue(context, input)
system.CalcOutput(context, output)
self.assertTrue(np.allclose(output.get_vector_data(
0).CopyToVector(), expected))
            test_input = np.arange(input_size)
            mytest(test_input, k*test_input)
def test_saturation(self):
system = Saturation((0., -1., 3.), (1., 2., 4.))
context = system.CreateDefaultContext()
output = system.AllocateOutput()
def mytest(input, expected):
system.get_input_port(0).FixValue(context, input)
system.CalcOutput(context, output)
self.assertTrue(np.allclose(output.get_vector_data(
0).CopyToVector(), expected))
mytest((-5., 5., 4.), (0., 2., 4.))
mytest((.4, 0., 3.5), (.4, 0., 3.5))
def test_trajectory_source(self):
ppt = PiecewisePolynomial.FirstOrderHold(
[0., 1.], [[2., 3.], [2., 1.]])
system = TrajectorySource(trajectory=ppt,
output_derivative_order=0,
zero_derivatives_beyond_limits=True)
context = system.CreateDefaultContext()
output = system.AllocateOutput()
def mytest(input, expected):
context.SetTime(input)
system.CalcOutput(context, output)
self.assertTrue(np.allclose(output.get_vector_data(
0).CopyToVector(), expected))
mytest(0.0, (2.0, 2.0))
mytest(0.5, (2.5, 1.5))
mytest(1.0, (3.0, 1.0))
ppt2 = PiecewisePolynomial.FirstOrderHold(
[0., 1.], [[4., 6.], [4., 2.]])
system.UpdateTrajectory(trajectory=ppt2)
mytest(0.0, (4.0, 4.0))
mytest(0.5, (5.0, 3.0))
mytest(1.0, (6.0, 2.0))
def test_symbolic_vector_system(self):
t = Variable("t")
x = [Variable("x0"), Variable("x1")]
u = [Variable("u0"), Variable("u1")]
system = SymbolicVectorSystem(time=t, state=x, input=u,
dynamics=[x[0] + x[1], t],
output=[u[1]],
time_period=0.0)
context = system.CreateDefaultContext()
self.assertEqual(context.num_continuous_states(), 2)
self.assertEqual(context.num_discrete_state_groups(), 0)
self.assertEqual(system.get_input_port(0).size(), 2)
self.assertEqual(system.get_output_port(0).size(), 1)
self.assertEqual(context.num_abstract_parameters(), 0)
self.assertEqual(context.num_numeric_parameter_groups(), 0)
self.assertTrue(system.dynamics_for_variable(x[0])
.EqualTo(x[0] + x[1]))
self.assertTrue(system.dynamics_for_variable(x[1])
.EqualTo(t))
def test_symbolic_vector_system_parameters(self):
t = Variable("t")
x = [Variable("x0"), Variable("x1")]
u = [Variable("u0"), Variable("u1")]
p = [Variable("p0"), Variable("p1")]
system = SymbolicVectorSystem(time=t, state=x, input=u,
parameter=p,
dynamics=[p[0] * x[0] + x[1] + p[1], t],
output=[u[1]],
time_period=0.0)
context = system.CreateDefaultContext()
self.assertEqual(context.num_continuous_states(), 2)
self.assertEqual(context.num_discrete_state_groups(), 0)
self.assertEqual(system.get_input_port(0).size(), 2)
self.assertEqual(system.get_output_port(0).size(), 1)
self.assertEqual(context.num_abstract_parameters(), 0)
self.assertEqual(context.num_numeric_parameter_groups(), 1)
self.assertEqual(context.get_numeric_parameter(0).size(), 2)
self.assertTrue(system.dynamics_for_variable(x[0])
.EqualTo(p[0] * x[0] + x[1] + p[1]))
self.assertTrue(system.dynamics_for_variable(x[1])
.EqualTo(t))
def test_wrap_to_system(self):
system = WrapToSystem(2)
system.set_interval(1, 1., 2.)
context = system.CreateDefaultContext()
output = system.AllocateOutput()
def mytest(input, expected):
system.get_input_port(0).FixValue(context, input)
system.CalcOutput(context, output)
self.assertTrue(np.allclose(output.get_vector_data(
0).CopyToVector(), expected))
mytest((-1.5, 0.5), (-1.5, 1.5))
mytest((.2, .3), (.2, 1.3))
def test_demultiplexer(self):
# Test demultiplexer with scalar outputs.
demux = Demultiplexer(size=4)
context = demux.CreateDefaultContext()
self.assertEqual(demux.num_input_ports(), 1)
self.assertEqual(demux.num_output_ports(), 4)
numpy_compare.assert_equal(demux.get_output_ports_sizes(),
[1, 1, 1, 1])
input_vec = np.array([1., 2., 3., 4.])
demux.get_input_port(0).FixValue(context, input_vec)
output = demux.AllocateOutput()
demux.CalcOutput(context, output)
for i in range(4):
self.assertTrue(
np.allclose(output.get_vector_data(i).get_value(),
input_vec[i]))
# Test demultiplexer with vector outputs.
demux = Demultiplexer(size=4, output_ports_size=2)
context = demux.CreateDefaultContext()
self.assertEqual(demux.num_input_ports(), 1)
self.assertEqual(demux.num_output_ports(), 2)
numpy_compare.assert_equal(demux.get_output_ports_sizes(), [2, 2])
demux.get_input_port(0).FixValue(context, input_vec)
output = demux.AllocateOutput()
demux.CalcOutput(context, output)
for i in range(2):
self.assertTrue(
np.allclose(output.get_vector_data(i).get_value(),
input_vec[2*i:2*i+2]))
# Test demultiplexer with different output port sizes.
output_ports_sizes = np.array([1, 2, 1])
num_output_ports = output_ports_sizes.size
input_vec = np.array([1., 2., 3., 4.])
demux = Demultiplexer(output_ports_sizes=output_ports_sizes)
context = demux.CreateDefaultContext()
self.assertEqual(demux.num_input_ports(), 1)
self.assertEqual(demux.num_output_ports(), num_output_ports)
numpy_compare.assert_equal(demux.get_output_ports_sizes(),
output_ports_sizes)
demux.get_input_port(0).FixValue(context, input_vec)
output = demux.AllocateOutput()
demux.CalcOutput(context, output)
output_port_start = 0
for i in range(num_output_ports):
output_port_size = output.get_vector_data(i).size()
self.assertTrue(
np.allclose(output.get_vector_data(i).get_value(),
input_vec[output_port_start:
output_port_start+output_port_size]))
output_port_start += output_port_size
def test_multiplexer(self):
my_vector = MyVector2(data=[1., 2.])
test_cases = [
dict(has_vector=False, mux=Multiplexer(num_scalar_inputs=4),
data=[[5.], [3.], [4.], [2.]]),
dict(has_vector=False, mux=Multiplexer(input_sizes=[2, 3]),
data=[[8., 4.], [3., 6., 9.]]),
dict(has_vector=True, mux=Multiplexer(model_vector=my_vector),
data=[[42.], [3.]]),
]
for case in test_cases:
mux = case['mux']
port_size = sum([len(vec) for vec in case['data']])
self.assertEqual(mux.get_output_port(0).size(), port_size)
context = mux.CreateDefaultContext()
output = mux.AllocateOutput()
num_ports = len(case['data'])
self.assertEqual(context.num_input_ports(), num_ports)
for j, vec in enumerate(case['data']):
mux.get_input_port(j).FixValue(context, vec)
mux.CalcOutput(context, output)
self.assertTrue(
np.allclose(output.get_vector_data(0).get_value(),
[elem for vec in case['data'] for elem in vec]))
if case['has_vector']:
# Check the type matches MyVector2.
value = output.get_vector_data(0)
self.assertTrue(isinstance(value, MyVector2))
def test_multilayer_perceptron(self):
mlp = MultilayerPerceptron(
layers=[1, 2, 3], activation_type=PerceptronActivationType.kReLU)
self.assertEqual(mlp.get_input_port().size(), 1)
self.assertEqual(mlp.get_output_port().size(), 3)
context = mlp.CreateDefaultContext()
params = np.zeros((mlp.num_parameters(), 1))
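        # 13 parameters = (1*2 weights + 2 biases) + (2*3 weights + 3 biases)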
self.assertEqual(mlp.num_parameters(), 13)
self.assertEqual(mlp.layers(), [1, 2, 3])
self.assertEqual(mlp.activation_type(layer=0),
PerceptronActivationType.kReLU)
self.assertEqual(len(mlp.GetParameters(context=context)),
mlp.num_parameters())
mlp.SetWeights(context=context, layer=0, W=np.array([[1], [2]]))
mlp.SetBiases(context=context, layer=0, b=[3, 4])
np.testing.assert_array_equal(
mlp.GetWeights(context=context, layer=0), np.array([[1], [2]]))
np.testing.assert_array_equal(
mlp.GetBiases(context=context, layer=0), np.array([3, 4]))
params = np.zeros(mlp.num_parameters())
mlp.SetWeights(params=params, layer=0, W=np.array([[1], [2]]))
mlp.SetBiases(params=params, layer=0, b=[3, 4])
np.testing.assert_array_equal(
mlp.GetWeights(params=params, layer=0), np.array([[1], [2]]))
np.testing.assert_array_equal(
mlp.GetBiases(params=params, layer=0), np.array([3, 4]))
mutable_params = mlp.GetMutableParameters(context=context)
mutable_params[:] = 3.0
np.testing.assert_array_equal(mlp.GetParameters(context),
np.full(mlp.num_parameters(), 3.0))
global called_loss
called_loss = False
def silly_loss(Y, dloss_dY):
global called_loss
called_loss = True
# We must be careful to update the dloss in place, rather than bind
# a new matrix to the same variable name.
dloss_dY[:] = 1
# dloss_dY = np.array(...etc...) # <== wrong
return Y.sum()
dloss_dparams = np.zeros((13,))
generator = RandomGenerator(23)
mlp.SetRandomContext(context, generator)
mlp.Backpropagation(context=context,
X=np.array([1, 3, 4]).reshape((1, 3)),
loss=silly_loss,
dloss_dparams=dloss_dparams)
self.assertTrue(called_loss)
self.assertTrue(dloss_dparams.any()) # No longer all zero.
dloss_dparams = np.zeros((13,))
mlp.BackpropagationMeanSquaredError(context=context,
X=np.array([1, 3, 4]).reshape(
(1, 3)),
Y_desired=np.eye(3),
dloss_dparams=dloss_dparams)
self.assertTrue(dloss_dparams.any()) # No longer all zero.
Y = np.asfortranarray(np.eye(3))
mlp.BatchOutput(context=context, X=np.array([[0.1, 0.3, 0.4]]), Y=Y)
self.assertFalse(np.allclose(Y, np.eye(3)))
Y2 = mlp.BatchOutput(context=context, X=np.array([[0.1, 0.3, 0.4]]))
np.testing.assert_array_equal(Y, Y2)
mlp2 = MultilayerPerceptron(layers=[3, 2, 1],
activation_types=[
PerceptronActivationType.kReLU,
PerceptronActivationType.kTanh
])
self.assertEqual(mlp2.activation_type(0),
PerceptronActivationType.kReLU)
self.assertEqual(mlp2.activation_type(1),
PerceptronActivationType.kTanh)
Y = np.asfortranarray(np.full((1, 3), 2.4))
dYdX = np.asfortranarray(np.full((3, 3), 5.3))
context2 = mlp2.CreateDefaultContext()
mlp2.BatchOutput(context=context2, X=np.eye(3), Y=Y, dYdX=dYdX)
# The default context sets the weights and biases to zero, so the
# output (and gradients) should be zero.
np.testing.assert_array_almost_equal(Y, np.zeros((1, 3)))
np.testing.assert_array_almost_equal(dYdX, np.zeros((3, 3)))
mlp = MultilayerPerceptron(use_sin_cos_for_input=[True, False],
remaining_layers=[3, 2],
activation_types=[
PerceptronActivationType.kReLU,
PerceptronActivationType.kTanh
])
self.assertEqual(mlp.get_input_port().size(), 2)
np.testing.assert_array_equal(mlp.layers(), [3, 3, 2])
def test_random_source(self):
source = RandomSource(distribution=RandomDistribution.kUniform,
num_outputs=2, sampling_interval_sec=0.01)
self.assertEqual(source.get_output_port(0).size(), 2)
builder = DiagramBuilder()
# Note: There are no random inputs to add to the empty diagram, but it
# confirms the API works.
AddRandomInputs(sampling_interval_sec=0.01, builder=builder)
builder_ad = DiagramBuilder_[AutoDiffXd]()
AddRandomInputs(sampling_interval_sec=0.01, builder=builder_ad)
def test_constant_vector_source(self):
source = ConstantVectorSource(source_value=[1., 2.])
context = source.CreateDefaultContext()
source.get_source_value(context)
source.get_mutable_source_value(context)
def test_ctor_api(self):
"""Tests construction of systems for systems whose executions semantics
are not tested above.
"""
ConstantValueSource(Value("Hello world"))
DiscreteTimeDelay(update_sec=0.1, delay_time_steps=5, vector_size=2)
DiscreteTimeDelay(
update_sec=0.1, delay_time_steps=5,
abstract_model_value=Value("Hello world"))
with catch_drake_warnings(expected_count=2) as w:
DiscreteTimeDelay(update_sec=0.1, delay_timesteps=5, vector_size=2)
DiscreteTimeDelay(
update_sec=0.1, delay_timesteps=5,
abstract_model_value=Value("Hello world"))
ZeroOrderHold(period_sec=0.1, offset_sec=0.0, vector_size=2)
dut = ZeroOrderHold(period_sec=1.0, offset_sec=0.25,
abstract_model_value=Value("Hello world"))
self.assertEqual(dut.period(), 1.0)
self.assertEqual(dut.offset(), 0.25)
def test_shared_pointer_system_ctor(self):
dut = SharedPointerSystem(value_to_hold=[1, 2, 3])
readback = dut.get()
self.assertListEqual(readback, [1, 2, 3])
del dut
self.assertListEqual(readback, [1, 2, 3])
def test_shared_pointer_system_builder(self):
builder = DiagramBuilder()
self.assertListEqual(
SharedPointerSystem.AddToBuilder(
builder=builder, value_to_hold=[1, 2, 3]),
[1, 2, 3])
diagram = builder.Build()
del builder
readback = diagram.GetSystems()[0].get()
self.assertListEqual(readback, [1, 2, 3])
del diagram
self.assertListEqual(readback, [1, 2, 3])
def test_sine(self):
# Test scalar output.
sine_source = Sine(amplitude=1, frequency=2, phase=3,
size=1, is_time_based=True)
self.assertEqual(sine_source.get_output_port(0).size(), 1)
self.assertEqual(sine_source.get_output_port(1).size(), 1)
self.assertEqual(sine_source.get_output_port(2).size(), 1)
# Test vector output.
sine_source = Sine(amplitude=1, frequency=2, phase=3,
size=3, is_time_based=True)
self.assertEqual(sine_source.get_output_port(0).size(), 3)
self.assertEqual(sine_source.get_output_port(1).size(), 3)
self.assertEqual(sine_source.get_output_port(2).size(), 3)
sine_source = Sine(amplitudes=np.ones(2), frequencies=np.ones(2),
phases=np.ones(2), is_time_based=True)
self.assertEqual(sine_source.get_output_port(0).size(), 2)
self.assertEqual(sine_source.get_output_port(1).size(), 2)
self.assertEqual(sine_source.get_output_port(2).size(), 2)
def test_discrete_derivative(self):
discrete_derivative = DiscreteDerivative(num_inputs=5, time_step=0.5)
self.assertEqual(discrete_derivative.get_input_port(0).size(), 5)
self.assertEqual(discrete_derivative.get_output_port(0).size(), 5)
self.assertEqual(discrete_derivative.time_step(), 0.5)
self.assertTrue(discrete_derivative.suppress_initial_transient())
discrete_derivative = DiscreteDerivative(
num_inputs=5, time_step=0.5, suppress_initial_transient=False)
self.assertFalse(discrete_derivative.suppress_initial_transient())
def test_state_interpolator_with_discrete_derivative(self):
state_interpolator = StateInterpolatorWithDiscreteDerivative(
num_positions=5, time_step=0.4)
self.assertEqual(state_interpolator.get_input_port(0).size(), 5)
self.assertEqual(state_interpolator.get_output_port(0).size(), 10)
self.assertTrue(state_interpolator.suppress_initial_transient())
# test set_initial_position using context
context = state_interpolator.CreateDefaultContext()
state_interpolator.set_initial_position(
context=context, position=5*[1.1])
np.testing.assert_array_equal(
context.get_discrete_state(0).CopyToVector(),
np.array(5*[1.1]))
np.testing.assert_array_equal(
context.get_discrete_state(1).CopyToVector(),
np.array(5*[1.1]))
# test set_initial_position using state
context = state_interpolator.CreateDefaultContext()
state_interpolator.set_initial_position(
state=context.get_state(), position=5*[1.3])
np.testing.assert_array_equal(
context.get_discrete_state(0).CopyToVector(),
np.array(5*[1.3]))
np.testing.assert_array_equal(
context.get_discrete_state(1).CopyToVector(),
np.array(5*[1.3]))
state_interpolator = StateInterpolatorWithDiscreteDerivative(
num_positions=5, time_step=0.4, suppress_initial_transient=True)
self.assertTrue(state_interpolator.suppress_initial_transient())
@numpy_compare.check_nonsymbolic_types
def test_log_vector_output(self, T):
# Add various redundant loggers to a system, to exercise the
# LogVectorOutput bindings.
builder = DiagramBuilder_[T]()
kSize = 1
integrator = builder.AddSystem(Integrator_[T](kSize))
port = integrator.get_output_port(0)
loggers = []
loggers.append(LogVectorOutput(port, builder))
loggers.append(LogVectorOutput(src=port, builder=builder))
loggers.append(LogVectorOutput(port, builder, 0.125))
loggers.append(LogVectorOutput(
src=port, builder=builder, publish_period=0.125))
loggers.append(LogVectorOutput(port, builder, {TriggerType.kForced}))
loggers.append(LogVectorOutput(
src=port, builder=builder, publish_triggers={TriggerType.kForced}))
loggers.append(LogVectorOutput(
port, builder, {TriggerType.kPeriodic}, 0.125))
loggers.append(LogVectorOutput(
src=port, builder=builder,
publish_triggers={TriggerType.kPeriodic}, publish_period=0.125))
# Check the returned loggers by calling some trivial methods.
diagram = builder.Build()
context = diagram.CreateDefaultContext()
self.assertTrue(all(logger.FindLog(context).num_samples() == 0
for logger in loggers))
@numpy_compare.check_nonsymbolic_types
def test_vector_log(self, T):
kSize = 1
dut = VectorLog(kSize)
self.assertEqual(dut.get_input_size(), kSize)
dut.AddData(0.1, [22.22])
self.assertEqual(dut.num_samples(), 1)
self.assertEqual(dut.sample_times(), [0.1])
self.assertEqual(dut.data(), [22.22])
dut.Clear()
self.assertEqual(dut.num_samples(), 0)
# There is no good way from python to test the semantics of Reserve(),
# but test the binding anyway.
dut.Reserve(VectorLog.kDefaultCapacity * 3)
@numpy_compare.check_nonsymbolic_types
def test_vector_log_sink(self, T):
# Add various redundant loggers to a system, to exercise the
# VectorLog constructor bindings.
builder = DiagramBuilder_[T]()
kSize = 1
constructors = [VectorLogSink_[T]]
loggers = []
if T == float:
constructors.append(VectorLogSink)
for constructor in constructors:
loggers.append(builder.AddSystem(constructor(kSize)))
loggers.append(builder.AddSystem(constructor(input_size=kSize)))
loggers.append(builder.AddSystem(constructor(kSize, 0.125)))
loggers.append(builder.AddSystem(
constructor(input_size=kSize, publish_period=0.125)))
loggers.append(builder.AddSystem(
constructor(kSize, {TriggerType.kForced})))
loggers.append(builder.AddSystem(
constructor(input_size=kSize,
publish_triggers={TriggerType.kForced})))
loggers.append(builder.AddSystem(
constructor(kSize, {TriggerType.kPeriodic}, 0.125)))
loggers.append(builder.AddSystem(
constructor(input_size=kSize,
publish_triggers={TriggerType.kPeriodic},
publish_period=0.125)))
# Exercise all of the log access methods.
diagram = builder.Build()
context = diagram.CreateDefaultContext()
# FindLog and FindMutableLog find the same object.
self.assertTrue(
all(logger.FindLog(context) == logger.FindMutableLog(context)
for logger in loggers))
# Build a list of pairs of loggers and their local contexts.
loggers_and_contexts = [(x, x.GetMyContextFromRoot(context))
for x in loggers]
# GetLog and GetMutableLog find the same object.
self.assertTrue(
all(logger.GetLog(logger_context)
== logger.GetMutableLog(logger_context)
for logger, logger_context in loggers_and_contexts))
# GetLog and FindLog find the same object, given the proper contexts.
self.assertTrue(
all(logger.GetLog(logger_context) == logger.FindLog(context)
for logger, logger_context in loggers_and_contexts))
|
normal
|
{
"blob_id": "f17ae8a44f8b032feac7c18fe39663054fea40c0",
"index": 5282,
"step-1": "<mask token>\n\n\nclass TestGeneral(unittest.TestCase):\n\n def _check_instantiations(self, template, supports_symbolic=True):\n default_cls = template[None]\n self.assertTrue(template[float] is default_cls)\n self.assertTrue(template[AutoDiffXd] is not default_cls)\n if supports_symbolic:\n self.assertTrue(template[Expression] is not default_cls)\n\n def test_instantiations(self):\n self._check_instantiations(Adder_)\n self._check_instantiations(AffineSystem_)\n self._check_instantiations(ConstantValueSource_)\n self._check_instantiations(ConstantVectorSource_)\n self._check_instantiations(Demultiplexer_)\n self._check_instantiations(DiscreteDerivative_)\n self._check_instantiations(DiscreteTimeDelay_)\n self._check_instantiations(Gain_)\n self._check_instantiations(Integrator_)\n self._check_instantiations(LinearSystem_)\n self._check_instantiations(LinearTransformDensity_,\n supports_symbolic=False)\n self._check_instantiations(Multiplexer_)\n self._check_instantiations(MultilayerPerceptron_)\n self._check_instantiations(PassThrough_)\n self._check_instantiations(PortSwitch_)\n self._check_instantiations(Saturation_)\n self._check_instantiations(SharedPointerSystem_)\n self._check_instantiations(Sine_)\n self._check_instantiations(StateInterpolatorWithDiscreteDerivative_)\n self._check_instantiations(SymbolicVectorSystem_)\n self._check_instantiations(TrajectoryAffineSystem_,\n supports_symbolic=False)\n self._check_instantiations(TrajectoryLinearSystem_,\n supports_symbolic=False)\n self._check_instantiations(TrajectorySource_)\n self._check_instantiations(VectorLogSink_)\n self._check_instantiations(WrapToSystem_)\n self._check_instantiations(ZeroOrderHold_)\n <mask token>\n\n def test_linear_affine_system_empty_matrices(self):\n\n def CheckSizes(system, num_states, num_inputs, num_outputs):\n self.assertEqual(system.num_continuous_states(), num_states)\n self.assertEqual(system.num_inputs(), num_inputs)\n self.assertEqual(system.num_outputs(), num_outputs)\n system = AffineSystem(y0=[2, 1])\n CheckSizes(system, num_states=0, num_inputs=0, num_outputs=2)\n system = AffineSystem(D=np.eye(2))\n CheckSizes(system, num_states=0, num_inputs=2, num_outputs=2)\n system = LinearSystem(D=np.eye(2))\n CheckSizes(system, num_states=0, num_inputs=2, num_outputs=2)\n system = AffineSystem(D=np.eye(2), y0=[1, 2])\n CheckSizes(system, num_states=0, num_inputs=2, num_outputs=2)\n system = LinearSystem(B=np.eye(2))\n CheckSizes(system, num_states=2, num_inputs=2, num_outputs=0)\n\n def test_linear_system_zero_size(self):\n num_x = 0\n num_y = 2\n num_u = 2\n A = np.zeros((num_x, num_x))\n B = np.zeros((num_x, num_u))\n C = np.zeros((num_y, num_x))\n D = np.zeros((num_y, num_u))\n self.assertIsNotNone(LinearSystem(A, B, C, D))\n\n @numpy_compare.check_nonsymbolic_types\n def test_linear_transform_density(self, T):\n dut = LinearTransformDensity_[T](distribution=RandomDistribution.\n kGaussian, input_size=3, output_size=3)\n w_in = np.array([T(0.5), T(0.1), T(1.5)])\n context = dut.CreateDefaultContext()\n dut.get_input_port_w_in().FixValue(context, w_in)\n self.assertEqual(dut.get_input_port_A().size(), 9)\n self.assertEqual(dut.get_input_port_b().size(), 3)\n self.assertEqual(dut.get_distribution(), RandomDistribution.kGaussian)\n A = np.array([[T(0.5), T(1), T(2)], [T(1), T(2), T(3)], [T(3), T(4),\n T(5)]])\n dut.FixConstantA(context=context, A=A)\n b = np.array([T(1), T(2), T(3)])\n dut.FixConstantB(context=context, b=b)\n dut.CalcDensity(context=context)\n 
self.assertEqual(dut.get_output_port_w_out().size(), 3)\n self.assertEqual(dut.get_output_port_w_out_density().size(), 1)\n\n def test_vector_pass_through(self):\n model_value = BasicVector([1.0, 2, 3])\n system = PassThrough(vector_size=model_value.size())\n context = system.CreateDefaultContext()\n system.get_input_port(0).FixValue(context, model_value)\n output = system.AllocateOutput()\n input_eval = system.EvalVectorInput(context, 0)\n compare_value(self, input_eval, model_value)\n system.CalcOutput(context, output)\n output_value = output.get_vector_data(0)\n compare_value(self, output_value, model_value)\n\n def test_default_vector_pass_through(self):\n model_value = [1.0, 2, 3]\n system = PassThrough(value=model_value)\n context = system.CreateDefaultContext()\n np.testing.assert_array_equal(model_value, system.get_output_port()\n .Eval(context))\n\n def test_abstract_pass_through(self):\n model_value = Value('Hello world')\n system = PassThrough(abstract_model_value=model_value)\n context = system.CreateDefaultContext()\n system.get_input_port(0).FixValue(context, model_value)\n output = system.AllocateOutput()\n input_eval = system.EvalAbstractInput(context, 0)\n compare_value(self, input_eval, model_value)\n system.CalcOutput(context, output)\n output_value = output.get_data(0)\n compare_value(self, output_value, model_value)\n\n def test_port_switch(self):\n system = PortSwitch(vector_size=2)\n a = system.DeclareInputPort(name='a')\n system.DeclareInputPort(name='b')\n context = system.CreateDefaultContext()\n self.assertIsInstance(a, InputPort)\n system.get_port_selector_input_port().FixValue(context, a.get_index())\n\n def test_first_order_low_pass_filter(self):\n filter1 = FirstOrderLowPassFilter(time_constant=3.0, size=4)\n self.assertEqual(filter1.get_time_constant(), 3.0)\n alpha = np.array([1, 2, 3])\n filter2 = FirstOrderLowPassFilter(time_constants=alpha)\n np.testing.assert_array_equal(filter2.get_time_constants_vector(),\n alpha)\n context = filter2.CreateDefaultContext()\n filter2.set_initial_output_value(context, [0.0, -0.2, 0.4])\n <mask token>\n\n def test_saturation(self):\n system = Saturation((0.0, -1.0, 3.0), (1.0, 2.0, 4.0))\n context = system.CreateDefaultContext()\n output = system.AllocateOutput()\n\n def mytest(input, expected):\n system.get_input_port(0).FixValue(context, input)\n system.CalcOutput(context, output)\n self.assertTrue(np.allclose(output.get_vector_data(0).\n CopyToVector(), expected))\n mytest((-5.0, 5.0, 4.0), (0.0, 2.0, 4.0))\n mytest((0.4, 0.0, 3.5), (0.4, 0.0, 3.5))\n\n def test_trajectory_source(self):\n ppt = PiecewisePolynomial.FirstOrderHold([0.0, 1.0], [[2.0, 3.0], [\n 2.0, 1.0]])\n system = TrajectorySource(trajectory=ppt, output_derivative_order=0,\n zero_derivatives_beyond_limits=True)\n context = system.CreateDefaultContext()\n output = system.AllocateOutput()\n\n def mytest(input, expected):\n context.SetTime(input)\n system.CalcOutput(context, output)\n self.assertTrue(np.allclose(output.get_vector_data(0).\n CopyToVector(), expected))\n mytest(0.0, (2.0, 2.0))\n mytest(0.5, (2.5, 1.5))\n mytest(1.0, (3.0, 1.0))\n ppt2 = PiecewisePolynomial.FirstOrderHold([0.0, 1.0], [[4.0, 6.0],\n [4.0, 2.0]])\n system.UpdateTrajectory(trajectory=ppt2)\n mytest(0.0, (4.0, 4.0))\n mytest(0.5, (5.0, 3.0))\n mytest(1.0, (6.0, 2.0))\n\n def test_symbolic_vector_system(self):\n t = Variable('t')\n x = [Variable('x0'), Variable('x1')]\n u = [Variable('u0'), Variable('u1')]\n system = SymbolicVectorSystem(time=t, state=x, input=u, dynamics=[x\n 
[0] + x[1], t], output=[u[1]], time_period=0.0)\n context = system.CreateDefaultContext()\n self.assertEqual(context.num_continuous_states(), 2)\n self.assertEqual(context.num_discrete_state_groups(), 0)\n self.assertEqual(system.get_input_port(0).size(), 2)\n self.assertEqual(system.get_output_port(0).size(), 1)\n self.assertEqual(context.num_abstract_parameters(), 0)\n self.assertEqual(context.num_numeric_parameter_groups(), 0)\n self.assertTrue(system.dynamics_for_variable(x[0]).EqualTo(x[0] + x[1])\n )\n self.assertTrue(system.dynamics_for_variable(x[1]).EqualTo(t))\n\n def test_symbolic_vector_system_parameters(self):\n t = Variable('t')\n x = [Variable('x0'), Variable('x1')]\n u = [Variable('u0'), Variable('u1')]\n p = [Variable('p0'), Variable('p1')]\n system = SymbolicVectorSystem(time=t, state=x, input=u, parameter=p,\n dynamics=[p[0] * x[0] + x[1] + p[1], t], output=[u[1]],\n time_period=0.0)\n context = system.CreateDefaultContext()\n self.assertEqual(context.num_continuous_states(), 2)\n self.assertEqual(context.num_discrete_state_groups(), 0)\n self.assertEqual(system.get_input_port(0).size(), 2)\n self.assertEqual(system.get_output_port(0).size(), 1)\n self.assertEqual(context.num_abstract_parameters(), 0)\n self.assertEqual(context.num_numeric_parameter_groups(), 1)\n self.assertEqual(context.get_numeric_parameter(0).size(), 2)\n self.assertTrue(system.dynamics_for_variable(x[0]).EqualTo(p[0] * x\n [0] + x[1] + p[1]))\n self.assertTrue(system.dynamics_for_variable(x[1]).EqualTo(t))\n\n def test_wrap_to_system(self):\n system = WrapToSystem(2)\n system.set_interval(1, 1.0, 2.0)\n context = system.CreateDefaultContext()\n output = system.AllocateOutput()\n\n def mytest(input, expected):\n system.get_input_port(0).FixValue(context, input)\n system.CalcOutput(context, output)\n self.assertTrue(np.allclose(output.get_vector_data(0).\n CopyToVector(), expected))\n mytest((-1.5, 0.5), (-1.5, 1.5))\n mytest((0.2, 0.3), (0.2, 1.3))\n\n def test_demultiplexer(self):\n demux = Demultiplexer(size=4)\n context = demux.CreateDefaultContext()\n self.assertEqual(demux.num_input_ports(), 1)\n self.assertEqual(demux.num_output_ports(), 4)\n numpy_compare.assert_equal(demux.get_output_ports_sizes(), [1, 1, 1, 1]\n )\n input_vec = np.array([1.0, 2.0, 3.0, 4.0])\n demux.get_input_port(0).FixValue(context, input_vec)\n output = demux.AllocateOutput()\n demux.CalcOutput(context, output)\n for i in range(4):\n self.assertTrue(np.allclose(output.get_vector_data(i).get_value\n (), input_vec[i]))\n demux = Demultiplexer(size=4, output_ports_size=2)\n context = demux.CreateDefaultContext()\n self.assertEqual(demux.num_input_ports(), 1)\n self.assertEqual(demux.num_output_ports(), 2)\n numpy_compare.assert_equal(demux.get_output_ports_sizes(), [2, 2])\n demux.get_input_port(0).FixValue(context, input_vec)\n output = demux.AllocateOutput()\n demux.CalcOutput(context, output)\n for i in range(2):\n self.assertTrue(np.allclose(output.get_vector_data(i).get_value\n (), input_vec[2 * i:2 * i + 2]))\n output_ports_sizes = np.array([1, 2, 1])\n num_output_ports = output_ports_sizes.size\n input_vec = np.array([1.0, 2.0, 3.0, 4.0])\n demux = Demultiplexer(output_ports_sizes=output_ports_sizes)\n context = demux.CreateDefaultContext()\n self.assertEqual(demux.num_input_ports(), 1)\n self.assertEqual(demux.num_output_ports(), num_output_ports)\n numpy_compare.assert_equal(demux.get_output_ports_sizes(),\n output_ports_sizes)\n demux.get_input_port(0).FixValue(context, input_vec)\n output = 
demux.AllocateOutput()\n demux.CalcOutput(context, output)\n output_port_start = 0\n for i in range(num_output_ports):\n output_port_size = output.get_vector_data(i).size()\n self.assertTrue(np.allclose(output.get_vector_data(i).get_value\n (), input_vec[output_port_start:output_port_start +\n output_port_size]))\n output_port_start += output_port_size\n <mask token>\n\n def test_multilayer_perceptron(self):\n mlp = MultilayerPerceptron(layers=[1, 2, 3], activation_type=\n PerceptronActivationType.kReLU)\n self.assertEqual(mlp.get_input_port().size(), 1)\n self.assertEqual(mlp.get_output_port().size(), 3)\n context = mlp.CreateDefaultContext()\n params = np.zeros((mlp.num_parameters(), 1))\n self.assertEqual(mlp.num_parameters(), 13)\n self.assertEqual(mlp.layers(), [1, 2, 3])\n self.assertEqual(mlp.activation_type(layer=0),\n PerceptronActivationType.kReLU)\n self.assertEqual(len(mlp.GetParameters(context=context)), mlp.\n num_parameters())\n mlp.SetWeights(context=context, layer=0, W=np.array([[1], [2]]))\n mlp.SetBiases(context=context, layer=0, b=[3, 4])\n np.testing.assert_array_equal(mlp.GetWeights(context=context, layer\n =0), np.array([[1], [2]]))\n np.testing.assert_array_equal(mlp.GetBiases(context=context, layer=\n 0), np.array([3, 4]))\n params = np.zeros(mlp.num_parameters())\n mlp.SetWeights(params=params, layer=0, W=np.array([[1], [2]]))\n mlp.SetBiases(params=params, layer=0, b=[3, 4])\n np.testing.assert_array_equal(mlp.GetWeights(params=params, layer=0\n ), np.array([[1], [2]]))\n np.testing.assert_array_equal(mlp.GetBiases(params=params, layer=0),\n np.array([3, 4]))\n mutable_params = mlp.GetMutableParameters(context=context)\n mutable_params[:] = 3.0\n np.testing.assert_array_equal(mlp.GetParameters(context), np.full(\n mlp.num_parameters(), 3.0))\n global called_loss\n called_loss = False\n\n def silly_loss(Y, dloss_dY):\n global called_loss\n called_loss = True\n dloss_dY[:] = 1\n return Y.sum()\n dloss_dparams = np.zeros((13,))\n generator = RandomGenerator(23)\n mlp.SetRandomContext(context, generator)\n mlp.Backpropagation(context=context, X=np.array([1, 3, 4]).reshape(\n (1, 3)), loss=silly_loss, dloss_dparams=dloss_dparams)\n self.assertTrue(called_loss)\n self.assertTrue(dloss_dparams.any())\n dloss_dparams = np.zeros((13,))\n mlp.BackpropagationMeanSquaredError(context=context, X=np.array([1,\n 3, 4]).reshape((1, 3)), Y_desired=np.eye(3), dloss_dparams=\n dloss_dparams)\n self.assertTrue(dloss_dparams.any())\n Y = np.asfortranarray(np.eye(3))\n mlp.BatchOutput(context=context, X=np.array([[0.1, 0.3, 0.4]]), Y=Y)\n self.assertFalse(np.allclose(Y, np.eye(3)))\n Y2 = mlp.BatchOutput(context=context, X=np.array([[0.1, 0.3, 0.4]]))\n np.testing.assert_array_equal(Y, Y2)\n mlp2 = MultilayerPerceptron(layers=[3, 2, 1], activation_types=[\n PerceptronActivationType.kReLU, PerceptronActivationType.kTanh])\n self.assertEqual(mlp2.activation_type(0), PerceptronActivationType.\n kReLU)\n self.assertEqual(mlp2.activation_type(1), PerceptronActivationType.\n kTanh)\n Y = np.asfortranarray(np.full((1, 3), 2.4))\n dYdX = np.asfortranarray(np.full((3, 3), 5.3))\n context2 = mlp2.CreateDefaultContext()\n mlp2.BatchOutput(context=context2, X=np.eye(3), Y=Y, dYdX=dYdX)\n np.testing.assert_array_almost_equal(Y, np.zeros((1, 3)))\n np.testing.assert_array_almost_equal(dYdX, np.zeros((3, 3)))\n mlp = MultilayerPerceptron(use_sin_cos_for_input=[True, False],\n remaining_layers=[3, 2], activation_types=[\n PerceptronActivationType.kReLU, PerceptronActivationType.kTanh])\n 
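# With use_sin_cos_for_input=[True, False] the 2-dim input is expanded\n # to the features [sin(x0), cos(x0), x1], so the first layer has width\n # 3 and the reported layer sizes are [3, 3, 2].\n 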
self.assertEqual(mlp.get_input_port().size(), 2)\n np.testing.assert_array_equal(mlp.layers(), [3, 3, 2])\n\n def test_random_source(self):\n source = RandomSource(distribution=RandomDistribution.kUniform,\n num_outputs=2, sampling_interval_sec=0.01)\n self.assertEqual(source.get_output_port(0).size(), 2)\n builder = DiagramBuilder()\n AddRandomInputs(sampling_interval_sec=0.01, builder=builder)\n builder_ad = DiagramBuilder_[AutoDiffXd]()\n AddRandomInputs(sampling_interval_sec=0.01, builder=builder_ad)\n\n def test_constant_vector_source(self):\n source = ConstantVectorSource(source_value=[1.0, 2.0])\n context = source.CreateDefaultContext()\n source.get_source_value(context)\n source.get_mutable_source_value(context)\n <mask token>\n\n def test_shared_pointer_system_ctor(self):\n dut = SharedPointerSystem(value_to_hold=[1, 2, 3])\n readback = dut.get()\n self.assertListEqual(readback, [1, 2, 3])\n del dut\n self.assertListEqual(readback, [1, 2, 3])\n <mask token>\n\n def test_sine(self):\n sine_source = Sine(amplitude=1, frequency=2, phase=3, size=1,\n is_time_based=True)\n self.assertEqual(sine_source.get_output_port(0).size(), 1)\n self.assertEqual(sine_source.get_output_port(1).size(), 1)\n self.assertEqual(sine_source.get_output_port(2).size(), 1)\n sine_source = Sine(amplitude=1, frequency=2, phase=3, size=3,\n is_time_based=True)\n self.assertEqual(sine_source.get_output_port(0).size(), 3)\n self.assertEqual(sine_source.get_output_port(1).size(), 3)\n self.assertEqual(sine_source.get_output_port(2).size(), 3)\n sine_source = Sine(amplitudes=np.ones(2), frequencies=np.ones(2),\n phases=np.ones(2), is_time_based=True)\n self.assertEqual(sine_source.get_output_port(0).size(), 2)\n self.assertEqual(sine_source.get_output_port(1).size(), 2)\n self.assertEqual(sine_source.get_output_port(2).size(), 2)\n\n def test_discrete_derivative(self):\n discrete_derivative = DiscreteDerivative(num_inputs=5, time_step=0.5)\n self.assertEqual(discrete_derivative.get_input_port(0).size(), 5)\n self.assertEqual(discrete_derivative.get_output_port(0).size(), 5)\n self.assertEqual(discrete_derivative.time_step(), 0.5)\n self.assertTrue(discrete_derivative.suppress_initial_transient())\n discrete_derivative = DiscreteDerivative(num_inputs=5, time_step=\n 0.5, suppress_initial_transient=False)\n self.assertFalse(discrete_derivative.suppress_initial_transient())\n <mask token>\n\n @numpy_compare.check_nonsymbolic_types\n def test_log_vector_output(self, T):\n builder = DiagramBuilder_[T]()\n kSize = 1\n integrator = builder.AddSystem(Integrator_[T](kSize))\n port = integrator.get_output_port(0)\n loggers = []\n loggers.append(LogVectorOutput(port, builder))\n loggers.append(LogVectorOutput(src=port, builder=builder))\n loggers.append(LogVectorOutput(port, builder, 0.125))\n loggers.append(LogVectorOutput(src=port, builder=builder,\n publish_period=0.125))\n loggers.append(LogVectorOutput(port, builder, {TriggerType.kForced}))\n loggers.append(LogVectorOutput(src=port, builder=builder,\n publish_triggers={TriggerType.kForced}))\n loggers.append(LogVectorOutput(port, builder, {TriggerType.\n kPeriodic}, 0.125))\n loggers.append(LogVectorOutput(src=port, builder=builder,\n publish_triggers={TriggerType.kPeriodic}, publish_period=0.125))\n diagram = builder.Build()\n context = diagram.CreateDefaultContext()\n self.assertTrue(all(logger.FindLog(context).num_samples() == 0 for\n logger in loggers))\n <mask token>\n\n @numpy_compare.check_nonsymbolic_types\n def test_vector_log_sink(self, T):\n builder = 
DiagramBuilder_[T]()\n kSize = 1\n constructors = [VectorLogSink_[T]]\n loggers = []\n if T == float:\n constructors.append(VectorLogSink)\n for constructor in constructors:\n loggers.append(builder.AddSystem(constructor(kSize)))\n loggers.append(builder.AddSystem(constructor(input_size=kSize)))\n loggers.append(builder.AddSystem(constructor(kSize, 0.125)))\n loggers.append(builder.AddSystem(constructor(input_size=kSize,\n publish_period=0.125)))\n loggers.append(builder.AddSystem(constructor(kSize, {\n TriggerType.kForced})))\n loggers.append(builder.AddSystem(constructor(input_size=kSize,\n publish_triggers={TriggerType.kForced})))\n loggers.append(builder.AddSystem(constructor(kSize, {\n TriggerType.kPeriodic}, 0.125)))\n loggers.append(builder.AddSystem(constructor(input_size=kSize,\n publish_triggers={TriggerType.kPeriodic}, publish_period=\n 0.125)))\n diagram = builder.Build()\n context = diagram.CreateDefaultContext()\n self.assertTrue(all(logger.FindLog(context) == logger.\n FindMutableLog(context) for logger in loggers))\n loggers_and_contexts = [(x, x.GetMyContextFromRoot(context)) for x in\n loggers]\n self.assertTrue(all(logger.GetLog(logger_context) == logger.\n GetMutableLog(logger_context) for logger, logger_context in\n loggers_and_contexts))\n self.assertTrue(all(logger.GetLog(logger_context) == logger.FindLog\n (context) for logger, logger_context in loggers_and_contexts))\n",
"step-2": "<mask token>\n\n\nclass TestGeneral(unittest.TestCase):\n\n def _check_instantiations(self, template, supports_symbolic=True):\n default_cls = template[None]\n self.assertTrue(template[float] is default_cls)\n self.assertTrue(template[AutoDiffXd] is not default_cls)\n if supports_symbolic:\n self.assertTrue(template[Expression] is not default_cls)\n\n def test_instantiations(self):\n self._check_instantiations(Adder_)\n self._check_instantiations(AffineSystem_)\n self._check_instantiations(ConstantValueSource_)\n self._check_instantiations(ConstantVectorSource_)\n self._check_instantiations(Demultiplexer_)\n self._check_instantiations(DiscreteDerivative_)\n self._check_instantiations(DiscreteTimeDelay_)\n self._check_instantiations(Gain_)\n self._check_instantiations(Integrator_)\n self._check_instantiations(LinearSystem_)\n self._check_instantiations(LinearTransformDensity_,\n supports_symbolic=False)\n self._check_instantiations(Multiplexer_)\n self._check_instantiations(MultilayerPerceptron_)\n self._check_instantiations(PassThrough_)\n self._check_instantiations(PortSwitch_)\n self._check_instantiations(Saturation_)\n self._check_instantiations(SharedPointerSystem_)\n self._check_instantiations(Sine_)\n self._check_instantiations(StateInterpolatorWithDiscreteDerivative_)\n self._check_instantiations(SymbolicVectorSystem_)\n self._check_instantiations(TrajectoryAffineSystem_,\n supports_symbolic=False)\n self._check_instantiations(TrajectoryLinearSystem_,\n supports_symbolic=False)\n self._check_instantiations(TrajectorySource_)\n self._check_instantiations(VectorLogSink_)\n self._check_instantiations(WrapToSystem_)\n self._check_instantiations(ZeroOrderHold_)\n <mask token>\n\n def test_linear_affine_system_empty_matrices(self):\n\n def CheckSizes(system, num_states, num_inputs, num_outputs):\n self.assertEqual(system.num_continuous_states(), num_states)\n self.assertEqual(system.num_inputs(), num_inputs)\n self.assertEqual(system.num_outputs(), num_outputs)\n system = AffineSystem(y0=[2, 1])\n CheckSizes(system, num_states=0, num_inputs=0, num_outputs=2)\n system = AffineSystem(D=np.eye(2))\n CheckSizes(system, num_states=0, num_inputs=2, num_outputs=2)\n system = LinearSystem(D=np.eye(2))\n CheckSizes(system, num_states=0, num_inputs=2, num_outputs=2)\n system = AffineSystem(D=np.eye(2), y0=[1, 2])\n CheckSizes(system, num_states=0, num_inputs=2, num_outputs=2)\n system = LinearSystem(B=np.eye(2))\n CheckSizes(system, num_states=2, num_inputs=2, num_outputs=0)\n\n def test_linear_system_zero_size(self):\n num_x = 0\n num_y = 2\n num_u = 2\n A = np.zeros((num_x, num_x))\n B = np.zeros((num_x, num_u))\n C = np.zeros((num_y, num_x))\n D = np.zeros((num_y, num_u))\n self.assertIsNotNone(LinearSystem(A, B, C, D))\n\n @numpy_compare.check_nonsymbolic_types\n def test_linear_transform_density(self, T):\n dut = LinearTransformDensity_[T](distribution=RandomDistribution.\n kGaussian, input_size=3, output_size=3)\n w_in = np.array([T(0.5), T(0.1), T(1.5)])\n context = dut.CreateDefaultContext()\n dut.get_input_port_w_in().FixValue(context, w_in)\n self.assertEqual(dut.get_input_port_A().size(), 9)\n self.assertEqual(dut.get_input_port_b().size(), 3)\n self.assertEqual(dut.get_distribution(), RandomDistribution.kGaussian)\n A = np.array([[T(0.5), T(1), T(2)], [T(1), T(2), T(3)], [T(3), T(4),\n T(5)]])\n dut.FixConstantA(context=context, A=A)\n b = np.array([T(1), T(2), T(3)])\n dut.FixConstantB(context=context, b=b)\n dut.CalcDensity(context=context)\n 
self.assertEqual(dut.get_output_port_w_out().size(), 3)\n self.assertEqual(dut.get_output_port_w_out_density().size(), 1)\n\n def test_vector_pass_through(self):\n model_value = BasicVector([1.0, 2, 3])\n system = PassThrough(vector_size=model_value.size())\n context = system.CreateDefaultContext()\n system.get_input_port(0).FixValue(context, model_value)\n output = system.AllocateOutput()\n input_eval = system.EvalVectorInput(context, 0)\n compare_value(self, input_eval, model_value)\n system.CalcOutput(context, output)\n output_value = output.get_vector_data(0)\n compare_value(self, output_value, model_value)\n\n def test_default_vector_pass_through(self):\n model_value = [1.0, 2, 3]\n system = PassThrough(value=model_value)\n context = system.CreateDefaultContext()\n np.testing.assert_array_equal(model_value, system.get_output_port()\n .Eval(context))\n\n def test_abstract_pass_through(self):\n model_value = Value('Hello world')\n system = PassThrough(abstract_model_value=model_value)\n context = system.CreateDefaultContext()\n system.get_input_port(0).FixValue(context, model_value)\n output = system.AllocateOutput()\n input_eval = system.EvalAbstractInput(context, 0)\n compare_value(self, input_eval, model_value)\n system.CalcOutput(context, output)\n output_value = output.get_data(0)\n compare_value(self, output_value, model_value)\n\n def test_port_switch(self):\n system = PortSwitch(vector_size=2)\n a = system.DeclareInputPort(name='a')\n system.DeclareInputPort(name='b')\n context = system.CreateDefaultContext()\n self.assertIsInstance(a, InputPort)\n system.get_port_selector_input_port().FixValue(context, a.get_index())\n\n def test_first_order_low_pass_filter(self):\n filter1 = FirstOrderLowPassFilter(time_constant=3.0, size=4)\n self.assertEqual(filter1.get_time_constant(), 3.0)\n alpha = np.array([1, 2, 3])\n filter2 = FirstOrderLowPassFilter(time_constants=alpha)\n np.testing.assert_array_equal(filter2.get_time_constants_vector(),\n alpha)\n context = filter2.CreateDefaultContext()\n filter2.set_initial_output_value(context, [0.0, -0.2, 0.4])\n <mask token>\n\n def test_saturation(self):\n system = Saturation((0.0, -1.0, 3.0), (1.0, 2.0, 4.0))\n context = system.CreateDefaultContext()\n output = system.AllocateOutput()\n\n def mytest(input, expected):\n system.get_input_port(0).FixValue(context, input)\n system.CalcOutput(context, output)\n self.assertTrue(np.allclose(output.get_vector_data(0).\n CopyToVector(), expected))\n mytest((-5.0, 5.0, 4.0), (0.0, 2.0, 4.0))\n mytest((0.4, 0.0, 3.5), (0.4, 0.0, 3.5))\n\n def test_trajectory_source(self):\n ppt = PiecewisePolynomial.FirstOrderHold([0.0, 1.0], [[2.0, 3.0], [\n 2.0, 1.0]])\n system = TrajectorySource(trajectory=ppt, output_derivative_order=0,\n zero_derivatives_beyond_limits=True)\n context = system.CreateDefaultContext()\n output = system.AllocateOutput()\n\n def mytest(input, expected):\n context.SetTime(input)\n system.CalcOutput(context, output)\n self.assertTrue(np.allclose(output.get_vector_data(0).\n CopyToVector(), expected))\n mytest(0.0, (2.0, 2.0))\n mytest(0.5, (2.5, 1.5))\n mytest(1.0, (3.0, 1.0))\n ppt2 = PiecewisePolynomial.FirstOrderHold([0.0, 1.0], [[4.0, 6.0],\n [4.0, 2.0]])\n system.UpdateTrajectory(trajectory=ppt2)\n mytest(0.0, (4.0, 4.0))\n mytest(0.5, (5.0, 3.0))\n mytest(1.0, (6.0, 2.0))\n\n def test_symbolic_vector_system(self):\n t = Variable('t')\n x = [Variable('x0'), Variable('x1')]\n u = [Variable('u0'), Variable('u1')]\n system = SymbolicVectorSystem(time=t, state=x, input=u, dynamics=[x\n 
[0] + x[1], t], output=[u[1]], time_period=0.0)\n context = system.CreateDefaultContext()\n self.assertEqual(context.num_continuous_states(), 2)\n self.assertEqual(context.num_discrete_state_groups(), 0)\n self.assertEqual(system.get_input_port(0).size(), 2)\n self.assertEqual(system.get_output_port(0).size(), 1)\n self.assertEqual(context.num_abstract_parameters(), 0)\n self.assertEqual(context.num_numeric_parameter_groups(), 0)\n self.assertTrue(system.dynamics_for_variable(x[0]).EqualTo(x[0] + x[1])\n )\n self.assertTrue(system.dynamics_for_variable(x[1]).EqualTo(t))\n\n def test_symbolic_vector_system_parameters(self):\n t = Variable('t')\n x = [Variable('x0'), Variable('x1')]\n u = [Variable('u0'), Variable('u1')]\n p = [Variable('p0'), Variable('p1')]\n system = SymbolicVectorSystem(time=t, state=x, input=u, parameter=p,\n dynamics=[p[0] * x[0] + x[1] + p[1], t], output=[u[1]],\n time_period=0.0)\n context = system.CreateDefaultContext()\n self.assertEqual(context.num_continuous_states(), 2)\n self.assertEqual(context.num_discrete_state_groups(), 0)\n self.assertEqual(system.get_input_port(0).size(), 2)\n self.assertEqual(system.get_output_port(0).size(), 1)\n self.assertEqual(context.num_abstract_parameters(), 0)\n self.assertEqual(context.num_numeric_parameter_groups(), 1)\n self.assertEqual(context.get_numeric_parameter(0).size(), 2)\n self.assertTrue(system.dynamics_for_variable(x[0]).EqualTo(p[0] * x\n [0] + x[1] + p[1]))\n self.assertTrue(system.dynamics_for_variable(x[1]).EqualTo(t))\n\n def test_wrap_to_system(self):\n system = WrapToSystem(2)\n system.set_interval(1, 1.0, 2.0)\n context = system.CreateDefaultContext()\n output = system.AllocateOutput()\n\n def mytest(input, expected):\n system.get_input_port(0).FixValue(context, input)\n system.CalcOutput(context, output)\n self.assertTrue(np.allclose(output.get_vector_data(0).\n CopyToVector(), expected))\n mytest((-1.5, 0.5), (-1.5, 1.5))\n mytest((0.2, 0.3), (0.2, 1.3))\n\n def test_demultiplexer(self):\n demux = Demultiplexer(size=4)\n context = demux.CreateDefaultContext()\n self.assertEqual(demux.num_input_ports(), 1)\n self.assertEqual(demux.num_output_ports(), 4)\n numpy_compare.assert_equal(demux.get_output_ports_sizes(), [1, 1, 1, 1]\n )\n input_vec = np.array([1.0, 2.0, 3.0, 4.0])\n demux.get_input_port(0).FixValue(context, input_vec)\n output = demux.AllocateOutput()\n demux.CalcOutput(context, output)\n for i in range(4):\n self.assertTrue(np.allclose(output.get_vector_data(i).get_value\n (), input_vec[i]))\n demux = Demultiplexer(size=4, output_ports_size=2)\n context = demux.CreateDefaultContext()\n self.assertEqual(demux.num_input_ports(), 1)\n self.assertEqual(demux.num_output_ports(), 2)\n numpy_compare.assert_equal(demux.get_output_ports_sizes(), [2, 2])\n demux.get_input_port(0).FixValue(context, input_vec)\n output = demux.AllocateOutput()\n demux.CalcOutput(context, output)\n for i in range(2):\n self.assertTrue(np.allclose(output.get_vector_data(i).get_value\n (), input_vec[2 * i:2 * i + 2]))\n output_ports_sizes = np.array([1, 2, 1])\n num_output_ports = output_ports_sizes.size\n input_vec = np.array([1.0, 2.0, 3.0, 4.0])\n demux = Demultiplexer(output_ports_sizes=output_ports_sizes)\n context = demux.CreateDefaultContext()\n self.assertEqual(demux.num_input_ports(), 1)\n self.assertEqual(demux.num_output_ports(), num_output_ports)\n numpy_compare.assert_equal(demux.get_output_ports_sizes(),\n output_ports_sizes)\n demux.get_input_port(0).FixValue(context, input_vec)\n output = 
demux.AllocateOutput()\n demux.CalcOutput(context, output)\n output_port_start = 0\n for i in range(num_output_ports):\n output_port_size = output.get_vector_data(i).size()\n self.assertTrue(np.allclose(output.get_vector_data(i).get_value\n (), input_vec[output_port_start:output_port_start +\n output_port_size]))\n output_port_start += output_port_size\n <mask token>\n\n def test_multilayer_perceptron(self):\n mlp = MultilayerPerceptron(layers=[1, 2, 3], activation_type=\n PerceptronActivationType.kReLU)\n self.assertEqual(mlp.get_input_port().size(), 1)\n self.assertEqual(mlp.get_output_port().size(), 3)\n context = mlp.CreateDefaultContext()\n params = np.zeros((mlp.num_parameters(), 1))\n self.assertEqual(mlp.num_parameters(), 13)\n self.assertEqual(mlp.layers(), [1, 2, 3])\n self.assertEqual(mlp.activation_type(layer=0),\n PerceptronActivationType.kReLU)\n self.assertEqual(len(mlp.GetParameters(context=context)), mlp.\n num_parameters())\n mlp.SetWeights(context=context, layer=0, W=np.array([[1], [2]]))\n mlp.SetBiases(context=context, layer=0, b=[3, 4])\n np.testing.assert_array_equal(mlp.GetWeights(context=context, layer\n =0), np.array([[1], [2]]))\n np.testing.assert_array_equal(mlp.GetBiases(context=context, layer=\n 0), np.array([3, 4]))\n params = np.zeros(mlp.num_parameters())\n mlp.SetWeights(params=params, layer=0, W=np.array([[1], [2]]))\n mlp.SetBiases(params=params, layer=0, b=[3, 4])\n np.testing.assert_array_equal(mlp.GetWeights(params=params, layer=0\n ), np.array([[1], [2]]))\n np.testing.assert_array_equal(mlp.GetBiases(params=params, layer=0),\n np.array([3, 4]))\n mutable_params = mlp.GetMutableParameters(context=context)\n mutable_params[:] = 3.0\n np.testing.assert_array_equal(mlp.GetParameters(context), np.full(\n mlp.num_parameters(), 3.0))\n global called_loss\n called_loss = False\n\n def silly_loss(Y, dloss_dY):\n global called_loss\n called_loss = True\n dloss_dY[:] = 1\n return Y.sum()\n dloss_dparams = np.zeros((13,))\n generator = RandomGenerator(23)\n mlp.SetRandomContext(context, generator)\n mlp.Backpropagation(context=context, X=np.array([1, 3, 4]).reshape(\n (1, 3)), loss=silly_loss, dloss_dparams=dloss_dparams)\n self.assertTrue(called_loss)\n self.assertTrue(dloss_dparams.any())\n dloss_dparams = np.zeros((13,))\n mlp.BackpropagationMeanSquaredError(context=context, X=np.array([1,\n 3, 4]).reshape((1, 3)), Y_desired=np.eye(3), dloss_dparams=\n dloss_dparams)\n self.assertTrue(dloss_dparams.any())\n Y = np.asfortranarray(np.eye(3))\n mlp.BatchOutput(context=context, X=np.array([[0.1, 0.3, 0.4]]), Y=Y)\n self.assertFalse(np.allclose(Y, np.eye(3)))\n Y2 = mlp.BatchOutput(context=context, X=np.array([[0.1, 0.3, 0.4]]))\n np.testing.assert_array_equal(Y, Y2)\n mlp2 = MultilayerPerceptron(layers=[3, 2, 1], activation_types=[\n PerceptronActivationType.kReLU, PerceptronActivationType.kTanh])\n self.assertEqual(mlp2.activation_type(0), PerceptronActivationType.\n kReLU)\n self.assertEqual(mlp2.activation_type(1), PerceptronActivationType.\n kTanh)\n Y = np.asfortranarray(np.full((1, 3), 2.4))\n dYdX = np.asfortranarray(np.full((3, 3), 5.3))\n context2 = mlp2.CreateDefaultContext()\n mlp2.BatchOutput(context=context2, X=np.eye(3), Y=Y, dYdX=dYdX)\n np.testing.assert_array_almost_equal(Y, np.zeros((1, 3)))\n np.testing.assert_array_almost_equal(dYdX, np.zeros((3, 3)))\n mlp = MultilayerPerceptron(use_sin_cos_for_input=[True, False],\n remaining_layers=[3, 2], activation_types=[\n PerceptronActivationType.kReLU, PerceptronActivationType.kTanh])\n 
self.assertEqual(mlp.get_input_port().size(), 2)\n np.testing.assert_array_equal(mlp.layers(), [3, 3, 2])\n\n def test_random_source(self):\n source = RandomSource(distribution=RandomDistribution.kUniform,\n num_outputs=2, sampling_interval_sec=0.01)\n self.assertEqual(source.get_output_port(0).size(), 2)\n builder = DiagramBuilder()\n AddRandomInputs(sampling_interval_sec=0.01, builder=builder)\n builder_ad = DiagramBuilder_[AutoDiffXd]()\n AddRandomInputs(sampling_interval_sec=0.01, builder=builder_ad)\n\n def test_constant_vector_source(self):\n source = ConstantVectorSource(source_value=[1.0, 2.0])\n context = source.CreateDefaultContext()\n source.get_source_value(context)\n source.get_mutable_source_value(context)\n <mask token>\n\n def test_shared_pointer_system_ctor(self):\n dut = SharedPointerSystem(value_to_hold=[1, 2, 3])\n readback = dut.get()\n self.assertListEqual(readback, [1, 2, 3])\n del dut\n self.assertListEqual(readback, [1, 2, 3])\n\n def test_shared_pointer_system_builder(self):\n builder = DiagramBuilder()\n self.assertListEqual(SharedPointerSystem.AddToBuilder(builder=\n builder, value_to_hold=[1, 2, 3]), [1, 2, 3])\n diagram = builder.Build()\n del builder\n readback = diagram.GetSystems()[0].get()\n self.assertListEqual(readback, [1, 2, 3])\n del diagram\n self.assertListEqual(readback, [1, 2, 3])\n\n def test_sine(self):\n sine_source = Sine(amplitude=1, frequency=2, phase=3, size=1,\n is_time_based=True)\n self.assertEqual(sine_source.get_output_port(0).size(), 1)\n self.assertEqual(sine_source.get_output_port(1).size(), 1)\n self.assertEqual(sine_source.get_output_port(2).size(), 1)\n sine_source = Sine(amplitude=1, frequency=2, phase=3, size=3,\n is_time_based=True)\n self.assertEqual(sine_source.get_output_port(0).size(), 3)\n self.assertEqual(sine_source.get_output_port(1).size(), 3)\n self.assertEqual(sine_source.get_output_port(2).size(), 3)\n sine_source = Sine(amplitudes=np.ones(2), frequencies=np.ones(2),\n phases=np.ones(2), is_time_based=True)\n self.assertEqual(sine_source.get_output_port(0).size(), 2)\n self.assertEqual(sine_source.get_output_port(1).size(), 2)\n self.assertEqual(sine_source.get_output_port(2).size(), 2)\n\n def test_discrete_derivative(self):\n discrete_derivative = DiscreteDerivative(num_inputs=5, time_step=0.5)\n self.assertEqual(discrete_derivative.get_input_port(0).size(), 5)\n self.assertEqual(discrete_derivative.get_output_port(0).size(), 5)\n self.assertEqual(discrete_derivative.time_step(), 0.5)\n self.assertTrue(discrete_derivative.suppress_initial_transient())\n discrete_derivative = DiscreteDerivative(num_inputs=5, time_step=\n 0.5, suppress_initial_transient=False)\n self.assertFalse(discrete_derivative.suppress_initial_transient())\n <mask token>\n\n @numpy_compare.check_nonsymbolic_types\n def test_log_vector_output(self, T):\n builder = DiagramBuilder_[T]()\n kSize = 1\n integrator = builder.AddSystem(Integrator_[T](kSize))\n port = integrator.get_output_port(0)\n loggers = []\n loggers.append(LogVectorOutput(port, builder))\n loggers.append(LogVectorOutput(src=port, builder=builder))\n loggers.append(LogVectorOutput(port, builder, 0.125))\n loggers.append(LogVectorOutput(src=port, builder=builder,\n publish_period=0.125))\n loggers.append(LogVectorOutput(port, builder, {TriggerType.kForced}))\n loggers.append(LogVectorOutput(src=port, builder=builder,\n publish_triggers={TriggerType.kForced}))\n loggers.append(LogVectorOutput(port, builder, {TriggerType.\n kPeriodic}, 0.125))\n 
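# Periodic publishing, spelled positionally above and with keyword\n # arguments below; all eight loggers should record identically.\n 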
loggers.append(LogVectorOutput(src=port, builder=builder,\n publish_triggers={TriggerType.kPeriodic}, publish_period=0.125))\n diagram = builder.Build()\n context = diagram.CreateDefaultContext()\n self.assertTrue(all(logger.FindLog(context).num_samples() == 0 for\n logger in loggers))\n <mask token>\n\n @numpy_compare.check_nonsymbolic_types\n def test_vector_log_sink(self, T):\n builder = DiagramBuilder_[T]()\n kSize = 1\n constructors = [VectorLogSink_[T]]\n loggers = []\n if T == float:\n constructors.append(VectorLogSink)\n for constructor in constructors:\n loggers.append(builder.AddSystem(constructor(kSize)))\n loggers.append(builder.AddSystem(constructor(input_size=kSize)))\n loggers.append(builder.AddSystem(constructor(kSize, 0.125)))\n loggers.append(builder.AddSystem(constructor(input_size=kSize,\n publish_period=0.125)))\n loggers.append(builder.AddSystem(constructor(kSize, {\n TriggerType.kForced})))\n loggers.append(builder.AddSystem(constructor(input_size=kSize,\n publish_triggers={TriggerType.kForced})))\n loggers.append(builder.AddSystem(constructor(kSize, {\n TriggerType.kPeriodic}, 0.125)))\n loggers.append(builder.AddSystem(constructor(input_size=kSize,\n publish_triggers={TriggerType.kPeriodic}, publish_period=\n 0.125)))\n diagram = builder.Build()\n context = diagram.CreateDefaultContext()\n self.assertTrue(all(logger.FindLog(context) == logger.\n FindMutableLog(context) for logger in loggers))\n loggers_and_contexts = [(x, x.GetMyContextFromRoot(context)) for x in\n loggers]\n self.assertTrue(all(logger.GetLog(logger_context) == logger.\n GetMutableLog(logger_context) for logger, logger_context in\n loggers_and_contexts))\n self.assertTrue(all(logger.GetLog(logger_context) == logger.FindLog\n (context) for logger, logger_context in loggers_and_contexts))\n",
"step-3": "<mask token>\n\n\nclass TestGeneral(unittest.TestCase):\n\n def _check_instantiations(self, template, supports_symbolic=True):\n default_cls = template[None]\n self.assertTrue(template[float] is default_cls)\n self.assertTrue(template[AutoDiffXd] is not default_cls)\n if supports_symbolic:\n self.assertTrue(template[Expression] is not default_cls)\n\n def test_instantiations(self):\n self._check_instantiations(Adder_)\n self._check_instantiations(AffineSystem_)\n self._check_instantiations(ConstantValueSource_)\n self._check_instantiations(ConstantVectorSource_)\n self._check_instantiations(Demultiplexer_)\n self._check_instantiations(DiscreteDerivative_)\n self._check_instantiations(DiscreteTimeDelay_)\n self._check_instantiations(Gain_)\n self._check_instantiations(Integrator_)\n self._check_instantiations(LinearSystem_)\n self._check_instantiations(LinearTransformDensity_,\n supports_symbolic=False)\n self._check_instantiations(Multiplexer_)\n self._check_instantiations(MultilayerPerceptron_)\n self._check_instantiations(PassThrough_)\n self._check_instantiations(PortSwitch_)\n self._check_instantiations(Saturation_)\n self._check_instantiations(SharedPointerSystem_)\n self._check_instantiations(Sine_)\n self._check_instantiations(StateInterpolatorWithDiscreteDerivative_)\n self._check_instantiations(SymbolicVectorSystem_)\n self._check_instantiations(TrajectoryAffineSystem_,\n supports_symbolic=False)\n self._check_instantiations(TrajectoryLinearSystem_,\n supports_symbolic=False)\n self._check_instantiations(TrajectorySource_)\n self._check_instantiations(VectorLogSink_)\n self._check_instantiations(WrapToSystem_)\n self._check_instantiations(ZeroOrderHold_)\n\n def test_linear_affine_system(self):\n A = np.identity(2)\n B = np.array([[0], [1]])\n f0 = np.array([[0], [0]])\n C = np.array([[0, 1]])\n D = [1]\n y0 = [0]\n system = LinearSystem(A, B, C, D)\n context = system.CreateDefaultContext()\n self.assertEqual(system.get_input_port(0).size(), 1)\n self.assertEqual(context.get_mutable_continuous_state_vector().size\n (), 2)\n self.assertEqual(system.get_output_port(0).size(), 1)\n self.assertTrue((system.A() == A).all())\n self.assertTrue((system.B() == B).all())\n self.assertTrue((system.f0() == f0).all())\n self.assertTrue((system.C() == C).all())\n self.assertEqual(system.D(), D)\n self.assertEqual(system.y0(), y0)\n self.assertEqual(system.time_period(), 0.0)\n x0 = np.array([1, 2])\n system.configure_default_state(x0=x0)\n system.SetDefaultContext(context)\n np.testing.assert_equal(context.get_continuous_state_vector().\n CopyToVector(), x0)\n generator = RandomGenerator()\n system.SetRandomContext(context, generator)\n np.testing.assert_equal(context.get_continuous_state_vector().\n CopyToVector(), x0)\n system.configure_random_state(covariance=np.eye(2))\n system.SetRandomContext(context, generator)\n self.assertNotEqual(context.get_continuous_state_vector().\n CopyToVector()[1], x0[1])\n Co = ControllabilityMatrix(system)\n self.assertEqual(Co.shape, (2, 2))\n self.assertFalse(IsControllable(system))\n self.assertFalse(IsControllable(system, 1e-06))\n self.assertFalse(IsStabilizable(sys=system))\n self.assertFalse(IsStabilizable(sys=system, threshold=1e-06))\n Ob = ObservabilityMatrix(system)\n self.assertEqual(Ob.shape, (2, 2))\n self.assertFalse(IsObservable(system))\n self.assertFalse(IsDetectable(sys=system))\n self.assertFalse(IsDetectable(sys=system, threshold=1e-06))\n system = AffineSystem(A, B, f0, C, D, y0, 0.1)\n 
self.assertEqual(system.get_input_port(0), system.get_input_port())\n self.assertEqual(system.get_output_port(0), system.get_output_port())\n context = system.CreateDefaultContext()\n self.assertEqual(system.get_input_port(0).size(), 1)\n self.assertEqual(context.get_discrete_state_vector().size(), 2)\n self.assertEqual(system.get_output_port(0).size(), 1)\n self.assertTrue((system.A() == A).all())\n self.assertTrue((system.B() == B).all())\n self.assertTrue((system.f0() == f0).all())\n self.assertTrue((system.C() == C).all())\n self.assertEqual(system.D(), D)\n self.assertEqual(system.y0(), y0)\n self.assertEqual(system.time_period(), 0.1)\n system.get_input_port(0).FixValue(context, 0)\n linearized = Linearize(system, context)\n self.assertTrue((linearized.A() == A).all())\n taylor = FirstOrderTaylorApproximation(system, context)\n self.assertTrue((taylor.y0() == y0).all())\n new_A = np.array([[1, 2], [3, 4]])\n new_B = np.array([[5], [6]])\n new_f0 = np.array([[7], [8]])\n new_C = np.array([[9, 10]])\n new_D = np.array([[11]])\n new_y0 = np.array([12])\n system.UpdateCoefficients(A=new_A, B=new_B, f0=new_f0, C=new_C, D=\n new_D, y0=new_y0)\n np.testing.assert_equal(new_A, system.A())\n np.testing.assert_equal(new_B, system.B())\n np.testing.assert_equal(new_f0.flatten(), system.f0())\n np.testing.assert_equal(new_C, system.C())\n np.testing.assert_equal(new_D, system.D())\n np.testing.assert_equal(new_y0, system.y0())\n system = MatrixGain(D=A)\n self.assertTrue((system.D() == A).all())\n system = TrajectoryAffineSystem(PiecewisePolynomial(A),\n PiecewisePolynomial(B), PiecewisePolynomial(f0),\n PiecewisePolynomial(C), PiecewisePolynomial(D),\n PiecewisePolynomial(y0), 0.1)\n self.assertEqual(system.get_input_port(0), system.get_input_port())\n self.assertEqual(system.get_output_port(0), system.get_output_port())\n context = system.CreateDefaultContext()\n self.assertEqual(system.get_input_port(0).size(), 1)\n self.assertEqual(context.get_discrete_state_vector().size(), 2)\n self.assertEqual(system.get_output_port(0).size(), 1)\n for t in np.linspace(0.0, 1.0, 5):\n self.assertTrue((system.A(t) == A).all())\n self.assertTrue((system.B(t) == B).all())\n self.assertTrue((system.f0(t) == f0).all())\n self.assertTrue((system.C(t) == C).all())\n self.assertEqual(system.D(t), D)\n self.assertEqual(system.y0(t), y0)\n self.assertEqual(system.time_period(), 0.1)\n x0 = np.array([1, 2])\n system.configure_default_state(x0=x0)\n system.SetDefaultContext(context)\n np.testing.assert_equal(context.get_discrete_state_vector().\n CopyToVector(), x0)\n generator = RandomGenerator()\n system.SetRandomContext(context, generator)\n np.testing.assert_equal(context.get_discrete_state_vector().\n CopyToVector(), x0)\n system.configure_random_state(covariance=np.eye(2))\n system.SetRandomContext(context, generator)\n self.assertNotEqual(context.get_discrete_state_vector().\n CopyToVector()[1], x0[1])\n system = TrajectoryLinearSystem(A=PiecewisePolynomial(A), B=\n PiecewisePolynomial(B), C=PiecewisePolynomial(C), D=\n PiecewisePolynomial(D), time_period=0.1)\n self.assertEqual(system.time_period(), 0.1)\n system.configure_default_state(x0=np.array([1, 2]))\n system.configure_random_state(covariance=np.eye(2))\n\n def test_linear_affine_system_empty_matrices(self):\n\n def CheckSizes(system, num_states, num_inputs, num_outputs):\n self.assertEqual(system.num_continuous_states(), num_states)\n self.assertEqual(system.num_inputs(), num_inputs)\n self.assertEqual(system.num_outputs(), num_outputs)\n system = 
AffineSystem(y0=[2, 1])\n CheckSizes(system, num_states=0, num_inputs=0, num_outputs=2)\n system = AffineSystem(D=np.eye(2))\n CheckSizes(system, num_states=0, num_inputs=2, num_outputs=2)\n system = LinearSystem(D=np.eye(2))\n CheckSizes(system, num_states=0, num_inputs=2, num_outputs=2)\n system = AffineSystem(D=np.eye(2), y0=[1, 2])\n CheckSizes(system, num_states=0, num_inputs=2, num_outputs=2)\n system = LinearSystem(B=np.eye(2))\n CheckSizes(system, num_states=2, num_inputs=2, num_outputs=0)\n\n def test_linear_system_zero_size(self):\n num_x = 0\n num_y = 2\n num_u = 2\n A = np.zeros((num_x, num_x))\n B = np.zeros((num_x, num_u))\n C = np.zeros((num_y, num_x))\n D = np.zeros((num_y, num_u))\n self.assertIsNotNone(LinearSystem(A, B, C, D))\n\n @numpy_compare.check_nonsymbolic_types\n def test_linear_transform_density(self, T):\n dut = LinearTransformDensity_[T](distribution=RandomDistribution.\n kGaussian, input_size=3, output_size=3)\n w_in = np.array([T(0.5), T(0.1), T(1.5)])\n context = dut.CreateDefaultContext()\n dut.get_input_port_w_in().FixValue(context, w_in)\n self.assertEqual(dut.get_input_port_A().size(), 9)\n self.assertEqual(dut.get_input_port_b().size(), 3)\n self.assertEqual(dut.get_distribution(), RandomDistribution.kGaussian)\n A = np.array([[T(0.5), T(1), T(2)], [T(1), T(2), T(3)], [T(3), T(4),\n T(5)]])\n dut.FixConstantA(context=context, A=A)\n b = np.array([T(1), T(2), T(3)])\n dut.FixConstantB(context=context, b=b)\n dut.CalcDensity(context=context)\n self.assertEqual(dut.get_output_port_w_out().size(), 3)\n self.assertEqual(dut.get_output_port_w_out_density().size(), 1)\n\n def test_vector_pass_through(self):\n model_value = BasicVector([1.0, 2, 3])\n system = PassThrough(vector_size=model_value.size())\n context = system.CreateDefaultContext()\n system.get_input_port(0).FixValue(context, model_value)\n output = system.AllocateOutput()\n input_eval = system.EvalVectorInput(context, 0)\n compare_value(self, input_eval, model_value)\n system.CalcOutput(context, output)\n output_value = output.get_vector_data(0)\n compare_value(self, output_value, model_value)\n\n def test_default_vector_pass_through(self):\n model_value = [1.0, 2, 3]\n system = PassThrough(value=model_value)\n context = system.CreateDefaultContext()\n np.testing.assert_array_equal(model_value, system.get_output_port()\n .Eval(context))\n\n def test_abstract_pass_through(self):\n model_value = Value('Hello world')\n system = PassThrough(abstract_model_value=model_value)\n context = system.CreateDefaultContext()\n system.get_input_port(0).FixValue(context, model_value)\n output = system.AllocateOutput()\n input_eval = system.EvalAbstractInput(context, 0)\n compare_value(self, input_eval, model_value)\n system.CalcOutput(context, output)\n output_value = output.get_data(0)\n compare_value(self, output_value, model_value)\n\n def test_port_switch(self):\n system = PortSwitch(vector_size=2)\n a = system.DeclareInputPort(name='a')\n system.DeclareInputPort(name='b')\n context = system.CreateDefaultContext()\n self.assertIsInstance(a, InputPort)\n system.get_port_selector_input_port().FixValue(context, a.get_index())\n\n def test_first_order_low_pass_filter(self):\n filter1 = FirstOrderLowPassFilter(time_constant=3.0, size=4)\n self.assertEqual(filter1.get_time_constant(), 3.0)\n alpha = np.array([1, 2, 3])\n filter2 = FirstOrderLowPassFilter(time_constants=alpha)\n np.testing.assert_array_equal(filter2.get_time_constants_vector(),\n alpha)\n context = filter2.CreateDefaultContext()\n 
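# Seeds the filter state so the output starts at the given vector\n # rather than at zero.\n 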
filter2.set_initial_output_value(context, [0.0, -0.2, 0.4])\n <mask token>\n\n def test_saturation(self):\n system = Saturation((0.0, -1.0, 3.0), (1.0, 2.0, 4.0))\n context = system.CreateDefaultContext()\n output = system.AllocateOutput()\n\n def mytest(input, expected):\n system.get_input_port(0).FixValue(context, input)\n system.CalcOutput(context, output)\n self.assertTrue(np.allclose(output.get_vector_data(0).\n CopyToVector(), expected))\n mytest((-5.0, 5.0, 4.0), (0.0, 2.0, 4.0))\n mytest((0.4, 0.0, 3.5), (0.4, 0.0, 3.5))\n\n def test_trajectory_source(self):\n ppt = PiecewisePolynomial.FirstOrderHold([0.0, 1.0], [[2.0, 3.0], [\n 2.0, 1.0]])\n system = TrajectorySource(trajectory=ppt, output_derivative_order=0,\n zero_derivatives_beyond_limits=True)\n context = system.CreateDefaultContext()\n output = system.AllocateOutput()\n\n def mytest(input, expected):\n context.SetTime(input)\n system.CalcOutput(context, output)\n self.assertTrue(np.allclose(output.get_vector_data(0).\n CopyToVector(), expected))\n mytest(0.0, (2.0, 2.0))\n mytest(0.5, (2.5, 1.5))\n mytest(1.0, (3.0, 1.0))\n ppt2 = PiecewisePolynomial.FirstOrderHold([0.0, 1.0], [[4.0, 6.0],\n [4.0, 2.0]])\n system.UpdateTrajectory(trajectory=ppt2)\n mytest(0.0, (4.0, 4.0))\n mytest(0.5, (5.0, 3.0))\n mytest(1.0, (6.0, 2.0))\n\n def test_symbolic_vector_system(self):\n t = Variable('t')\n x = [Variable('x0'), Variable('x1')]\n u = [Variable('u0'), Variable('u1')]\n system = SymbolicVectorSystem(time=t, state=x, input=u, dynamics=[x\n [0] + x[1], t], output=[u[1]], time_period=0.0)\n context = system.CreateDefaultContext()\n self.assertEqual(context.num_continuous_states(), 2)\n self.assertEqual(context.num_discrete_state_groups(), 0)\n self.assertEqual(system.get_input_port(0).size(), 2)\n self.assertEqual(system.get_output_port(0).size(), 1)\n self.assertEqual(context.num_abstract_parameters(), 0)\n self.assertEqual(context.num_numeric_parameter_groups(), 0)\n self.assertTrue(system.dynamics_for_variable(x[0]).EqualTo(x[0] + x[1])\n )\n self.assertTrue(system.dynamics_for_variable(x[1]).EqualTo(t))\n\n def test_symbolic_vector_system_parameters(self):\n t = Variable('t')\n x = [Variable('x0'), Variable('x1')]\n u = [Variable('u0'), Variable('u1')]\n p = [Variable('p0'), Variable('p1')]\n system = SymbolicVectorSystem(time=t, state=x, input=u, parameter=p,\n dynamics=[p[0] * x[0] + x[1] + p[1], t], output=[u[1]],\n time_period=0.0)\n context = system.CreateDefaultContext()\n self.assertEqual(context.num_continuous_states(), 2)\n self.assertEqual(context.num_discrete_state_groups(), 0)\n self.assertEqual(system.get_input_port(0).size(), 2)\n self.assertEqual(system.get_output_port(0).size(), 1)\n self.assertEqual(context.num_abstract_parameters(), 0)\n self.assertEqual(context.num_numeric_parameter_groups(), 1)\n self.assertEqual(context.get_numeric_parameter(0).size(), 2)\n self.assertTrue(system.dynamics_for_variable(x[0]).EqualTo(p[0] * x\n [0] + x[1] + p[1]))\n self.assertTrue(system.dynamics_for_variable(x[1]).EqualTo(t))\n\n def test_wrap_to_system(self):\n system = WrapToSystem(2)\n system.set_interval(1, 1.0, 2.0)\n context = system.CreateDefaultContext()\n output = system.AllocateOutput()\n\n def mytest(input, expected):\n system.get_input_port(0).FixValue(context, input)\n system.CalcOutput(context, output)\n self.assertTrue(np.allclose(output.get_vector_data(0).\n CopyToVector(), expected))\n mytest((-1.5, 0.5), (-1.5, 1.5))\n mytest((0.2, 0.3), (0.2, 1.3))\n\n def test_demultiplexer(self):\n demux = 
Demultiplexer(size=4)\n context = demux.CreateDefaultContext()\n self.assertEqual(demux.num_input_ports(), 1)\n self.assertEqual(demux.num_output_ports(), 4)\n numpy_compare.assert_equal(demux.get_output_ports_sizes(), [1, 1, 1, 1]\n )\n input_vec = np.array([1.0, 2.0, 3.0, 4.0])\n demux.get_input_port(0).FixValue(context, input_vec)\n output = demux.AllocateOutput()\n demux.CalcOutput(context, output)\n for i in range(4):\n self.assertTrue(np.allclose(output.get_vector_data(i).get_value\n (), input_vec[i]))\n demux = Demultiplexer(size=4, output_ports_size=2)\n context = demux.CreateDefaultContext()\n self.assertEqual(demux.num_input_ports(), 1)\n self.assertEqual(demux.num_output_ports(), 2)\n numpy_compare.assert_equal(demux.get_output_ports_sizes(), [2, 2])\n demux.get_input_port(0).FixValue(context, input_vec)\n output = demux.AllocateOutput()\n demux.CalcOutput(context, output)\n for i in range(2):\n self.assertTrue(np.allclose(output.get_vector_data(i).get_value\n (), input_vec[2 * i:2 * i + 2]))\n output_ports_sizes = np.array([1, 2, 1])\n num_output_ports = output_ports_sizes.size\n input_vec = np.array([1.0, 2.0, 3.0, 4.0])\n demux = Demultiplexer(output_ports_sizes=output_ports_sizes)\n context = demux.CreateDefaultContext()\n self.assertEqual(demux.num_input_ports(), 1)\n self.assertEqual(demux.num_output_ports(), num_output_ports)\n numpy_compare.assert_equal(demux.get_output_ports_sizes(),\n output_ports_sizes)\n demux.get_input_port(0).FixValue(context, input_vec)\n output = demux.AllocateOutput()\n demux.CalcOutput(context, output)\n output_port_start = 0\n for i in range(num_output_ports):\n output_port_size = output.get_vector_data(i).size()\n self.assertTrue(np.allclose(output.get_vector_data(i).get_value\n (), input_vec[output_port_start:output_port_start +\n output_port_size]))\n output_port_start += output_port_size\n <mask token>\n\n def test_multilayer_perceptron(self):\n mlp = MultilayerPerceptron(layers=[1, 2, 3], activation_type=\n PerceptronActivationType.kReLU)\n self.assertEqual(mlp.get_input_port().size(), 1)\n self.assertEqual(mlp.get_output_port().size(), 3)\n context = mlp.CreateDefaultContext()\n params = np.zeros((mlp.num_parameters(), 1))\n self.assertEqual(mlp.num_parameters(), 13)\n self.assertEqual(mlp.layers(), [1, 2, 3])\n self.assertEqual(mlp.activation_type(layer=0),\n PerceptronActivationType.kReLU)\n self.assertEqual(len(mlp.GetParameters(context=context)), mlp.\n num_parameters())\n mlp.SetWeights(context=context, layer=0, W=np.array([[1], [2]]))\n mlp.SetBiases(context=context, layer=0, b=[3, 4])\n np.testing.assert_array_equal(mlp.GetWeights(context=context, layer\n =0), np.array([[1], [2]]))\n np.testing.assert_array_equal(mlp.GetBiases(context=context, layer=\n 0), np.array([3, 4]))\n params = np.zeros(mlp.num_parameters())\n mlp.SetWeights(params=params, layer=0, W=np.array([[1], [2]]))\n mlp.SetBiases(params=params, layer=0, b=[3, 4])\n np.testing.assert_array_equal(mlp.GetWeights(params=params, layer=0\n ), np.array([[1], [2]]))\n np.testing.assert_array_equal(mlp.GetBiases(params=params, layer=0),\n np.array([3, 4]))\n mutable_params = mlp.GetMutableParameters(context=context)\n mutable_params[:] = 3.0\n np.testing.assert_array_equal(mlp.GetParameters(context), np.full(\n mlp.num_parameters(), 3.0))\n global called_loss\n called_loss = False\n\n def silly_loss(Y, dloss_dY):\n global called_loss\n called_loss = True\n dloss_dY[:] = 1\n return Y.sum()\n dloss_dparams = np.zeros((13,))\n generator = RandomGenerator(23)\n 
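# Randomize the MLP parameters (seeded for determinism) so the\n # backpropagated gradient asserted below is nonzero.\n 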
mlp.SetRandomContext(context, generator)\n mlp.Backpropagation(context=context, X=np.array([1, 3, 4]).reshape(\n (1, 3)), loss=silly_loss, dloss_dparams=dloss_dparams)\n self.assertTrue(called_loss)\n self.assertTrue(dloss_dparams.any())\n dloss_dparams = np.zeros((13,))\n mlp.BackpropagationMeanSquaredError(context=context, X=np.array([1,\n 3, 4]).reshape((1, 3)), Y_desired=np.eye(3), dloss_dparams=\n dloss_dparams)\n self.assertTrue(dloss_dparams.any())\n Y = np.asfortranarray(np.eye(3))\n mlp.BatchOutput(context=context, X=np.array([[0.1, 0.3, 0.4]]), Y=Y)\n self.assertFalse(np.allclose(Y, np.eye(3)))\n Y2 = mlp.BatchOutput(context=context, X=np.array([[0.1, 0.3, 0.4]]))\n np.testing.assert_array_equal(Y, Y2)\n mlp2 = MultilayerPerceptron(layers=[3, 2, 1], activation_types=[\n PerceptronActivationType.kReLU, PerceptronActivationType.kTanh])\n self.assertEqual(mlp2.activation_type(0), PerceptronActivationType.\n kReLU)\n self.assertEqual(mlp2.activation_type(1), PerceptronActivationType.\n kTanh)\n Y = np.asfortranarray(np.full((1, 3), 2.4))\n dYdX = np.asfortranarray(np.full((3, 3), 5.3))\n context2 = mlp2.CreateDefaultContext()\n mlp2.BatchOutput(context=context2, X=np.eye(3), Y=Y, dYdX=dYdX)\n np.testing.assert_array_almost_equal(Y, np.zeros((1, 3)))\n np.testing.assert_array_almost_equal(dYdX, np.zeros((3, 3)))\n mlp = MultilayerPerceptron(use_sin_cos_for_input=[True, False],\n remaining_layers=[3, 2], activation_types=[\n PerceptronActivationType.kReLU, PerceptronActivationType.kTanh])\n self.assertEqual(mlp.get_input_port().size(), 2)\n np.testing.assert_array_equal(mlp.layers(), [3, 3, 2])\n\n def test_random_source(self):\n source = RandomSource(distribution=RandomDistribution.kUniform,\n num_outputs=2, sampling_interval_sec=0.01)\n self.assertEqual(source.get_output_port(0).size(), 2)\n builder = DiagramBuilder()\n AddRandomInputs(sampling_interval_sec=0.01, builder=builder)\n builder_ad = DiagramBuilder_[AutoDiffXd]()\n AddRandomInputs(sampling_interval_sec=0.01, builder=builder_ad)\n\n def test_constant_vector_source(self):\n source = ConstantVectorSource(source_value=[1.0, 2.0])\n context = source.CreateDefaultContext()\n source.get_source_value(context)\n source.get_mutable_source_value(context)\n\n def test_ctor_api(self):\n \"\"\"Tests construction of systems for systems whose execution semantics\n are not tested above.\n \"\"\"\n ConstantValueSource(Value('Hello world'))\n DiscreteTimeDelay(update_sec=0.1, delay_time_steps=5, vector_size=2)\n DiscreteTimeDelay(update_sec=0.1, delay_time_steps=5,\n abstract_model_value=Value('Hello world'))\n with catch_drake_warnings(expected_count=2) as w:\n DiscreteTimeDelay(update_sec=0.1, delay_timesteps=5, vector_size=2)\n DiscreteTimeDelay(update_sec=0.1, delay_timesteps=5,\n abstract_model_value=Value('Hello world'))\n ZeroOrderHold(period_sec=0.1, offset_sec=0.0, vector_size=2)\n dut = ZeroOrderHold(period_sec=1.0, offset_sec=0.25,\n abstract_model_value=Value('Hello world'))\n self.assertEqual(dut.period(), 1.0)\n self.assertEqual(dut.offset(), 0.25)\n\n def test_shared_pointer_system_ctor(self):\n dut = SharedPointerSystem(value_to_hold=[1, 2, 3])\n readback = dut.get()\n self.assertListEqual(readback, [1, 2, 3])\n del dut\n self.assertListEqual(readback, [1, 2, 3])\n\n def test_shared_pointer_system_builder(self):\n builder = DiagramBuilder()\n self.assertListEqual(SharedPointerSystem.AddToBuilder(builder=\n builder, value_to_hold=[1, 2, 3]), [1, 2, 3])\n diagram = builder.Build()\n del builder\n readback = 
diagram.GetSystems()[0].get()\n self.assertListEqual(readback, [1, 2, 3])\n del diagram\n self.assertListEqual(readback, [1, 2, 3])\n\n def test_sine(self):\n sine_source = Sine(amplitude=1, frequency=2, phase=3, size=1,\n is_time_based=True)\n self.assertEqual(sine_source.get_output_port(0).size(), 1)\n self.assertEqual(sine_source.get_output_port(1).size(), 1)\n self.assertEqual(sine_source.get_output_port(2).size(), 1)\n sine_source = Sine(amplitude=1, frequency=2, phase=3, size=3,\n is_time_based=True)\n self.assertEqual(sine_source.get_output_port(0).size(), 3)\n self.assertEqual(sine_source.get_output_port(1).size(), 3)\n self.assertEqual(sine_source.get_output_port(2).size(), 3)\n sine_source = Sine(amplitudes=np.ones(2), frequencies=np.ones(2),\n phases=np.ones(2), is_time_based=True)\n self.assertEqual(sine_source.get_output_port(0).size(), 2)\n self.assertEqual(sine_source.get_output_port(1).size(), 2)\n self.assertEqual(sine_source.get_output_port(2).size(), 2)\n\n def test_discrete_derivative(self):\n discrete_derivative = DiscreteDerivative(num_inputs=5, time_step=0.5)\n self.assertEqual(discrete_derivative.get_input_port(0).size(), 5)\n self.assertEqual(discrete_derivative.get_output_port(0).size(), 5)\n self.assertEqual(discrete_derivative.time_step(), 0.5)\n self.assertTrue(discrete_derivative.suppress_initial_transient())\n discrete_derivative = DiscreteDerivative(num_inputs=5, time_step=\n 0.5, suppress_initial_transient=False)\n self.assertFalse(discrete_derivative.suppress_initial_transient())\n <mask token>\n\n @numpy_compare.check_nonsymbolic_types\n def test_log_vector_output(self, T):\n builder = DiagramBuilder_[T]()\n kSize = 1\n integrator = builder.AddSystem(Integrator_[T](kSize))\n port = integrator.get_output_port(0)\n loggers = []\n loggers.append(LogVectorOutput(port, builder))\n loggers.append(LogVectorOutput(src=port, builder=builder))\n loggers.append(LogVectorOutput(port, builder, 0.125))\n loggers.append(LogVectorOutput(src=port, builder=builder,\n publish_period=0.125))\n loggers.append(LogVectorOutput(port, builder, {TriggerType.kForced}))\n loggers.append(LogVectorOutput(src=port, builder=builder,\n publish_triggers={TriggerType.kForced}))\n loggers.append(LogVectorOutput(port, builder, {TriggerType.\n kPeriodic}, 0.125))\n loggers.append(LogVectorOutput(src=port, builder=builder,\n publish_triggers={TriggerType.kPeriodic}, publish_period=0.125))\n diagram = builder.Build()\n context = diagram.CreateDefaultContext()\n self.assertTrue(all(logger.FindLog(context).num_samples() == 0 for\n logger in loggers))\n <mask token>\n\n @numpy_compare.check_nonsymbolic_types\n def test_vector_log_sink(self, T):\n builder = DiagramBuilder_[T]()\n kSize = 1\n constructors = [VectorLogSink_[T]]\n loggers = []\n if T == float:\n constructors.append(VectorLogSink)\n for constructor in constructors:\n loggers.append(builder.AddSystem(constructor(kSize)))\n loggers.append(builder.AddSystem(constructor(input_size=kSize)))\n loggers.append(builder.AddSystem(constructor(kSize, 0.125)))\n loggers.append(builder.AddSystem(constructor(input_size=kSize,\n publish_period=0.125)))\n loggers.append(builder.AddSystem(constructor(kSize, {\n TriggerType.kForced})))\n loggers.append(builder.AddSystem(constructor(input_size=kSize,\n publish_triggers={TriggerType.kForced})))\n loggers.append(builder.AddSystem(constructor(kSize, {\n TriggerType.kPeriodic}, 0.125)))\n loggers.append(builder.AddSystem(constructor(input_size=kSize,\n publish_triggers={TriggerType.kPeriodic}, 
publish_period=\n 0.125)))\n diagram = builder.Build()\n context = diagram.CreateDefaultContext()\n self.assertTrue(all(logger.FindLog(context) == logger.\n FindMutableLog(context) for logger in loggers))\n loggers_and_contexts = [(x, x.GetMyContextFromRoot(context)) for x in\n loggers]\n self.assertTrue(all(logger.GetLog(logger_context) == logger.\n GetMutableLog(logger_context) for logger, logger_context in\n loggers_and_contexts))\n self.assertTrue(all(logger.GetLog(logger_context) == logger.FindLog\n (context) for logger, logger_context in loggers_and_contexts))\n",
"step-4": "<mask token>\n\n\nclass TestGeneral(unittest.TestCase):\n\n def _check_instantiations(self, template, supports_symbolic=True):\n default_cls = template[None]\n self.assertTrue(template[float] is default_cls)\n self.assertTrue(template[AutoDiffXd] is not default_cls)\n if supports_symbolic:\n self.assertTrue(template[Expression] is not default_cls)\n\n def test_instantiations(self):\n self._check_instantiations(Adder_)\n self._check_instantiations(AffineSystem_)\n self._check_instantiations(ConstantValueSource_)\n self._check_instantiations(ConstantVectorSource_)\n self._check_instantiations(Demultiplexer_)\n self._check_instantiations(DiscreteDerivative_)\n self._check_instantiations(DiscreteTimeDelay_)\n self._check_instantiations(Gain_)\n self._check_instantiations(Integrator_)\n self._check_instantiations(LinearSystem_)\n self._check_instantiations(LinearTransformDensity_,\n supports_symbolic=False)\n self._check_instantiations(Multiplexer_)\n self._check_instantiations(MultilayerPerceptron_)\n self._check_instantiations(PassThrough_)\n self._check_instantiations(PortSwitch_)\n self._check_instantiations(Saturation_)\n self._check_instantiations(SharedPointerSystem_)\n self._check_instantiations(Sine_)\n self._check_instantiations(StateInterpolatorWithDiscreteDerivative_)\n self._check_instantiations(SymbolicVectorSystem_)\n self._check_instantiations(TrajectoryAffineSystem_,\n supports_symbolic=False)\n self._check_instantiations(TrajectoryLinearSystem_,\n supports_symbolic=False)\n self._check_instantiations(TrajectorySource_)\n self._check_instantiations(VectorLogSink_)\n self._check_instantiations(WrapToSystem_)\n self._check_instantiations(ZeroOrderHold_)\n\n def test_linear_affine_system(self):\n A = np.identity(2)\n B = np.array([[0], [1]])\n f0 = np.array([[0], [0]])\n C = np.array([[0, 1]])\n D = [1]\n y0 = [0]\n system = LinearSystem(A, B, C, D)\n context = system.CreateDefaultContext()\n self.assertEqual(system.get_input_port(0).size(), 1)\n self.assertEqual(context.get_mutable_continuous_state_vector().size\n (), 2)\n self.assertEqual(system.get_output_port(0).size(), 1)\n self.assertTrue((system.A() == A).all())\n self.assertTrue((system.B() == B).all())\n self.assertTrue((system.f0() == f0).all())\n self.assertTrue((system.C() == C).all())\n self.assertEqual(system.D(), D)\n self.assertEqual(system.y0(), y0)\n self.assertEqual(system.time_period(), 0.0)\n x0 = np.array([1, 2])\n system.configure_default_state(x0=x0)\n system.SetDefaultContext(context)\n np.testing.assert_equal(context.get_continuous_state_vector().\n CopyToVector(), x0)\n generator = RandomGenerator()\n system.SetRandomContext(context, generator)\n np.testing.assert_equal(context.get_continuous_state_vector().\n CopyToVector(), x0)\n system.configure_random_state(covariance=np.eye(2))\n system.SetRandomContext(context, generator)\n self.assertNotEqual(context.get_continuous_state_vector().\n CopyToVector()[1], x0[1])\n Co = ControllabilityMatrix(system)\n self.assertEqual(Co.shape, (2, 2))\n self.assertFalse(IsControllable(system))\n self.assertFalse(IsControllable(system, 1e-06))\n self.assertFalse(IsStabilizable(sys=system))\n self.assertFalse(IsStabilizable(sys=system, threshold=1e-06))\n Ob = ObservabilityMatrix(system)\n self.assertEqual(Ob.shape, (2, 2))\n self.assertFalse(IsObservable(system))\n self.assertFalse(IsDetectable(sys=system))\n self.assertFalse(IsDetectable(sys=system, threshold=1e-06))\n system = AffineSystem(A, B, f0, C, D, y0, 0.1)\n 
self.assertEqual(system.get_input_port(0), system.get_input_port())\n self.assertEqual(system.get_output_port(0), system.get_output_port())\n context = system.CreateDefaultContext()\n self.assertEqual(system.get_input_port(0).size(), 1)\n self.assertEqual(context.get_discrete_state_vector().size(), 2)\n self.assertEqual(system.get_output_port(0).size(), 1)\n self.assertTrue((system.A() == A).all())\n self.assertTrue((system.B() == B).all())\n self.assertTrue((system.f0() == f0).all())\n self.assertTrue((system.C() == C).all())\n self.assertEqual(system.D(), D)\n self.assertEqual(system.y0(), y0)\n self.assertEqual(system.time_period(), 0.1)\n system.get_input_port(0).FixValue(context, 0)\n linearized = Linearize(system, context)\n self.assertTrue((linearized.A() == A).all())\n taylor = FirstOrderTaylorApproximation(system, context)\n self.assertTrue((taylor.y0() == y0).all())\n new_A = np.array([[1, 2], [3, 4]])\n new_B = np.array([[5], [6]])\n new_f0 = np.array([[7], [8]])\n new_C = np.array([[9, 10]])\n new_D = np.array([[11]])\n new_y0 = np.array([12])\n system.UpdateCoefficients(A=new_A, B=new_B, f0=new_f0, C=new_C, D=\n new_D, y0=new_y0)\n np.testing.assert_equal(new_A, system.A())\n np.testing.assert_equal(new_B, system.B())\n np.testing.assert_equal(new_f0.flatten(), system.f0())\n np.testing.assert_equal(new_C, system.C())\n np.testing.assert_equal(new_D, system.D())\n np.testing.assert_equal(new_y0, system.y0())\n system = MatrixGain(D=A)\n self.assertTrue((system.D() == A).all())\n system = TrajectoryAffineSystem(PiecewisePolynomial(A),\n PiecewisePolynomial(B), PiecewisePolynomial(f0),\n PiecewisePolynomial(C), PiecewisePolynomial(D),\n PiecewisePolynomial(y0), 0.1)\n self.assertEqual(system.get_input_port(0), system.get_input_port())\n self.assertEqual(system.get_output_port(0), system.get_output_port())\n context = system.CreateDefaultContext()\n self.assertEqual(system.get_input_port(0).size(), 1)\n self.assertEqual(context.get_discrete_state_vector().size(), 2)\n self.assertEqual(system.get_output_port(0).size(), 1)\n for t in np.linspace(0.0, 1.0, 5):\n self.assertTrue((system.A(t) == A).all())\n self.assertTrue((system.B(t) == B).all())\n self.assertTrue((system.f0(t) == f0).all())\n self.assertTrue((system.C(t) == C).all())\n self.assertEqual(system.D(t), D)\n self.assertEqual(system.y0(t), y0)\n self.assertEqual(system.time_period(), 0.1)\n x0 = np.array([1, 2])\n system.configure_default_state(x0=x0)\n system.SetDefaultContext(context)\n np.testing.assert_equal(context.get_discrete_state_vector().\n CopyToVector(), x0)\n generator = RandomGenerator()\n system.SetRandomContext(context, generator)\n np.testing.assert_equal(context.get_discrete_state_vector().\n CopyToVector(), x0)\n system.configure_random_state(covariance=np.eye(2))\n system.SetRandomContext(context, generator)\n self.assertNotEqual(context.get_discrete_state_vector().\n CopyToVector()[1], x0[1])\n system = TrajectoryLinearSystem(A=PiecewisePolynomial(A), B=\n PiecewisePolynomial(B), C=PiecewisePolynomial(C), D=\n PiecewisePolynomial(D), time_period=0.1)\n self.assertEqual(system.time_period(), 0.1)\n system.configure_default_state(x0=np.array([1, 2]))\n system.configure_random_state(covariance=np.eye(2))\n\n def test_linear_affine_system_empty_matrices(self):\n\n def CheckSizes(system, num_states, num_inputs, num_outputs):\n self.assertEqual(system.num_continuous_states(), num_states)\n self.assertEqual(system.num_inputs(), num_inputs)\n self.assertEqual(system.num_outputs(), num_outputs)\n system = 
AffineSystem(y0=[2, 1])\n CheckSizes(system, num_states=0, num_inputs=0, num_outputs=2)\n system = AffineSystem(D=np.eye(2))\n CheckSizes(system, num_states=0, num_inputs=2, num_outputs=2)\n system = LinearSystem(D=np.eye(2))\n CheckSizes(system, num_states=0, num_inputs=2, num_outputs=2)\n system = AffineSystem(D=np.eye(2), y0=[1, 2])\n CheckSizes(system, num_states=0, num_inputs=2, num_outputs=2)\n system = LinearSystem(B=np.eye(2))\n CheckSizes(system, num_states=2, num_inputs=2, num_outputs=0)\n\n def test_linear_system_zero_size(self):\n num_x = 0\n num_y = 2\n num_u = 2\n A = np.zeros((num_x, num_x))\n B = np.zeros((num_x, num_u))\n C = np.zeros((num_y, num_x))\n D = np.zeros((num_y, num_u))\n self.assertIsNotNone(LinearSystem(A, B, C, D))\n\n @numpy_compare.check_nonsymbolic_types\n def test_linear_transform_density(self, T):\n dut = LinearTransformDensity_[T](distribution=RandomDistribution.\n kGaussian, input_size=3, output_size=3)\n w_in = np.array([T(0.5), T(0.1), T(1.5)])\n context = dut.CreateDefaultContext()\n dut.get_input_port_w_in().FixValue(context, w_in)\n self.assertEqual(dut.get_input_port_A().size(), 9)\n self.assertEqual(dut.get_input_port_b().size(), 3)\n self.assertEqual(dut.get_distribution(), RandomDistribution.kGaussian)\n A = np.array([[T(0.5), T(1), T(2)], [T(1), T(2), T(3)], [T(3), T(4),\n T(5)]])\n dut.FixConstantA(context=context, A=A)\n b = np.array([T(1), T(2), T(3)])\n dut.FixConstantB(context=context, b=b)\n dut.CalcDensity(context=context)\n self.assertEqual(dut.get_output_port_w_out().size(), 3)\n self.assertEqual(dut.get_output_port_w_out_density().size(), 1)\n\n def test_vector_pass_through(self):\n model_value = BasicVector([1.0, 2, 3])\n system = PassThrough(vector_size=model_value.size())\n context = system.CreateDefaultContext()\n system.get_input_port(0).FixValue(context, model_value)\n output = system.AllocateOutput()\n input_eval = system.EvalVectorInput(context, 0)\n compare_value(self, input_eval, model_value)\n system.CalcOutput(context, output)\n output_value = output.get_vector_data(0)\n compare_value(self, output_value, model_value)\n\n def test_default_vector_pass_through(self):\n model_value = [1.0, 2, 3]\n system = PassThrough(value=model_value)\n context = system.CreateDefaultContext()\n np.testing.assert_array_equal(model_value, system.get_output_port()\n .Eval(context))\n\n def test_abstract_pass_through(self):\n model_value = Value('Hello world')\n system = PassThrough(abstract_model_value=model_value)\n context = system.CreateDefaultContext()\n system.get_input_port(0).FixValue(context, model_value)\n output = system.AllocateOutput()\n input_eval = system.EvalAbstractInput(context, 0)\n compare_value(self, input_eval, model_value)\n system.CalcOutput(context, output)\n output_value = output.get_data(0)\n compare_value(self, output_value, model_value)\n\n def test_port_switch(self):\n system = PortSwitch(vector_size=2)\n a = system.DeclareInputPort(name='a')\n system.DeclareInputPort(name='b')\n context = system.CreateDefaultContext()\n self.assertIsInstance(a, InputPort)\n system.get_port_selector_input_port().FixValue(context, a.get_index())\n\n def test_first_order_low_pass_filter(self):\n filter1 = FirstOrderLowPassFilter(time_constant=3.0, size=4)\n self.assertEqual(filter1.get_time_constant(), 3.0)\n alpha = np.array([1, 2, 3])\n filter2 = FirstOrderLowPassFilter(time_constants=alpha)\n np.testing.assert_array_equal(filter2.get_time_constants_vector(),\n alpha)\n context = filter2.CreateDefaultContext()\n 
filter2.set_initial_output_value(context, [0.0, -0.2, 0.4])\n <mask token>\n\n def test_saturation(self):\n system = Saturation((0.0, -1.0, 3.0), (1.0, 2.0, 4.0))\n context = system.CreateDefaultContext()\n output = system.AllocateOutput()\n\n def mytest(input, expected):\n system.get_input_port(0).FixValue(context, input)\n system.CalcOutput(context, output)\n self.assertTrue(np.allclose(output.get_vector_data(0).\n CopyToVector(), expected))\n mytest((-5.0, 5.0, 4.0), (0.0, 2.0, 4.0))\n mytest((0.4, 0.0, 3.5), (0.4, 0.0, 3.5))\n\n def test_trajectory_source(self):\n ppt = PiecewisePolynomial.FirstOrderHold([0.0, 1.0], [[2.0, 3.0], [\n 2.0, 1.0]])\n system = TrajectorySource(trajectory=ppt, output_derivative_order=0,\n zero_derivatives_beyond_limits=True)\n context = system.CreateDefaultContext()\n output = system.AllocateOutput()\n\n def mytest(input, expected):\n context.SetTime(input)\n system.CalcOutput(context, output)\n self.assertTrue(np.allclose(output.get_vector_data(0).\n CopyToVector(), expected))\n mytest(0.0, (2.0, 2.0))\n mytest(0.5, (2.5, 1.5))\n mytest(1.0, (3.0, 1.0))\n ppt2 = PiecewisePolynomial.FirstOrderHold([0.0, 1.0], [[4.0, 6.0],\n [4.0, 2.0]])\n system.UpdateTrajectory(trajectory=ppt2)\n mytest(0.0, (4.0, 4.0))\n mytest(0.5, (5.0, 3.0))\n mytest(1.0, (6.0, 2.0))\n\n def test_symbolic_vector_system(self):\n t = Variable('t')\n x = [Variable('x0'), Variable('x1')]\n u = [Variable('u0'), Variable('u1')]\n system = SymbolicVectorSystem(time=t, state=x, input=u, dynamics=[x\n [0] + x[1], t], output=[u[1]], time_period=0.0)\n context = system.CreateDefaultContext()\n self.assertEqual(context.num_continuous_states(), 2)\n self.assertEqual(context.num_discrete_state_groups(), 0)\n self.assertEqual(system.get_input_port(0).size(), 2)\n self.assertEqual(system.get_output_port(0).size(), 1)\n self.assertEqual(context.num_abstract_parameters(), 0)\n self.assertEqual(context.num_numeric_parameter_groups(), 0)\n self.assertTrue(system.dynamics_for_variable(x[0]).EqualTo(x[0] + x[1])\n )\n self.assertTrue(system.dynamics_for_variable(x[1]).EqualTo(t))\n\n def test_symbolic_vector_system_parameters(self):\n t = Variable('t')\n x = [Variable('x0'), Variable('x1')]\n u = [Variable('u0'), Variable('u1')]\n p = [Variable('p0'), Variable('p1')]\n system = SymbolicVectorSystem(time=t, state=x, input=u, parameter=p,\n dynamics=[p[0] * x[0] + x[1] + p[1], t], output=[u[1]],\n time_period=0.0)\n context = system.CreateDefaultContext()\n self.assertEqual(context.num_continuous_states(), 2)\n self.assertEqual(context.num_discrete_state_groups(), 0)\n self.assertEqual(system.get_input_port(0).size(), 2)\n self.assertEqual(system.get_output_port(0).size(), 1)\n self.assertEqual(context.num_abstract_parameters(), 0)\n self.assertEqual(context.num_numeric_parameter_groups(), 1)\n self.assertEqual(context.get_numeric_parameter(0).size(), 2)\n self.assertTrue(system.dynamics_for_variable(x[0]).EqualTo(p[0] * x\n [0] + x[1] + p[1]))\n self.assertTrue(system.dynamics_for_variable(x[1]).EqualTo(t))\n\n def test_wrap_to_system(self):\n system = WrapToSystem(2)\n system.set_interval(1, 1.0, 2.0)\n context = system.CreateDefaultContext()\n output = system.AllocateOutput()\n\n def mytest(input, expected):\n system.get_input_port(0).FixValue(context, input)\n system.CalcOutput(context, output)\n self.assertTrue(np.allclose(output.get_vector_data(0).\n CopyToVector(), expected))\n mytest((-1.5, 0.5), (-1.5, 1.5))\n mytest((0.2, 0.3), (0.2, 1.3))\n\n def test_demultiplexer(self):\n demux = 
Demultiplexer(size=4)\n context = demux.CreateDefaultContext()\n self.assertEqual(demux.num_input_ports(), 1)\n self.assertEqual(demux.num_output_ports(), 4)\n numpy_compare.assert_equal(demux.get_output_ports_sizes(), [1, 1, 1, 1]\n )\n input_vec = np.array([1.0, 2.0, 3.0, 4.0])\n demux.get_input_port(0).FixValue(context, input_vec)\n output = demux.AllocateOutput()\n demux.CalcOutput(context, output)\n for i in range(4):\n self.assertTrue(np.allclose(output.get_vector_data(i).get_value\n (), input_vec[i]))\n demux = Demultiplexer(size=4, output_ports_size=2)\n context = demux.CreateDefaultContext()\n self.assertEqual(demux.num_input_ports(), 1)\n self.assertEqual(demux.num_output_ports(), 2)\n numpy_compare.assert_equal(demux.get_output_ports_sizes(), [2, 2])\n demux.get_input_port(0).FixValue(context, input_vec)\n output = demux.AllocateOutput()\n demux.CalcOutput(context, output)\n for i in range(2):\n self.assertTrue(np.allclose(output.get_vector_data(i).get_value\n (), input_vec[2 * i:2 * i + 2]))\n output_ports_sizes = np.array([1, 2, 1])\n num_output_ports = output_ports_sizes.size\n input_vec = np.array([1.0, 2.0, 3.0, 4.0])\n demux = Demultiplexer(output_ports_sizes=output_ports_sizes)\n context = demux.CreateDefaultContext()\n self.assertEqual(demux.num_input_ports(), 1)\n self.assertEqual(demux.num_output_ports(), num_output_ports)\n numpy_compare.assert_equal(demux.get_output_ports_sizes(),\n output_ports_sizes)\n demux.get_input_port(0).FixValue(context, input_vec)\n output = demux.AllocateOutput()\n demux.CalcOutput(context, output)\n output_port_start = 0\n for i in range(num_output_ports):\n output_port_size = output.get_vector_data(i).size()\n self.assertTrue(np.allclose(output.get_vector_data(i).get_value\n (), input_vec[output_port_start:output_port_start +\n output_port_size]))\n output_port_start += output_port_size\n\n def test_multiplexer(self):\n my_vector = MyVector2(data=[1.0, 2.0])\n test_cases = [dict(has_vector=False, mux=Multiplexer(\n num_scalar_inputs=4), data=[[5.0], [3.0], [4.0], [2.0]]), dict(\n has_vector=False, mux=Multiplexer(input_sizes=[2, 3]), data=[[\n 8.0, 4.0], [3.0, 6.0, 9.0]]), dict(has_vector=True, mux=\n Multiplexer(model_vector=my_vector), data=[[42.0], [3.0]])]\n for case in test_cases:\n mux = case['mux']\n port_size = sum([len(vec) for vec in case['data']])\n self.assertEqual(mux.get_output_port(0).size(), port_size)\n context = mux.CreateDefaultContext()\n output = mux.AllocateOutput()\n num_ports = len(case['data'])\n self.assertEqual(context.num_input_ports(), num_ports)\n for j, vec in enumerate(case['data']):\n mux.get_input_port(j).FixValue(context, vec)\n mux.CalcOutput(context, output)\n self.assertTrue(np.allclose(output.get_vector_data(0).get_value\n (), [elem for vec in case['data'] for elem in vec]))\n if case['has_vector']:\n value = output.get_vector_data(0)\n self.assertTrue(isinstance(value, MyVector2))\n\n def test_multilayer_perceptron(self):\n mlp = MultilayerPerceptron(layers=[1, 2, 3], activation_type=\n PerceptronActivationType.kReLU)\n self.assertEqual(mlp.get_input_port().size(), 1)\n self.assertEqual(mlp.get_output_port().size(), 3)\n context = mlp.CreateDefaultContext()\n params = np.zeros((mlp.num_parameters(), 1))\n self.assertEqual(mlp.num_parameters(), 13)\n self.assertEqual(mlp.layers(), [1, 2, 3])\n self.assertEqual(mlp.activation_type(layer=0),\n PerceptronActivationType.kReLU)\n self.assertEqual(len(mlp.GetParameters(context=context)), mlp.\n num_parameters())\n mlp.SetWeights(context=context, layer=0, 
W=np.array([[1], [2]]))\n mlp.SetBiases(context=context, layer=0, b=[3, 4])\n np.testing.assert_array_equal(mlp.GetWeights(context=context, layer\n =0), np.array([[1], [2]]))\n np.testing.assert_array_equal(mlp.GetBiases(context=context, layer=\n 0), np.array([3, 4]))\n params = np.zeros(mlp.num_parameters())\n mlp.SetWeights(params=params, layer=0, W=np.array([[1], [2]]))\n mlp.SetBiases(params=params, layer=0, b=[3, 4])\n np.testing.assert_array_equal(mlp.GetWeights(params=params, layer=0\n ), np.array([[1], [2]]))\n np.testing.assert_array_equal(mlp.GetBiases(params=params, layer=0),\n np.array([3, 4]))\n mutable_params = mlp.GetMutableParameters(context=context)\n mutable_params[:] = 3.0\n np.testing.assert_array_equal(mlp.GetParameters(context), np.full(\n mlp.num_parameters(), 3.0))\n global called_loss\n called_loss = False\n\n def silly_loss(Y, dloss_dY):\n global called_loss\n called_loss = True\n dloss_dY[:] = 1\n return Y.sum()\n dloss_dparams = np.zeros((13,))\n generator = RandomGenerator(23)\n mlp.SetRandomContext(context, generator)\n mlp.Backpropagation(context=context, X=np.array([1, 3, 4]).reshape(\n (1, 3)), loss=silly_loss, dloss_dparams=dloss_dparams)\n self.assertTrue(called_loss)\n self.assertTrue(dloss_dparams.any())\n dloss_dparams = np.zeros((13,))\n mlp.BackpropagationMeanSquaredError(context=context, X=np.array([1,\n 3, 4]).reshape((1, 3)), Y_desired=np.eye(3), dloss_dparams=\n dloss_dparams)\n self.assertTrue(dloss_dparams.any())\n Y = np.asfortranarray(np.eye(3))\n mlp.BatchOutput(context=context, X=np.array([[0.1, 0.3, 0.4]]), Y=Y)\n self.assertFalse(np.allclose(Y, np.eye(3)))\n Y2 = mlp.BatchOutput(context=context, X=np.array([[0.1, 0.3, 0.4]]))\n np.testing.assert_array_equal(Y, Y2)\n mlp2 = MultilayerPerceptron(layers=[3, 2, 1], activation_types=[\n PerceptronActivationType.kReLU, PerceptronActivationType.kTanh])\n self.assertEqual(mlp2.activation_type(0), PerceptronActivationType.\n kReLU)\n self.assertEqual(mlp2.activation_type(1), PerceptronActivationType.\n kTanh)\n Y = np.asfortranarray(np.full((1, 3), 2.4))\n dYdX = np.asfortranarray(np.full((3, 3), 5.3))\n context2 = mlp2.CreateDefaultContext()\n mlp2.BatchOutput(context=context2, X=np.eye(3), Y=Y, dYdX=dYdX)\n np.testing.assert_array_almost_equal(Y, np.zeros((1, 3)))\n np.testing.assert_array_almost_equal(dYdX, np.zeros((3, 3)))\n mlp = MultilayerPerceptron(use_sin_cos_for_input=[True, False],\n remaining_layers=[3, 2], activation_types=[\n PerceptronActivationType.kReLU, PerceptronActivationType.kTanh])\n self.assertEqual(mlp.get_input_port().size(), 2)\n np.testing.assert_array_equal(mlp.layers(), [3, 3, 2])\n\n def test_random_source(self):\n source = RandomSource(distribution=RandomDistribution.kUniform,\n num_outputs=2, sampling_interval_sec=0.01)\n self.assertEqual(source.get_output_port(0).size(), 2)\n builder = DiagramBuilder()\n AddRandomInputs(sampling_interval_sec=0.01, builder=builder)\n builder_ad = DiagramBuilder_[AutoDiffXd]()\n AddRandomInputs(sampling_interval_sec=0.01, builder=builder_ad)\n\n def test_constant_vector_source(self):\n source = ConstantVectorSource(source_value=[1.0, 2.0])\n context = source.CreateDefaultContext()\n source.get_source_value(context)\n source.get_mutable_source_value(context)\n\n def test_ctor_api(self):\n \"\"\"Tests construction of systems for systems whose executions semantics\n are not tested above.\n \"\"\"\n ConstantValueSource(Value('Hello world'))\n DiscreteTimeDelay(update_sec=0.1, delay_time_steps=5, vector_size=2)\n 
DiscreteTimeDelay(update_sec=0.1, delay_time_steps=5,\n abstract_model_value=Value('Hello world'))\n with catch_drake_warnings(expected_count=2) as w:\n DiscreteTimeDelay(update_sec=0.1, delay_timesteps=5, vector_size=2)\n DiscreteTimeDelay(update_sec=0.1, delay_timesteps=5,\n abstract_model_value=Value('Hello world'))\n ZeroOrderHold(period_sec=0.1, offset_sec=0.0, vector_size=2)\n dut = ZeroOrderHold(period_sec=1.0, offset_sec=0.25,\n abstract_model_value=Value('Hello world'))\n self.assertEqual(dut.period(), 1.0)\n self.assertEqual(dut.offset(), 0.25)\n\n def test_shared_pointer_system_ctor(self):\n dut = SharedPointerSystem(value_to_hold=[1, 2, 3])\n readback = dut.get()\n self.assertListEqual(readback, [1, 2, 3])\n del dut\n self.assertListEqual(readback, [1, 2, 3])\n\n def test_shared_pointer_system_builder(self):\n builder = DiagramBuilder()\n self.assertListEqual(SharedPointerSystem.AddToBuilder(builder=\n builder, value_to_hold=[1, 2, 3]), [1, 2, 3])\n diagram = builder.Build()\n del builder\n readback = diagram.GetSystems()[0].get()\n self.assertListEqual(readback, [1, 2, 3])\n del diagram\n self.assertListEqual(readback, [1, 2, 3])\n\n def test_sine(self):\n sine_source = Sine(amplitude=1, frequency=2, phase=3, size=1,\n is_time_based=True)\n self.assertEqual(sine_source.get_output_port(0).size(), 1)\n self.assertEqual(sine_source.get_output_port(1).size(), 1)\n self.assertEqual(sine_source.get_output_port(2).size(), 1)\n sine_source = Sine(amplitude=1, frequency=2, phase=3, size=3,\n is_time_based=True)\n self.assertEqual(sine_source.get_output_port(0).size(), 3)\n self.assertEqual(sine_source.get_output_port(1).size(), 3)\n self.assertEqual(sine_source.get_output_port(2).size(), 3)\n sine_source = Sine(amplitudes=np.ones(2), frequencies=np.ones(2),\n phases=np.ones(2), is_time_based=True)\n self.assertEqual(sine_source.get_output_port(0).size(), 2)\n self.assertEqual(sine_source.get_output_port(1).size(), 2)\n self.assertEqual(sine_source.get_output_port(2).size(), 2)\n\n def test_discrete_derivative(self):\n discrete_derivative = DiscreteDerivative(num_inputs=5, time_step=0.5)\n self.assertEqual(discrete_derivative.get_input_port(0).size(), 5)\n self.assertEqual(discrete_derivative.get_output_port(0).size(), 5)\n self.assertEqual(discrete_derivative.time_step(), 0.5)\n self.assertTrue(discrete_derivative.suppress_initial_transient())\n discrete_derivative = DiscreteDerivative(num_inputs=5, time_step=\n 0.5, suppress_initial_transient=False)\n self.assertFalse(discrete_derivative.suppress_initial_transient())\n\n def test_state_interpolator_with_discrete_derivative(self):\n state_interpolator = StateInterpolatorWithDiscreteDerivative(\n num_positions=5, time_step=0.4)\n self.assertEqual(state_interpolator.get_input_port(0).size(), 5)\n self.assertEqual(state_interpolator.get_output_port(0).size(), 10)\n self.assertTrue(state_interpolator.suppress_initial_transient())\n context = state_interpolator.CreateDefaultContext()\n state_interpolator.set_initial_position(context=context, position=5 *\n [1.1])\n np.testing.assert_array_equal(context.get_discrete_state(0).\n CopyToVector(), np.array(5 * [1.1]))\n np.testing.assert_array_equal(context.get_discrete_state(1).\n CopyToVector(), np.array(5 * [1.1]))\n context = state_interpolator.CreateDefaultContext()\n state_interpolator.set_initial_position(state=context.get_state(),\n position=5 * [1.3])\n np.testing.assert_array_equal(context.get_discrete_state(0).\n CopyToVector(), np.array(5 * [1.3]))\n 
np.testing.assert_array_equal(context.get_discrete_state(1).\n CopyToVector(), np.array(5 * [1.3]))\n state_interpolator = StateInterpolatorWithDiscreteDerivative(\n num_positions=5, time_step=0.4, suppress_initial_transient=True)\n self.assertTrue(state_interpolator.suppress_initial_transient())\n\n @numpy_compare.check_nonsymbolic_types\n def test_log_vector_output(self, T):\n builder = DiagramBuilder_[T]()\n kSize = 1\n integrator = builder.AddSystem(Integrator_[T](kSize))\n port = integrator.get_output_port(0)\n loggers = []\n loggers.append(LogVectorOutput(port, builder))\n loggers.append(LogVectorOutput(src=port, builder=builder))\n loggers.append(LogVectorOutput(port, builder, 0.125))\n loggers.append(LogVectorOutput(src=port, builder=builder,\n publish_period=0.125))\n loggers.append(LogVectorOutput(port, builder, {TriggerType.kForced}))\n loggers.append(LogVectorOutput(src=port, builder=builder,\n publish_triggers={TriggerType.kForced}))\n loggers.append(LogVectorOutput(port, builder, {TriggerType.\n kPeriodic}, 0.125))\n loggers.append(LogVectorOutput(src=port, builder=builder,\n publish_triggers={TriggerType.kPeriodic}, publish_period=0.125))\n diagram = builder.Build()\n context = diagram.CreateDefaultContext()\n self.assertTrue(all(logger.FindLog(context).num_samples() == 0 for\n logger in loggers))\n <mask token>\n\n @numpy_compare.check_nonsymbolic_types\n def test_vector_log_sink(self, T):\n builder = DiagramBuilder_[T]()\n kSize = 1\n constructors = [VectorLogSink_[T]]\n loggers = []\n if T == float:\n constructors.append(VectorLogSink)\n for constructor in constructors:\n loggers.append(builder.AddSystem(constructor(kSize)))\n loggers.append(builder.AddSystem(constructor(input_size=kSize)))\n loggers.append(builder.AddSystem(constructor(kSize, 0.125)))\n loggers.append(builder.AddSystem(constructor(input_size=kSize,\n publish_period=0.125)))\n loggers.append(builder.AddSystem(constructor(kSize, {\n TriggerType.kForced})))\n loggers.append(builder.AddSystem(constructor(input_size=kSize,\n publish_triggers={TriggerType.kForced})))\n loggers.append(builder.AddSystem(constructor(kSize, {\n TriggerType.kPeriodic}, 0.125)))\n loggers.append(builder.AddSystem(constructor(input_size=kSize,\n publish_triggers={TriggerType.kPeriodic}, publish_period=\n 0.125)))\n diagram = builder.Build()\n context = diagram.CreateDefaultContext()\n self.assertTrue(all(logger.FindLog(context) == logger.\n FindMutableLog(context) for logger in loggers))\n loggers_and_contexts = [(x, x.GetMyContextFromRoot(context)) for x in\n loggers]\n self.assertTrue(all(logger.GetLog(logger_context) == logger.\n GetMutableLog(logger_context) for logger, logger_context in\n loggers_and_contexts))\n self.assertTrue(all(logger.GetLog(logger_context) == logger.FindLog\n (context) for logger, logger_context in loggers_and_contexts))\n",
"step-5": "import gc\nimport unittest\nimport numpy as np\n\nfrom pydrake.autodiffutils import AutoDiffXd\nfrom pydrake.common import RandomDistribution, RandomGenerator\nfrom pydrake.common.test_utilities import numpy_compare\nfrom pydrake.common.test_utilities.deprecation import catch_drake_warnings\nfrom pydrake.common.value import Value\nfrom pydrake.symbolic import Expression, Variable\nfrom pydrake.systems.framework import (\n BasicVector,\n DiagramBuilder,\n DiagramBuilder_,\n InputPort,\n TriggerType,\n VectorBase,\n)\nfrom pydrake.systems.test.test_util import (\n MyVector2,\n)\nfrom pydrake.systems.primitives import (\n Adder, Adder_,\n AddRandomInputs,\n AffineSystem, AffineSystem_,\n ConstantValueSource, ConstantValueSource_,\n ConstantVectorSource, ConstantVectorSource_,\n ControllabilityMatrix,\n Demultiplexer, Demultiplexer_,\n DiscreteDerivative, DiscreteDerivative_,\n DiscreteTimeDelay, DiscreteTimeDelay_,\n FirstOrderLowPassFilter,\n FirstOrderTaylorApproximation,\n Gain, Gain_,\n Integrator, Integrator_,\n IsControllable,\n IsDetectable,\n IsObservable,\n IsStabilizable,\n Linearize,\n LinearSystem, LinearSystem_,\n LinearTransformDensity, LinearTransformDensity_,\n LogVectorOutput,\n MatrixGain,\n Multiplexer, Multiplexer_,\n MultilayerPerceptron, MultilayerPerceptron_,\n ObservabilityMatrix,\n PassThrough, PassThrough_,\n PerceptronActivationType,\n PortSwitch, PortSwitch_,\n RandomSource,\n Saturation, Saturation_,\n SharedPointerSystem, SharedPointerSystem_,\n Sine, Sine_,\n StateInterpolatorWithDiscreteDerivative,\n StateInterpolatorWithDiscreteDerivative_,\n SymbolicVectorSystem, SymbolicVectorSystem_,\n TrajectoryAffineSystem, TrajectoryAffineSystem_,\n TrajectoryLinearSystem, TrajectoryLinearSystem_,\n TrajectorySource, TrajectorySource_,\n VectorLog, VectorLogSink, VectorLogSink_,\n WrapToSystem, WrapToSystem_,\n ZeroOrderHold, ZeroOrderHold_,\n)\nfrom pydrake.trajectories import PiecewisePolynomial\n\n\ndef compare_value(test, a, b):\n # Compares a vector or abstract value.\n if isinstance(a, VectorBase):\n test.assertTrue(np.allclose(a.get_value(), b.get_value()))\n else:\n test.assertEqual(type(a.get_value()), type(b.get_value()))\n test.assertEqual(a.get_value(), b.get_value())\n\n\nclass TestGeneral(unittest.TestCase):\n def _check_instantiations(self, template, supports_symbolic=True):\n default_cls = template[None]\n self.assertTrue(template[float] is default_cls)\n self.assertTrue(template[AutoDiffXd] is not default_cls)\n if supports_symbolic:\n self.assertTrue(template[Expression] is not default_cls)\n\n def test_instantiations(self):\n # TODO(eric.cousineau): Refine tests once NumPy functionality is\n # resolved for dtype=object, or dtype=custom is used.\n self._check_instantiations(Adder_)\n self._check_instantiations(AffineSystem_)\n self._check_instantiations(ConstantValueSource_)\n self._check_instantiations(ConstantVectorSource_)\n self._check_instantiations(Demultiplexer_)\n self._check_instantiations(DiscreteDerivative_)\n self._check_instantiations(DiscreteTimeDelay_)\n self._check_instantiations(Gain_)\n self._check_instantiations(Integrator_)\n self._check_instantiations(LinearSystem_)\n self._check_instantiations(LinearTransformDensity_,\n supports_symbolic=False)\n self._check_instantiations(Multiplexer_)\n self._check_instantiations(MultilayerPerceptron_)\n self._check_instantiations(PassThrough_)\n self._check_instantiations(PortSwitch_)\n self._check_instantiations(Saturation_)\n self._check_instantiations(SharedPointerSystem_)\n 
self._check_instantiations(Sine_)\n self._check_instantiations(StateInterpolatorWithDiscreteDerivative_)\n self._check_instantiations(SymbolicVectorSystem_)\n self._check_instantiations(TrajectoryAffineSystem_,\n supports_symbolic=False)\n self._check_instantiations(TrajectoryLinearSystem_,\n supports_symbolic=False)\n self._check_instantiations(TrajectorySource_)\n self._check_instantiations(VectorLogSink_)\n self._check_instantiations(WrapToSystem_)\n self._check_instantiations(ZeroOrderHold_)\n\n def test_linear_affine_system(self):\n # Just make sure linear system is spelled correctly.\n A = np.identity(2)\n B = np.array([[0], [1]])\n f0 = np.array([[0], [0]])\n C = np.array([[0, 1]])\n D = [1]\n y0 = [0]\n system = LinearSystem(A, B, C, D)\n context = system.CreateDefaultContext()\n self.assertEqual(system.get_input_port(0).size(), 1)\n self.assertEqual(context\n .get_mutable_continuous_state_vector().size(), 2)\n self.assertEqual(system.get_output_port(0).size(), 1)\n self.assertTrue((system.A() == A).all())\n self.assertTrue((system.B() == B).all())\n self.assertTrue((system.f0() == f0).all())\n self.assertTrue((system.C() == C).all())\n self.assertEqual(system.D(), D)\n self.assertEqual(system.y0(), y0)\n self.assertEqual(system.time_period(), 0.)\n\n x0 = np.array([1, 2])\n system.configure_default_state(x0=x0)\n system.SetDefaultContext(context)\n np.testing.assert_equal(\n context.get_continuous_state_vector().CopyToVector(), x0)\n generator = RandomGenerator()\n system.SetRandomContext(context, generator)\n np.testing.assert_equal(\n context.get_continuous_state_vector().CopyToVector(), x0)\n system.configure_random_state(covariance=np.eye(2))\n system.SetRandomContext(context, generator)\n self.assertNotEqual(\n context.get_continuous_state_vector().CopyToVector()[1], x0[1])\n\n Co = ControllabilityMatrix(system)\n self.assertEqual(Co.shape, (2, 2))\n self.assertFalse(IsControllable(system))\n self.assertFalse(IsControllable(system, 1e-6))\n self.assertFalse(IsStabilizable(sys=system))\n self.assertFalse(IsStabilizable(sys=system, threshold=1e-6))\n Ob = ObservabilityMatrix(system)\n self.assertEqual(Ob.shape, (2, 2))\n self.assertFalse(IsObservable(system))\n self.assertFalse(IsDetectable(sys=system))\n self.assertFalse(IsDetectable(sys=system, threshold=1e-6))\n\n system = AffineSystem(A, B, f0, C, D, y0, .1)\n self.assertEqual(system.get_input_port(0), system.get_input_port())\n self.assertEqual(system.get_output_port(0), system.get_output_port())\n context = system.CreateDefaultContext()\n self.assertEqual(system.get_input_port(0).size(), 1)\n self.assertEqual(context.get_discrete_state_vector().size(), 2)\n self.assertEqual(system.get_output_port(0).size(), 1)\n self.assertTrue((system.A() == A).all())\n self.assertTrue((system.B() == B).all())\n self.assertTrue((system.f0() == f0).all())\n self.assertTrue((system.C() == C).all())\n self.assertEqual(system.D(), D)\n self.assertEqual(system.y0(), y0)\n self.assertEqual(system.time_period(), .1)\n\n system.get_input_port(0).FixValue(context, 0)\n linearized = Linearize(system, context)\n self.assertTrue((linearized.A() == A).all())\n taylor = FirstOrderTaylorApproximation(system, context)\n self.assertTrue((taylor.y0() == y0).all())\n\n new_A = np.array([[1, 2], [3, 4]])\n new_B = np.array([[5], [6]])\n new_f0 = np.array([[7], [8]])\n new_C = np.array([[9, 10]])\n new_D = np.array([[11]])\n new_y0 = np.array([12])\n system.UpdateCoefficients(\n A=new_A, B=new_B, f0=new_f0, C=new_C, D=new_D, y0=new_y0\n )\n 
np.testing.assert_equal(new_A, system.A())\n np.testing.assert_equal(new_B, system.B())\n np.testing.assert_equal(new_f0.flatten(), system.f0())\n np.testing.assert_equal(new_C, system.C())\n np.testing.assert_equal(new_D, system.D())\n np.testing.assert_equal(new_y0, system.y0())\n\n system = MatrixGain(D=A)\n self.assertTrue((system.D() == A).all())\n\n system = TrajectoryAffineSystem(\n PiecewisePolynomial(A),\n PiecewisePolynomial(B),\n PiecewisePolynomial(f0),\n PiecewisePolynomial(C),\n PiecewisePolynomial(D),\n PiecewisePolynomial(y0),\n .1)\n self.assertEqual(system.get_input_port(0), system.get_input_port())\n self.assertEqual(system.get_output_port(0), system.get_output_port())\n context = system.CreateDefaultContext()\n self.assertEqual(system.get_input_port(0).size(), 1)\n self.assertEqual(context.get_discrete_state_vector().size(), 2)\n self.assertEqual(system.get_output_port(0).size(), 1)\n for t in np.linspace(0., 1., 5):\n self.assertTrue((system.A(t) == A).all())\n self.assertTrue((system.B(t) == B).all())\n self.assertTrue((system.f0(t) == f0).all())\n self.assertTrue((system.C(t) == C).all())\n self.assertEqual(system.D(t), D)\n self.assertEqual(system.y0(t), y0)\n self.assertEqual(system.time_period(), .1)\n x0 = np.array([1, 2])\n system.configure_default_state(x0=x0)\n system.SetDefaultContext(context)\n np.testing.assert_equal(\n context.get_discrete_state_vector().CopyToVector(), x0)\n generator = RandomGenerator()\n system.SetRandomContext(context, generator)\n np.testing.assert_equal(\n context.get_discrete_state_vector().CopyToVector(), x0)\n system.configure_random_state(covariance=np.eye(2))\n system.SetRandomContext(context, generator)\n self.assertNotEqual(\n context.get_discrete_state_vector().CopyToVector()[1], x0[1])\n\n system = TrajectoryLinearSystem(\n A=PiecewisePolynomial(A),\n B=PiecewisePolynomial(B),\n C=PiecewisePolynomial(C),\n D=PiecewisePolynomial(D),\n time_period=0.1)\n self.assertEqual(system.time_period(), .1)\n system.configure_default_state(x0=np.array([1, 2]))\n system.configure_random_state(covariance=np.eye(2))\n\n def test_linear_affine_system_empty_matrices(self):\n # Confirm the default values for the system matrices in the\n # constructor.\n def CheckSizes(system, num_states, num_inputs, num_outputs):\n self.assertEqual(system.num_continuous_states(), num_states)\n self.assertEqual(system.num_inputs(), num_inputs)\n self.assertEqual(system.num_outputs(), num_outputs)\n\n # A constant vector system.\n system = AffineSystem(y0=[2, 1])\n CheckSizes(system, num_states=0, num_inputs=0, num_outputs=2)\n\n # A matrix gain.\n system = AffineSystem(D=np.eye(2))\n CheckSizes(system, num_states=0, num_inputs=2, num_outputs=2)\n system = LinearSystem(D=np.eye(2))\n CheckSizes(system, num_states=0, num_inputs=2, num_outputs=2)\n\n # Add an offset.\n system = AffineSystem(D=np.eye(2), y0=[1, 2])\n CheckSizes(system, num_states=0, num_inputs=2, num_outputs=2)\n\n # An integrator.\n system = LinearSystem(B=np.eye(2))\n CheckSizes(system, num_states=2, num_inputs=2, num_outputs=0)\n\n def test_linear_system_zero_size(self):\n # Explicitly test #12633.\n num_x = 0\n num_y = 2\n num_u = 2\n A = np.zeros((num_x, num_x))\n B = np.zeros((num_x, num_u))\n C = np.zeros((num_y, num_x))\n D = np.zeros((num_y, num_u))\n self.assertIsNotNone(LinearSystem(A, B, C, D))\n\n @numpy_compare.check_nonsymbolic_types\n def test_linear_transform_density(self, T):\n dut = LinearTransformDensity_[T](\n distribution=RandomDistribution.kGaussian,\n input_size=3,\n 
output_size=3)\n w_in = np.array([T(0.5), T(0.1), T(1.5)])\n context = dut.CreateDefaultContext()\n dut.get_input_port_w_in().FixValue(context, w_in)\n self.assertEqual(dut.get_input_port_A().size(), 9)\n self.assertEqual(dut.get_input_port_b().size(), 3)\n self.assertEqual(dut.get_distribution(), RandomDistribution.kGaussian)\n A = np.array([\n [T(0.5), T(1), T(2)], [T(1), T(2), T(3)], [T(3), T(4), T(5)]])\n dut.FixConstantA(context=context, A=A)\n b = np.array([T(1), T(2), T(3)])\n dut.FixConstantB(context=context, b=b)\n\n dut.CalcDensity(context=context)\n\n self.assertEqual(dut.get_output_port_w_out().size(), 3)\n self.assertEqual(dut.get_output_port_w_out_density().size(), 1)\n\n def test_vector_pass_through(self):\n model_value = BasicVector([1., 2, 3])\n system = PassThrough(vector_size=model_value.size())\n context = system.CreateDefaultContext()\n system.get_input_port(0).FixValue(context, model_value)\n output = system.AllocateOutput()\n input_eval = system.EvalVectorInput(context, 0)\n compare_value(self, input_eval, model_value)\n system.CalcOutput(context, output)\n output_value = output.get_vector_data(0)\n compare_value(self, output_value, model_value)\n\n def test_default_vector_pass_through(self):\n model_value = [1., 2, 3]\n system = PassThrough(value=model_value)\n context = system.CreateDefaultContext()\n np.testing.assert_array_equal(\n model_value, system.get_output_port().Eval(context))\n\n def test_abstract_pass_through(self):\n model_value = Value(\"Hello world\")\n system = PassThrough(abstract_model_value=model_value)\n context = system.CreateDefaultContext()\n system.get_input_port(0).FixValue(context, model_value)\n output = system.AllocateOutput()\n input_eval = system.EvalAbstractInput(context, 0)\n compare_value(self, input_eval, model_value)\n system.CalcOutput(context, output)\n output_value = output.get_data(0)\n compare_value(self, output_value, model_value)\n\n def test_port_switch(self):\n system = PortSwitch(vector_size=2)\n a = system.DeclareInputPort(name=\"a\")\n system.DeclareInputPort(name=\"b\")\n context = system.CreateDefaultContext()\n self.assertIsInstance(a, InputPort)\n system.get_port_selector_input_port().FixValue(context, a.get_index())\n\n def test_first_order_low_pass_filter(self):\n filter1 = FirstOrderLowPassFilter(time_constant=3.0, size=4)\n self.assertEqual(filter1.get_time_constant(), 3.0)\n\n alpha = np.array([1, 2, 3])\n filter2 = FirstOrderLowPassFilter(time_constants=alpha)\n np.testing.assert_array_equal(filter2.get_time_constants_vector(),\n alpha)\n\n context = filter2.CreateDefaultContext()\n filter2.set_initial_output_value(context, [0., -0.2, 0.4])\n\n def test_gain(self):\n k = 42.\n input_size = 10\n systems = [Gain(k=k, size=input_size),\n Gain(k=k*np.ones(input_size))]\n\n for system in systems:\n context = system.CreateDefaultContext()\n output = system.AllocateOutput()\n\n def mytest(input, expected):\n system.get_input_port(0).FixValue(context, input)\n system.CalcOutput(context, output)\n self.assertTrue(np.allclose(output.get_vector_data(\n 0).CopyToVector(), expected))\n\n test_input = np.arange(input_size)\n mytest(np.arange(input_size), k*np.arange(input_size))\n\n def test_saturation(self):\n system = Saturation((0., -1., 3.), (1., 2., 4.))\n context = system.CreateDefaultContext()\n output = system.AllocateOutput()\n\n def mytest(input, expected):\n system.get_input_port(0).FixValue(context, input)\n system.CalcOutput(context, output)\n self.assertTrue(np.allclose(output.get_vector_data(\n 
0).CopyToVector(), expected))\n\n mytest((-5., 5., 4.), (0., 2., 4.))\n mytest((.4, 0., 3.5), (.4, 0., 3.5))\n\n def test_trajectory_source(self):\n ppt = PiecewisePolynomial.FirstOrderHold(\n [0., 1.], [[2., 3.], [2., 1.]])\n system = TrajectorySource(trajectory=ppt,\n output_derivative_order=0,\n zero_derivatives_beyond_limits=True)\n context = system.CreateDefaultContext()\n output = system.AllocateOutput()\n\n def mytest(input, expected):\n context.SetTime(input)\n system.CalcOutput(context, output)\n self.assertTrue(np.allclose(output.get_vector_data(\n 0).CopyToVector(), expected))\n\n mytest(0.0, (2.0, 2.0))\n mytest(0.5, (2.5, 1.5))\n mytest(1.0, (3.0, 1.0))\n\n ppt2 = PiecewisePolynomial.FirstOrderHold(\n [0., 1.], [[4., 6.], [4., 2.]])\n system.UpdateTrajectory(trajectory=ppt2)\n mytest(0.0, (4.0, 4.0))\n mytest(0.5, (5.0, 3.0))\n mytest(1.0, (6.0, 2.0))\n\n def test_symbolic_vector_system(self):\n t = Variable(\"t\")\n x = [Variable(\"x0\"), Variable(\"x1\")]\n u = [Variable(\"u0\"), Variable(\"u1\")]\n system = SymbolicVectorSystem(time=t, state=x, input=u,\n dynamics=[x[0] + x[1], t],\n output=[u[1]],\n time_period=0.0)\n context = system.CreateDefaultContext()\n\n self.assertEqual(context.num_continuous_states(), 2)\n self.assertEqual(context.num_discrete_state_groups(), 0)\n self.assertEqual(system.get_input_port(0).size(), 2)\n self.assertEqual(system.get_output_port(0).size(), 1)\n self.assertEqual(context.num_abstract_parameters(), 0)\n self.assertEqual(context.num_numeric_parameter_groups(), 0)\n self.assertTrue(system.dynamics_for_variable(x[0])\n .EqualTo(x[0] + x[1]))\n self.assertTrue(system.dynamics_for_variable(x[1])\n .EqualTo(t))\n\n def test_symbolic_vector_system_parameters(self):\n t = Variable(\"t\")\n x = [Variable(\"x0\"), Variable(\"x1\")]\n u = [Variable(\"u0\"), Variable(\"u1\")]\n p = [Variable(\"p0\"), Variable(\"p1\")]\n system = SymbolicVectorSystem(time=t, state=x, input=u,\n parameter=p,\n dynamics=[p[0] * x[0] + x[1] + p[1], t],\n output=[u[1]],\n time_period=0.0)\n context = system.CreateDefaultContext()\n\n self.assertEqual(context.num_continuous_states(), 2)\n self.assertEqual(context.num_discrete_state_groups(), 0)\n self.assertEqual(system.get_input_port(0).size(), 2)\n self.assertEqual(system.get_output_port(0).size(), 1)\n self.assertEqual(context.num_abstract_parameters(), 0)\n self.assertEqual(context.num_numeric_parameter_groups(), 1)\n self.assertEqual(context.get_numeric_parameter(0).size(), 2)\n self.assertTrue(system.dynamics_for_variable(x[0])\n .EqualTo(p[0] * x[0] + x[1] + p[1]))\n self.assertTrue(system.dynamics_for_variable(x[1])\n .EqualTo(t))\n\n def test_wrap_to_system(self):\n system = WrapToSystem(2)\n system.set_interval(1, 1., 2.)\n context = system.CreateDefaultContext()\n output = system.AllocateOutput()\n\n def mytest(input, expected):\n system.get_input_port(0).FixValue(context, input)\n system.CalcOutput(context, output)\n self.assertTrue(np.allclose(output.get_vector_data(\n 0).CopyToVector(), expected))\n\n mytest((-1.5, 0.5), (-1.5, 1.5))\n mytest((.2, .3), (.2, 1.3))\n\n def test_demultiplexer(self):\n # Test demultiplexer with scalar outputs.\n demux = Demultiplexer(size=4)\n context = demux.CreateDefaultContext()\n self.assertEqual(demux.num_input_ports(), 1)\n self.assertEqual(demux.num_output_ports(), 4)\n numpy_compare.assert_equal(demux.get_output_ports_sizes(),\n [1, 1, 1, 1])\n\n input_vec = np.array([1., 2., 3., 4.])\n demux.get_input_port(0).FixValue(context, input_vec)\n output = 
demux.AllocateOutput()\n demux.CalcOutput(context, output)\n\n for i in range(4):\n self.assertTrue(\n np.allclose(output.get_vector_data(i).get_value(),\n input_vec[i]))\n\n # Test demultiplexer with vector outputs.\n demux = Demultiplexer(size=4, output_ports_size=2)\n context = demux.CreateDefaultContext()\n self.assertEqual(demux.num_input_ports(), 1)\n self.assertEqual(demux.num_output_ports(), 2)\n numpy_compare.assert_equal(demux.get_output_ports_sizes(), [2, 2])\n\n demux.get_input_port(0).FixValue(context, input_vec)\n output = demux.AllocateOutput()\n demux.CalcOutput(context, output)\n\n for i in range(2):\n self.assertTrue(\n np.allclose(output.get_vector_data(i).get_value(),\n input_vec[2*i:2*i+2]))\n\n # Test demultiplexer with different output port sizes.\n output_ports_sizes = np.array([1, 2, 1])\n num_output_ports = output_ports_sizes.size\n input_vec = np.array([1., 2., 3., 4.])\n demux = Demultiplexer(output_ports_sizes=output_ports_sizes)\n context = demux.CreateDefaultContext()\n self.assertEqual(demux.num_input_ports(), 1)\n self.assertEqual(demux.num_output_ports(), num_output_ports)\n numpy_compare.assert_equal(demux.get_output_ports_sizes(),\n output_ports_sizes)\n\n demux.get_input_port(0).FixValue(context, input_vec)\n output = demux.AllocateOutput()\n demux.CalcOutput(context, output)\n\n output_port_start = 0\n for i in range(num_output_ports):\n output_port_size = output.get_vector_data(i).size()\n self.assertTrue(\n np.allclose(output.get_vector_data(i).get_value(),\n input_vec[output_port_start:\n output_port_start+output_port_size]))\n output_port_start += output_port_size\n\n def test_multiplexer(self):\n my_vector = MyVector2(data=[1., 2.])\n test_cases = [\n dict(has_vector=False, mux=Multiplexer(num_scalar_inputs=4),\n data=[[5.], [3.], [4.], [2.]]),\n dict(has_vector=False, mux=Multiplexer(input_sizes=[2, 3]),\n data=[[8., 4.], [3., 6., 9.]]),\n dict(has_vector=True, mux=Multiplexer(model_vector=my_vector),\n data=[[42.], [3.]]),\n ]\n for case in test_cases:\n mux = case['mux']\n port_size = sum([len(vec) for vec in case['data']])\n self.assertEqual(mux.get_output_port(0).size(), port_size)\n context = mux.CreateDefaultContext()\n output = mux.AllocateOutput()\n num_ports = len(case['data'])\n self.assertEqual(context.num_input_ports(), num_ports)\n for j, vec in enumerate(case['data']):\n mux.get_input_port(j).FixValue(context, vec)\n mux.CalcOutput(context, output)\n self.assertTrue(\n np.allclose(output.get_vector_data(0).get_value(),\n [elem for vec in case['data'] for elem in vec]))\n if case['has_vector']:\n # Check the type matches MyVector2.\n value = output.get_vector_data(0)\n self.assertTrue(isinstance(value, MyVector2))\n\n def test_multilayer_perceptron(self):\n mlp = MultilayerPerceptron(\n layers=[1, 2, 3], activation_type=PerceptronActivationType.kReLU)\n self.assertEqual(mlp.get_input_port().size(), 1)\n self.assertEqual(mlp.get_output_port().size(), 3)\n context = mlp.CreateDefaultContext()\n params = np.zeros((mlp.num_parameters(), 1))\n self.assertEqual(mlp.num_parameters(), 13)\n self.assertEqual(mlp.layers(), [1, 2, 3])\n self.assertEqual(mlp.activation_type(layer=0),\n PerceptronActivationType.kReLU)\n self.assertEqual(len(mlp.GetParameters(context=context)),\n mlp.num_parameters())\n mlp.SetWeights(context=context, layer=0, W=np.array([[1], [2]]))\n mlp.SetBiases(context=context, layer=0, b=[3, 4])\n np.testing.assert_array_equal(\n mlp.GetWeights(context=context, layer=0), np.array([[1], [2]]))\n 
np.testing.assert_array_equal(\n mlp.GetBiases(context=context, layer=0), np.array([3, 4]))\n params = np.zeros(mlp.num_parameters())\n mlp.SetWeights(params=params, layer=0, W=np.array([[1], [2]]))\n mlp.SetBiases(params=params, layer=0, b=[3, 4])\n np.testing.assert_array_equal(\n mlp.GetWeights(params=params, layer=0), np.array([[1], [2]]))\n np.testing.assert_array_equal(\n mlp.GetBiases(params=params, layer=0), np.array([3, 4]))\n mutable_params = mlp.GetMutableParameters(context=context)\n mutable_params[:] = 3.0\n np.testing.assert_array_equal(mlp.GetParameters(context),\n np.full(mlp.num_parameters(), 3.0))\n\n global called_loss\n called_loss = False\n\n def silly_loss(Y, dloss_dY):\n global called_loss\n called_loss = True\n # We must be careful to update the dloss in place, rather than bind\n # a new matrix to the same variable name.\n dloss_dY[:] = 1\n # dloss_dY = np.array(...etc...) # <== wrong\n return Y.sum()\n\n dloss_dparams = np.zeros((13,))\n generator = RandomGenerator(23)\n mlp.SetRandomContext(context, generator)\n mlp.Backpropagation(context=context,\n X=np.array([1, 3, 4]).reshape((1, 3)),\n loss=silly_loss,\n dloss_dparams=dloss_dparams)\n self.assertTrue(called_loss)\n self.assertTrue(dloss_dparams.any()) # No longer all zero.\n\n dloss_dparams = np.zeros((13,))\n mlp.BackpropagationMeanSquaredError(context=context,\n X=np.array([1, 3, 4]).reshape(\n (1, 3)),\n Y_desired=np.eye(3),\n dloss_dparams=dloss_dparams)\n self.assertTrue(dloss_dparams.any()) # No longer all zero.\n\n Y = np.asfortranarray(np.eye(3))\n mlp.BatchOutput(context=context, X=np.array([[0.1, 0.3, 0.4]]), Y=Y)\n self.assertFalse(np.allclose(Y, np.eye(3)))\n Y2 = mlp.BatchOutput(context=context, X=np.array([[0.1, 0.3, 0.4]]))\n np.testing.assert_array_equal(Y, Y2)\n\n mlp2 = MultilayerPerceptron(layers=[3, 2, 1],\n activation_types=[\n PerceptronActivationType.kReLU,\n PerceptronActivationType.kTanh\n ])\n self.assertEqual(mlp2.activation_type(0),\n PerceptronActivationType.kReLU)\n self.assertEqual(mlp2.activation_type(1),\n PerceptronActivationType.kTanh)\n Y = np.asfortranarray(np.full((1, 3), 2.4))\n dYdX = np.asfortranarray(np.full((3, 3), 5.3))\n context2 = mlp2.CreateDefaultContext()\n mlp2.BatchOutput(context=context2, X=np.eye(3), Y=Y, dYdX=dYdX)\n # The default context sets the weights and biases to zero, so the\n # output (and gradients) should be zero.\n np.testing.assert_array_almost_equal(Y, np.zeros((1, 3)))\n np.testing.assert_array_almost_equal(dYdX, np.zeros((3, 3)))\n\n mlp = MultilayerPerceptron(use_sin_cos_for_input=[True, False],\n remaining_layers=[3, 2],\n activation_types=[\n PerceptronActivationType.kReLU,\n PerceptronActivationType.kTanh\n ])\n self.assertEqual(mlp.get_input_port().size(), 2)\n np.testing.assert_array_equal(mlp.layers(), [3, 3, 2])\n\n def test_random_source(self):\n source = RandomSource(distribution=RandomDistribution.kUniform,\n num_outputs=2, sampling_interval_sec=0.01)\n self.assertEqual(source.get_output_port(0).size(), 2)\n\n builder = DiagramBuilder()\n # Note: There are no random inputs to add to the empty diagram, but it\n # confirms the API works.\n AddRandomInputs(sampling_interval_sec=0.01, builder=builder)\n\n builder_ad = DiagramBuilder_[AutoDiffXd]()\n AddRandomInputs(sampling_interval_sec=0.01, builder=builder_ad)\n\n def test_constant_vector_source(self):\n source = ConstantVectorSource(source_value=[1., 2.])\n context = source.CreateDefaultContext()\n source.get_source_value(context)\n source.get_mutable_source_value(context)\n\n def 
test_ctor_api(self):\n \"\"\"Tests construction of systems for systems whose executions semantics\n are not tested above.\n \"\"\"\n ConstantValueSource(Value(\"Hello world\"))\n DiscreteTimeDelay(update_sec=0.1, delay_time_steps=5, vector_size=2)\n DiscreteTimeDelay(\n update_sec=0.1, delay_time_steps=5,\n abstract_model_value=Value(\"Hello world\"))\n with catch_drake_warnings(expected_count=2) as w:\n DiscreteTimeDelay(update_sec=0.1, delay_timesteps=5, vector_size=2)\n DiscreteTimeDelay(\n update_sec=0.1, delay_timesteps=5,\n abstract_model_value=Value(\"Hello world\"))\n\n ZeroOrderHold(period_sec=0.1, offset_sec=0.0, vector_size=2)\n dut = ZeroOrderHold(period_sec=1.0, offset_sec=0.25,\n abstract_model_value=Value(\"Hello world\"))\n self.assertEqual(dut.period(), 1.0)\n self.assertEqual(dut.offset(), 0.25)\n\n def test_shared_pointer_system_ctor(self):\n dut = SharedPointerSystem(value_to_hold=[1, 2, 3])\n readback = dut.get()\n self.assertListEqual(readback, [1, 2, 3])\n del dut\n self.assertListEqual(readback, [1, 2, 3])\n\n def test_shared_pointer_system_builder(self):\n builder = DiagramBuilder()\n self.assertListEqual(\n SharedPointerSystem.AddToBuilder(\n builder=builder, value_to_hold=[1, 2, 3]),\n [1, 2, 3])\n diagram = builder.Build()\n del builder\n readback = diagram.GetSystems()[0].get()\n self.assertListEqual(readback, [1, 2, 3])\n del diagram\n self.assertListEqual(readback, [1, 2, 3])\n\n def test_sine(self):\n # Test scalar output.\n sine_source = Sine(amplitude=1, frequency=2, phase=3,\n size=1, is_time_based=True)\n self.assertEqual(sine_source.get_output_port(0).size(), 1)\n self.assertEqual(sine_source.get_output_port(1).size(), 1)\n self.assertEqual(sine_source.get_output_port(2).size(), 1)\n\n # Test vector output.\n sine_source = Sine(amplitude=1, frequency=2, phase=3,\n size=3, is_time_based=True)\n self.assertEqual(sine_source.get_output_port(0).size(), 3)\n self.assertEqual(sine_source.get_output_port(1).size(), 3)\n self.assertEqual(sine_source.get_output_port(2).size(), 3)\n\n sine_source = Sine(amplitudes=np.ones(2), frequencies=np.ones(2),\n phases=np.ones(2), is_time_based=True)\n self.assertEqual(sine_source.get_output_port(0).size(), 2)\n self.assertEqual(sine_source.get_output_port(1).size(), 2)\n self.assertEqual(sine_source.get_output_port(2).size(), 2)\n\n def test_discrete_derivative(self):\n discrete_derivative = DiscreteDerivative(num_inputs=5, time_step=0.5)\n self.assertEqual(discrete_derivative.get_input_port(0).size(), 5)\n self.assertEqual(discrete_derivative.get_output_port(0).size(), 5)\n self.assertEqual(discrete_derivative.time_step(), 0.5)\n self.assertTrue(discrete_derivative.suppress_initial_transient())\n\n discrete_derivative = DiscreteDerivative(\n num_inputs=5, time_step=0.5, suppress_initial_transient=False)\n self.assertFalse(discrete_derivative.suppress_initial_transient())\n\n def test_state_interpolator_with_discrete_derivative(self):\n state_interpolator = StateInterpolatorWithDiscreteDerivative(\n num_positions=5, time_step=0.4)\n self.assertEqual(state_interpolator.get_input_port(0).size(), 5)\n self.assertEqual(state_interpolator.get_output_port(0).size(), 10)\n self.assertTrue(state_interpolator.suppress_initial_transient())\n\n # test set_initial_position using context\n context = state_interpolator.CreateDefaultContext()\n state_interpolator.set_initial_position(\n context=context, position=5*[1.1])\n np.testing.assert_array_equal(\n context.get_discrete_state(0).CopyToVector(),\n np.array(5*[1.1]))\n 
np.testing.assert_array_equal(\n context.get_discrete_state(1).CopyToVector(),\n np.array(5*[1.1]))\n\n # test set_initial_position using state\n context = state_interpolator.CreateDefaultContext()\n state_interpolator.set_initial_position(\n state=context.get_state(), position=5*[1.3])\n np.testing.assert_array_equal(\n context.get_discrete_state(0).CopyToVector(),\n np.array(5*[1.3]))\n np.testing.assert_array_equal(\n context.get_discrete_state(1).CopyToVector(),\n np.array(5*[1.3]))\n\n state_interpolator = StateInterpolatorWithDiscreteDerivative(\n num_positions=5, time_step=0.4, suppress_initial_transient=True)\n self.assertTrue(state_interpolator.suppress_initial_transient())\n\n @numpy_compare.check_nonsymbolic_types\n def test_log_vector_output(self, T):\n # Add various redundant loggers to a system, to exercise the\n # LogVectorOutput bindings.\n builder = DiagramBuilder_[T]()\n kSize = 1\n integrator = builder.AddSystem(Integrator_[T](kSize))\n port = integrator.get_output_port(0)\n loggers = []\n loggers.append(LogVectorOutput(port, builder))\n loggers.append(LogVectorOutput(src=port, builder=builder))\n loggers.append(LogVectorOutput(port, builder, 0.125))\n loggers.append(LogVectorOutput(\n src=port, builder=builder, publish_period=0.125))\n\n loggers.append(LogVectorOutput(port, builder, {TriggerType.kForced}))\n loggers.append(LogVectorOutput(\n src=port, builder=builder, publish_triggers={TriggerType.kForced}))\n loggers.append(LogVectorOutput(\n port, builder, {TriggerType.kPeriodic}, 0.125))\n loggers.append(LogVectorOutput(\n src=port, builder=builder,\n publish_triggers={TriggerType.kPeriodic}, publish_period=0.125))\n\n # Check the returned loggers by calling some trivial methods.\n diagram = builder.Build()\n context = diagram.CreateDefaultContext()\n self.assertTrue(all(logger.FindLog(context).num_samples() == 0\n for logger in loggers))\n\n @numpy_compare.check_nonsymbolic_types\n def test_vector_log(self, T):\n kSize = 1\n dut = VectorLog(kSize)\n self.assertEqual(dut.get_input_size(), kSize)\n dut.AddData(0.1, [22.22])\n self.assertEqual(dut.num_samples(), 1)\n self.assertEqual(dut.sample_times(), [0.1])\n self.assertEqual(dut.data(), [22.22])\n dut.Clear()\n self.assertEqual(dut.num_samples(), 0)\n # There is no good way from python to test the semantics of Reserve(),\n # but test the binding anyway.\n dut.Reserve(VectorLog.kDefaultCapacity * 3)\n\n @numpy_compare.check_nonsymbolic_types\n def test_vector_log_sink(self, T):\n # Add various redundant loggers to a system, to exercise the\n # VectorLog constructor bindings.\n builder = DiagramBuilder_[T]()\n kSize = 1\n constructors = [VectorLogSink_[T]]\n loggers = []\n if T == float:\n constructors.append(VectorLogSink)\n for constructor in constructors:\n loggers.append(builder.AddSystem(constructor(kSize)))\n loggers.append(builder.AddSystem(constructor(input_size=kSize)))\n loggers.append(builder.AddSystem(constructor(kSize, 0.125)))\n loggers.append(builder.AddSystem(\n constructor(input_size=kSize, publish_period=0.125)))\n loggers.append(builder.AddSystem(\n constructor(kSize, {TriggerType.kForced})))\n loggers.append(builder.AddSystem(\n constructor(input_size=kSize,\n publish_triggers={TriggerType.kForced})))\n loggers.append(builder.AddSystem(\n constructor(kSize, {TriggerType.kPeriodic}, 0.125)))\n loggers.append(builder.AddSystem(\n constructor(input_size=kSize,\n publish_triggers={TriggerType.kPeriodic},\n publish_period=0.125)))\n\n # Exercise all of the log access methods.\n diagram = 
builder.Build()\n context = diagram.CreateDefaultContext()\n # FindLog and FindMutableLog find the same object.\n self.assertTrue(\n all(logger.FindLog(context) == logger.FindMutableLog(context)\n for logger in loggers))\n # Build a list of pairs of loggers and their local contexts.\n loggers_and_contexts = [(x, x.GetMyContextFromRoot(context))\n for x in loggers]\n # GetLog and GetMutableLog find the same object.\n self.assertTrue(\n all(logger.GetLog(logger_context)\n == logger.GetMutableLog(logger_context)\n for logger, logger_context in loggers_and_contexts))\n # GetLog and FindLog find the same object, given the proper contexts.\n self.assertTrue(\n all(logger.GetLog(logger_context) == logger.FindLog(context)\n for logger, logger_context in loggers_and_contexts))\n",
"step-ids": [
25,
26,
28,
30,
35
]
}
|
[
25,
26,
28,
30,
35
] |
from datetime import datetime, timedelta

# strftime("%w") gives 0=Sunday..6=Saturday; adding 1 shifts today onto the
# same 1..7 Sunday-first scheme the user types in.
dd = int(input("enter day number (1=Sunday .. 7=Saturday): "))
nn = int(datetime.now().strftime("%w")) + 1  # today's day number
# Offset today's date by the difference to land on day dd of the current week.
print(datetime.now().date() + timedelta(days=dd - nn))
|
normal
|
{
"blob_id": "d3342507cb1966e14380ff28ae12b5c334abd20a",
"index": 5430,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nprint(datetime.now().date() + timedelta(days=dd - nn))\n",
"step-3": "<mask token>\ndd = int(input('enter number day: '))\nnn = int(datetime.now().strftime('%w')) + 1\nprint(datetime.now().date() + timedelta(days=dd - nn))\n",
"step-4": "from datetime import *\ndd = int(input('enter number day: '))\nnn = int(datetime.now().strftime('%w')) + 1\nprint(datetime.now().date() + timedelta(days=dd - nn))\n",
"step-5": "from datetime import *\ndd=int(input(\"enter number day: \"))\nnn=int(datetime.now().strftime(\"%w\"))+1\n# print(dd)\n# print(nn)\nprint((datetime.now().date())+(timedelta(days=dd-nn)))\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
import pygame
import serial
import time
ser1 = serial.Serial('/dev/ttyACM0', 115200) #Right
ser1.write('?\n')
time.sleep(0.5)
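# Byte 4 of the controller's reply appears to identify which board answered;
# if ACM0 is not the right-hand controller, swap the two port assignments below.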
if ser1.readline()[4] == 0:
ser2 = serial.Serial('/dev/ttyACM1', 115200) #Left, negative speeds go forward
else:
ser1 = serial.Serial('/dev/ttyACM1', 115200)
ser2 = serial.Serial('/dev/ttyACM0', 115200)
def write_spd(write1, write2):
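    # Send speed commands to both motor controllers. The left-hand value is
    # negated because, per the note above, negative speeds drive it forward.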
ser1.write('sd'+str(write1)+'\n')
ser2.write('sd'+str(-write2)+'\n')
speed = 60
up = 0
down = 0
left = 0
right = 0
state = {'up':0, 'down':0, 'left':0, 'right':0}
scr = pygame.display.set_mode((1,1))
while(True):
elist = pygame.event.get()
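    # pygame 1.x numeric constants: event.type 2 = KEYDOWN, 3 = KEYUP;
    # key codes 273-276 = up/down/right/left arrows, 27 = Escape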
for event in elist:
if event.type == 2 and event.dict.get('key') == 27:
write_spd(0, 0)
quit()
if event.type == 2:
if event.dict.get('key') == 273:
state['up'] = 1
elif event.dict.get('key') == 274:
state['down'] = 1
elif event.dict.get('key') == 275:
state['right'] = 1
elif event.dict.get('key') == 276:
state['left'] = 1
if event.type == 3:
if event.dict.get('key') == 273:
state['up'] = 0
elif event.dict.get('key') == 274:
state['down'] = 0
elif event.dict.get('key') == 275:
state['right'] = 0
elif event.dict.get('key') == 276:
state['left'] = 0
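    # translate the current arrow-key state into differential wheel speeds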
if state['up'] == 1:
if state['right'] == 1:
write_spd(0, speed)
elif state['left'] == 1:
write_spd(speed, 0)
else:
write_spd(speed, speed)
elif state['left'] == 1:
write_spd(speed, -speed)
elif state['right'] == 1:
write_spd(-speed, speed)
elif state['down'] == 1:
write_spd(-speed, -speed)
else:
write_spd(0, 0)
|
normal
|
{
"blob_id": "e6d4d12d47391927364fdc9765c68690d42c5d8d",
"index": 8950,
"step-1": "<mask token>\n\n\ndef write_spd(write1, write2):\n ser1.write('sd' + str(write1) + '\\n')\n ser2.write('sd' + str(-write2) + '\\n')\n\n\n<mask token>\n",
"step-2": "<mask token>\nser1.write('?\\n')\ntime.sleep(0.5)\nif ser1.readline()[4] == 0:\n ser2 = serial.Serial('/dev/ttyACM1', 115200)\nelse:\n ser1 = serial.Serial('/dev/ttyACM1', 115200)\n ser2 = serial.Serial('/dev/ttyACM0', 115200)\n\n\ndef write_spd(write1, write2):\n ser1.write('sd' + str(write1) + '\\n')\n ser2.write('sd' + str(-write2) + '\\n')\n\n\n<mask token>\nwhile True:\n elist = pygame.event.get()\n for event in elist:\n if event.type == 2 and event.dict.get('key') == 27:\n write_spd(0, 0)\n quit()\n if event.type == 2:\n if event.dict.get('key') == 273:\n state['up'] = 1\n elif event.dict.get('key') == 274:\n state['down'] = 1\n elif event.dict.get('key') == 275:\n state['right'] = 1\n elif event.dict.get('key') == 276:\n state['left'] = 1\n if event.type == 3:\n if event.dict.get('key') == 273:\n state['up'] = 0\n elif event.dict.get('key') == 274:\n state['down'] = 0\n elif event.dict.get('key') == 275:\n state['right'] = 0\n elif event.dict.get('key') == 276:\n state['left'] = 0\n if state['up'] == 1:\n if state['right'] == 1:\n write_spd(0, speed)\n elif state['left'] == 1:\n write_spd(speed, 0)\n else:\n write_spd(speed, speed)\n elif state['left'] == 1:\n write_spd(speed, -speed)\n elif state['right'] == 1:\n write_spd(-speed, speed)\n elif state['down'] == 1:\n write_spd(-speed, -speed)\n else:\n write_spd(0, 0)\n",
"step-3": "<mask token>\nser1 = serial.Serial('/dev/ttyACM0', 115200)\nser1.write('?\\n')\ntime.sleep(0.5)\nif ser1.readline()[4] == 0:\n ser2 = serial.Serial('/dev/ttyACM1', 115200)\nelse:\n ser1 = serial.Serial('/dev/ttyACM1', 115200)\n ser2 = serial.Serial('/dev/ttyACM0', 115200)\n\n\ndef write_spd(write1, write2):\n ser1.write('sd' + str(write1) + '\\n')\n ser2.write('sd' + str(-write2) + '\\n')\n\n\nspeed = 60\nup = 0\ndown = 0\nleft = 0\nright = 0\nstate = {'up': 0, 'down': 0, 'left': 0, 'right': 0}\nscr = pygame.display.set_mode((1, 1))\nwhile True:\n elist = pygame.event.get()\n for event in elist:\n if event.type == 2 and event.dict.get('key') == 27:\n write_spd(0, 0)\n quit()\n if event.type == 2:\n if event.dict.get('key') == 273:\n state['up'] = 1\n elif event.dict.get('key') == 274:\n state['down'] = 1\n elif event.dict.get('key') == 275:\n state['right'] = 1\n elif event.dict.get('key') == 276:\n state['left'] = 1\n if event.type == 3:\n if event.dict.get('key') == 273:\n state['up'] = 0\n elif event.dict.get('key') == 274:\n state['down'] = 0\n elif event.dict.get('key') == 275:\n state['right'] = 0\n elif event.dict.get('key') == 276:\n state['left'] = 0\n if state['up'] == 1:\n if state['right'] == 1:\n write_spd(0, speed)\n elif state['left'] == 1:\n write_spd(speed, 0)\n else:\n write_spd(speed, speed)\n elif state['left'] == 1:\n write_spd(speed, -speed)\n elif state['right'] == 1:\n write_spd(-speed, speed)\n elif state['down'] == 1:\n write_spd(-speed, -speed)\n else:\n write_spd(0, 0)\n",
"step-4": "import pygame\nimport serial\nimport time\nser1 = serial.Serial('/dev/ttyACM0', 115200)\nser1.write('?\\n')\ntime.sleep(0.5)\nif ser1.readline()[4] == 0:\n ser2 = serial.Serial('/dev/ttyACM1', 115200)\nelse:\n ser1 = serial.Serial('/dev/ttyACM1', 115200)\n ser2 = serial.Serial('/dev/ttyACM0', 115200)\n\n\ndef write_spd(write1, write2):\n ser1.write('sd' + str(write1) + '\\n')\n ser2.write('sd' + str(-write2) + '\\n')\n\n\nspeed = 60\nup = 0\ndown = 0\nleft = 0\nright = 0\nstate = {'up': 0, 'down': 0, 'left': 0, 'right': 0}\nscr = pygame.display.set_mode((1, 1))\nwhile True:\n elist = pygame.event.get()\n for event in elist:\n if event.type == 2 and event.dict.get('key') == 27:\n write_spd(0, 0)\n quit()\n if event.type == 2:\n if event.dict.get('key') == 273:\n state['up'] = 1\n elif event.dict.get('key') == 274:\n state['down'] = 1\n elif event.dict.get('key') == 275:\n state['right'] = 1\n elif event.dict.get('key') == 276:\n state['left'] = 1\n if event.type == 3:\n if event.dict.get('key') == 273:\n state['up'] = 0\n elif event.dict.get('key') == 274:\n state['down'] = 0\n elif event.dict.get('key') == 275:\n state['right'] = 0\n elif event.dict.get('key') == 276:\n state['left'] = 0\n if state['up'] == 1:\n if state['right'] == 1:\n write_spd(0, speed)\n elif state['left'] == 1:\n write_spd(speed, 0)\n else:\n write_spd(speed, speed)\n elif state['left'] == 1:\n write_spd(speed, -speed)\n elif state['right'] == 1:\n write_spd(-speed, speed)\n elif state['down'] == 1:\n write_spd(-speed, -speed)\n else:\n write_spd(0, 0)\n",
"step-5": "import pygame\nimport serial\nimport time\n\nser1 = serial.Serial('/dev/ttyACM0', 115200) #Right\nser1.write('?\\n')\ntime.sleep(0.5)\nif ser1.readline()[4] == 0:\n ser2 = serial.Serial('/dev/ttyACM1', 115200) #Left, negative speeds go forward\nelse:\n ser1 = serial.Serial('/dev/ttyACM1', 115200)\n ser2 = serial.Serial('/dev/ttyACM0', 115200)\n\ndef write_spd(write1, write2):\n ser1.write('sd'+str(write1)+'\\n')\n ser2.write('sd'+str(-write2)+'\\n')\n\nspeed = 60\n\nup = 0\ndown = 0\nleft = 0\nright = 0\nstate = {'up':0, 'down':0, 'left':0, 'right':0}\n\nscr = pygame.display.set_mode((1,1))\nwhile(True):\n elist = pygame.event.get()\n for event in elist:\n if event.type == 2 and event.dict.get('key') == 27:\n write_spd(0, 0)\n quit()\n if event.type == 2:\n if event.dict.get('key') == 273:\n state['up'] = 1\n elif event.dict.get('key') == 274:\n state['down'] = 1\n elif event.dict.get('key') == 275:\n state['right'] = 1\n elif event.dict.get('key') == 276:\n state['left'] = 1\n if event.type == 3:\n if event.dict.get('key') == 273:\n state['up'] = 0\n elif event.dict.get('key') == 274:\n state['down'] = 0\n elif event.dict.get('key') == 275:\n state['right'] = 0\n elif event.dict.get('key') == 276:\n state['left'] = 0\n if state['up'] == 1:\n if state['right'] == 1:\n write_spd(0, speed)\n elif state['left'] == 1:\n write_spd(speed, 0)\n else:\n write_spd(speed, speed)\n elif state['left'] == 1:\n write_spd(speed, -speed)\n elif state['right'] == 1:\n write_spd(-speed, speed)\n elif state['down'] == 1:\n write_spd(-speed, -speed)\n else:\n write_spd(0, 0)\n \n\n\n\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
import serial
import mysql.connector
ser = serial.Serial('/dev/serial0', 9600)
while True:
data = ser.readline()
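	# frames are ';'-separated: a leading ';', then fonction, addresse, temperature, debit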
if data[0]==";":
print(data)
data = data.split(";")
if data[1] == "1":
fonction = data[1]
add = data[2]
tmp = data[3]
debit = data[4]
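			# reply with byte 123 -- presumably an acknowledgement to the sender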
ser.write([123])
			# display test
print "Save in DB"
print "fonction :",fonction
print "addresse :",add
print "temperature :",tmp
print "Debit :",debit
conn = mysql.connector.connect(host="mysql-ormeaux.alwaysdata.net",user="ormeaux",password="pGYw478Vy", database="ormeaux_29")
cursor = conn.cursor()
requete = "INSERT INTO mesures(id_bassins,temperature, debit) VALUES (%s, %s, %s)"
valeurs = (add,tmp,debit)
cursor.execute(requete,valeurs)
conn.commit()
conn.close()
|
normal
|
{
"blob_id": "b1a6593e7b528238e7be5ea6da4d1bfee0d78067",
"index": 7824,
"step-1": "import serial\nimport mysql.connector\n\nser = serial.Serial('/dev/serial0', 9600)\n\nwhile True:\n\tdata = ser.readline()\n\tif data[0]==\";\":\n\t\tprint(data)\n\t\tdata = data.split(\";\")\n\t\tif data[1] == \"1\":\n\t\t\tfonction = data[1]\n\t\t\tadd = data[2]\n\t\t\ttmp = data[3]\n\t\t\tdebit = data[4]\n\t\t\tser.write([123])\n\t\t\t#test affichage\n\t\t\tprint \"Save in DB\"\n\t\t\tprint \"fonction :\",fonction\n\t\t\tprint \"addresse :\",add\n\t\t\tprint \"temperature :\",tmp\n\t\t\tprint \"Debit :\",debit\n\n\t\t\tconn = mysql.connector.connect(host=\"mysql-ormeaux.alwaysdata.net\",user=\"ormeaux\",password=\"pGYw478Vy\", database=\"ormeaux_29\")\n\t\t\tcursor = conn.cursor()\n\t\t\tcursor = conn.cursor()\n\n\t\t\trequete = \"INSERT INTO mesures(id_bassins,temperature, debit) VALUES (%s, %s, %s)\"\n\t\t\tvaleurs = (add,tmp,debit)\n\t\n\t\t\tcursor.execute(requete,valeurs)\n\t\t\tconn.commit()\n\t\t\tconn.close()\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
# Time: O(|V| + |E|)
# Space: O(|V|)
class Solution(object):
def eventualSafeNodes(self, graph):
"""
:type graph: List[List[int]]
:rtype: List[int]
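
        e.g. graph = [[1,2],[2,3],[5],[0],[5],[],[]] -> safe nodes are [2, 4, 5, 6]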
"""
WHITE, GRAY, BLACK = range(3)
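        # WHITE = unvisited, GRAY = on the current DFS path (revisiting it means a cycle),
        # BLACK = proven to reach only terminal nodes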
def dfs(graph, node, lookup):
if lookup[node] != WHITE:
return lookup[node] == BLACK
lookup[node] = GRAY
if any(not dfs(graph, child, lookup) for child in graph[node]):
return False
lookup[node] = BLACK
return True
lookup = [WHITE]*len(graph)
return filter(lambda node: dfs(graph, node, lookup), xrange(len(graph)))
|
normal
|
{
"blob_id": "5c5cfcd240c8b05970dc8dff57bfbbdc98f1d100",
"index": 9838,
"step-1": "<mask token>\n",
"step-2": "class Solution(object):\n <mask token>\n",
"step-3": "class Solution(object):\n\n def eventualSafeNodes(self, graph):\n \"\"\"\n :type graph: List[List[int]]\n :rtype: List[int]\n \"\"\"\n WHITE, GRAY, BLACK = range(3)\n\n def dfs(graph, node, lookup):\n if lookup[node] != WHITE:\n return lookup[node] == BLACK\n lookup[node] = GRAY\n if any(not dfs(graph, child, lookup) for child in graph[node]):\n return False\n lookup[node] = BLACK\n return True\n lookup = [WHITE] * len(graph)\n return filter(lambda node: dfs(graph, node, lookup), xrange(len(graph))\n )\n",
"step-4": "# Time: O(|V| + |E|)\n# Space: O(|V|)\n\nclass Solution(object):\n def eventualSafeNodes(self, graph):\n \"\"\"\n :type graph: List[List[int]]\n :rtype: List[int]\n \"\"\"\n WHITE, GRAY, BLACK = range(3)\n\n def dfs(graph, node, lookup):\n if lookup[node] != WHITE:\n return lookup[node] == BLACK\n lookup[node] = GRAY\n if any(not dfs(graph, child, lookup) for child in graph[node]):\n return False\n lookup[node] = BLACK\n return True\n\n lookup = [WHITE]*len(graph)\n return filter(lambda node: dfs(graph, node, lookup), xrange(len(graph)))\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
'''
Author: Iris Peng. Date: Feb 21, 2016
Usage: Scrape Weibo posts from Zhongsou for the first time for a query
In the terminal, type
$ python3 scrape_weibo.py
and follow the prompts
'''
import requests
from bs4 import BeautifulSoup
from pandas import DataFrame
import time
import pandas
import glob, os
# Module-level configuration ('global' is redundant at module scope and has been dropped)
QUERY_LINK = 'http://t.zhongsou.com/wb?form_id=1&org=1&sel=0&so=1&v=%D6%D0%CB%D1&w=%B1%C6'  # query link
OUTPUT_FILE_NAME = 'scrape'  # name of your output file
WORKING_DIR = os.path.expanduser('~/Corpora/')  # expand '~' so open() gets a real path
OLD_MASTER_FILE = '{}Text_data/'.format(WORKING_DIR) + 'yeshizuile.txt'  # feed the new output
class NewScrape():
def scrape_main(self):
'''
Top-level function.
Use links from below, scrape a page, sleep for 5s, and restart on the next link.
'''
        for index, link in enumerate(self.gen_links()):
            self.get_weibo(link, str(index))
            time.sleep(5)
self.retrieve_posts(OUTPUT_FILE_NAME)
#self.clean_temp()
print('='*10)
print('Congratulations! Your data is stored')
return None
def gen_links(self):
links = []
for i in range(1,51):
i = str(i)
links.append('{}&b={}'.format(QUERY_LINK,i))
return links
def get_weibo(self,link,index):
'''
        Scrape one Weibo search-result page from Zhongsou and store it locally.
'''
html_doc = open('{}Temp/weibo.txt'.format(WORKING_DIR),'w', encoding = 'utf8')
r = requests.get(link)
print ('accessing web data.')
html_doc.write(r.text)
html_doc.close()
# Write into a csv file
outfile_name = 'zhongsou_results_page_' + index + '.csv'
outfile = open('{}Temp/'.format(WORKING_DIR) + outfile_name,'w', encoding = 'utf8') #change path
# Turn the text into a BeautifulSoup object and strip down the text.
html_doc = open('{}Temp/weibo.txt'.format(WORKING_DIR),'r', encoding = 'utf8')#change path
        soup = BeautifulSoup(html_doc, 'html.parser')  # explicit parser avoids bs4's warning
user_link = []
post_txt = []
post_link = []
post_time = []
weibo_items = soup.find_all('div', class_='weibo_item')
for item in weibo_items:
for link in item.find_all('a', target='_blank', class_='sina_weibo'):
url = link.get('href')
post_link.append(url)
for post in item.find_all('h3', class_='weibo_title'):
for a in post.find_all('a'):
url = a.get('href')
user_link.append(url)
for time in item.find_all('div', class_='weibo_time'):
txt = time.get_text()
post_time.append(txt)
for post in item.find_all('p', class_='weibo_txt'):
txt = post.get_text()
post_txt.append(txt)
data = {'post_text':post_txt,'post_link':post_link,'user':user_link, 'time':post_time}
frame = DataFrame(data)
frame.to_csv(outfile, encoding='utf-8')
        print(outfile_name, 'processing complete.')
outfile.close()
html_doc.close()
return None
def clean_temp(self):
filelist = glob.glob('{}Temp/*'.format(WORKING_DIR))
for f in filelist:
os.remove(f)
print('Temp files removed')
return None
def retrieve_posts(self,outfile_name):
'''(str)->a file
'''
post_text = []
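        # gen_links() always produces 50 result pages, so 50 temp CSVs are expected here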
for i in range(50):
frame_2 = pandas.read_csv('{}Temp/zhongsou_results_page_{}.csv'.format(WORKING_DIR, str(i)))#change directory
df2 = DataFrame(frame_2)
for i in df2.post_text:#the column'post_text'
post_text.append(i)
data = {'post_text':post_text}
frame = DataFrame(data)
frame.to_csv('{}Text_data/{}.txt'.format(WORKING_DIR, outfile_name), encoding = 'utf-8')#change saved path
frame.to_excel('{}Text_data/{}.xlsx'.format(WORKING_DIR, outfile_name), encoding = 'utf-8')#change saved path
print("Done")
return None
class ContinueScrape():
def scrape_main(self):
'''
Top-level function.
Use links from below, scrape a page, sleep for 5s, and restart on the next link.
'''
        for index, link in enumerate(self.gen_links()):
            cmd = self.get_weibo(link, str(index))
            if cmd == 'STOP':
                break
            time.sleep(10)
print('='*10)
        print('Scrape is now complete. Now the results need to be merged.')
        print('Check your Temp folder: what is the highest page number among the files?\n')
fn = int(input())
self.retrieve_posts(fn)
print('='*10)
print('Congratulations! Your data is stored')
return
def gen_links(self):
links = []
for i in range(1,51):
i = str(i)
links.append('{}&b={}'.format(QUERY_LINK,i))
return links
def get_weibo(self,link,index):
'''
        Scrape one Weibo search-result page from Zhongsou and store it locally.
'''
html_doc = open('{}Temp/weibo.txt'.format(WORKING_DIR), 'w', encoding='utf8')
r = requests.get(link)
print ('Accessing web data.')
html_doc.write(r.text)
html_doc.close()
# Retrieve scrape history
h_post_text = []
h_frame = pandas.read_csv(OLD_MASTER_FILE)
h_df = DataFrame(h_frame)
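        # previously saved posts; used below to detect where the new data ends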
for i in h_df.post_text:
h_post_text.append(i)
# Write into a csv file
outfile_name = 'zhongsou_results_page_' + index + '.csv'
outfile = open('{}Temp/'.format(WORKING_DIR)+ outfile_name,'w', encoding = 'utf8') #change path
# Turn the text into a BeautifulSoup object and strip down the text.
html_doc = open('{}Temp/weibo.txt'.format(WORKING_DIR), 'r', encoding='utf8')
        soup = BeautifulSoup(html_doc, 'html.parser')  # explicit parser avoids bs4's warning
user_link = []
post_txt = []
post_link = []
post_time = []
cmd = None
weibo_items = soup.find_all('div', class_='weibo_item')
for item in weibo_items:
for link in item.find_all('a', target='_blank', class_='sina_weibo'):
url = link.get('href')
post_link.append(url)
for post in item.find_all('h3', class_='weibo_title'):
for a in post.find_all('a'):
url = a.get('href')
user_link.append(url)
for time in item.find_all('div', class_='weibo_time'):
txt = time.get_text()
post_time.append(txt)
for post in item.find_all('p', class_='weibo_txt'):
txt = post.get_text()
post_txt.append(txt)
                # stop as soon as we hit a post that already exists in the saved history
                if txt in h_post_text:
                    print(txt)
                    print(' ___ exists')
                    print('End of new data.')
                    del post_link[-1]
                    del user_link[-1]
                    del post_time[-1]
                    del post_txt[-1]
                    cmd = 'STOP'
                    break
            if cmd == 'STOP':
                break  # also leave the outer per-item loop
data = {'post_text':post_txt,'post_link':post_link,'user':user_link, 'time':post_time}
frame = DataFrame(data)
frame.to_csv(outfile, encoding='utf-8')
        print(outfile_name, 'processing complete.')
outfile.close()
html_doc.close()
return cmd
def retrieve_posts(self,file_number_total):
'''(int)->a file
'''
post_text = []
for i in range(file_number_total+1):
frame_2 = pandas.read_csv('{}Temp/zhongsou_results_page_{}.csv'.format(WORKING_DIR, str(i)))
df2 = DataFrame(frame_2)
for i in df2.post_text:#the column'post_text'
post_text.append(i)
frame_1 = pandas.read_csv(OLD_MASTER_FILE)
df1 = DataFrame(frame_1)
for i in df1.post_text:
post_text.append(i)
data = {'post_text':post_text}
frame = DataFrame(data)
frame.to_csv('{}Text_data/{}_2.txt'.format(WORKING_DIR, OUTPUT_FILE_NAME), encoding = 'utf-8')#saved path
frame.to_excel('{}Text_data/{}_2.xlsx'.format(WORKING_DIR, OUTPUT_FILE_NAME), encoding = 'utf-8')#saved path
print("Data gathered.")
        # Cleanup is intentionally disabled; uncomment to clear the Temp folder
        # and drop the old master file:
        # filelist = glob.glob('{}Temp/*'.format(WORKING_DIR))
        # for f in filelist:
        #     os.remove(f)
        # os.remove(OLD_MASTER_FILE)
        print('Temp files were kept (cleanup above is disabled).')
return None
print('='*10)
print('This program will help you collect Weibo language data as generated by the 中搜 search results.\n')
print('Use this page to generate a link for your query item:\n\nhttp://t.zhongsou.com/wb?form_id=1&org=1&sel=0&so=1&v=%D6%D0%CB%D1&w=%CD%F8%D3%EF')
QUERY_LINK = input('\nPaste your query link \n> ')
OUTPUT_FILE_NAME = input('\nWhat\'s your query term? (This will be used as file name)\n> ')
resp = input('\nIs this your first time running this query? Y/N\n> ').upper()
if resp == 'Y':
print()
print('='*10)
print('Initialize scraping now.')
print('='*10)
NewScrape().scrape_main()
elif resp == 'N':
OLD_MASTER_FILE = input('\nWhere is the old txt file you want to merge later? Please paste full path. \n> ')
print()
print('='*10)
print('WARNING: FURTHER ACTIONS NEEDED AT THE END OF SCRAPING.')
print('Initialize scraping now.')
print('='*10)
ContinueScrape().scrape_main()
else:
print('Invalid command. Try again.')
|
normal
|
{
"blob_id": "ed3fbae19c88100690dd5c558c0dc6d36a4849c8",
"index": 1451,
"step-1": "<mask token>\n\n\nclass NewScrape:\n\n def scrape_main(self):\n \"\"\"\n Top-level function.\n Use links from below, scrape a page, sleep for 5s, and restart on the next link.\n \"\"\"\n for i in self.gen_links():\n index = str(self.gen_links().index(i))\n link = i\n self.get_weibo(link, index)\n time.sleep(5)\n self.retrieve_posts(OUTPUT_FILE_NAME)\n print('=' * 10)\n print('Congratulations! Your data is stored')\n return None\n <mask token>\n\n def get_weibo(self, link, index):\n \"\"\"\n Scrape a certain weibio search result page on 'zhongsou' and store it in locally.\n \"\"\"\n html_doc = open('{}Temp/weibo.txt'.format(WORKING_DIR), 'w',\n encoding='utf8')\n r = requests.get(link)\n print('accessing web data.')\n html_doc.write(r.text)\n html_doc.close()\n outfile_name = 'zhongsou_results_page_' + index + '.csv'\n outfile = open('{}Temp/'.format(WORKING_DIR) + outfile_name, 'w',\n encoding='utf8')\n html_doc = open('{}Temp/weibo.txt'.format(WORKING_DIR), 'r',\n encoding='utf8')\n soup = BeautifulSoup(html_doc)\n user_link = []\n post_txt = []\n post_link = []\n post_time = []\n weibo_items = soup.find_all('div', class_='weibo_item')\n for item in weibo_items:\n for link in item.find_all('a', target='_blank', class_='sina_weibo'\n ):\n url = link.get('href')\n post_link.append(url)\n for post in item.find_all('h3', class_='weibo_title'):\n for a in post.find_all('a'):\n url = a.get('href')\n user_link.append(url)\n for time in item.find_all('div', class_='weibo_time'):\n txt = time.get_text()\n post_time.append(txt)\n for post in item.find_all('p', class_='weibo_txt'):\n txt = post.get_text()\n post_txt.append(txt)\n data = {'post_text': post_txt, 'post_link': post_link, 'user':\n user_link, 'time': post_time}\n frame = DataFrame(data)\n frame.to_csv(outfile, encoding='utf-8')\n print(outfile_name, 'processed complete.')\n outfile.close()\n html_doc.close()\n return None\n <mask token>\n\n def retrieve_posts(self, outfile_name):\n \"\"\"(str)->a file\n \"\"\"\n post_text = []\n for i in range(50):\n frame_2 = pandas.read_csv('{}Temp/zhongsou_results_page_{}.csv'\n .format(WORKING_DIR, str(i)))\n df2 = DataFrame(frame_2)\n for i in df2.post_text:\n post_text.append(i)\n data = {'post_text': post_text}\n frame = DataFrame(data)\n frame.to_csv('{}Text_data/{}.txt'.format(WORKING_DIR, outfile_name),\n encoding='utf-8')\n frame.to_excel('{}Text_data/{}.xlsx'.format(WORKING_DIR,\n outfile_name), encoding='utf-8')\n print('Done')\n return None\n\n\nclass ContinueScrape:\n\n def scrape_main(self):\n \"\"\"\n Top-level function.\n Use links from below, scrape a page, sleep for 5s, and restart on the next link.\n \"\"\"\n for i in self.gen_links():\n index = str(self.gen_links().index(i))\n link = i\n cmd = self.get_weibo(link, index)\n if cmd == 'STOP':\n break\n else:\n time.sleep(10)\n continue\n print('=' * 10)\n print('Scrape is now complete. Help me to organize them.')\n print(\n 'View your temp folder, what is the biggest number of the files? \\n'\n )\n fn = int(input())\n self.retrieve_posts(fn)\n print('=' * 10)\n print('Congratulations! 
Your data is stored')\n return\n\n def gen_links(self):\n links = []\n for i in range(1, 51):\n i = str(i)\n links.append('{}&b={}'.format(QUERY_LINK, i))\n return links\n\n def get_weibo(self, link, index):\n \"\"\"\n Scrape a certain weibio search result page on 'zhongsou' and store it in locally.\n \"\"\"\n html_doc = open('{}Temp/weibo.txt'.format(WORKING_DIR), 'w',\n encoding='utf8')\n r = requests.get(link)\n print('Accessing web data.')\n html_doc.write(r.text)\n html_doc.close()\n h_post_text = []\n h_frame = pandas.read_csv(OLD_MASTER_FILE)\n h_df = DataFrame(h_frame)\n for i in h_df.post_text:\n h_post_text.append(i)\n outfile_name = 'zhongsou_results_page_' + index + '.csv'\n outfile = open('{}Temp/'.format(WORKING_DIR) + outfile_name, 'w',\n encoding='utf8')\n html_doc = open('{}Temp/weibo.txt'.format(WORKING_DIR), 'r',\n encoding='utf8')\n soup = BeautifulSoup(html_doc)\n user_link = []\n post_txt = []\n post_link = []\n post_time = []\n cmd = None\n weibo_items = soup.find_all('div', class_='weibo_item')\n for item in weibo_items:\n for link in item.find_all('a', target='_blank', class_='sina_weibo'\n ):\n url = link.get('href')\n post_link.append(url)\n for post in item.find_all('h3', class_='weibo_title'):\n for a in post.find_all('a'):\n url = a.get('href')\n user_link.append(url)\n for time in item.find_all('div', class_='weibo_time'):\n txt = time.get_text()\n post_time.append(txt)\n for post in item.find_all('p', class_='weibo_txt'):\n txt = post.get_text()\n post_txt.append(txt)\n if txt == h_post_text[0]:\n print(txt)\n print(' ___ exists')\n print('End of new data.')\n del post_link[-1]\n del user_link[-1]\n del post_time[-1]\n del post_txt[-1]\n cmd = 'STOP'\n break\n data = {'post_text': post_txt, 'post_link': post_link, 'user':\n user_link, 'time': post_time}\n frame = DataFrame(data)\n frame.to_csv(outfile, encoding='utf-8')\n print(outfile_name, 'processed complete.')\n outfile.close()\n html_doc.close()\n return cmd\n\n def retrieve_posts(self, file_number_total):\n \"\"\"(int)->a file\n \"\"\"\n post_text = []\n for i in range(file_number_total + 1):\n frame_2 = pandas.read_csv('{}Temp/zhongsou_results_page_{}.csv'\n .format(WORKING_DIR, str(i)))\n df2 = DataFrame(frame_2)\n for i in df2.post_text:\n post_text.append(i)\n frame_1 = pandas.read_csv(OLD_MASTER_FILE)\n df1 = DataFrame(frame_1)\n for i in df1.post_text:\n post_text.append(i)\n data = {'post_text': post_text}\n frame = DataFrame(data)\n frame.to_csv('{}Text_data/{}_2.txt'.format(WORKING_DIR,\n OUTPUT_FILE_NAME), encoding='utf-8')\n frame.to_excel('{}Text_data/{}_2.xlsx'.format(WORKING_DIR,\n OUTPUT_FILE_NAME), encoding='utf-8')\n print('Data gathered.')\n print('Temp files removed')\n return None\n\n\n<mask token>\n",
"step-2": "<mask token>\nglobal QUERY_LINK\n<mask token>\nglobal OUTPUT_FILE_NAME\n<mask token>\nglobal WORKING_DIR\n<mask token>\nglobal OLD_MASTER_FILE\n<mask token>\n\n\nclass NewScrape:\n\n def scrape_main(self):\n \"\"\"\n Top-level function.\n Use links from below, scrape a page, sleep for 5s, and restart on the next link.\n \"\"\"\n for i in self.gen_links():\n index = str(self.gen_links().index(i))\n link = i\n self.get_weibo(link, index)\n time.sleep(5)\n self.retrieve_posts(OUTPUT_FILE_NAME)\n print('=' * 10)\n print('Congratulations! Your data is stored')\n return None\n\n def gen_links(self):\n links = []\n for i in range(1, 51):\n i = str(i)\n links.append('{}&b={}'.format(QUERY_LINK, i))\n return links\n\n def get_weibo(self, link, index):\n \"\"\"\n Scrape a certain weibio search result page on 'zhongsou' and store it in locally.\n \"\"\"\n html_doc = open('{}Temp/weibo.txt'.format(WORKING_DIR), 'w',\n encoding='utf8')\n r = requests.get(link)\n print('accessing web data.')\n html_doc.write(r.text)\n html_doc.close()\n outfile_name = 'zhongsou_results_page_' + index + '.csv'\n outfile = open('{}Temp/'.format(WORKING_DIR) + outfile_name, 'w',\n encoding='utf8')\n html_doc = open('{}Temp/weibo.txt'.format(WORKING_DIR), 'r',\n encoding='utf8')\n soup = BeautifulSoup(html_doc)\n user_link = []\n post_txt = []\n post_link = []\n post_time = []\n weibo_items = soup.find_all('div', class_='weibo_item')\n for item in weibo_items:\n for link in item.find_all('a', target='_blank', class_='sina_weibo'\n ):\n url = link.get('href')\n post_link.append(url)\n for post in item.find_all('h3', class_='weibo_title'):\n for a in post.find_all('a'):\n url = a.get('href')\n user_link.append(url)\n for time in item.find_all('div', class_='weibo_time'):\n txt = time.get_text()\n post_time.append(txt)\n for post in item.find_all('p', class_='weibo_txt'):\n txt = post.get_text()\n post_txt.append(txt)\n data = {'post_text': post_txt, 'post_link': post_link, 'user':\n user_link, 'time': post_time}\n frame = DataFrame(data)\n frame.to_csv(outfile, encoding='utf-8')\n print(outfile_name, 'processed complete.')\n outfile.close()\n html_doc.close()\n return None\n\n def clean_temp(self):\n filelist = glob.glob('{}Temp/*'.format(WORKING_DIR))\n for f in filelist:\n os.remove(f)\n print('Temp files removed')\n return None\n\n def retrieve_posts(self, outfile_name):\n \"\"\"(str)->a file\n \"\"\"\n post_text = []\n for i in range(50):\n frame_2 = pandas.read_csv('{}Temp/zhongsou_results_page_{}.csv'\n .format(WORKING_DIR, str(i)))\n df2 = DataFrame(frame_2)\n for i in df2.post_text:\n post_text.append(i)\n data = {'post_text': post_text}\n frame = DataFrame(data)\n frame.to_csv('{}Text_data/{}.txt'.format(WORKING_DIR, outfile_name),\n encoding='utf-8')\n frame.to_excel('{}Text_data/{}.xlsx'.format(WORKING_DIR,\n outfile_name), encoding='utf-8')\n print('Done')\n return None\n\n\nclass ContinueScrape:\n\n def scrape_main(self):\n \"\"\"\n Top-level function.\n Use links from below, scrape a page, sleep for 5s, and restart on the next link.\n \"\"\"\n for i in self.gen_links():\n index = str(self.gen_links().index(i))\n link = i\n cmd = self.get_weibo(link, index)\n if cmd == 'STOP':\n break\n else:\n time.sleep(10)\n continue\n print('=' * 10)\n print('Scrape is now complete. Help me to organize them.')\n print(\n 'View your temp folder, what is the biggest number of the files? \\n'\n )\n fn = int(input())\n self.retrieve_posts(fn)\n print('=' * 10)\n print('Congratulations! 
Your data is stored')\n return\n\n def gen_links(self):\n links = []\n for i in range(1, 51):\n i = str(i)\n links.append('{}&b={}'.format(QUERY_LINK, i))\n return links\n\n def get_weibo(self, link, index):\n \"\"\"\n Scrape a certain weibio search result page on 'zhongsou' and store it in locally.\n \"\"\"\n html_doc = open('{}Temp/weibo.txt'.format(WORKING_DIR), 'w',\n encoding='utf8')\n r = requests.get(link)\n print('Accessing web data.')\n html_doc.write(r.text)\n html_doc.close()\n h_post_text = []\n h_frame = pandas.read_csv(OLD_MASTER_FILE)\n h_df = DataFrame(h_frame)\n for i in h_df.post_text:\n h_post_text.append(i)\n outfile_name = 'zhongsou_results_page_' + index + '.csv'\n outfile = open('{}Temp/'.format(WORKING_DIR) + outfile_name, 'w',\n encoding='utf8')\n html_doc = open('{}Temp/weibo.txt'.format(WORKING_DIR), 'r',\n encoding='utf8')\n soup = BeautifulSoup(html_doc)\n user_link = []\n post_txt = []\n post_link = []\n post_time = []\n cmd = None\n weibo_items = soup.find_all('div', class_='weibo_item')\n for item in weibo_items:\n for link in item.find_all('a', target='_blank', class_='sina_weibo'\n ):\n url = link.get('href')\n post_link.append(url)\n for post in item.find_all('h3', class_='weibo_title'):\n for a in post.find_all('a'):\n url = a.get('href')\n user_link.append(url)\n for time in item.find_all('div', class_='weibo_time'):\n txt = time.get_text()\n post_time.append(txt)\n for post in item.find_all('p', class_='weibo_txt'):\n txt = post.get_text()\n post_txt.append(txt)\n if txt == h_post_text[0]:\n print(txt)\n print(' ___ exists')\n print('End of new data.')\n del post_link[-1]\n del user_link[-1]\n del post_time[-1]\n del post_txt[-1]\n cmd = 'STOP'\n break\n data = {'post_text': post_txt, 'post_link': post_link, 'user':\n user_link, 'time': post_time}\n frame = DataFrame(data)\n frame.to_csv(outfile, encoding='utf-8')\n print(outfile_name, 'processed complete.')\n outfile.close()\n html_doc.close()\n return cmd\n\n def retrieve_posts(self, file_number_total):\n \"\"\"(int)->a file\n \"\"\"\n post_text = []\n for i in range(file_number_total + 1):\n frame_2 = pandas.read_csv('{}Temp/zhongsou_results_page_{}.csv'\n .format(WORKING_DIR, str(i)))\n df2 = DataFrame(frame_2)\n for i in df2.post_text:\n post_text.append(i)\n frame_1 = pandas.read_csv(OLD_MASTER_FILE)\n df1 = DataFrame(frame_1)\n for i in df1.post_text:\n post_text.append(i)\n data = {'post_text': post_text}\n frame = DataFrame(data)\n frame.to_csv('{}Text_data/{}_2.txt'.format(WORKING_DIR,\n OUTPUT_FILE_NAME), encoding='utf-8')\n frame.to_excel('{}Text_data/{}_2.xlsx'.format(WORKING_DIR,\n OUTPUT_FILE_NAME), encoding='utf-8')\n print('Data gathered.')\n print('Temp files removed')\n return None\n\n\nprint('=' * 10)\nprint(\n \"\"\"This program will help you collect Weibo language data as generated by the 中搜 search results.\n\"\"\"\n )\nprint(\n \"\"\"Use this page to generate a link for your query item:\n\nhttp://t.zhongsou.com/wb?form_id=1&org=1&sel=0&so=1&v=%D6%D0%CB%D1&w=%CD%F8%D3%EF\"\"\"\n )\n<mask token>\nif resp == 'Y':\n print()\n print('=' * 10)\n print('Initialize scraping now.')\n print('=' * 10)\n NewScrape().scrape_main()\nelif resp == 'N':\n OLD_MASTER_FILE = input(\n \"\"\"\nWhere is the old txt file you want to merge later? Please paste full path. \n> \"\"\"\n )\n print()\n print('=' * 10)\n print('WARNING: FURTHER ACTIONS NEEDED AT THE END OF SCRAPING.')\n print('Initialize scraping now.')\n print('=' * 10)\n ContinueScrape().scrape_main()\nelse:\n print('Invalid command. 
Try again.')\n",
"step-3": "<mask token>\nglobal QUERY_LINK\nQUERY_LINK = (\n 'http://t.zhongsou.com/wb?form_id=1&org=1&sel=0&so=1&v=%D6%D0%CB%D1&w=%B1%C6'\n )\nglobal OUTPUT_FILE_NAME\nOUTPUT_FILE_NAME = 'scrape'\nglobal WORKING_DIR\nWORKING_DIR = '~/Corpora/'\nglobal OLD_MASTER_FILE\nOLD_MASTER_FILE = '{}Text_data/'.format(WORKING_DIR) + 'yeshizuile.txt'\n\n\nclass NewScrape:\n\n def scrape_main(self):\n \"\"\"\n Top-level function.\n Use links from below, scrape a page, sleep for 5s, and restart on the next link.\n \"\"\"\n for i in self.gen_links():\n index = str(self.gen_links().index(i))\n link = i\n self.get_weibo(link, index)\n time.sleep(5)\n self.retrieve_posts(OUTPUT_FILE_NAME)\n print('=' * 10)\n print('Congratulations! Your data is stored')\n return None\n\n def gen_links(self):\n links = []\n for i in range(1, 51):\n i = str(i)\n links.append('{}&b={}'.format(QUERY_LINK, i))\n return links\n\n def get_weibo(self, link, index):\n \"\"\"\n Scrape a certain weibio search result page on 'zhongsou' and store it in locally.\n \"\"\"\n html_doc = open('{}Temp/weibo.txt'.format(WORKING_DIR), 'w',\n encoding='utf8')\n r = requests.get(link)\n print('accessing web data.')\n html_doc.write(r.text)\n html_doc.close()\n outfile_name = 'zhongsou_results_page_' + index + '.csv'\n outfile = open('{}Temp/'.format(WORKING_DIR) + outfile_name, 'w',\n encoding='utf8')\n html_doc = open('{}Temp/weibo.txt'.format(WORKING_DIR), 'r',\n encoding='utf8')\n soup = BeautifulSoup(html_doc)\n user_link = []\n post_txt = []\n post_link = []\n post_time = []\n weibo_items = soup.find_all('div', class_='weibo_item')\n for item in weibo_items:\n for link in item.find_all('a', target='_blank', class_='sina_weibo'\n ):\n url = link.get('href')\n post_link.append(url)\n for post in item.find_all('h3', class_='weibo_title'):\n for a in post.find_all('a'):\n url = a.get('href')\n user_link.append(url)\n for time in item.find_all('div', class_='weibo_time'):\n txt = time.get_text()\n post_time.append(txt)\n for post in item.find_all('p', class_='weibo_txt'):\n txt = post.get_text()\n post_txt.append(txt)\n data = {'post_text': post_txt, 'post_link': post_link, 'user':\n user_link, 'time': post_time}\n frame = DataFrame(data)\n frame.to_csv(outfile, encoding='utf-8')\n print(outfile_name, 'processed complete.')\n outfile.close()\n html_doc.close()\n return None\n\n def clean_temp(self):\n filelist = glob.glob('{}Temp/*'.format(WORKING_DIR))\n for f in filelist:\n os.remove(f)\n print('Temp files removed')\n return None\n\n def retrieve_posts(self, outfile_name):\n \"\"\"(str)->a file\n \"\"\"\n post_text = []\n for i in range(50):\n frame_2 = pandas.read_csv('{}Temp/zhongsou_results_page_{}.csv'\n .format(WORKING_DIR, str(i)))\n df2 = DataFrame(frame_2)\n for i in df2.post_text:\n post_text.append(i)\n data = {'post_text': post_text}\n frame = DataFrame(data)\n frame.to_csv('{}Text_data/{}.txt'.format(WORKING_DIR, outfile_name),\n encoding='utf-8')\n frame.to_excel('{}Text_data/{}.xlsx'.format(WORKING_DIR,\n outfile_name), encoding='utf-8')\n print('Done')\n return None\n\n\nclass ContinueScrape:\n\n def scrape_main(self):\n \"\"\"\n Top-level function.\n Use links from below, scrape a page, sleep for 5s, and restart on the next link.\n \"\"\"\n for i in self.gen_links():\n index = str(self.gen_links().index(i))\n link = i\n cmd = self.get_weibo(link, index)\n if cmd == 'STOP':\n break\n else:\n time.sleep(10)\n continue\n print('=' * 10)\n print('Scrape is now complete. 
Help me to organize them.')\n print(\n 'View your temp folder, what is the biggest number of the files? \\n'\n )\n fn = int(input())\n self.retrieve_posts(fn)\n print('=' * 10)\n print('Congratulations! Your data is stored')\n return\n\n def gen_links(self):\n links = []\n for i in range(1, 51):\n i = str(i)\n links.append('{}&b={}'.format(QUERY_LINK, i))\n return links\n\n def get_weibo(self, link, index):\n \"\"\"\n Scrape a certain weibio search result page on 'zhongsou' and store it in locally.\n \"\"\"\n html_doc = open('{}Temp/weibo.txt'.format(WORKING_DIR), 'w',\n encoding='utf8')\n r = requests.get(link)\n print('Accessing web data.')\n html_doc.write(r.text)\n html_doc.close()\n h_post_text = []\n h_frame = pandas.read_csv(OLD_MASTER_FILE)\n h_df = DataFrame(h_frame)\n for i in h_df.post_text:\n h_post_text.append(i)\n outfile_name = 'zhongsou_results_page_' + index + '.csv'\n outfile = open('{}Temp/'.format(WORKING_DIR) + outfile_name, 'w',\n encoding='utf8')\n html_doc = open('{}Temp/weibo.txt'.format(WORKING_DIR), 'r',\n encoding='utf8')\n soup = BeautifulSoup(html_doc)\n user_link = []\n post_txt = []\n post_link = []\n post_time = []\n cmd = None\n weibo_items = soup.find_all('div', class_='weibo_item')\n for item in weibo_items:\n for link in item.find_all('a', target='_blank', class_='sina_weibo'\n ):\n url = link.get('href')\n post_link.append(url)\n for post in item.find_all('h3', class_='weibo_title'):\n for a in post.find_all('a'):\n url = a.get('href')\n user_link.append(url)\n for time in item.find_all('div', class_='weibo_time'):\n txt = time.get_text()\n post_time.append(txt)\n for post in item.find_all('p', class_='weibo_txt'):\n txt = post.get_text()\n post_txt.append(txt)\n if txt == h_post_text[0]:\n print(txt)\n print(' ___ exists')\n print('End of new data.')\n del post_link[-1]\n del user_link[-1]\n del post_time[-1]\n del post_txt[-1]\n cmd = 'STOP'\n break\n data = {'post_text': post_txt, 'post_link': post_link, 'user':\n user_link, 'time': post_time}\n frame = DataFrame(data)\n frame.to_csv(outfile, encoding='utf-8')\n print(outfile_name, 'processed complete.')\n outfile.close()\n html_doc.close()\n return cmd\n\n def retrieve_posts(self, file_number_total):\n \"\"\"(int)->a file\n \"\"\"\n post_text = []\n for i in range(file_number_total + 1):\n frame_2 = pandas.read_csv('{}Temp/zhongsou_results_page_{}.csv'\n .format(WORKING_DIR, str(i)))\n df2 = DataFrame(frame_2)\n for i in df2.post_text:\n post_text.append(i)\n frame_1 = pandas.read_csv(OLD_MASTER_FILE)\n df1 = DataFrame(frame_1)\n for i in df1.post_text:\n post_text.append(i)\n data = {'post_text': post_text}\n frame = DataFrame(data)\n frame.to_csv('{}Text_data/{}_2.txt'.format(WORKING_DIR,\n OUTPUT_FILE_NAME), encoding='utf-8')\n frame.to_excel('{}Text_data/{}_2.xlsx'.format(WORKING_DIR,\n OUTPUT_FILE_NAME), encoding='utf-8')\n print('Data gathered.')\n print('Temp files removed')\n return None\n\n\nprint('=' * 10)\nprint(\n \"\"\"This program will help you collect Weibo language data as generated by the 中搜 search results.\n\"\"\"\n )\nprint(\n \"\"\"Use this page to generate a link for your query item:\n\nhttp://t.zhongsou.com/wb?form_id=1&org=1&sel=0&so=1&v=%D6%D0%CB%D1&w=%CD%F8%D3%EF\"\"\"\n )\nQUERY_LINK = input(\"\"\"\nPaste your query link \n> \"\"\")\nOUTPUT_FILE_NAME = input(\n \"\"\"\nWhat's your query term? (This will be used as file name)\n> \"\"\")\nresp = input(\"\"\"\nIs this your first time running this query? 
Y/N\n> \"\"\").upper()\nif resp == 'Y':\n print()\n print('=' * 10)\n print('Initialize scraping now.')\n print('=' * 10)\n NewScrape().scrape_main()\nelif resp == 'N':\n OLD_MASTER_FILE = input(\n \"\"\"\nWhere is the old txt file you want to merge later? Please paste full path. \n> \"\"\"\n )\n print()\n print('=' * 10)\n print('WARNING: FURTHER ACTIONS NEEDED AT THE END OF SCRAPING.')\n print('Initialize scraping now.')\n print('=' * 10)\n ContinueScrape().scrape_main()\nelse:\n print('Invalid command. Try again.')\n",
"step-4": "<mask token>\nimport requests\nfrom bs4 import BeautifulSoup\nfrom pandas import DataFrame\nimport time\nimport pandas\nimport glob, os\nglobal QUERY_LINK\nQUERY_LINK = (\n 'http://t.zhongsou.com/wb?form_id=1&org=1&sel=0&so=1&v=%D6%D0%CB%D1&w=%B1%C6'\n )\nglobal OUTPUT_FILE_NAME\nOUTPUT_FILE_NAME = 'scrape'\nglobal WORKING_DIR\nWORKING_DIR = '~/Corpora/'\nglobal OLD_MASTER_FILE\nOLD_MASTER_FILE = '{}Text_data/'.format(WORKING_DIR) + 'yeshizuile.txt'\n\n\nclass NewScrape:\n\n def scrape_main(self):\n \"\"\"\n Top-level function.\n Use links from below, scrape a page, sleep for 5s, and restart on the next link.\n \"\"\"\n for i in self.gen_links():\n index = str(self.gen_links().index(i))\n link = i\n self.get_weibo(link, index)\n time.sleep(5)\n self.retrieve_posts(OUTPUT_FILE_NAME)\n print('=' * 10)\n print('Congratulations! Your data is stored')\n return None\n\n def gen_links(self):\n links = []\n for i in range(1, 51):\n i = str(i)\n links.append('{}&b={}'.format(QUERY_LINK, i))\n return links\n\n def get_weibo(self, link, index):\n \"\"\"\n Scrape a certain weibio search result page on 'zhongsou' and store it in locally.\n \"\"\"\n html_doc = open('{}Temp/weibo.txt'.format(WORKING_DIR), 'w',\n encoding='utf8')\n r = requests.get(link)\n print('accessing web data.')\n html_doc.write(r.text)\n html_doc.close()\n outfile_name = 'zhongsou_results_page_' + index + '.csv'\n outfile = open('{}Temp/'.format(WORKING_DIR) + outfile_name, 'w',\n encoding='utf8')\n html_doc = open('{}Temp/weibo.txt'.format(WORKING_DIR), 'r',\n encoding='utf8')\n soup = BeautifulSoup(html_doc)\n user_link = []\n post_txt = []\n post_link = []\n post_time = []\n weibo_items = soup.find_all('div', class_='weibo_item')\n for item in weibo_items:\n for link in item.find_all('a', target='_blank', class_='sina_weibo'\n ):\n url = link.get('href')\n post_link.append(url)\n for post in item.find_all('h3', class_='weibo_title'):\n for a in post.find_all('a'):\n url = a.get('href')\n user_link.append(url)\n for time in item.find_all('div', class_='weibo_time'):\n txt = time.get_text()\n post_time.append(txt)\n for post in item.find_all('p', class_='weibo_txt'):\n txt = post.get_text()\n post_txt.append(txt)\n data = {'post_text': post_txt, 'post_link': post_link, 'user':\n user_link, 'time': post_time}\n frame = DataFrame(data)\n frame.to_csv(outfile, encoding='utf-8')\n print(outfile_name, 'processed complete.')\n outfile.close()\n html_doc.close()\n return None\n\n def clean_temp(self):\n filelist = glob.glob('{}Temp/*'.format(WORKING_DIR))\n for f in filelist:\n os.remove(f)\n print('Temp files removed')\n return None\n\n def retrieve_posts(self, outfile_name):\n \"\"\"(str)->a file\n \"\"\"\n post_text = []\n for i in range(50):\n frame_2 = pandas.read_csv('{}Temp/zhongsou_results_page_{}.csv'\n .format(WORKING_DIR, str(i)))\n df2 = DataFrame(frame_2)\n for i in df2.post_text:\n post_text.append(i)\n data = {'post_text': post_text}\n frame = DataFrame(data)\n frame.to_csv('{}Text_data/{}.txt'.format(WORKING_DIR, outfile_name),\n encoding='utf-8')\n frame.to_excel('{}Text_data/{}.xlsx'.format(WORKING_DIR,\n outfile_name), encoding='utf-8')\n print('Done')\n return None\n\n\nclass ContinueScrape:\n\n def scrape_main(self):\n \"\"\"\n Top-level function.\n Use links from below, scrape a page, sleep for 5s, and restart on the next link.\n \"\"\"\n for i in self.gen_links():\n index = str(self.gen_links().index(i))\n link = i\n cmd = self.get_weibo(link, index)\n if cmd == 'STOP':\n break\n else:\n time.sleep(10)\n 
continue\n print('=' * 10)\n print('Scrape is now complete. Help me to organize them.')\n print(\n 'View your temp folder, what is the biggest number of the files? \\n'\n )\n fn = int(input())\n self.retrieve_posts(fn)\n print('=' * 10)\n print('Congratulations! Your data is stored')\n return\n\n def gen_links(self):\n links = []\n for i in range(1, 51):\n i = str(i)\n links.append('{}&b={}'.format(QUERY_LINK, i))\n return links\n\n def get_weibo(self, link, index):\n \"\"\"\n Scrape a certain weibio search result page on 'zhongsou' and store it in locally.\n \"\"\"\n html_doc = open('{}Temp/weibo.txt'.format(WORKING_DIR), 'w',\n encoding='utf8')\n r = requests.get(link)\n print('Accessing web data.')\n html_doc.write(r.text)\n html_doc.close()\n h_post_text = []\n h_frame = pandas.read_csv(OLD_MASTER_FILE)\n h_df = DataFrame(h_frame)\n for i in h_df.post_text:\n h_post_text.append(i)\n outfile_name = 'zhongsou_results_page_' + index + '.csv'\n outfile = open('{}Temp/'.format(WORKING_DIR) + outfile_name, 'w',\n encoding='utf8')\n html_doc = open('{}Temp/weibo.txt'.format(WORKING_DIR), 'r',\n encoding='utf8')\n soup = BeautifulSoup(html_doc)\n user_link = []\n post_txt = []\n post_link = []\n post_time = []\n cmd = None\n weibo_items = soup.find_all('div', class_='weibo_item')\n for item in weibo_items:\n for link in item.find_all('a', target='_blank', class_='sina_weibo'\n ):\n url = link.get('href')\n post_link.append(url)\n for post in item.find_all('h3', class_='weibo_title'):\n for a in post.find_all('a'):\n url = a.get('href')\n user_link.append(url)\n for time in item.find_all('div', class_='weibo_time'):\n txt = time.get_text()\n post_time.append(txt)\n for post in item.find_all('p', class_='weibo_txt'):\n txt = post.get_text()\n post_txt.append(txt)\n if txt == h_post_text[0]:\n print(txt)\n print(' ___ exists')\n print('End of new data.')\n del post_link[-1]\n del user_link[-1]\n del post_time[-1]\n del post_txt[-1]\n cmd = 'STOP'\n break\n data = {'post_text': post_txt, 'post_link': post_link, 'user':\n user_link, 'time': post_time}\n frame = DataFrame(data)\n frame.to_csv(outfile, encoding='utf-8')\n print(outfile_name, 'processed complete.')\n outfile.close()\n html_doc.close()\n return cmd\n\n def retrieve_posts(self, file_number_total):\n \"\"\"(int)->a file\n \"\"\"\n post_text = []\n for i in range(file_number_total + 1):\n frame_2 = pandas.read_csv('{}Temp/zhongsou_results_page_{}.csv'\n .format(WORKING_DIR, str(i)))\n df2 = DataFrame(frame_2)\n for i in df2.post_text:\n post_text.append(i)\n frame_1 = pandas.read_csv(OLD_MASTER_FILE)\n df1 = DataFrame(frame_1)\n for i in df1.post_text:\n post_text.append(i)\n data = {'post_text': post_text}\n frame = DataFrame(data)\n frame.to_csv('{}Text_data/{}_2.txt'.format(WORKING_DIR,\n OUTPUT_FILE_NAME), encoding='utf-8')\n frame.to_excel('{}Text_data/{}_2.xlsx'.format(WORKING_DIR,\n OUTPUT_FILE_NAME), encoding='utf-8')\n print('Data gathered.')\n print('Temp files removed')\n return None\n\n\nprint('=' * 10)\nprint(\n \"\"\"This program will help you collect Weibo language data as generated by the 中搜 search results.\n\"\"\"\n )\nprint(\n \"\"\"Use this page to generate a link for your query item:\n\nhttp://t.zhongsou.com/wb?form_id=1&org=1&sel=0&so=1&v=%D6%D0%CB%D1&w=%CD%F8%D3%EF\"\"\"\n )\nQUERY_LINK = input(\"\"\"\nPaste your query link \n> \"\"\")\nOUTPUT_FILE_NAME = input(\n \"\"\"\nWhat's your query term? (This will be used as file name)\n> \"\"\")\nresp = input(\"\"\"\nIs this your first time running this query? 
Y/N\n> \"\"\").upper()\nif resp == 'Y':\n print()\n print('=' * 10)\n print('Initialize scraping now.')\n print('=' * 10)\n NewScrape().scrape_main()\nelif resp == 'N':\n OLD_MASTER_FILE = input(\n \"\"\"\nWhere is the old txt file you want to merge later? Please paste full path. \n> \"\"\"\n )\n print()\n print('=' * 10)\n print('WARNING: FURTHER ACTIONS NEEDED AT THE END OF SCRAPING.')\n print('Initialize scraping now.')\n print('=' * 10)\n ContinueScrape().scrape_main()\nelse:\n print('Invalid command. Try again.')\n",
"step-5": "'''\nAuthor: Iris Peng. Date: Feb 21, 2016\nUsage: Scrape Weibo posts from Zhongsou for the first time for a query\n\nIn the terminal, type\n$ python3 scrape_weibo.py\n\nand follow the prompts\n\n'''\nimport requests\nfrom bs4 import BeautifulSoup\nfrom pandas import DataFrame\nimport time\nimport pandas\nimport glob, os\n\n\nglobal QUERY_LINK\nQUERY_LINK = 'http://t.zhongsou.com/wb?form_id=1&org=1&sel=0&so=1&v=%D6%D0%CB%D1&w=%B1%C6'#link\n\nglobal OUTPUT_FILE_NAME\nOUTPUT_FILE_NAME = 'scrape' # Name of your output file\n\nglobal WORKING_DIR\nWORKING_DIR = '~/Corpora/'\n\nglobal OLD_MASTER_FILE\nOLD_MASTER_FILE = '{}Text_data/'.format(WORKING_DIR) + 'yeshizuile.txt' #Feed the new output\n \n\nclass NewScrape():\n \n def scrape_main(self):\n '''\n Top-level function.\n Use links from below, scrape a page, sleep for 5s, and restart on the next link.\n '''\n for i in self.gen_links():\n index = str(self.gen_links().index(i))\n link = i\n self.get_weibo(link,index)\n time.sleep(5)\n \n self.retrieve_posts(OUTPUT_FILE_NAME)\n #self.clean_temp()\n print('='*10)\n print('Congratulations! Your data is stored')\n return None\n\n def gen_links(self):\n links = []\n for i in range(1,51):\n i = str(i) \n links.append('{}&b={}'.format(QUERY_LINK,i))\n return links\n\n def get_weibo(self,link,index):\n \n '''\n Scrape a certain weibio search result page on 'zhongsou' and store it in locally.\n '''\n\n html_doc = open('{}Temp/weibo.txt'.format(WORKING_DIR),'w', encoding = 'utf8')\n \n r = requests.get(link)\n print ('accessing web data.')\n html_doc.write(r.text)\n html_doc.close()\n \n # Write into a csv file\n outfile_name = 'zhongsou_results_page_' + index + '.csv'\n outfile = open('{}Temp/'.format(WORKING_DIR) + outfile_name,'w', encoding = 'utf8') #change path\n \n # Turn the text into a BeautifulSoup object and strip down the text.\n html_doc = open('{}Temp/weibo.txt'.format(WORKING_DIR),'r', encoding = 'utf8')#change path\n soup = BeautifulSoup(html_doc)\n\n user_link = []\n post_txt = []\n post_link = []\n post_time = []\n \n weibo_items = soup.find_all('div', class_='weibo_item')\n \n for item in weibo_items: \n \n for link in item.find_all('a', target='_blank', class_='sina_weibo'):\n url = link.get('href')\n post_link.append(url)\n\n for post in item.find_all('h3', class_='weibo_title'):\n for a in post.find_all('a'):\n url = a.get('href')\n user_link.append(url)\n\n for time in item.find_all('div', class_='weibo_time'):\n txt = time.get_text()\n post_time.append(txt)\n\n for post in item.find_all('p', class_='weibo_txt'):\n txt = post.get_text()\n post_txt.append(txt)\n \n data = {'post_text':post_txt,'post_link':post_link,'user':user_link, 'time':post_time}\n frame = DataFrame(data)\n frame.to_csv(outfile, encoding='utf-8')\n print (outfile_name,'processed complete.')\n \n outfile.close()\n html_doc.close()\n return None\n\n def clean_temp(self):\n filelist = glob.glob('{}Temp/*'.format(WORKING_DIR))\n for f in filelist:\n os.remove(f)\n print('Temp files removed')\n return None\n\n \n def retrieve_posts(self,outfile_name):\n '''(str)->a file\n ''' \n post_text = []\n \n for i in range(50):\n frame_2 = pandas.read_csv('{}Temp/zhongsou_results_page_{}.csv'.format(WORKING_DIR, str(i)))#change directory\n df2 = DataFrame(frame_2)\n for i in df2.post_text:#the column'post_text'\n post_text.append(i)\n\n data = {'post_text':post_text}\n frame = DataFrame(data)\n frame.to_csv('{}Text_data/{}.txt'.format(WORKING_DIR, outfile_name), encoding = 'utf-8')#change saved path\n 
frame.to_excel('{}Text_data/{}.xlsx'.format(WORKING_DIR, outfile_name), encoding = 'utf-8')#change saved path\n print(\"Done\")\n return None \n\nclass ContinueScrape():\n \n def scrape_main(self):\n '''\n Top-level function.\n Use links from below, scrape a page, sleep for 5s, and restart on the next link.\n '''\n for i in self.gen_links():\n index = str(self.gen_links().index(i))\n link = i\n cmd = self.get_weibo(link,index)\n if cmd == 'STOP':\n break\n else:\n time.sleep(10)\n continue\n \n print('='*10)\n print('Scrape is now complete. Help me to organize them.')\n print ('View your temp folder, what is the biggest number of the files? \\n')\n fn = int(input())\n self.retrieve_posts(fn)\n print('='*10)\n print('Congratulations! Your data is stored')\n return \n\n def gen_links(self):\n links = []\n for i in range(1,51):\n i = str(i) \n links.append('{}&b={}'.format(QUERY_LINK,i))\n return links\n\n def get_weibo(self,link,index):\n \n '''\n Scrape a certain weibio search result page on 'zhongsou' and store it in locally.\n '''\n\n html_doc = open('{}Temp/weibo.txt'.format(WORKING_DIR), 'w', encoding='utf8')\n\n r = requests.get(link)\n print ('Accessing web data.')\n html_doc.write(r.text)\n html_doc.close()\n\n # Retrieve scrape history\n h_post_text = [] \n h_frame = pandas.read_csv(OLD_MASTER_FILE) \n h_df = DataFrame(h_frame)\n for i in h_df.post_text:\n h_post_text.append(i)\n \n # Write into a csv file\n outfile_name = 'zhongsou_results_page_' + index + '.csv'\n outfile = open('{}Temp/'.format(WORKING_DIR)+ outfile_name,'w', encoding = 'utf8') #change path\n \n # Turn the text into a BeautifulSoup object and strip down the text.\n html_doc = open('{}Temp/weibo.txt'.format(WORKING_DIR), 'r', encoding='utf8')\n soup = BeautifulSoup(html_doc)\n\n user_link = []\n post_txt = []\n post_link = []\n post_time = []\n cmd = None\n \n weibo_items = soup.find_all('div', class_='weibo_item')\n \n for item in weibo_items: \n \n for link in item.find_all('a', target='_blank', class_='sina_weibo'):\n url = link.get('href')\n post_link.append(url)\n\n for post in item.find_all('h3', class_='weibo_title'):\n for a in post.find_all('a'):\n url = a.get('href')\n user_link.append(url)\n\n for time in item.find_all('div', class_='weibo_time'):\n txt = time.get_text()\n post_time.append(txt)\n\n for post in item.find_all('p', class_='weibo_txt'):\n txt = post.get_text()\n post_txt.append(txt)\n\n #has bugs!\n #if txt in h_post_text:\n if txt == h_post_text[0]: \n print (txt)\n print(' ___ exists')\n print ('End of new data.') #Doesn't affect main function, break should be in main function\n del post_link[-1]\n del user_link[-1]\n del post_time[-1]\n del post_txt[-1]\n cmd = 'STOP'\n break\n \n data = {'post_text':post_txt,'post_link':post_link,'user':user_link, 'time':post_time}\n frame = DataFrame(data)\n frame.to_csv(outfile, encoding='utf-8')\n print (outfile_name,'processed complete.')\n \n outfile.close()\n html_doc.close()\n return cmd\n\n def retrieve_posts(self,file_number_total):\n '''(int)->a file\n '''\n post_text = []\n \n \n for i in range(file_number_total+1):\n frame_2 = pandas.read_csv('{}Temp/zhongsou_results_page_{}.csv'.format(WORKING_DIR, str(i)))\n df2 = DataFrame(frame_2)\n for i in df2.post_text:#the column'post_text'\n post_text.append(i)\n\n frame_1 = pandas.read_csv(OLD_MASTER_FILE)\n df1 = DataFrame(frame_1)\n for i in df1.post_text:\n post_text.append(i)\n\n data = {'post_text':post_text}\n frame = DataFrame(data)\n frame.to_csv('{}Text_data/{}_2.txt'.format(WORKING_DIR, 
OUTPUT_FILE_NAME), encoding = 'utf-8')#saved path\n frame.to_excel('{}Text_data/{}_2.xlsx'.format(WORKING_DIR, OUTPUT_FILE_NAME), encoding = 'utf-8')#saved path\n\n\n print(\"Data gathered.\")\n\n## filelist = glob.glob('{}Temp/*'.format(WORKING_DIR))\n## for f in filelist:\n## os.remove(f)\n\n #os.remove(OLD_MASTER_FILE)\n\n print('Temp files removed')\n\n return None \n\nprint('='*10)\nprint('This program will help you collect Weibo language data as generated by the 中搜 search results.\\n')\nprint('Use this page to generate a link for your query item:\\n\\nhttp://t.zhongsou.com/wb?form_id=1&org=1&sel=0&so=1&v=%D6%D0%CB%D1&w=%CD%F8%D3%EF')\nQUERY_LINK = input('\\nPaste your query link \\n> ')\nOUTPUT_FILE_NAME = input('\\nWhat\\'s your query term? (This will be used as file name)\\n> ')\nresp = input('\\nIs this your first time running this query? Y/N\\n> ').upper()\nif resp == 'Y':\n print()\n print('='*10)\n print('Initialize scraping now.')\n print('='*10)\n NewScrape().scrape_main()\nelif resp == 'N':\n OLD_MASTER_FILE = input('\\nWhere is the old txt file you want to merge later? Please paste full path. \\n> ')\n print()\n print('='*10)\n print('WARNING: FURTHER ACTIONS NEEDED AT THE END OF SCRAPING.')\n print('Initialize scraping now.')\n print('='*10)\n ContinueScrape().scrape_main()\n \nelse:\n print('Invalid command. Try again.')\n",
"step-ids": [
9,
12,
13,
14,
15
]
}
|
[
9,
12,
13,
14,
15
] |
# def add(a,b):
# x = a + b
#
# # the return value gets assigned to the "result" variable
# result = add(3,5)
# print result # this should print 8
#
# def multiply(arr,num):
# for x in range(len(arr)):
# arr[x] *= num
# return arr
#
# a = [2,4,10,16]
# b = multiply(a,5)
# print b
#
#
# dog = ("Canis Familiaris", "dog", "carnivore", 12)
# dog = dog + ("domestic",)
# dog = dog[:3] + ("man's best friend",) + dog[4:]
# print dog
# print sorted(dog)
#
# import math
#
# def get_circle_area(r):
# #Return (circumference, area) of a circle of radius r
# c = 2 * math.pi * r
# a = math.pi * r * r
# return (c, a)
#
# print get_circle_area(5)
#
# weekend = {"Sun": "Sunday", "Mon": "Monday"}
# print weekend.values()
# context = {
# 'questions': [
# { 'id': 1, 'content': 'Why is there a light in the fridge and not in the freezer?'},
# { 'id': 2, 'content': 'Why don\'t sheep shrink when it rains?'},
# { 'id': 3, 'content': 'Why are they called apartments when they are all stuck together?'},
# { 'id': 4, 'content': 'Why do cars drive on the parkway and park on the driveway?'}
# ]
# }
#
# for key, data in context.items():
# #print data
# for value in data:
# print "Question #", value["id"], ": ", value["content"]
# print "----"
# data = {"house":"Haus","cat":"Katze","red":"rot"}
# print data.values()
dishes = ["pizza", "sauerkraut", "paella", "hamburger"]
countries = ["Italy", "Germany", "Spain", "USA"]
country_specialties = zip(countries, dishes)
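# in Python 2, zip returns a list of (country, dish) tuples, e.g. ("Italy", "pizza")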
# print country_specialties
country_specialties_dict = dict(country_specialties)
print country_specialties_dict
|
normal
|
{
"blob_id": "e24c3f6ce2e65305f955dcede9edc0b497f6e74c",
"index": 2880,
"step-1": "# def add(a,b):\n# x = a + b\n#\n# # the return value gets assigned to the \"result\" variable\n# result = add(3,5)\n# print result # this should print 8\n#\n# def multiply(arr,num):\n# for x in range(len(arr)):\n# arr[x] *= num\n# return arr\n#\n# a = [2,4,10,16]\n# b = multiply(a,5)\n# print b\n#\n#\n# dog = (\"Canis Familiaris\", \"dog\", \"carnivore\", 12)\n# dog = dog + (\"domestic\",)\n# dog = dog[:3] + (\"man's best friend\",) + dog[4:]\n# print dog\n# print sorted(dog)\n#\n# import math\n#\n# def get_circle_area(r):\n# #Return (circumference, area) of a circle of radius r\n# c = 2 * math.pi * r\n# a = math.pi * r * r\n# return (c, a)\n#\n# print get_circle_area(5)\n#\n# weekend = {\"Sun\": \"Sunday\", \"Mon\": \"Monday\"}\n# print weekend.values()\n\n# context = {\n# 'questions': [\n# { 'id': 1, 'content': 'Why is there a light in the fridge and not in the freezer?'},\n# { 'id': 2, 'content': 'Why don\\'t sheep shrink when it rains?'},\n# { 'id': 3, 'content': 'Why are they called apartments when they are all stuck together?'},\n# { 'id': 4, 'content': 'Why do cars drive on the parkway and park on the driveway?'}\n# ]\n# }\n#\n# for key, data in context.items():\n# #print data\n# for value in data:\n# print \"Question #\", value[\"id\"], \": \", value[\"content\"]\n# print \"----\"\n\n# data = {\"house\":\"Haus\",\"cat\":\"Katze\",\"red\":\"rot\"}\n# print data.values()\n\ndishes = [\"pizza\", \"sauerkraut\", \"paella\", \"hamburger\"]\ncountries = [\"Italy\", \"Germany\", \"Spain\", \"USA\"]\n\ncountry_specialties = zip(countries, dishes)\n# print country_specialties\ncountry_specialties_dict = dict(country_specialties)\nprint country_specialties_dict\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
# import random module from Python standard library
# define a dictionary with image urls and number of flucks
# set the served img variable to be a random element from imgs
# hints:
# to put dict keys in a list: list(dict.keys())
# to choose a random item from a list: random.choice(lst)
# keep asking user if they want to fluck the image until
# they say either 'yes' or 'no'
# if they say 'yes', output a message and increment the flucks
# if they say 'no', serve another image?
# repeat process for another image...
# hint: group blocks of task-specific code into functions?
import random

# image urls mapped to their current number of flucks
imgs = {"img_1": 1, "img_2": 2, "img_3": 3, "img_4": 4}

# put the dict keys in a list and choose a random image from it
served_img = random.choice(list(imgs.keys()))
print(served_img)

# keep asking until the user answers 'yes' or 'no'
# (avoid naming the variable 'input': that shadows the builtin)
answer = raw_input("Would you like to fluck it?!")
while answer not in ("yes", "no"):
    answer = raw_input("Please answer 'yes' or 'no': ")

if answer == "yes":
    imgs[served_img] += 1  # increment the flucks for this image
    print("YOU FLUCKED IT")
else:
    print("WHAT ARE YOU???..")
|
normal
|
{
"blob_id": "4ae611ee8c019c76bb5d7c1d733ffb4bd06e2e8d",
"index": 5508,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nrandom.choice(imgs)\n<mask token>\nprint(served_img)\n<mask token>\nif input == 'yes':\n print('YOU FLUCKED IT')\nelif input == 'no':\n print('WHAT ARE YOU???..')\n",
"step-3": "<mask token>\nimgs = {'img_1': 1, 'img_2': 2, 'img_3': 3, 'img_4': 4}\nimg = imgs.keys()\nrandom.choice(imgs)\nserved_img = imgs[random.randrange(0, len(imgs) - 1)]\nprint(served_img)\ninput = raw_input('Would you like to fluck it?!')\nif input == 'yes':\n print('YOU FLUCKED IT')\nelif input == 'no':\n print('WHAT ARE YOU???..')\n",
"step-4": "import random\nimgs = {'img_1': 1, 'img_2': 2, 'img_3': 3, 'img_4': 4}\nimg = imgs.keys()\nrandom.choice(imgs)\nserved_img = imgs[random.randrange(0, len(imgs) - 1)]\nprint(served_img)\ninput = raw_input('Would you like to fluck it?!')\nif input == 'yes':\n print('YOU FLUCKED IT')\nelif input == 'no':\n print('WHAT ARE YOU???..')\n",
"step-5": "# import random module from Python standard library\n\n# define a dictionary with image urls and number of flucks\n\n# set the served img variable to be a random element from imgs\n# hints: \n#\tto put dict keys in a list: list(dict.keys())\n#\tto choose a random item from a list: random.choice(lst)\n\n# keep asking user if they want to fluck the image until\n# they say either 'yes' or 'no'\n\n# if they say 'yes', output a message and increment the flucks\n# if they say 'no', serve another image?\n\n# repeat process for another image...\n# hint: group blocks of task-specific code into functions?\n\nimport random\n\nimgs = {\"img_1\":1,\"img_2\":2,\"img_3\":3,\"img_4\":4}\nimg = imgs.keys()\nrandom.choice(imgs)\nserved_img = imgs[random.randrange(0,len(imgs)-1)]\n\nprint(served_img)\n\ninput = raw_input(\"Would you like to fluck it?!\")\n\nif input == \"yes\":\n print(\"YOU FLUCKED IT\")\n \nelif input == \"no\":\n print(\"WHAT ARE YOU???..\")\n \n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
from django.contrib import admin
from basic_app.models import UserProfileInfo
admin.site.register(UserProfileInfo)
# we do not need to register User() default form since it comes
# with the default admin site in Django itself.
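
# A possible extension (a sketch, not part of this app): register with a
# ModelAdmin subclass to customize the admin list view. The field names
# below are assumptions, not confirmed by this model.
# class UserProfileInfoAdmin(admin.ModelAdmin):
#     list_display = ('user', 'portfolio_site')
# admin.site.register(UserProfileInfo, UserProfileInfoAdmin)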
|
normal
|
{
"blob_id": "624212a1d73ff3a3b3092ffa27912a6ae25a2484",
"index": 6826,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nadmin.site.register(UserProfileInfo)\n",
"step-3": "from django.contrib import admin\nfrom basic_app.models import UserProfileInfo\nadmin.site.register(UserProfileInfo)\n",
"step-4": "from django.contrib import admin\nfrom basic_app.models import UserProfileInfo\n\nadmin.site.register(UserProfileInfo)\n\n# we do not need to register User() default form since it comes\n# with the default admin site in Django itself.\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
from pymongo import MongoClient
import Config
DB = Config.DB
COLLECTION = Config.COLLECTION
def connectMongo():
uri = "mongodb://localhost"
client = MongoClient(uri)
return client[DB]
def connectMongoCollection(collection = COLLECTION):
uri = "mongodb://localhost"
client = MongoClient(uri)
db = client[DB]
return db[collection]
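
# Example usage (a sketch; assumes a local mongod on the default port and
# that Config defines DB and COLLECTION):
# coll = connectMongoCollection()
# coll.insert_one({"status": "ok"})
# print(coll.count_documents({}))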
|
normal
|
{
"blob_id": "7a5106456d0fdd905829c5aa1f4a69b027f3a04c",
"index": 4198,
"step-1": "<mask token>\n\n\ndef connectMongoCollection(collection=COLLECTION):\n uri = 'mongodb://localhost'\n client = MongoClient(uri)\n db = client[DB]\n return db[collection]\n",
"step-2": "<mask token>\n\n\ndef connectMongo():\n uri = 'mongodb://localhost'\n client = MongoClient(uri)\n return client[DB]\n\n\ndef connectMongoCollection(collection=COLLECTION):\n uri = 'mongodb://localhost'\n client = MongoClient(uri)\n db = client[DB]\n return db[collection]\n",
"step-3": "<mask token>\nDB = Config.DB\nCOLLECTION = Config.COLLECTION\n\n\ndef connectMongo():\n uri = 'mongodb://localhost'\n client = MongoClient(uri)\n return client[DB]\n\n\ndef connectMongoCollection(collection=COLLECTION):\n uri = 'mongodb://localhost'\n client = MongoClient(uri)\n db = client[DB]\n return db[collection]\n",
"step-4": "from pymongo import MongoClient\nimport Config\nDB = Config.DB\nCOLLECTION = Config.COLLECTION\n\n\ndef connectMongo():\n uri = 'mongodb://localhost'\n client = MongoClient(uri)\n return client[DB]\n\n\ndef connectMongoCollection(collection=COLLECTION):\n uri = 'mongodb://localhost'\n client = MongoClient(uri)\n db = client[DB]\n return db[collection]\n",
"step-5": "from pymongo import MongoClient\nimport Config\n\nDB = Config.DB\nCOLLECTION = Config.COLLECTION\n\n\ndef connectMongo():\n\turi = \"mongodb://localhost\"\n\tclient = MongoClient(uri)\n\treturn client[DB]\n\ndef connectMongoCollection(collection = COLLECTION):\n\turi = \"mongodb://localhost\"\n\tclient = MongoClient(uri)\n\tdb = client[DB]\n\treturn db[collection]\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
from app import config
from sqlalchemy import create_engine
from sqlalchemy.orm import scoped_session, sessionmaker
engine = create_engine(config.DB_URI)
Session = scoped_session(sessionmaker(bind=engine))
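
# Typical request-scoped usage (a sketch; MyModel stands in for any mapped class):
# session = Session()
# session.add(MyModel(name="example"))
# session.commit()
# Session.remove()  # release the thread-local session when the request ends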
|
normal
|
{
"blob_id": "86c1aee21639958f707f99bc2468e952ad6c1859",
"index": 9352,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nengine = create_engine(config.DB_URI)\nSession = scoped_session(sessionmaker(bind=engine))\n",
"step-3": "from app import config\nfrom sqlalchemy import create_engine\nfrom sqlalchemy.orm import scoped_session, sessionmaker\nengine = create_engine(config.DB_URI)\nSession = scoped_session(sessionmaker(bind=engine))\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# This file is part of CbM (https://github.com/ec-jrc/cbm).
# Author : Konstantinos Anastasakis
# Credits : GTCAP Team
# Copyright : 2021 European Commission, Joint Research Centre
# License : 3-Clause BSD
import os
import glob
from ipywidgets import (Text, Label, HBox, VBox, Layout, Dropdown,
ToggleButtons, Output, HTML, Button,
FileUpload, IntText, RadioButtons)
from cbm.utils import config
from cbm.ipycbm.utils import settings_ds, cbm_widgets
from cbm.ipycbm.ipy_ext import ext_func
from cbm.foi import foi_v1
from cbm.datas import db
try:
from cbm.foi import foi_v2
except Exception as err:
print(err)
def foi_tab_v1():
path_foi = f"{config.get_value(['paths', 'temp'])}/foi/"
path_foi_func = foi_v1.path_foi_func
progress = Output()
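    # the Output widget captures print() output so log messages render in the notebook UI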
def outlog(*text):
with progress:
print(*text)
foi_info = HTML("""FOI procedures version 1 (requires access to a database).
""", placeholder='FOI Information')
# Connect to database
config_info = HTML(value="""1. Connect to database and object storage.<br>
FOI procedures need direct access to the database. In case there no
image is provided, access to object storage will be needed as well
to generate the base image from sentinel images.
""", placeholder='FOI Information')
config_conn = Button(
value=False,
button_style='info',
tooltip='Configure db connection.',
icon='cogs',
layout=Layout(width='40px')
)
config_conn_box = HBox([])
@config_conn.on_click
def config_conn_on_click(b):
if config_conn_box.children == ():
config_conn_box.children = [settings_ds.direct_conn()]
else:
config_conn_box.children = ()
config_box = VBox([config_info, config_conn,
config_conn_box])
# Spatial data to be tested
spatial_info = HTML(
"""2. Select the spatial data to be tested - parcels that will be
checked for heterogeneity and cardinality.<br>
- Select a table from the database""")
db_tables = Dropdown(
options=[],
description='db Tables:'
)
refresh_db_tables = Button(
value=False,
button_style='info',
tooltip='Get db tables.',
icon='refresh',
layout=Layout(width='40px')
)
@refresh_db_tables.on_click
def refresh_db_tables_on_click(b):
db_tables.options = db.tables(config.get_value(['set', 'db_conn']))
db_tables_box = HBox([db_tables, refresh_db_tables])
upload_shp = Button(
description='Create new table',
value=False,
button_style='info',
tooltip='upload_shp.',
icon='up'
)
upload_box = VBox([])
@upload_shp.on_click
def upload_shp_on_click(b):
if upload_box.children == ():
upload_box.children = [ext_func.upload_shp(path_foi, True)]
else:
upload_box.children = ()
spatial_box = VBox([spatial_info, upload_shp, upload_box, db_tables_box])
# Thematic raster.
img_info = HTML(
"""3. Thematic raster - classification raster, or raster from other
source that will be used for testing heterogeneity and cardinality.<br>
- Upload or generate raster base image.
(Only upload is currently available)""")
img_option = ToggleButtons(
options=['Upload', 'Generate'],
value=None,
disabled=True,
button_style='info', # 'success', 'info', 'warning', 'danger' or ''
        tooltips=['Upload your base image', 'Get from object storage']
)
def on_img_option_change(change):
if img_option.value == 'Upload':
img_box.children = [HBox([img_info, img_option, img_file])]
else:
img_box.children = ()
img_option.observe(on_img_option_change, 'value')
img_file = cbm_widgets.get_files_dropdown(
f'{path_foi}raster', '.tif, .tiff', 'Select Raster')
img_box = VBox([img_info, img_option, img_file])
# YAML File upload
yml_info = HTML(
"""4. YAML file that holds the classes form the thematic raster.<br>
- This can be also a simple list of values in the notebook
corespondence between pixel values and names for the classes""")
yml_file = cbm_widgets.get_files_dropdown(path_foi, '.yml, .yaml',
'Select YML')
yml_box = VBox([yml_info, yml_file])
# Database functions
dbf_info = HTML("""5. Create database functions.<br>
- Import required database functions for FOI analysis to the database""")
dbf_insert = Button(
value=False,
button_style='info',
tooltip='Create functions.',
icon='fa-share-square'
)
@dbf_insert.on_click
def dbf_insert_on_click(b):
outlog('path_foi_func :', path_foi_func)
progress.clear_output()
try:
functions = glob.glob(f"{path_foi_func}*.func")
            # keep the connection name separate so the imported db module is not shadowed
            conn = config.get_value(['set', 'db_conn'])
            sche = config.get_value(['db', conn, 'sche'])
            user = config.get_value(['db', conn, 'user'])
            for f in functions:
                db.insert_function(open(f).read().format(
                    schema=sche, owner=user))
                outlog(f"The '{f}' was imported to the database.")
            func_list = [
                f"ipycbm_{f.split('/')[-1].split('.')[0]}, " for f in functions]
            outlog(
                f"The functions: {''.join(func_list)} were added to the database")
        except Exception as err:
            outlog("Could not add functions to database.", err)
dbf_box = VBox(
[dbf_info, dbf_insert])
# FOI Parameters
param_info = HTML(
"""6. Set FOI v1 Parameters""")
# heterogeneity_threshold
param_heto_info = HTML("""
Minimum and maximum thresholds for heterogeneity checks. In the example,
any parcel with percentage of pixels for one class between 30 and 70 from
    the total, will be considered heterogeneous.
""")
param_min_het = IntText(
value=30,
description='MIN:',
tooltip="Minimum threshold for heterogeneity checks",
layout=Layout(width='150px')
)
param_max_het = IntText(
value=70,
description='MAX:',
tooltip="Maximum threshold for heterogeneity checks",
layout=Layout(width='150px')
)
param_area_info = HTML("""Minimum area for clusters selection -
only clusters bigger from this threshold will be counted.
""")
param_area = IntText(
value=2000,
description='area:',
tooltip="Minimum area for clusters selection.",
layout=Layout(width='200px')
)
param_box = VBox([param_info,
param_heto_info, HBox([param_min_het, param_max_het]),
param_area_info, param_area
])
# Run FOI analysis
run_info = Label("7. Run the FOI analysis.")
run_analysis = Button(
description='Run FOI v1',
value=False,
button_style='info',
tooltip='Run FOI analysis version 1',
icon='play',
)
run_box = VBox([run_info, run_analysis])
@run_analysis.on_click
def run_analysis_on_click(b):
with progress:
foi_v1.main(
db_tables.value,
f"{path_foi}raster/{img_file.children[1].children[0].value}",
f"{path_foi}{yml_file.children[1].children[0].value}",
param_min_het.value, param_max_het.value, param_area.value)
wbox = VBox([foi_info,
config_box,
spatial_box,
img_box,
yml_box,
dbf_box,
param_box,
run_box,
progress])
return wbox
def foi_tab_v2():
path_foi = f"{config.get_value(['paths', 'temp'])}/foi/"
progress = Output()
def outlog(*text):
with progress:
print(*text)
foi_info = HTML("""FOI procedures version 2 (does not require access to a database).
""", placeholder='FOI Information')
# Vector file
shp_info = HTML(
"""1. Spatial data to be tested -
parcels that will be checked for heterogeneity and cardinality.""")
shp_file = cbm_widgets.get_files_dropdown(
f'{path_foi}vector', '', 'Select .shp', True, True)
shp_box = VBox([shp_info, shp_file])
# Thematic raster.
img_info = HTML(
"""2. Thematic raster - classification raster, or raster from other
source that will be used for testing heterogeneity and cardinality.<br>
- Upload or generate raster base image.
(Only upload is currently available)""")
img_option = ToggleButtons(
options=['Upload', 'Generate'],
value=None,
disabled=True,
button_style='', # 'success', 'info', 'warning', 'danger' or ''
        tooltips=['Upload your base image', 'Get from object storage']
)
def on_img_option_change(change):
if img_option.value == 'Upload':
img_box.children = [HBox([img_info, img_option, img_file])]
else:
img_box.children = ()
img_option.observe(on_img_option_change, 'value')
img_file = cbm_widgets.get_files_dropdown(
f'{path_foi}raster', '.tif, .tiff', 'Select Raster')
img_box = VBox([img_info, img_option, img_file])
# YAML File upload
yml_info = HTML(
"""3. YAML file that holds the classes form the thematic raster.<br>
- This can be also a simple list of values in the notebook
corespondence between pixel values and names for the classes""")
yml_file = cbm_widgets.get_files_dropdown(path_foi, '.yml, .yaml',
'Select YML')
yml_box = VBox([yml_info, yml_file])
# FOI Prerequisites
pre_info = Label("4. Set FOI v2 Parameters.")
# heterogeneity_threshold
pre_heto_chec = HTML("""
Minimum and maximum thresholds for heterogeneity checks. In the example,
any parcel with percentage of pixels for one class between 30 and 70 from
    the total, will be considered heterogeneous.
""")
pre_min_het = IntText(
value=30,
description='MIN:',
tooltip="Minimum threshold for heterogeneity checks",
disabled=False,
layout=Layout(width='150px')
)
pre_max_het = IntText(
value=70,
description='MAX:',
tooltip="Maximum threshold for heterogeneity checks",
disabled=False,
layout=Layout(width='150px')
)
pre_heto_chec_box = HBox([pre_min_het, pre_max_het])
pre_min_cluster_size = IntText(
value=20,
description='pixels:',
tooltip="Minimum area for clusters selection.",
disabled=False,
layout=Layout(width='200px')
)
pre_pixel_connectivity = IntText(
value=8,
description='connectivity type:',
tooltip="Type of pixel connectivity in analysis. Accepted values: 4 or 8.",
disabled=False,
layout=Layout(width='200px')
)
pre_negative_buffer = IntText(
value=-10,
description='negative buffer:',
tooltip="Negative buffer to be applied on the FOI",
disabled=False,
layout=Layout(width='200px')
)
pre_box = VBox([
pre_info, pre_heto_chec, pre_heto_chec_box,
pre_pixel_connectivity, pre_negative_buffer,
HBox([pre_min_cluster_size,
HTML("Minimum area for clusters selection - only clusters bigger from this threshold will be counted.")])
])
# Run FOI analysis
run_info = Label("5. Run the FOI analysis.")
run_analysis = Button(
description='Run FOI v2',
value=False,
disabled=False,
button_style='info',
tooltip='Run FOI analysis version 2',
icon='play',
)
run_box = HBox([run_analysis])
@run_analysis.on_click
def run_analysis_on_click(b):
with progress:
foi_v2.main(
f"{path_foi}vector/{shp_file.children[1].children[0].value}",
f"{path_foi}raster/{img_file.children[1].children[0].value}",
f"{path_foi}{yml_file.children[1].children[0].value}",
pre_negative_buffer.value,
pre_min_het.value,
pre_max_het.value,
pre_pixel_connectivity.value,
pre_min_cluster_size.value)
wbox_v2 = VBox([foi_info,
shp_box,
img_box,
yml_box,
pre_box,
run_info,
run_box,
progress])
return wbox_v2
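
# Example (a sketch): render the FOI tabs in a Jupyter notebook cell.
# from IPython.display import display
# display(foi_tab_v1())  # database-backed workflow
# display(foi_tab_v2())  # file-based workflow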
|
normal
|
{
"blob_id": "2f9a081845685a4748c8b028ae4ee3a056a10284",
"index": 9779,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef foi_tab_v1():\n path_foi = f\"{config.get_value(['paths', 'temp'])}/foi/\"\n path_foi_func = foi_v1.path_foi_func\n progress = Output()\n\n def outlog(*text):\n with progress:\n print(*text)\n foi_info = HTML(\n 'FOI procedures version 1 (requires access to a database).\\n ',\n placeholder='FOI Information')\n config_info = HTML(value=\n \"\"\"1. Connect to database and object storage.<br>\n FOI procedures need direct access to the database. In case there no\n image is provided, access to object storage will be needed as well\n to generate the base image from sentinel images.\n \"\"\"\n , placeholder='FOI Information')\n config_conn = Button(value=False, button_style='info', tooltip=\n 'Configure db connection.', icon='cogs', layout=Layout(width='40px'))\n config_conn_box = HBox([])\n\n @config_conn.on_click\n def config_conn_on_click(b):\n if config_conn_box.children == ():\n config_conn_box.children = [settings_ds.direct_conn()]\n else:\n config_conn_box.children = ()\n config_box = VBox([config_info, config_conn, config_conn_box])\n spatial_info = HTML(\n \"\"\"2. Select the spatial data to be tested - parcels that will be\n checked for heterogeneity and cardinality.<br>\n - Select a table from the database\"\"\"\n )\n db_tables = Dropdown(options=[], description='db Tables:')\n refresh_db_tables = Button(value=False, button_style='info', tooltip=\n 'Get db tables.', icon='refresh', layout=Layout(width='40px'))\n\n @refresh_db_tables.on_click\n def refresh_db_tables_on_click(b):\n db_tables.options = db.tables(config.get_value(['set', 'db_conn']))\n db_tables_box = HBox([db_tables, refresh_db_tables])\n upload_shp = Button(description='Create new table', value=False,\n button_style='info', tooltip='upload_shp.', icon='up')\n upload_box = VBox([])\n\n @upload_shp.on_click\n def upload_shp_on_click(b):\n if upload_box.children == ():\n upload_box.children = [ext_func.upload_shp(path_foi, True)]\n else:\n upload_box.children = ()\n spatial_box = VBox([spatial_info, upload_shp, upload_box, db_tables_box])\n img_info = HTML(\n \"\"\"3. Thematic raster - classification raster, or raster from other\n source that will be used for testing heterogeneity and cardinality.<br>\n - Upload or generate raster base image.\n (Only upload is currently available)\"\"\"\n )\n img_option = ToggleButtons(options=['Upload', 'Generate'], value=None,\n disabled=True, button_style='info', tooltips=[\n 'Upnload your base image', 'Get from object storage'])\n\n def on_img_option_change(change):\n if img_option.value == 'Upload':\n img_box.children = [HBox([img_info, img_option, img_file])]\n else:\n img_box.children = ()\n img_option.observe(on_img_option_change, 'value')\n img_file = cbm_widgets.get_files_dropdown(f'{path_foi}raster',\n '.tif, .tiff', 'Select Raster')\n img_box = VBox([img_info, img_option, img_file])\n yml_info = HTML(\n \"\"\"4. YAML file that holds the classes form the thematic raster.<br>\n - This can be also a simple list of values in the notebook\n corespondence between pixel values and names for the classes\"\"\"\n )\n yml_file = cbm_widgets.get_files_dropdown(path_foi, '.yml, .yaml',\n 'Select YML')\n yml_box = VBox([yml_info, yml_file])\n dbf_info = HTML(\n \"\"\"5. 
Create database functions.<br>\n - Import required database functions for FOI analysis to the database\"\"\"\n )\n dbf_insert = Button(value=False, button_style='info', tooltip=\n 'Create functions.', icon='fa-share-square')\n\n @dbf_insert.on_click\n def dbf_insert_on_click(b):\n outlog('path_foi_func :', path_foi_func)\n progress.clear_output()\n try:\n functions = glob.glob(f'{path_foi_func}*.func')\n db = config.get_value(['set', 'db_conn'])\n sche = config.get_value(['db', db, 'sche'])\n user = config.get_value(['db', db, 'user'])\n for f in functions:\n db.insert_function(open(f).read().format(schema=sche, owner\n =user))\n outlog(f\"The '{f}' Was imported to the database.\")\n finc_list = [f\"ipycbm_{f.split('/')[-1].split('.')[0]}, \" for f in\n functions]\n outlog(\n f\"The functions: {''.join(finc_list)} where added to the database\"\n )\n except Exception as err:\n outlog('Could not add functions to dattabase.', err)\n dbf_box = VBox([dbf_info, dbf_insert])\n param_info = HTML('6. Set FOI v1 Parameters')\n param_heto_info = HTML(\n \"\"\"\n Minimum and maximum thresholds for heterogeneity checks. In the example,\n any parcel with percentage of pixels for one class between 30 and 70 from\n the total, will be considered heterogenous.\n \"\"\"\n )\n param_min_het = IntText(value=30, description='MIN:', tooltip=\n 'Minimum threshold for heterogeneity checks', layout=Layout(width=\n '150px'))\n param_max_het = IntText(value=70, description='MAX:', tooltip=\n 'Maximum threshold for heterogeneity checks', layout=Layout(width=\n '150px'))\n param_area_info = HTML(\n \"\"\"Minimum area for clusters selection -\n only clusters bigger from this threshold will be counted.\n \"\"\"\n )\n param_area = IntText(value=2000, description='area:', tooltip=\n 'Minimum area for clusters selection.', layout=Layout(width='200px'))\n param_box = VBox([param_info, param_heto_info, HBox([param_min_het,\n param_max_het]), param_area_info, param_area])\n run_info = Label('7. Run the FOI analysis.')\n run_analysis = Button(description='Run FOI v1', value=False,\n button_style='info', tooltip='Run FOI analysis version 1', icon='play')\n run_box = VBox([run_info, run_analysis])\n\n @run_analysis.on_click\n def run_analysis_on_click(b):\n with progress:\n foi_v1.main(db_tables.value,\n f'{path_foi}raster/{img_file.children[1].children[0].value}',\n f'{path_foi}{yml_file.children[1].children[0].value}',\n param_min_het.value, param_max_het.value, param_area.value)\n wbox = VBox([foi_info, config_box, spatial_box, img_box, yml_box,\n dbf_box, param_box, run_box, progress])\n return wbox\n\n\ndef foi_tab_v2():\n path_foi = f\"{config.get_value(['paths', 'temp'])}/foi/\"\n progress = Output()\n\n def outlog(*text):\n with progress:\n print(*text)\n foi_info = HTML(\n 'FOI procedures version 2 (does not require access to a database).\\n '\n , placeholder='FOI Information')\n shp_info = HTML(\n \"\"\"1. Spatial data to be tested -\n parcels that will be checked for heterogeneity and cardinality.\"\"\"\n )\n shp_file = cbm_widgets.get_files_dropdown(f'{path_foi}vector', '',\n 'Select .shp', True, True)\n shp_box = VBox([shp_info, shp_file])\n img_info = HTML(\n \"\"\"2. 
Thematic raster - classification raster, or raster from other\n source that will be used for testing heterogeneity and cardinality.<br>\n - Upload or generate raster base image.\n (Only upload is currently available)\"\"\"\n )\n img_option = ToggleButtons(options=['Upload', 'Generate'], value=None,\n disabled=True, button_style='', tooltips=['Upnload your base image',\n 'Get from object storage'])\n\n def on_img_option_change(change):\n if img_option.value == 'Upload':\n img_box.children = [HBox([img_info, img_option, img_file])]\n else:\n img_box.children = ()\n img_option.observe(on_img_option_change, 'value')\n img_file = cbm_widgets.get_files_dropdown(f'{path_foi}raster',\n '.tif, .tiff', 'Select Raster')\n img_box = VBox([img_info, img_option, img_file])\n yml_info = HTML(\n \"\"\"3. YAML file that holds the classes form the thematic raster.<br>\n - This can be also a simple list of values in the notebook\n corespondence between pixel values and names for the classes\"\"\"\n )\n yml_file = cbm_widgets.get_files_dropdown(path_foi, '.yml, .yaml',\n 'Select YML')\n yml_box = VBox([yml_info, yml_file])\n pre_info = Label('4. Set FOI v2 Parameters.')\n pre_heto_chec = HTML(\n \"\"\"\n Minimum and maximum thresholds for heterogeneity checks. In the example,\n any parcel with percentage of pixels for one class between 30 and 70 from\n the total, will be considered heterogenous.\n \"\"\"\n )\n pre_min_het = IntText(value=30, description='MIN:', tooltip=\n 'Minimum threshold for heterogeneity checks', disabled=False,\n layout=Layout(width='150px'))\n pre_max_het = IntText(value=70, description='MAX:', tooltip=\n 'Maximum threshold for heterogeneity checks', disabled=False,\n layout=Layout(width='150px'))\n pre_heto_chec_box = HBox([pre_min_het, pre_max_het])\n pre_min_cluster_size = IntText(value=20, description='pixels:', tooltip\n ='Minimum area for clusters selection.', disabled=False, layout=\n Layout(width='200px'))\n pre_pixel_connectivity = IntText(value=8, description=\n 'connectivity type:', tooltip=\n 'Type of pixel connectivity in analysis. Accepted values: 4 or 8.',\n disabled=False, layout=Layout(width='200px'))\n pre_negative_buffer = IntText(value=-10, description='negative buffer:',\n tooltip='Negative buffer to be applied on the FOI', disabled=False,\n layout=Layout(width='200px'))\n pre_box = VBox([pre_info, pre_heto_chec, pre_heto_chec_box,\n pre_pixel_connectivity, pre_negative_buffer, HBox([\n pre_min_cluster_size, HTML(\n 'Minimum area for clusters selection - only clusters bigger from this threshold will be counted.'\n )])])\n run_info = Label('5. Run the FOI analysis.')\n run_analysis = Button(description='Run FOI v2', value=False, disabled=\n False, button_style='info', tooltip='Run FOI analysis version 2',\n icon='play')\n run_box = HBox([run_analysis])\n\n @run_analysis.on_click\n def run_analysis_on_click(b):\n with progress:\n foi_v2.main(\n f'{path_foi}vector/{shp_file.children[1].children[0].value}',\n f'{path_foi}raster/{img_file.children[1].children[0].value}',\n f'{path_foi}{yml_file.children[1].children[0].value}',\n pre_negative_buffer.value, pre_min_het.value, pre_max_het.\n value, pre_pixel_connectivity.value, pre_min_cluster_size.value\n )\n wbox_v2 = VBox([foi_info, shp_box, img_box, yml_box, pre_box, run_info,\n run_box, progress])\n return wbox_v2\n",
"step-3": "<mask token>\ntry:\n from cbm.foi import foi_v2\nexcept Exception as err:\n print(err)\n\n\ndef foi_tab_v1():\n path_foi = f\"{config.get_value(['paths', 'temp'])}/foi/\"\n path_foi_func = foi_v1.path_foi_func\n progress = Output()\n\n def outlog(*text):\n with progress:\n print(*text)\n foi_info = HTML(\n 'FOI procedures version 1 (requires access to a database).\\n ',\n placeholder='FOI Information')\n config_info = HTML(value=\n \"\"\"1. Connect to database and object storage.<br>\n FOI procedures need direct access to the database. In case there no\n image is provided, access to object storage will be needed as well\n to generate the base image from sentinel images.\n \"\"\"\n , placeholder='FOI Information')\n config_conn = Button(value=False, button_style='info', tooltip=\n 'Configure db connection.', icon='cogs', layout=Layout(width='40px'))\n config_conn_box = HBox([])\n\n @config_conn.on_click\n def config_conn_on_click(b):\n if config_conn_box.children == ():\n config_conn_box.children = [settings_ds.direct_conn()]\n else:\n config_conn_box.children = ()\n config_box = VBox([config_info, config_conn, config_conn_box])\n spatial_info = HTML(\n \"\"\"2. Select the spatial data to be tested - parcels that will be\n checked for heterogeneity and cardinality.<br>\n - Select a table from the database\"\"\"\n )\n db_tables = Dropdown(options=[], description='db Tables:')\n refresh_db_tables = Button(value=False, button_style='info', tooltip=\n 'Get db tables.', icon='refresh', layout=Layout(width='40px'))\n\n @refresh_db_tables.on_click\n def refresh_db_tables_on_click(b):\n db_tables.options = db.tables(config.get_value(['set', 'db_conn']))\n db_tables_box = HBox([db_tables, refresh_db_tables])\n upload_shp = Button(description='Create new table', value=False,\n button_style='info', tooltip='upload_shp.', icon='up')\n upload_box = VBox([])\n\n @upload_shp.on_click\n def upload_shp_on_click(b):\n if upload_box.children == ():\n upload_box.children = [ext_func.upload_shp(path_foi, True)]\n else:\n upload_box.children = ()\n spatial_box = VBox([spatial_info, upload_shp, upload_box, db_tables_box])\n img_info = HTML(\n \"\"\"3. Thematic raster - classification raster, or raster from other\n source that will be used for testing heterogeneity and cardinality.<br>\n - Upload or generate raster base image.\n (Only upload is currently available)\"\"\"\n )\n img_option = ToggleButtons(options=['Upload', 'Generate'], value=None,\n disabled=True, button_style='info', tooltips=[\n 'Upnload your base image', 'Get from object storage'])\n\n def on_img_option_change(change):\n if img_option.value == 'Upload':\n img_box.children = [HBox([img_info, img_option, img_file])]\n else:\n img_box.children = ()\n img_option.observe(on_img_option_change, 'value')\n img_file = cbm_widgets.get_files_dropdown(f'{path_foi}raster',\n '.tif, .tiff', 'Select Raster')\n img_box = VBox([img_info, img_option, img_file])\n yml_info = HTML(\n \"\"\"4. YAML file that holds the classes form the thematic raster.<br>\n - This can be also a simple list of values in the notebook\n corespondence between pixel values and names for the classes\"\"\"\n )\n yml_file = cbm_widgets.get_files_dropdown(path_foi, '.yml, .yaml',\n 'Select YML')\n yml_box = VBox([yml_info, yml_file])\n dbf_info = HTML(\n \"\"\"5. 
Create database functions.<br>\n - Import required database functions for FOI analysis to the database\"\"\"\n )\n dbf_insert = Button(value=False, button_style='info', tooltip=\n 'Create functions.', icon='fa-share-square')\n\n @dbf_insert.on_click\n def dbf_insert_on_click(b):\n outlog('path_foi_func :', path_foi_func)\n progress.clear_output()\n try:\n functions = glob.glob(f'{path_foi_func}*.func')\n db = config.get_value(['set', 'db_conn'])\n sche = config.get_value(['db', db, 'sche'])\n user = config.get_value(['db', db, 'user'])\n for f in functions:\n db.insert_function(open(f).read().format(schema=sche, owner\n =user))\n outlog(f\"The '{f}' Was imported to the database.\")\n finc_list = [f\"ipycbm_{f.split('/')[-1].split('.')[0]}, \" for f in\n functions]\n outlog(\n f\"The functions: {''.join(finc_list)} where added to the database\"\n )\n except Exception as err:\n outlog('Could not add functions to dattabase.', err)\n dbf_box = VBox([dbf_info, dbf_insert])\n param_info = HTML('6. Set FOI v1 Parameters')\n param_heto_info = HTML(\n \"\"\"\n Minimum and maximum thresholds for heterogeneity checks. In the example,\n any parcel with percentage of pixels for one class between 30 and 70 from\n the total, will be considered heterogenous.\n \"\"\"\n )\n param_min_het = IntText(value=30, description='MIN:', tooltip=\n 'Minimum threshold for heterogeneity checks', layout=Layout(width=\n '150px'))\n param_max_het = IntText(value=70, description='MAX:', tooltip=\n 'Maximum threshold for heterogeneity checks', layout=Layout(width=\n '150px'))\n param_area_info = HTML(\n \"\"\"Minimum area for clusters selection -\n only clusters bigger from this threshold will be counted.\n \"\"\"\n )\n param_area = IntText(value=2000, description='area:', tooltip=\n 'Minimum area for clusters selection.', layout=Layout(width='200px'))\n param_box = VBox([param_info, param_heto_info, HBox([param_min_het,\n param_max_het]), param_area_info, param_area])\n run_info = Label('7. Run the FOI analysis.')\n run_analysis = Button(description='Run FOI v1', value=False,\n button_style='info', tooltip='Run FOI analysis version 1', icon='play')\n run_box = VBox([run_info, run_analysis])\n\n @run_analysis.on_click\n def run_analysis_on_click(b):\n with progress:\n foi_v1.main(db_tables.value,\n f'{path_foi}raster/{img_file.children[1].children[0].value}',\n f'{path_foi}{yml_file.children[1].children[0].value}',\n param_min_het.value, param_max_het.value, param_area.value)\n wbox = VBox([foi_info, config_box, spatial_box, img_box, yml_box,\n dbf_box, param_box, run_box, progress])\n return wbox\n\n\ndef foi_tab_v2():\n path_foi = f\"{config.get_value(['paths', 'temp'])}/foi/\"\n progress = Output()\n\n def outlog(*text):\n with progress:\n print(*text)\n foi_info = HTML(\n 'FOI procedures version 2 (does not require access to a database).\\n '\n , placeholder='FOI Information')\n shp_info = HTML(\n \"\"\"1. Spatial data to be tested -\n parcels that will be checked for heterogeneity and cardinality.\"\"\"\n )\n shp_file = cbm_widgets.get_files_dropdown(f'{path_foi}vector', '',\n 'Select .shp', True, True)\n shp_box = VBox([shp_info, shp_file])\n img_info = HTML(\n \"\"\"2. 
Thematic raster - classification raster, or raster from other\n source that will be used for testing heterogeneity and cardinality.<br>\n - Upload or generate raster base image.\n (Only upload is currently available)\"\"\"\n )\n img_option = ToggleButtons(options=['Upload', 'Generate'], value=None,\n disabled=True, button_style='', tooltips=['Upnload your base image',\n 'Get from object storage'])\n\n def on_img_option_change(change):\n if img_option.value == 'Upload':\n img_box.children = [HBox([img_info, img_option, img_file])]\n else:\n img_box.children = ()\n img_option.observe(on_img_option_change, 'value')\n img_file = cbm_widgets.get_files_dropdown(f'{path_foi}raster',\n '.tif, .tiff', 'Select Raster')\n img_box = VBox([img_info, img_option, img_file])\n yml_info = HTML(\n \"\"\"3. YAML file that holds the classes form the thematic raster.<br>\n - This can be also a simple list of values in the notebook\n corespondence between pixel values and names for the classes\"\"\"\n )\n yml_file = cbm_widgets.get_files_dropdown(path_foi, '.yml, .yaml',\n 'Select YML')\n yml_box = VBox([yml_info, yml_file])\n pre_info = Label('4. Set FOI v2 Parameters.')\n pre_heto_chec = HTML(\n \"\"\"\n Minimum and maximum thresholds for heterogeneity checks. In the example,\n any parcel with percentage of pixels for one class between 30 and 70 from\n the total, will be considered heterogenous.\n \"\"\"\n )\n pre_min_het = IntText(value=30, description='MIN:', tooltip=\n 'Minimum threshold for heterogeneity checks', disabled=False,\n layout=Layout(width='150px'))\n pre_max_het = IntText(value=70, description='MAX:', tooltip=\n 'Maximum threshold for heterogeneity checks', disabled=False,\n layout=Layout(width='150px'))\n pre_heto_chec_box = HBox([pre_min_het, pre_max_het])\n pre_min_cluster_size = IntText(value=20, description='pixels:', tooltip\n ='Minimum area for clusters selection.', disabled=False, layout=\n Layout(width='200px'))\n pre_pixel_connectivity = IntText(value=8, description=\n 'connectivity type:', tooltip=\n 'Type of pixel connectivity in analysis. Accepted values: 4 or 8.',\n disabled=False, layout=Layout(width='200px'))\n pre_negative_buffer = IntText(value=-10, description='negative buffer:',\n tooltip='Negative buffer to be applied on the FOI', disabled=False,\n layout=Layout(width='200px'))\n pre_box = VBox([pre_info, pre_heto_chec, pre_heto_chec_box,\n pre_pixel_connectivity, pre_negative_buffer, HBox([\n pre_min_cluster_size, HTML(\n 'Minimum area for clusters selection - only clusters bigger from this threshold will be counted.'\n )])])\n run_info = Label('5. Run the FOI analysis.')\n run_analysis = Button(description='Run FOI v2', value=False, disabled=\n False, button_style='info', tooltip='Run FOI analysis version 2',\n icon='play')\n run_box = HBox([run_analysis])\n\n @run_analysis.on_click\n def run_analysis_on_click(b):\n with progress:\n foi_v2.main(\n f'{path_foi}vector/{shp_file.children[1].children[0].value}',\n f'{path_foi}raster/{img_file.children[1].children[0].value}',\n f'{path_foi}{yml_file.children[1].children[0].value}',\n pre_negative_buffer.value, pre_min_het.value, pre_max_het.\n value, pre_pixel_connectivity.value, pre_min_cluster_size.value\n )\n wbox_v2 = VBox([foi_info, shp_box, img_box, yml_box, pre_box, run_info,\n run_box, progress])\n return wbox_v2\n",
"step-4": "import os\nimport glob\nfrom ipywidgets import Text, Label, HBox, VBox, Layout, Dropdown, ToggleButtons, Output, HTML, Button, FileUpload, IntText, RadioButtons\nfrom cbm.utils import config\nfrom cbm.ipycbm.utils import settings_ds, cbm_widgets\nfrom cbm.ipycbm.ipy_ext import ext_func\nfrom cbm.foi import foi_v1\nfrom cbm.datas import db\ntry:\n from cbm.foi import foi_v2\nexcept Exception as err:\n print(err)\n\n\ndef foi_tab_v1():\n path_foi = f\"{config.get_value(['paths', 'temp'])}/foi/\"\n path_foi_func = foi_v1.path_foi_func\n progress = Output()\n\n def outlog(*text):\n with progress:\n print(*text)\n foi_info = HTML(\n 'FOI procedures version 1 (requires access to a database).\\n ',\n placeholder='FOI Information')\n config_info = HTML(value=\n \"\"\"1. Connect to database and object storage.<br>\n FOI procedures need direct access to the database. In case there no\n image is provided, access to object storage will be needed as well\n to generate the base image from sentinel images.\n \"\"\"\n , placeholder='FOI Information')\n config_conn = Button(value=False, button_style='info', tooltip=\n 'Configure db connection.', icon='cogs', layout=Layout(width='40px'))\n config_conn_box = HBox([])\n\n @config_conn.on_click\n def config_conn_on_click(b):\n if config_conn_box.children == ():\n config_conn_box.children = [settings_ds.direct_conn()]\n else:\n config_conn_box.children = ()\n config_box = VBox([config_info, config_conn, config_conn_box])\n spatial_info = HTML(\n \"\"\"2. Select the spatial data to be tested - parcels that will be\n checked for heterogeneity and cardinality.<br>\n - Select a table from the database\"\"\"\n )\n db_tables = Dropdown(options=[], description='db Tables:')\n refresh_db_tables = Button(value=False, button_style='info', tooltip=\n 'Get db tables.', icon='refresh', layout=Layout(width='40px'))\n\n @refresh_db_tables.on_click\n def refresh_db_tables_on_click(b):\n db_tables.options = db.tables(config.get_value(['set', 'db_conn']))\n db_tables_box = HBox([db_tables, refresh_db_tables])\n upload_shp = Button(description='Create new table', value=False,\n button_style='info', tooltip='upload_shp.', icon='up')\n upload_box = VBox([])\n\n @upload_shp.on_click\n def upload_shp_on_click(b):\n if upload_box.children == ():\n upload_box.children = [ext_func.upload_shp(path_foi, True)]\n else:\n upload_box.children = ()\n spatial_box = VBox([spatial_info, upload_shp, upload_box, db_tables_box])\n img_info = HTML(\n \"\"\"3. Thematic raster - classification raster, or raster from other\n source that will be used for testing heterogeneity and cardinality.<br>\n - Upload or generate raster base image.\n (Only upload is currently available)\"\"\"\n )\n img_option = ToggleButtons(options=['Upload', 'Generate'], value=None,\n disabled=True, button_style='info', tooltips=[\n 'Upnload your base image', 'Get from object storage'])\n\n def on_img_option_change(change):\n if img_option.value == 'Upload':\n img_box.children = [HBox([img_info, img_option, img_file])]\n else:\n img_box.children = ()\n img_option.observe(on_img_option_change, 'value')\n img_file = cbm_widgets.get_files_dropdown(f'{path_foi}raster',\n '.tif, .tiff', 'Select Raster')\n img_box = VBox([img_info, img_option, img_file])\n yml_info = HTML(\n \"\"\"4. 
YAML file that holds the classes form the thematic raster.<br>\n - This can be also a simple list of values in the notebook\n corespondence between pixel values and names for the classes\"\"\"\n )\n yml_file = cbm_widgets.get_files_dropdown(path_foi, '.yml, .yaml',\n 'Select YML')\n yml_box = VBox([yml_info, yml_file])\n dbf_info = HTML(\n \"\"\"5. Create database functions.<br>\n - Import required database functions for FOI analysis to the database\"\"\"\n )\n dbf_insert = Button(value=False, button_style='info', tooltip=\n 'Create functions.', icon='fa-share-square')\n\n @dbf_insert.on_click\n def dbf_insert_on_click(b):\n outlog('path_foi_func :', path_foi_func)\n progress.clear_output()\n try:\n functions = glob.glob(f'{path_foi_func}*.func')\n db = config.get_value(['set', 'db_conn'])\n sche = config.get_value(['db', db, 'sche'])\n user = config.get_value(['db', db, 'user'])\n for f in functions:\n db.insert_function(open(f).read().format(schema=sche, owner\n =user))\n outlog(f\"The '{f}' Was imported to the database.\")\n finc_list = [f\"ipycbm_{f.split('/')[-1].split('.')[0]}, \" for f in\n functions]\n outlog(\n f\"The functions: {''.join(finc_list)} where added to the database\"\n )\n except Exception as err:\n outlog('Could not add functions to dattabase.', err)\n dbf_box = VBox([dbf_info, dbf_insert])\n param_info = HTML('6. Set FOI v1 Parameters')\n param_heto_info = HTML(\n \"\"\"\n Minimum and maximum thresholds for heterogeneity checks. In the example,\n any parcel with percentage of pixels for one class between 30 and 70 from\n the total, will be considered heterogenous.\n \"\"\"\n )\n param_min_het = IntText(value=30, description='MIN:', tooltip=\n 'Minimum threshold for heterogeneity checks', layout=Layout(width=\n '150px'))\n param_max_het = IntText(value=70, description='MAX:', tooltip=\n 'Maximum threshold for heterogeneity checks', layout=Layout(width=\n '150px'))\n param_area_info = HTML(\n \"\"\"Minimum area for clusters selection -\n only clusters bigger from this threshold will be counted.\n \"\"\"\n )\n param_area = IntText(value=2000, description='area:', tooltip=\n 'Minimum area for clusters selection.', layout=Layout(width='200px'))\n param_box = VBox([param_info, param_heto_info, HBox([param_min_het,\n param_max_het]), param_area_info, param_area])\n run_info = Label('7. Run the FOI analysis.')\n run_analysis = Button(description='Run FOI v1', value=False,\n button_style='info', tooltip='Run FOI analysis version 1', icon='play')\n run_box = VBox([run_info, run_analysis])\n\n @run_analysis.on_click\n def run_analysis_on_click(b):\n with progress:\n foi_v1.main(db_tables.value,\n f'{path_foi}raster/{img_file.children[1].children[0].value}',\n f'{path_foi}{yml_file.children[1].children[0].value}',\n param_min_het.value, param_max_het.value, param_area.value)\n wbox = VBox([foi_info, config_box, spatial_box, img_box, yml_box,\n dbf_box, param_box, run_box, progress])\n return wbox\n\n\ndef foi_tab_v2():\n path_foi = f\"{config.get_value(['paths', 'temp'])}/foi/\"\n progress = Output()\n\n def outlog(*text):\n with progress:\n print(*text)\n foi_info = HTML(\n 'FOI procedures version 2 (does not require access to a database).\\n '\n , placeholder='FOI Information')\n shp_info = HTML(\n \"\"\"1. Spatial data to be tested -\n parcels that will be checked for heterogeneity and cardinality.\"\"\"\n )\n shp_file = cbm_widgets.get_files_dropdown(f'{path_foi}vector', '',\n 'Select .shp', True, True)\n shp_box = VBox([shp_info, shp_file])\n img_info = HTML(\n \"\"\"2. 
Thematic raster - classification raster, or raster from other\n source that will be used for testing heterogeneity and cardinality.<br>\n - Upload or generate raster base image.\n (Only upload is currently available)\"\"\"\n )\n img_option = ToggleButtons(options=['Upload', 'Generate'], value=None,\n disabled=True, button_style='', tooltips=['Upnload your base image',\n 'Get from object storage'])\n\n def on_img_option_change(change):\n if img_option.value == 'Upload':\n img_box.children = [HBox([img_info, img_option, img_file])]\n else:\n img_box.children = ()\n img_option.observe(on_img_option_change, 'value')\n img_file = cbm_widgets.get_files_dropdown(f'{path_foi}raster',\n '.tif, .tiff', 'Select Raster')\n img_box = VBox([img_info, img_option, img_file])\n yml_info = HTML(\n \"\"\"3. YAML file that holds the classes form the thematic raster.<br>\n - This can be also a simple list of values in the notebook\n corespondence between pixel values and names for the classes\"\"\"\n )\n yml_file = cbm_widgets.get_files_dropdown(path_foi, '.yml, .yaml',\n 'Select YML')\n yml_box = VBox([yml_info, yml_file])\n pre_info = Label('4. Set FOI v2 Parameters.')\n pre_heto_chec = HTML(\n \"\"\"\n Minimum and maximum thresholds for heterogeneity checks. In the example,\n any parcel with percentage of pixels for one class between 30 and 70 from\n the total, will be considered heterogenous.\n \"\"\"\n )\n pre_min_het = IntText(value=30, description='MIN:', tooltip=\n 'Minimum threshold for heterogeneity checks', disabled=False,\n layout=Layout(width='150px'))\n pre_max_het = IntText(value=70, description='MAX:', tooltip=\n 'Maximum threshold for heterogeneity checks', disabled=False,\n layout=Layout(width='150px'))\n pre_heto_chec_box = HBox([pre_min_het, pre_max_het])\n pre_min_cluster_size = IntText(value=20, description='pixels:', tooltip\n ='Minimum area for clusters selection.', disabled=False, layout=\n Layout(width='200px'))\n pre_pixel_connectivity = IntText(value=8, description=\n 'connectivity type:', tooltip=\n 'Type of pixel connectivity in analysis. Accepted values: 4 or 8.',\n disabled=False, layout=Layout(width='200px'))\n pre_negative_buffer = IntText(value=-10, description='negative buffer:',\n tooltip='Negative buffer to be applied on the FOI', disabled=False,\n layout=Layout(width='200px'))\n pre_box = VBox([pre_info, pre_heto_chec, pre_heto_chec_box,\n pre_pixel_connectivity, pre_negative_buffer, HBox([\n pre_min_cluster_size, HTML(\n 'Minimum area for clusters selection - only clusters bigger from this threshold will be counted.'\n )])])\n run_info = Label('5. Run the FOI analysis.')\n run_analysis = Button(description='Run FOI v2', value=False, disabled=\n False, button_style='info', tooltip='Run FOI analysis version 2',\n icon='play')\n run_box = HBox([run_analysis])\n\n @run_analysis.on_click\n def run_analysis_on_click(b):\n with progress:\n foi_v2.main(\n f'{path_foi}vector/{shp_file.children[1].children[0].value}',\n f'{path_foi}raster/{img_file.children[1].children[0].value}',\n f'{path_foi}{yml_file.children[1].children[0].value}',\n pre_negative_buffer.value, pre_min_het.value, pre_max_het.\n value, pre_pixel_connectivity.value, pre_min_cluster_size.value\n )\n wbox_v2 = VBox([foi_info, shp_box, img_box, yml_box, pre_box, run_info,\n run_box, progress])\n return wbox_v2\n",
"step-5": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\n# This file is part of CbM (https://github.com/ec-jrc/cbm).\n# Author : Konstantinos Anastasakis\n# Credits : GTCAP Team\n# Copyright : 2021 European Commission, Joint Research Centre\n# License : 3-Clause BSD\n\n\nimport os\nimport glob\nfrom ipywidgets import (Text, Label, HBox, VBox, Layout, Dropdown,\n ToggleButtons, Output, HTML, Button,\n FileUpload, IntText, RadioButtons)\n\nfrom cbm.utils import config\nfrom cbm.ipycbm.utils import settings_ds, cbm_widgets\nfrom cbm.ipycbm.ipy_ext import ext_func\nfrom cbm.foi import foi_v1\nfrom cbm.datas import db\ntry:\n from cbm.foi import foi_v2\nexcept Exception as err:\n print(err)\n\n\ndef foi_tab_v1():\n path_foi = f\"{config.get_value(['paths', 'temp'])}/foi/\"\n path_foi_func = foi_v1.path_foi_func\n\n progress = Output()\n\n def outlog(*text):\n with progress:\n print(*text)\n\n foi_info = HTML(\"\"\"FOI procedures version 1 (requires access to a database).\n \"\"\", placeholder='FOI Information')\n\n # Connect to database\n\n config_info = HTML(value=\"\"\"1. Connect to database and object storage.<br>\n FOI procedures need direct access to the database. In case there no\n image is provided, access to object storage will be needed as well\n to generate the base image from sentinel images.\n \"\"\", placeholder='FOI Information')\n config_conn = Button(\n value=False,\n button_style='info',\n tooltip='Configure db connection.',\n icon='cogs',\n layout=Layout(width='40px')\n )\n\n config_conn_box = HBox([])\n\n @config_conn.on_click\n def config_conn_on_click(b):\n if config_conn_box.children == ():\n config_conn_box.children = [settings_ds.direct_conn()]\n else:\n config_conn_box.children = ()\n\n config_box = VBox([config_info, config_conn,\n config_conn_box])\n\n # Spatial data to be tested\n spatial_info = HTML(\n \"\"\"2. Select the spatial data to be tested - parcels that will be\n checked for heterogeneity and cardinality.<br>\n - Select a table from the database\"\"\")\n\n db_tables = Dropdown(\n options=[],\n description='db Tables:'\n )\n refresh_db_tables = Button(\n value=False,\n button_style='info',\n tooltip='Get db tables.',\n icon='refresh',\n layout=Layout(width='40px')\n )\n\n @refresh_db_tables.on_click\n def refresh_db_tables_on_click(b):\n db_tables.options = db.tables(config.get_value(['set', 'db_conn']))\n\n db_tables_box = HBox([db_tables, refresh_db_tables])\n\n upload_shp = Button(\n description='Create new table',\n value=False,\n button_style='info',\n tooltip='upload_shp.',\n icon='up'\n )\n\n upload_box = VBox([])\n\n @upload_shp.on_click\n def upload_shp_on_click(b):\n if upload_box.children == ():\n upload_box.children = [ext_func.upload_shp(path_foi, True)]\n else:\n upload_box.children = ()\n spatial_box = VBox([spatial_info, upload_shp, upload_box, db_tables_box])\n\n # Thematic raster.\n img_info = HTML(\n \"\"\"3. 
Thematic raster - classification raster, or raster from other\n source that will be used for testing heterogeneity and cardinality.<br>\n - Upload or generate raster base image.\n (Only upload is currently available)\"\"\")\n img_option = ToggleButtons(\n options=['Upload', 'Generate'],\n value=None,\n disabled=True,\n button_style='info', # 'success', 'info', 'warning', 'danger' or ''\n tooltips=['Upnload your base image', 'Get from object storage']\n )\n\n def on_img_option_change(change):\n if img_option.value == 'Upload':\n img_box.children = [HBox([img_info, img_option, img_file])]\n else:\n img_box.children = ()\n img_option.observe(on_img_option_change, 'value')\n\n img_file = cbm_widgets.get_files_dropdown(\n f'{path_foi}raster', '.tif, .tiff', 'Select Raster')\n img_box = VBox([img_info, img_option, img_file])\n\n # YAML File upload\n yml_info = HTML(\n \"\"\"4. YAML file that holds the classes form the thematic raster.<br>\n - This can be also a simple list of values in the notebook\n corespondence between pixel values and names for the classes\"\"\")\n\n yml_file = cbm_widgets.get_files_dropdown(path_foi, '.yml, .yaml',\n 'Select YML')\n yml_box = VBox([yml_info, yml_file])\n\n # Database functions\n dbf_info = HTML(\"\"\"5. Create database functions.<br>\n - Import required database functions for FOI analysis to the database\"\"\")\n\n dbf_insert = Button(\n value=False,\n button_style='info',\n tooltip='Create functions.',\n icon='fa-share-square'\n )\n\n @dbf_insert.on_click\n def dbf_insert_on_click(b):\n outlog('path_foi_func :', path_foi_func)\n progress.clear_output()\n try:\n functions = glob.glob(f\"{path_foi_func}*.func\")\n db = config.get_value(['set', 'db_conn'])\n sche = config.get_value(['db', db, 'sche'])\n user = config.get_value(['db', db, 'user'])\n\n for f in functions:\n db.insert_function(open(f).read().format(\n schema=sche, owner=user))\n outlog(f\"The '{f}' Was imported to the database.\")\n finc_list = [\n f\"ipycbm_{f.split('/')[-1].split('.')[0]}, \" for f in functions]\n outlog(\n f\"The functions: {('').join(finc_list)} where added to the database\")\n except Exception as err:\n outlog(\"Could not add functions to dattabase.\", err)\n\n dbf_box = VBox(\n [dbf_info, dbf_insert])\n\n # FOI Parameters\n param_info = HTML(\n \"\"\"6. Set FOI v1 Parameters\"\"\")\n\n # heterogeneity_threshold\n param_heto_info = HTML(\"\"\"\n Minimum and maximum thresholds for heterogeneity checks. In the example,\n any parcel with percentage of pixels for one class between 30 and 70 from\n the total, will be considered heterogenous.\n \"\"\")\n param_min_het = IntText(\n value=30,\n description='MIN:',\n tooltip=\"Minimum threshold for heterogeneity checks\",\n layout=Layout(width='150px')\n )\n param_max_het = IntText(\n value=70,\n description='MAX:',\n tooltip=\"Maximum threshold for heterogeneity checks\",\n layout=Layout(width='150px')\n )\n\n param_area_info = HTML(\"\"\"Minimum area for clusters selection -\n only clusters bigger from this threshold will be counted.\n \"\"\")\n param_area = IntText(\n value=2000,\n description='area:',\n tooltip=\"Minimum area for clusters selection.\",\n layout=Layout(width='200px')\n )\n\n param_box = VBox([param_info,\n param_heto_info, HBox([param_min_het, param_max_het]),\n param_area_info, param_area\n ])\n\n # Run FOI analysis\n run_info = Label(\"7. 
Run the FOI analysis.\")\n run_analysis = Button(\n description='Run FOI v1',\n value=False,\n button_style='info',\n tooltip='Run FOI analysis version 1',\n icon='play',\n )\n run_box = VBox([run_info, run_analysis])\n\n @run_analysis.on_click\n def run_analysis_on_click(b):\n with progress:\n foi_v1.main(\n db_tables.value,\n f\"{path_foi}raster/{img_file.children[1].children[0].value}\",\n f\"{path_foi}{yml_file.children[1].children[0].value}\",\n param_min_het.value, param_max_het.value, param_area.value)\n\n wbox = VBox([foi_info,\n config_box,\n spatial_box,\n img_box,\n yml_box,\n dbf_box,\n param_box,\n run_box,\n progress])\n\n return wbox\n\n\ndef foi_tab_v2():\n path_foi = f\"{config.get_value(['paths', 'temp'])}/foi/\"\n progress = Output()\n\n def outlog(*text):\n with progress:\n print(*text)\n\n foi_info = HTML(\"\"\"FOI procedures version 2 (does not require access to a database).\n \"\"\", placeholder='FOI Information')\n\n # Vector file\n shp_info = HTML(\n \"\"\"1. Spatial data to be tested -\n parcels that will be checked for heterogeneity and cardinality.\"\"\")\n shp_file = cbm_widgets.get_files_dropdown(\n f'{path_foi}vector', '', 'Select .shp', True, True)\n shp_box = VBox([shp_info, shp_file])\n\n # Thematic raster.\n img_info = HTML(\n \"\"\"2. Thematic raster - classification raster, or raster from other\n source that will be used for testing heterogeneity and cardinality.<br>\n - Upload or generate raster base image.\n (Only upload is currently available)\"\"\")\n img_option = ToggleButtons(\n options=['Upload', 'Generate'],\n value=None,\n disabled=True,\n button_style='', # 'success', 'info', 'warning', 'danger' or ''\n tooltips=['Upnload your base image', 'Get from object storage']\n )\n\n def on_img_option_change(change):\n if img_option.value == 'Upload':\n img_box.children = [HBox([img_info, img_option, img_file])]\n else:\n img_box.children = ()\n img_option.observe(on_img_option_change, 'value')\n img_file = cbm_widgets.get_files_dropdown(\n f'{path_foi}raster', '.tif, .tiff', 'Select Raster')\n img_box = VBox([img_info, img_option, img_file])\n\n # YAML File upload\n yml_info = HTML(\n \"\"\"3. YAML file that holds the classes form the thematic raster.<br>\n - This can be also a simple list of values in the notebook\n corespondence between pixel values and names for the classes\"\"\")\n yml_file = cbm_widgets.get_files_dropdown(path_foi, '.yml, .yaml',\n 'Select YML')\n yml_box = VBox([yml_info, yml_file])\n\n # FOI Prerequisites\n pre_info = Label(\"4. Set FOI v2 Parameters.\")\n\n # heterogeneity_threshold\n pre_heto_chec = HTML(\"\"\"\n Minimum and maximum thresholds for heterogeneity checks. In the example,\n any parcel with percentage of pixels for one class between 30 and 70 from\n the total, will be considered heterogenous.\n \"\"\")\n pre_min_het = IntText(\n value=30,\n description='MIN:',\n tooltip=\"Minimum threshold for heterogeneity checks\",\n disabled=False,\n layout=Layout(width='150px')\n )\n pre_max_het = IntText(\n value=70,\n description='MAX:',\n tooltip=\"Maximum threshold for heterogeneity checks\",\n disabled=False,\n layout=Layout(width='150px')\n )\n pre_heto_chec_box = HBox([pre_min_het, pre_max_het])\n pre_min_cluster_size = IntText(\n value=20,\n description='pixels:',\n tooltip=\"Minimum area for clusters selection.\",\n disabled=False,\n layout=Layout(width='200px')\n )\n pre_pixel_connectivity = IntText(\n value=8,\n description='connectivity type:',\n tooltip=\"Type of pixel connectivity in analysis. 
Accepted values: 4 or 8.\",\n disabled=False,\n layout=Layout(width='200px')\n )\n pre_negative_buffer = IntText(\n value=-10,\n description='negative buffer:',\n tooltip=\"Negative buffer to be applied on the FOI\",\n disabled=False,\n layout=Layout(width='200px')\n )\n\n pre_box = VBox([\n pre_info, pre_heto_chec, pre_heto_chec_box,\n pre_pixel_connectivity, pre_negative_buffer,\n HBox([pre_min_cluster_size,\n HTML(\"Minimum area for clusters selection - only clusters bigger from this threshold will be counted.\")])\n ])\n\n # Run FOI analysis\n run_info = Label(\"5. Run the FOI analysis.\")\n run_analysis = Button(\n description='Run FOI v2',\n value=False,\n disabled=False,\n button_style='info',\n tooltip='Run FOI analysis version 2',\n icon='play',\n )\n run_box = HBox([run_analysis])\n\n @run_analysis.on_click\n def run_analysis_on_click(b):\n with progress:\n foi_v2.main(\n f\"{path_foi}vector/{shp_file.children[1].children[0].value}\",\n f\"{path_foi}raster/{img_file.children[1].children[0].value}\",\n f\"{path_foi}{yml_file.children[1].children[0].value}\",\n pre_negative_buffer.value,\n pre_min_het.value,\n pre_max_het.value,\n pre_pixel_connectivity.value,\n pre_min_cluster_size.value)\n\n wbox_v2 = VBox([foi_info,\n shp_box,\n img_box,\n yml_box,\n pre_box,\n run_info,\n run_box,\n progress])\n\n return wbox_v2\n",
"step-ids": [
0,
2,
3,
4,
5
]
}
|
[
0,
2,
3,
4,
5
] |
#!/usr/bin/env python
#-*- coding:utf8 -*-
# Power by null 2018-09-19 18:41:17
from codebase.mod.mod_test import test_f
|
normal
|
{
"blob_id": "7c4709eaa5123b44e6355c6a60932f286e3b1cf5",
"index": 7450,
"step-1": "<mask token>\n",
"step-2": "from codebase.mod.mod_test import test_f\n",
"step-3": "#!/usr/bin/env python\n#-*- coding:utf8 -*-\n# Power by null 2018-09-19 18:41:17\n\nfrom codebase.mod.mod_test import test_f\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
""""
You are given a tree-like data structure represented as nested dictionaries.
Implement a function collect_leaves that accepts a tree and returns a list of all its leaves. A leaf is a bottom-most node in a tree.
Implement a kind of unit test via the assert operator.
"""
from typing import Union
def collect_leaves(u: Union[dict, list]) -> list:
flatten_list = []
if isinstance(u, dict):
for item in u.values():
flatten_list.extend(collect_leaves(item))
return flatten_list
return u
tree = {
"node1": {
"node11": {
"node111": [1, 2, 3],
"node112": [4, 5]
},
"node12": [6]
},
"node2": [7, 8, 9]
}
assert collect_leaves([1, 2, 3]) == [1, 2, 3]
assert collect_leaves(tree) == [1, 2, 3, 4, 5, 6, 7, 8, 9]
|
normal
|
{
"blob_id": "603cce951dd0f78ef3ca9dce587042b3b7f6b449",
"index": 8001,
"step-1": "<mask token>\n\n\ndef collect_leaves(u: Union[dict, list]) ->list:\n flatten_list = []\n if isinstance(u, dict):\n for item in u.values():\n flatten_list.extend(collect_leaves(item))\n return flatten_list\n return u\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef collect_leaves(u: Union[dict, list]) ->list:\n flatten_list = []\n if isinstance(u, dict):\n for item in u.values():\n flatten_list.extend(collect_leaves(item))\n return flatten_list\n return u\n\n\n<mask token>\nassert collect_leaves([1, 2, 3]) == [1, 2, 3]\nassert collect_leaves(tree) == [1, 2, 3, 4, 5, 6, 7, 8, 9]\n",
"step-3": "<mask token>\n\n\ndef collect_leaves(u: Union[dict, list]) ->list:\n flatten_list = []\n if isinstance(u, dict):\n for item in u.values():\n flatten_list.extend(collect_leaves(item))\n return flatten_list\n return u\n\n\ntree = {'node1': {'node11': {'node111': [1, 2, 3], 'node112': [4, 5]},\n 'node12': [6]}, 'node2': [7, 8, 9]}\nassert collect_leaves([1, 2, 3]) == [1, 2, 3]\nassert collect_leaves(tree) == [1, 2, 3, 4, 5, 6, 7, 8, 9]\n",
"step-4": "<mask token>\nfrom typing import Union\n\n\ndef collect_leaves(u: Union[dict, list]) ->list:\n flatten_list = []\n if isinstance(u, dict):\n for item in u.values():\n flatten_list.extend(collect_leaves(item))\n return flatten_list\n return u\n\n\ntree = {'node1': {'node11': {'node111': [1, 2, 3], 'node112': [4, 5]},\n 'node12': [6]}, 'node2': [7, 8, 9]}\nassert collect_leaves([1, 2, 3]) == [1, 2, 3]\nassert collect_leaves(tree) == [1, 2, 3, 4, 5, 6, 7, 8, 9]\n",
"step-5": "\"\"\"\"\r\nYou are given a tree-like data structure represented as nested dictionaries.\r\nImplement a function collect_leaves that accepts a tree and returns a list of all its leaves. A leaf is a bottom-most node in a tree.\r\n\r\nImplement a kind of unit tests via assert operator.\r\n\"\"\"\r\nfrom typing import Union\r\n\r\n\r\ndef collect_leaves(u: Union[dict, list]) -> list:\r\n flatten_list = []\r\n if isinstance(u, dict):\r\n for item in u.values():\r\n flatten_list.extend(collect_leaves(item))\r\n return flatten_list\r\n return u\r\n\r\n\r\ntree = {\r\n \"node1\": {\r\n \"node11\": {\r\n \"node111\": [1, 2, 3],\r\n \"node112\": [4, 5]\r\n },\r\n \"node12\": [6]\r\n },\r\n \"node2\": [7, 8, 9]\r\n}\r\n\r\nassert collect_leaves([1, 2, 3]) == [1, 2, 3]\r\nassert collect_leaves(tree) == [1, 2, 3, 4, 5, 6, 7, 8, 9]\r\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
"""
    pytest.mark.parametrize("parameter name", data list ['123', '34', '567', '78'])
    The list above holds 4 values; once the parameter name is passed to the test case, the values are used in order and the test case is executed 4 times:
    def test001(self, "parameter name")
        assert parameter_name
"""
|
normal
|
{
"blob_id": "24f3284a7a994951a1f0a4ef64c951499bbba1b4",
"index": 6958,
"step-1": "<mask token>\n",
"step-2": "\"\"\"\n pytest.mark.parametrize(“变量参数名称”,变量数据列表[‘123’,‘34’,‘567’,‘78’])\n 上面的变量个数有4个,测试用例传入变量名称后,会依序4次使用变量的数据,执行4次测试用例\n def test001(self,\"变量参数名称\")\n assert 变量名称\n\n\"\"\"",
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0,
1
]
}
|
[
0,
1
] |
import sqlite3
from flask_restful import Resource, reqparse
from flask_jwt import JWT, jwt_required
#import base64
import datetime
import psycopg2
class User:
def __init__(self, _id, username, password, user_name, address, contact):
self.id = _id
self.username = username
self.password = password
self.user_name = user_name
self.address = address
self.contact = contact
@classmethod
def find_by_username(cls, username):
connection = sqlite3.connect('user.db')
cursor = connection.cursor()
query = "SELECT * FROM users WHERE username=?"
result = cursor.execute(query, (username,))
row = result.fetchone()
if row is not None:
user = cls(*row)
else:
user = None
connection.close()
return user
@classmethod
def find_by_id(cls, _id):
connection = sqlite3.connect('user.db')
cursor = connection.cursor()
query = "SELECT * FROM users WHERE id=?"
result = cursor.execute(query, (_id,))
row = result.fetchone()
if row is not None:
user = cls(*row)
else:
user = None
connection.close()
return user
class PresOrder(Resource):
parser = reqparse.RequestParser()
parser.add_argument('username',
type=str,
required=True,
help="This field cannot be left blank.")
parser.add_argument('pres',
type=str,
required=True,
help="This field cannot be left blank.")
#@jwt_required()
def post(self):
data = PresOrder.parser.parse_args()
'''
imgdata = base64.b64decode(data['pres'])
filename = 'pres.jpg'
with open(filename, 'wb') as f:
f.write(imgdata)
'''
connection = sqlite3.connect('order.db')
cursor = connection.cursor()
query = "INSERT INTO presorder VALUES (NULL, ?, ?, 0)"
cursor.execute(query, (data['username'], data['pres']))
connection.commit()
connection.close()
return True, 200
class LandmarkAdd(Resource):
parser = reqparse.RequestParser()
parser.add_argument('landmark_name',
type=str,
required=True,
help="This field cannot be left blank.")
parser.add_argument('landmark_type',
type=str,
required=True,
help="This field cannot be left blank.")
parser.add_argument('latitude',
type=float,
required=True,
help="This field cannot be left blank.")
parser.add_argument('longitude',
type=float,
required=True,
help="This field cannot be left blank.")
#@jwt_required()
def post(self):
data = LandmarkAdd.parser.parse_args()
'''
connection = sqlite3.connect('order.db')
cursor = connection.cursor()
query = "INSERT INTO presorder VALUES (NULL, ?, ?, 0)"
cursor.execute(query, (data['username'], data['pres']))
connection.commit()
connection.close()
'''
print(data)
# connection = psycopg2.connect(user="postgres",
# password="anuj@150100",
# host="127.0.0.1",
# port="5432",
# database="MapifyDb")
#
# cursor = connection.cursor()
#
#
# postgres_insert_query = """ INSERT INTO Landmark(Landmark_name, Landmark_type, Landmark_location) VALUES (%s,%s, Point(%s, %s))"""
# record_to_insert = (data["landmarkName"], data["landmarkType"],[data["latitude"] ,data["longitude"] ])
# cursor.execute(postgres_insert_query, record_to_insert)
# connection.commit()
return True, 200
|
normal
|
{
"blob_id": "84d154afe206fd2c7381a2203affc162c28e21c1",
"index": 5863,
"step-1": "<mask token>\n\n\nclass PresOrder(Resource):\n <mask token>\n parser.add_argument('username', type=str, required=True, help=\n 'This field cannot be left blank.')\n parser.add_argument('pres', type=str, required=True, help=\n 'This field cannot be left blank.')\n\n def post(self):\n data = PresOrder.parser.parse_args()\n \"\"\"\n imgdata = base64.b64decode(data['pres'])\n filename = 'pres.jpg'\n with open(filename, 'wb') as f:\n f.write(imgdata)\n \"\"\"\n connection = sqlite3.connect('order.db')\n cursor = connection.cursor()\n query = 'INSERT INTO presorder VALUES (NULL, ?, ?, 0)'\n cursor.execute(query, (data['username'], data['pres']))\n connection.commit()\n connection.close()\n return True, 200\n\n\nclass LandmarkAdd(Resource):\n parser = reqparse.RequestParser()\n parser.add_argument('landmark_name', type=str, required=True, help=\n 'This field cannot be left blank.')\n parser.add_argument('landmark_type', type=str, required=True, help=\n 'This field cannot be left blank.')\n parser.add_argument('latitude', type=float, required=True, help=\n 'This field cannot be left blank.')\n parser.add_argument('longitude', type=float, required=True, help=\n 'This field cannot be left blank.')\n\n def post(self):\n data = LandmarkAdd.parser.parse_args()\n \"\"\"\n connection = sqlite3.connect('order.db')\n cursor = connection.cursor()\n query = \"INSERT INTO presorder VALUES (NULL, ?, ?, 0)\"\n cursor.execute(query, (data['username'], data['pres']))\n connection.commit()\n connection.close()\n \"\"\"\n print(data)\n return True, 200\n",
"step-2": "<mask token>\n\n\nclass User:\n\n def __init__(self, _id, username, password, user_name, address, contact):\n self.id = _id\n self.username = username\n self.password = password\n self.user_name = user_name\n self.address = address\n self.contact = contact\n <mask token>\n <mask token>\n\n\nclass PresOrder(Resource):\n parser = reqparse.RequestParser()\n parser.add_argument('username', type=str, required=True, help=\n 'This field cannot be left blank.')\n parser.add_argument('pres', type=str, required=True, help=\n 'This field cannot be left blank.')\n\n def post(self):\n data = PresOrder.parser.parse_args()\n \"\"\"\n imgdata = base64.b64decode(data['pres'])\n filename = 'pres.jpg'\n with open(filename, 'wb') as f:\n f.write(imgdata)\n \"\"\"\n connection = sqlite3.connect('order.db')\n cursor = connection.cursor()\n query = 'INSERT INTO presorder VALUES (NULL, ?, ?, 0)'\n cursor.execute(query, (data['username'], data['pres']))\n connection.commit()\n connection.close()\n return True, 200\n\n\nclass LandmarkAdd(Resource):\n parser = reqparse.RequestParser()\n parser.add_argument('landmark_name', type=str, required=True, help=\n 'This field cannot be left blank.')\n parser.add_argument('landmark_type', type=str, required=True, help=\n 'This field cannot be left blank.')\n parser.add_argument('latitude', type=float, required=True, help=\n 'This field cannot be left blank.')\n parser.add_argument('longitude', type=float, required=True, help=\n 'This field cannot be left blank.')\n\n def post(self):\n data = LandmarkAdd.parser.parse_args()\n \"\"\"\n connection = sqlite3.connect('order.db')\n cursor = connection.cursor()\n query = \"INSERT INTO presorder VALUES (NULL, ?, ?, 0)\"\n cursor.execute(query, (data['username'], data['pres']))\n connection.commit()\n connection.close()\n \"\"\"\n print(data)\n return True, 200\n",
"step-3": "<mask token>\n\n\nclass User:\n\n def __init__(self, _id, username, password, user_name, address, contact):\n self.id = _id\n self.username = username\n self.password = password\n self.user_name = user_name\n self.address = address\n self.contact = contact\n\n @classmethod\n def find_by_username(cls, username):\n connection = sqlite3.connect('user.db')\n cursor = connection.cursor()\n query = 'SELECT * FROM users WHERE username=?'\n result = cursor.execute(query, (username,))\n row = result.fetchone()\n if row is not None:\n user = cls(*row)\n else:\n user = None\n connection.close()\n return user\n <mask token>\n\n\nclass PresOrder(Resource):\n parser = reqparse.RequestParser()\n parser.add_argument('username', type=str, required=True, help=\n 'This field cannot be left blank.')\n parser.add_argument('pres', type=str, required=True, help=\n 'This field cannot be left blank.')\n\n def post(self):\n data = PresOrder.parser.parse_args()\n \"\"\"\n imgdata = base64.b64decode(data['pres'])\n filename = 'pres.jpg'\n with open(filename, 'wb') as f:\n f.write(imgdata)\n \"\"\"\n connection = sqlite3.connect('order.db')\n cursor = connection.cursor()\n query = 'INSERT INTO presorder VALUES (NULL, ?, ?, 0)'\n cursor.execute(query, (data['username'], data['pres']))\n connection.commit()\n connection.close()\n return True, 200\n\n\nclass LandmarkAdd(Resource):\n parser = reqparse.RequestParser()\n parser.add_argument('landmark_name', type=str, required=True, help=\n 'This field cannot be left blank.')\n parser.add_argument('landmark_type', type=str, required=True, help=\n 'This field cannot be left blank.')\n parser.add_argument('latitude', type=float, required=True, help=\n 'This field cannot be left blank.')\n parser.add_argument('longitude', type=float, required=True, help=\n 'This field cannot be left blank.')\n\n def post(self):\n data = LandmarkAdd.parser.parse_args()\n \"\"\"\n connection = sqlite3.connect('order.db')\n cursor = connection.cursor()\n query = \"INSERT INTO presorder VALUES (NULL, ?, ?, 0)\"\n cursor.execute(query, (data['username'], data['pres']))\n connection.commit()\n connection.close()\n \"\"\"\n print(data)\n return True, 200\n",
"step-4": "<mask token>\n\n\nclass User:\n\n def __init__(self, _id, username, password, user_name, address, contact):\n self.id = _id\n self.username = username\n self.password = password\n self.user_name = user_name\n self.address = address\n self.contact = contact\n\n @classmethod\n def find_by_username(cls, username):\n connection = sqlite3.connect('user.db')\n cursor = connection.cursor()\n query = 'SELECT * FROM users WHERE username=?'\n result = cursor.execute(query, (username,))\n row = result.fetchone()\n if row is not None:\n user = cls(*row)\n else:\n user = None\n connection.close()\n return user\n\n @classmethod\n def find_by_id(cls, _id):\n connection = sqlite3.connect('user.db')\n cursor = connection.cursor()\n query = 'SELECT * FROM users WHERE id=?'\n result = cursor.execute(query, (_id,))\n row = result.fetchone()\n if row is not None:\n user = cls(*row)\n else:\n user = None\n connection.close()\n return user\n\n\nclass PresOrder(Resource):\n parser = reqparse.RequestParser()\n parser.add_argument('username', type=str, required=True, help=\n 'This field cannot be left blank.')\n parser.add_argument('pres', type=str, required=True, help=\n 'This field cannot be left blank.')\n\n def post(self):\n data = PresOrder.parser.parse_args()\n \"\"\"\n imgdata = base64.b64decode(data['pres'])\n filename = 'pres.jpg'\n with open(filename, 'wb') as f:\n f.write(imgdata)\n \"\"\"\n connection = sqlite3.connect('order.db')\n cursor = connection.cursor()\n query = 'INSERT INTO presorder VALUES (NULL, ?, ?, 0)'\n cursor.execute(query, (data['username'], data['pres']))\n connection.commit()\n connection.close()\n return True, 200\n\n\nclass LandmarkAdd(Resource):\n parser = reqparse.RequestParser()\n parser.add_argument('landmark_name', type=str, required=True, help=\n 'This field cannot be left blank.')\n parser.add_argument('landmark_type', type=str, required=True, help=\n 'This field cannot be left blank.')\n parser.add_argument('latitude', type=float, required=True, help=\n 'This field cannot be left blank.')\n parser.add_argument('longitude', type=float, required=True, help=\n 'This field cannot be left blank.')\n\n def post(self):\n data = LandmarkAdd.parser.parse_args()\n \"\"\"\n connection = sqlite3.connect('order.db')\n cursor = connection.cursor()\n query = \"INSERT INTO presorder VALUES (NULL, ?, ?, 0)\"\n cursor.execute(query, (data['username'], data['pres']))\n connection.commit()\n connection.close()\n \"\"\"\n print(data)\n return True, 200\n",
"step-5": "import sqlite3\nfrom flask_restful import Resource, reqparse\nfrom flask_jwt import JWT, jwt_required\n#import base64\nimport datetime\nimport psycopg2\n\n\n\n\nclass User:\n def __init__(self, _id, username, password, user_name, address, contact):\n self.id = _id\n self.username = username\n self.password = password\n self.user_name = user_name\n self.address = address\n self.contact = contact\n\n\n\n @classmethod\n def find_by_username(cls, username):\n connection = sqlite3.connect('user.db')\n cursor = connection.cursor()\n\n query = \"SELECT * FROM users WHERE username=?\"\n result = cursor.execute(query, (username,))\n row = result.fetchone()\n if row is not None:\n user = cls(*row)\n else:\n user = None\n connection.close()\n return user\n\n\n @classmethod\n def find_by_id(cls, _id):\n connection = sqlite3.connect('user.db')\n cursor = connection.cursor()\n\n query = \"SELECT * FROM users WHERE id=?\"\n result = cursor.execute(query, (_id,))\n row = result.fetchone()\n if row is not None:\n user = cls(*row)\n else:\n user = None\n\n connection.close()\n return user\n\n\n\nclass PresOrder(Resource):\n parser = reqparse.RequestParser()\n parser.add_argument('username',\n type=str,\n required=True,\n help=\"This field cannot be left blank.\")\n\n parser.add_argument('pres',\n type=str,\n required=True,\n help=\"This field cannot be left blank.\")\n #@jwt_required()\n def post(self):\n data = PresOrder.parser.parse_args()\n '''\n imgdata = base64.b64decode(data['pres'])\n filename = 'pres.jpg'\n with open(filename, 'wb') as f:\n f.write(imgdata)\n '''\n connection = sqlite3.connect('order.db')\n cursor = connection.cursor()\n query = \"INSERT INTO presorder VALUES (NULL, ?, ?, 0)\"\n cursor.execute(query, (data['username'], data['pres']))\n connection.commit()\n connection.close()\n return True, 200\n\n\n\nclass LandmarkAdd(Resource):\n parser = reqparse.RequestParser()\n parser.add_argument('landmark_name',\n type=str,\n required=True,\n help=\"This field cannot be left blank.\")\n\n parser.add_argument('landmark_type',\n type=str,\n required=True,\n help=\"This field cannot be left blank.\")\n\n parser.add_argument('latitude',\n type=float,\n required=True,\n help=\"This field cannot be left blank.\")\n\n parser.add_argument('longitude',\n type=float,\n required=True,\n help=\"This field cannot be left blank.\")\n\n #@jwt_required()\n def post(self):\n data = LandmarkAdd.parser.parse_args()\n '''\n connection = sqlite3.connect('order.db')\n cursor = connection.cursor()\n query = \"INSERT INTO presorder VALUES (NULL, ?, ?, 0)\"\n cursor.execute(query, (data['username'], data['pres']))\n connection.commit()\n connection.close()\n '''\n\n print(data)\n # connection = psycopg2.connect(user=\"postgres\",\n # password=\"anuj@150100\",\n # host=\"127.0.0.1\",\n # port=\"5432\",\n # database=\"MapifyDb\")\n #\n # cursor = connection.cursor()\n #\n #\n # postgres_insert_query = \"\"\" INSERT INTO Landmark(Landmark_name, Landmark_type, Landmark_location) VALUES (%s,%s, Point(%s, %s))\"\"\"\n # record_to_insert = (data[\"landmarkName\"], data[\"landmarkType\"],[data[\"latitude\"] ,data[\"longitude\"] ])\n\n # cursor.execute(postgres_insert_query, record_to_insert)\n # connection.commit()\n\n\n return True, 200",
"step-ids": [
5,
8,
9,
10,
12
]
}
|
[
5,
8,
9,
10,
12
] |
# Import the basic workbook class
from openpyxl import Workbook
# Import the enhanced styling features
from openpyxl.styles import Font,Alignment,Border,Side,PatternFill,colors
# import openpyxl
def make_example():
    # Set the output file path
    addr = './example.xlsx'
    # Initialize the workbook and switch to the active worksheet
    work_book = Workbook()
    # To read an existing file instead, use:
    # work_book = openpyxl.load_workbook(addr)
    work_sheet = work_book.active
    # Assign a value to a cell directly
    work_sheet['A1'] = 'Hello World!'
    # Assign by specifying row and column (row 2, column 2)
    select_cell = work_sheet.cell(row=2,column=2,value='I select this cell')
    # Append two rows of data to the sheet
    work_sheet.append(['The quick brown fox',' jumps over ','a lazy dog.'])
    work_sheet.append(['The quick brown fox',' ',' jumps over ','a lazy dog.'])
    # Merge two cell ranges as a demonstration
    work_sheet.merge_cells('A3:B3')
    work_sheet.merge_cells('A4:B4')
    # Initialize the font (宋体 = SimSun)
    SIMSUN_20_BOLD = Font(name='宋体',size=12,bold=True)
    # Initialize the cell alignment template
    CENTER_ALIGN = Alignment(horizontal='center',vertical='center')
    # Initialize the cell border style
    LE,RI,TO,BO = [Side(style='thin',color='000000')]*4
    THIN_BORDER = Border(left=LE,right=RI,top=TO,bottom=BO)
    # Iterate over the sheet and apply the styles to its cells
    for row in work_sheet['A1:D4']:
        for cell in row:
            cell.font = SIMSUN_20_BOLD
            cell.alignment = CENTER_ALIGN
            cell.border = THIN_BORDER
            # print(cell.value)
    # Set row heights
    work_sheet.row_dimensions[1].height=15
    work_sheet.row_dimensions[2].height=20
    for row_letter in range(3,5,1):
        work_sheet.row_dimensions[row_letter].height=17
    # Set column widths
    for col_letter in ['A','B']:
        work_sheet.column_dimensions[col_letter].width=20
    work_sheet.column_dimensions['C'].width=17
    work_sheet.column_dimensions['D'].width=25
    # Set colors: orange font on a black fill for cell A1
    COLOR_MAP = ['ff9900','000000']
    COLOR_SIMSUN_20_BOLD = Font(name='宋体',size=12,bold=True,color=COLOR_MAP[0])
    BG_FILL = PatternFill('solid', fgColor=COLOR_MAP[1])
    work_sheet['A1'].font = COLOR_SIMSUN_20_BOLD
    work_sheet['A1'].fill = BG_FILL
    # Save to the path set above
    work_book.save(addr)

if __name__ == "__main__":
    make_example()
|
normal
|
{
"blob_id": "d7524a455e62594e321b67f0a32a5c3a7437c1d6",
"index": 1093,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef make_example():\n addr = './example.xlsx'\n work_book = Workbook()\n work_sheet = work_book.active\n work_sheet['A1'] = 'Hello World!'\n select_cell = work_sheet.cell(row=2, column=2, value='I select this cell')\n work_sheet.append(['The quick brown fox', ' jumps over ', 'a lazy dog.'])\n work_sheet.append(['The quick brown fox', ' ', ' jumps over ',\n 'a lazy dog.'])\n work_sheet.merge_cells('A3:B3')\n work_sheet.merge_cells('A4:B4')\n SIMSUN_20_BOLD = Font(name='宋体', size=12, bold=True)\n CENTER_ALIGN = Alignment(horizontal='center', vertical='center')\n LE, RI, TO, BO = [Side(style='thin', color='000000')] * 4\n THIN_BORDER = Border(left=LE, right=RI, top=TO, bottom=BO)\n for row in work_sheet['A1:D4']:\n for cell in row:\n cell.font = SIMSUN_20_BOLD\n cell.alignment = CENTER_ALIGN\n cell.border = THIN_BORDER\n work_sheet.row_dimensions[1].height = 15\n work_sheet.row_dimensions[2].height = 20\n for row_letter in range(3, 5, 1):\n work_sheet.row_dimensions[row_letter].height = 17\n for col_letter in ['A', 'B']:\n work_sheet.column_dimensions[col_letter].width = 20\n work_sheet.column_dimensions['C'].width = 17\n work_sheet.column_dimensions['D'].width = 25\n COLOR_MAP = ['ff9900', '000000']\n COLOR_SIMSUN_20_BOLD = Font(name='宋体', size=12, bold=True, color=\n COLOR_MAP[0])\n BG_FILL = PatternFill('solid', fgColor=COLOR_MAP[1])\n work_sheet['A1'].font = COLOR_SIMSUN_20_BOLD\n work_sheet['A1'].fill = BG_FILL\n work_book.save(addr)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef make_example():\n addr = './example.xlsx'\n work_book = Workbook()\n work_sheet = work_book.active\n work_sheet['A1'] = 'Hello World!'\n select_cell = work_sheet.cell(row=2, column=2, value='I select this cell')\n work_sheet.append(['The quick brown fox', ' jumps over ', 'a lazy dog.'])\n work_sheet.append(['The quick brown fox', ' ', ' jumps over ',\n 'a lazy dog.'])\n work_sheet.merge_cells('A3:B3')\n work_sheet.merge_cells('A4:B4')\n SIMSUN_20_BOLD = Font(name='宋体', size=12, bold=True)\n CENTER_ALIGN = Alignment(horizontal='center', vertical='center')\n LE, RI, TO, BO = [Side(style='thin', color='000000')] * 4\n THIN_BORDER = Border(left=LE, right=RI, top=TO, bottom=BO)\n for row in work_sheet['A1:D4']:\n for cell in row:\n cell.font = SIMSUN_20_BOLD\n cell.alignment = CENTER_ALIGN\n cell.border = THIN_BORDER\n work_sheet.row_dimensions[1].height = 15\n work_sheet.row_dimensions[2].height = 20\n for row_letter in range(3, 5, 1):\n work_sheet.row_dimensions[row_letter].height = 17\n for col_letter in ['A', 'B']:\n work_sheet.column_dimensions[col_letter].width = 20\n work_sheet.column_dimensions['C'].width = 17\n work_sheet.column_dimensions['D'].width = 25\n COLOR_MAP = ['ff9900', '000000']\n COLOR_SIMSUN_20_BOLD = Font(name='宋体', size=12, bold=True, color=\n COLOR_MAP[0])\n BG_FILL = PatternFill('solid', fgColor=COLOR_MAP[1])\n work_sheet['A1'].font = COLOR_SIMSUN_20_BOLD\n work_sheet['A1'].fill = BG_FILL\n work_book.save(addr)\n\n\nif __name__ == '__main__':\n make_example()\n",
"step-4": "from openpyxl import Workbook\nfrom openpyxl.styles import Font, Alignment, Border, Side, PatternFill, colors\n\n\ndef make_example():\n addr = './example.xlsx'\n work_book = Workbook()\n work_sheet = work_book.active\n work_sheet['A1'] = 'Hello World!'\n select_cell = work_sheet.cell(row=2, column=2, value='I select this cell')\n work_sheet.append(['The quick brown fox', ' jumps over ', 'a lazy dog.'])\n work_sheet.append(['The quick brown fox', ' ', ' jumps over ',\n 'a lazy dog.'])\n work_sheet.merge_cells('A3:B3')\n work_sheet.merge_cells('A4:B4')\n SIMSUN_20_BOLD = Font(name='宋体', size=12, bold=True)\n CENTER_ALIGN = Alignment(horizontal='center', vertical='center')\n LE, RI, TO, BO = [Side(style='thin', color='000000')] * 4\n THIN_BORDER = Border(left=LE, right=RI, top=TO, bottom=BO)\n for row in work_sheet['A1:D4']:\n for cell in row:\n cell.font = SIMSUN_20_BOLD\n cell.alignment = CENTER_ALIGN\n cell.border = THIN_BORDER\n work_sheet.row_dimensions[1].height = 15\n work_sheet.row_dimensions[2].height = 20\n for row_letter in range(3, 5, 1):\n work_sheet.row_dimensions[row_letter].height = 17\n for col_letter in ['A', 'B']:\n work_sheet.column_dimensions[col_letter].width = 20\n work_sheet.column_dimensions['C'].width = 17\n work_sheet.column_dimensions['D'].width = 25\n COLOR_MAP = ['ff9900', '000000']\n COLOR_SIMSUN_20_BOLD = Font(name='宋体', size=12, bold=True, color=\n COLOR_MAP[0])\n BG_FILL = PatternFill('solid', fgColor=COLOR_MAP[1])\n work_sheet['A1'].font = COLOR_SIMSUN_20_BOLD\n work_sheet['A1'].fill = BG_FILL\n work_book.save(addr)\n\n\nif __name__ == '__main__':\n make_example()\n",
"step-5": "# 引入基础的工作表\r\nfrom openpyxl import Workbook \r\n# 引入增强的修改功能\r\nfrom openpyxl.styles import Font,Alignment,Border,Side,PatternFill,colors\r\n# import openpyxl\r\ndef make_example():\r\n # 设定文件目录\r\n addr = './example.xlsx'\r\n # 初始化文件,切换到活动的工作表\r\n work_book = Workbook()\r\n # 读取文件采用\r\n # work_book = openpyxl.load_workbook(addr)\r\n work_sheet = work_book.active\r\n # 直接对表格对象赋值\r\n work_sheet['A1'] = 'Hello World!'\r\n # 采用指定行列的方法赋值(第2行,第二列)\r\n select_cell = work_sheet.cell(row=2,column=2,value='I select this cell')\r\n # 添加两行数据到表格\r\n work_sheet.append(['The quick brown fox',' jumps over ','a lazy dog.'])\r\n work_sheet.append(['The quick brown fox',' ',' jumps over ','a lazy dog.'])\r\n # 合并两个单元格作为示范\r\n work_sheet.merge_cells('A3:B3')\r\n work_sheet.merge_cells('A4:B4')\r\n # 遍历表格,读取表格中的数据\r\n # 初始化字体\r\n SIMSUN_20_BOLD = Font(name='宋体',size=12,bold=True)\r\n # 初始化表格对齐模板\r\n CENTER_ALIGN = Alignment(horizontal='center',vertical='center')\r\n # 初始化表格边框样式\r\n LE,RI,TO,BO = [Side(style='thin',color='000000')]*4\r\n THIN_BORDER = Border(left=LE,right=RI,top=TO,bottom=BO)\r\n # 遍历表格,读取表格中的数据\r\n for row in work_sheet['A1:D4']:\r\n for cell in row:\r\n # 把样式赋值给表格\r\n cell.font = SIMSUN_20_BOLD\r\n cell.alignment = CENTER_ALIGN\r\n cell.border = THIN_BORDER\r\n # print(cell.value)\r\n # 设置行高\r\n work_sheet.row_dimensions[1].height=15\r\n work_sheet.row_dimensions[2].height=20\r\n for row_letter in range(3,5,1):\r\n work_sheet.row_dimensions[row_letter].height=17\r\n # 设置列宽\r\n for col_letter in ['A','B']:\r\n work_sheet.column_dimensions[col_letter].width=20\r\n work_sheet.column_dimensions['C'].width=17\r\n work_sheet.column_dimensions['D'].width=25\r\n # 设置颜色\r\n COLOR_MAP = ['ff9900','000000']\r\n COLOR_SIMSUN_20_BOLD = Font(name='宋体',size=12,bold=True,color=COLOR_MAP[0])\r\n BG_FILL = PatternFill('solid', fgColor=COLOR_MAP[1]) \r\n work_sheet['A1'].font = COLOR_SIMSUN_20_BOLD\r\n work_sheet['A1'].fill = BG_FILL\r\n # 保存到设定的addr\r\n work_book.save(addr)\r\n\r\nif __name__ == \"__main__\":\r\n make_example()",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
import numpy.random as rnd
import numpy as np

B = 100000  # number of parametric-bootstrap replicates
N1 = 50
N2 = 50

# maximum-likelihood estimates of the two proportions and their difference
p1mle = 0.3
p2mle = 0.4
taumle = p2mle - p1mle

estimate = []
for i in range(B):
    # resample N1 Bernoulli(p1mle) draws and take the sample proportion
    p1 = 0.0
    for j in range(N1):
        if rnd.uniform(0, 1) < p1mle:
            p1 += 1
    p1 /= N1
    # resample N2 Bernoulli(p2mle) draws and take the sample proportion
    p2 = 0.0
    for j in range(N2):
        if rnd.uniform(0, 1) < p2mle:
            p2 += 1
    p2 /= N2
    estimate.append(p2 - p1)

estimate = np.array(estimate)

# candidate thresholds from -50 to 50 in steps of 0.01
allt = [0.01 * t for t in range(-5000, 5000)]

target = 0.95
tol = 0.01

# scan for the first t whose bootstrap CDF value is within tol of the target
for t in allt:
    cur = np.mean(np.sqrt(N1 + N2) * (estimate - taumle) < t)
    if np.abs(target - cur) < tol:
        print(t)
        print(cur)
        break
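
# A vectorized cross-check (an added sketch, not part of the original code):
# numpy's binomial sampler draws all B replicates at once, replacing the
# double loop above; `estimate_vec` is a hypothetical name.
estimate_vec = rnd.binomial(N2, p2mle, B) / N2 - rnd.binomial(N1, p1mle, B) / N1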
|
normal
|
{
"blob_id": "0db0daf9bea254cffaec1280cd13b2d70368cd94",
"index": 289,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nfor i in range(B):\n p1 = 0.0\n for j in range(N1):\n if rnd.uniform(0, 1) < p1mle:\n p1 += 1\n p1 /= N1\n p2 = 0.0\n for j in range(N2):\n if rnd.uniform(0, 1) < p2mle:\n p2 += 1\n p2 /= N2\n estimate.append(p2 - p1)\n<mask token>\nfor t in allt:\n cur = np.mean(np.sqrt(N1 + N2) * (estimate - taumle) < t)\n if np.abs(target - cur) < tol:\n print(t)\n print(cur)\n break\n",
"step-3": "<mask token>\nB = 100000\nN1 = 50\nN2 = 50\np1mle = 0.3\np2mle = 0.4\ntaumle = p2mle - p1mle\nestimate = []\nfor i in range(B):\n p1 = 0.0\n for j in range(N1):\n if rnd.uniform(0, 1) < p1mle:\n p1 += 1\n p1 /= N1\n p2 = 0.0\n for j in range(N2):\n if rnd.uniform(0, 1) < p2mle:\n p2 += 1\n p2 /= N2\n estimate.append(p2 - p1)\nt = -10\nestimate = np.array(estimate)\nallt = [(0.01 * t) for t in xrange(-5000, 5000)]\ntarget = 0.95\ntol = 0.01\nfor t in allt:\n cur = np.mean(np.sqrt(N1 + N2) * (estimate - taumle) < t)\n if np.abs(target - cur) < tol:\n print(t)\n print(cur)\n break\n",
"step-4": "import numpy.random as rnd\nimport numpy as np\nB = 100000\nN1 = 50\nN2 = 50\np1mle = 0.3\np2mle = 0.4\ntaumle = p2mle - p1mle\nestimate = []\nfor i in range(B):\n p1 = 0.0\n for j in range(N1):\n if rnd.uniform(0, 1) < p1mle:\n p1 += 1\n p1 /= N1\n p2 = 0.0\n for j in range(N2):\n if rnd.uniform(0, 1) < p2mle:\n p2 += 1\n p2 /= N2\n estimate.append(p2 - p1)\nt = -10\nestimate = np.array(estimate)\nallt = [(0.01 * t) for t in xrange(-5000, 5000)]\ntarget = 0.95\ntol = 0.01\nfor t in allt:\n cur = np.mean(np.sqrt(N1 + N2) * (estimate - taumle) < t)\n if np.abs(target - cur) < tol:\n print(t)\n print(cur)\n break\n",
"step-5": "import numpy.random as rnd\nimport numpy as np\n\nB=100000\nN1=50\nN2=50\n\np1mle=0.3\t\np2mle=0.4\ntaumle=p2mle-p1mle\n\nestimate=[]\n\nfor i in range(B):\n\n\tp1=0.0\n\tfor j in range(N1):\n\t\tif(rnd.uniform(0,1)<p1mle):\n\t\t\tp1+=1\n\n\tp1/=N1\n\n\tp2=0.0\n\tfor j in range(N2):\n\t\tif(rnd.uniform(0,1)<p2mle):\n\t\t\tp2+=1\n\n\tp2/=N2\n\n\testimate.append(p2-p1)\n\nt=-10\n\nestimate=np.array(estimate)\n\nallt=[0.01*t for t in xrange(-5000,5000)]\n\ntarget=0.95\ntol=0.01\n\nfor t in allt:\n\tcur=np.mean(np.sqrt(N1+N2)*(estimate-taumle)<t)\n\tif(np.abs(target-cur)<tol):\n\t\tprint(t)\n\t\tprint(cur)\n\t\tbreak",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |